diff --git a/HISTORY.md b/HISTORY.md
index d07bf48fd..bd2ec880c 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -8,6 +8,7 @@ Specifically, the following measure-space optimization algorithms have been adde
   - `KLMinWassFwdBwd`
   - `KLMinNaturalGradDescent`
   - `KLMinSqrtNaturalGradDescent`
+  - `FisherMinBatchMatch`
 
 ## Interface Change
 
diff --git a/docs/make.jl b/docs/make.jl
index b28e42cad..34c0080d3 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -29,6 +29,7 @@ makedocs(;
             "`KLMinWassFwdBwd`" => "klminwassfwdbwd.md",
             "`KLMinNaturalGradDescent`" => "klminnaturalgraddescent.md",
             "`KLMinSqrtNaturalGradDescent`" => "klminsqrtnaturalgraddescent.md",
+            "`FisherMinBatchMatch`" => "fisherminbatchmatch.md",
         ],
         "Variational Families" => "families.md",
         "Optimization" => "optimization.md",
diff --git a/docs/src/fisherminbatchmatch.md b/docs/src/fisherminbatchmatch.md
new file mode 100644
index 000000000..dae9b167f
--- /dev/null
+++ b/docs/src/fisherminbatchmatch.md
@@ -0,0 +1,61 @@
+# [`FisherMinBatchMatch`](@id fisherminbatchmatch)
+
+## Description
+
+This algorithm, known as batch-and-match (BaM), aims to minimize the covariance-weighted 2nd-order Fisher divergence by running a proximal point-type method[^CMPMGBS24].
+On certain low-dimensional problems, BaM can converge very quickly without any tuning.
+Since `FisherMinBatchMatch` is a measure-space algorithm, its use is restricted to full-rank Gaussian variational families (`FullRankGaussian`), which make the measure-space operations tractable.
+
+```@docs
+FisherMinBatchMatch
+```
+
+The associated objective value can be estimated through the following:
+
+```@docs; canonical=false
+estimate_objective(
+    ::Random.AbstractRNG,
+    ::FisherMinBatchMatch,
+    ::MvLocationScale,
+    ::Any;
+    ::Int,
+)
+```
+
+[^CMPMGBS24]: Cai, D., Modi, C., Pillaud-Vivien, L., Margossian, C. C., Gower, R. M., Blei, D. M., & Saul, L. K. (2024). Batch and match: black-box variational inference with a score-based divergence. In *Proceedings of the International Conference on Machine Learning*.
+
+## [Methodology](@id fisherminbatchmatch_method)
+
+This algorithm aims to solve the problem
+
+```math
+  \mathrm{minimize}_{q \in \mathcal{Q}}\quad \mathrm{F}_{\mathrm{cov}}(q, \pi),
+```
+
+where $\mathcal{Q}$ is some family of distributions, often called the variational family, and $\mathrm{F}_{\mathrm{cov}}$ is a divergence defined as
+
+```math
+\mathrm{F}_{\mathrm{cov}}(q, \pi) = \mathbb{E}_{z \sim q} {\left\lVert \nabla \log \frac{q}{\pi} (z) \right\rVert}_{\mathrm{Cov}(q)}^2 ,
+```
+
+where ${\lVert x \rVert}_{A}^2 = x^{\top} A x$ is a weighted norm.
+$\mathrm{F}_{\mathrm{cov}}$ can be viewed as a variant of the canonical 2nd-order Fisher divergence defined as
+
+```math
+\mathrm{F}_{2}(q, \pi) = \sqrt{ \mathbb{E}_{z \sim q} {\left\lVert \nabla \log \frac{q}{\pi} (z) \right\rVert}^2 }.
+```
+
+The use of the weighted norm ${\lVert \cdot \rVert}_{\mathrm{Cov}(q)}^2$ in place of the Euclidean norm is what makes a proximal point-type method for minimizing the Fisher divergence tractable.
+In particular, BaM iterates the update
+
+```math
+  q_{t+1} = \argmin_{q \in \mathcal{Q}} \left\{ \mathrm{F}_{\mathrm{cov}}(q, \pi) + \frac{2}{\lambda_t} \mathrm{KL}\left(q_t, q\right) \right\} .
+```
+
+Since $\mathrm{F}_{\mathrm{cov}}(q, \pi)$ is intractable, it is replaced with a Monte Carlo approximation using `n_samples` samples.
+Furthermore, by restricting $\mathcal{Q}$ to a Gaussian variational family, the update rule admits a closed-form solution[^CMPMGBS24].
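+Concretely, for a full-rank Gaussian $q_t = \mathcal{N}(\mu_t, \Sigma_t)$, the closed-form update used by this implementation (a transcription of `step` in `src/algorithms/fisherminbatchmatch.jl`; the notation below is introduced only for exposition) reads
+
+```math
+\begin{aligned}
+U_t &= \lambda_t \widehat{\mathrm{Cov}}(g) + \frac{\lambda_t}{1 + \lambda_t} \bar{g} {\bar{g}}^{\top}, \\
+V_t &= \Sigma_t + \lambda_t \widehat{\mathrm{Cov}}(z) + \frac{\lambda_t}{1 + \lambda_t} (\mu_t - \bar{z}) {(\mu_t - \bar{z})}^{\top}, \\
+\Sigma_{t+1} &= 2 V_t {\left( I + {\left( I + 4 U_t V_t \right)}^{1/2} \right)}^{-1}, \\
+\mu_{t+1} &= \frac{1}{1 + \lambda_t} \mu_t + \frac{\lambda_t}{1 + \lambda_t} \left( \Sigma_{t+1} \bar{g} + \bar{z} \right),
+\end{aligned}
+```
+
+where $z_1, \ldots, z_B \sim q_t$ are the Monte Carlo samples, $g_b = \nabla \log \pi(z_b)$ are their scores, $\bar{z}, \bar{g}, \widehat{\mathrm{Cov}}(z), \widehat{\mathrm{Cov}}(g)$ are the corresponding sample means and covariances, and $\lambda_t$ is the proximal step size, set to $d B / t$ with $d$ the problem dimension and $B$ equal to `n_samples`.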
+Notice that the update does not involve the parameterization of $q_t$, which makes `FisherMinBatchMatch` a measure-space algorithm.
+
+Historically, the idea of using a proximal point-type update to minimize a Fisher divergence-like objective was first introduced under the name Gaussian score matching[^MGMYBS23].
+BaM can be viewed as a successor to this algorithm.
+
+[^MGMYBS23]: Modi, C., Gower, R., Margossian, C., Yao, Y., Blei, D., & Saul, L. (2023). Variational inference with Gaussian score matching. In *Advances in Neural Information Processing Systems*, 36.
diff --git a/docs/src/index.md b/docs/src/index.md
index 8fafa6e51..234c28d9f 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -20,3 +20,4 @@ For using the algorithms implemented in `AdvancedVI`, refer to the corresponding
   - [KLMinNaturalGradDescent](@ref klminnaturalgraddescent)
   - [KLMinSqrtNaturalGradDescent](@ref klminsqrtnaturalgraddescent)
   - [KLMinWassFwdBwd](@ref klminwassfwdbwd)
+  - [FisherMinBatchMatch](@ref fisherminbatchmatch)
diff --git a/src/AdvancedVI.jl b/src/AdvancedVI.jl
index d9f1fb26f..1d07fd975 100644
--- a/src/AdvancedVI.jl
+++ b/src/AdvancedVI.jl
@@ -358,7 +358,9 @@ include("algorithms/gauss_expected_grad_hess.jl")
 include("algorithms/klminwassfwdbwd.jl")
 include("algorithms/klminsqrtnaturalgraddescent.jl")
 include("algorithms/klminnaturalgraddescent.jl")
+include("algorithms/fisherminbatchmatch.jl")
 
-export KLMinWassFwdBwd, KLMinSqrtNaturalGradDescent, KLMinNaturalGradDescent
+export KLMinWassFwdBwd,
+    KLMinSqrtNaturalGradDescent, KLMinNaturalGradDescent, FisherMinBatchMatch
 
 end
diff --git a/src/algorithms/fisherminbatchmatch.jl b/src/algorithms/fisherminbatchmatch.jl
new file mode 100644
index 000000000..b794a12af
--- /dev/null
+++ b/src/algorithms/fisherminbatchmatch.jl
@@ -0,0 +1,195 @@
+
+"""
+    FisherMinBatchMatch(n_samples, subsampling)
+    FisherMinBatchMatch(; n_samples, subsampling)
+
+Covariance-weighted Fisher divergence minimization via the batch-and-match (BaM) algorithm, a proximal point-type optimization scheme.
+
+# (Keyword) Arguments
+- `n_samples::Int`: Number of samples (batch size) used to compute the moments required for the batch-and-match update. (default: `32`)
+- `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. (default: `nothing`)
+
+!!! warning
+    `FisherMinBatchMatch` with subsampling enabled results in a biased algorithm and may not properly optimize the covariance-weighted Fisher divergence.
+
+!!! note
+    `FisherMinBatchMatch` requires a sufficiently large `n_samples` to converge quickly.
+
+!!! note
+    The `subsampling` strategy is only applied to the target `LogDensityProblem` but not to the variational approximation `q`. That is, `FisherMinBatchMatch` does not support amortization or structured variational families.
+
+# Output
+- `q`: The last iterate of the algorithm.
+
+# Callback Signature
+The `callback` function supplied to `optimize` needs to have the following signature:
+
+    callback(; rng, iteration, q, info)
+
+The keyword arguments are as follows:
+- `rng`: Random number generator internally used by the algorithm.
+- `iteration`: The index of the current iteration.
+- `q`: Current variational approximation.
+- `info`: `NamedTuple` containing the information generated during the current iteration.
+
+# Requirements
+- The variational family is [`FullRankGaussian`](@ref FullRankGaussian).
+- The target distribution has unconstrained support.
+- The target `LogDensityProblems.logdensity(prob, x)` has at least first-order differentiation capability.
+"""
+@kwdef struct FisherMinBatchMatch{Sub<:Union{Nothing,<:AbstractSubsampling}} <:
+              AbstractVariationalAlgorithm
+    n_samples::Int = 32
+    subsampling::Sub = nothing
+end
+
+struct BatchMatchState{Q,P,Sigma,Sub,UBuf,GradBuf}
+    q::Q
+    prob::P
+    sigma::Sigma
+    iteration::Int
+    sub_st::Sub
+    u_buf::UBuf
+    grad_buf::GradBuf
+end
+
+function init(
+    rng::Random.AbstractRNG,
+    alg::FisherMinBatchMatch,
+    q::MvLocationScale{<:LowerTriangular,<:Normal,L},
+    prob,
+) where {L}
+    (; n_samples, subsampling) = alg
+    capability = LogDensityProblems.capabilities(typeof(prob))
+    if capability < LogDensityProblems.LogDensityOrder{1}()
+        throw(
+            ArgumentError(
+                "`FisherMinBatchMatch` requires at least first-order differentiation capability. The capability of the supplied `LogDensityProblem` is $(capability).",
+            ),
+        )
+    end
+    sub_st = isnothing(subsampling) ? nothing : init(rng, subsampling)
+    params, _ = Optimisers.destructure(q)
+    n_dims = LogDensityProblems.dimension(prob)
+    u_buf = Matrix{eltype(params)}(undef, n_dims, n_samples)
+    grad_buf = Matrix{eltype(params)}(undef, n_dims, n_samples)
+    return BatchMatchState(q, prob, cov(q), 0, sub_st, u_buf, grad_buf)
+end
+
+output(::FisherMinBatchMatch, state) = state.q
+
+function rand_batch_match_samples_with_objective!(
+    rng::Random.AbstractRNG,
+    q::MvLocationScale,
+    n_samples::Int,
+    prob,
+    u_buf=Matrix{eltype(q)}(undef, LogDensityProblems.dimension(prob), n_samples),
+    grad_buf=Matrix{eltype(q)}(undef, LogDensityProblems.dimension(prob), n_samples),
+)
+    μ = q.location
+    C = q.scale
+    u = Random.randn!(rng, u_buf)
+    z = C*u .+ μ
+    logπ_sum = zero(eltype(μ))
+    for b in 1:n_samples
+        logπb, gb = LogDensityProblems.logdensity_and_gradient(prob, view(z, :, b))
+        grad_buf[:, b] = gb
+        logπ_sum += logπb
+    end
+    logπ_avg = logπ_sum/n_samples
+
+    # Estimate objective values
+    #
+    #   F = E[| ∇log(q/π) (z) |_{CC'}^2]                       (definition)
+    #     = E[| C' (∇logq(z) - ∇logπ(z)) |^2]                  (Σ = CC')
+    #     = E[| C' ( -(CC')\((Cu + μ) - μ) - ∇logπ(z)) |^2]    (z = Cu + μ)
+    #     = E[| C' ( -(CC')\(Cu) - ∇logπ(z)) |^2]
+    #     = E[| -u - C'∇logπ(z) |^2]
+    fisher = sum(abs2, -u_buf - (C'*grad_buf))/n_samples
+
+    return u_buf, z, grad_buf, fisher, logπ_avg
+end
+
+function step(
+    rng::Random.AbstractRNG,
+    alg::FisherMinBatchMatch,
+    state,
+    callback,
+    objargs...;
+    kwargs...,
+)
+    (; n_samples, subsampling) = alg
+    (; q, prob, sigma, iteration, sub_st, u_buf, grad_buf) = state
+
+    d = LogDensityProblems.dimension(prob)
+    μ = q.location
+    C = q.scale
+    Σ = sigma
+    iteration += 1
+
+    # Maybe apply subsampling
+    prob_sub, sub_st′, sub_inf = if isnothing(subsampling)
+        prob, sub_st, NamedTuple()
+    else
+        batch, sub_st′, sub_inf = step(rng, subsampling, sub_st)
+        prob_sub = subsample(prob, batch)
+        prob_sub, sub_st′, sub_inf
+    end
+
+    u_buf, z, grad_buf, fisher, logπ_avg = rand_batch_match_samples_with_objective!(
+        rng, q, n_samples, prob_sub, u_buf, grad_buf
+    )
+
+    # BaM updates
+    zbar, C = mean_and_cov(z, 2)
+    gbar, Γ = mean_and_cov(grad_buf, 2)
+
+    μmz = μ - zbar
+    λ = convert(eltype(μ), d*n_samples / iteration)
+
+    U = Symmetric(λ*Γ + (λ/(1 + λ)*gbar)*gbar')
+    V = Symmetric(Σ + λ*C + (λ/(1 + λ)*μmz)*μmz')
+
+    Σ′ = Hermitian(2*V/(I + real(sqrt(I + 4*U*V))))
+    μ′ = 1/(1 + λ)*μ + λ/(1 + λ)*(Σ′*gbar + zbar)
+    q′ = MvLocationScale(μ′[:, 1], cholesky(Σ′).L, q.dist)
+
+    elbo = logπ_avg + entropy(q)
+    info = (iteration=iteration, covweighted_fisher=fisher, elbo=elbo)
+
+    state = BatchMatchState(q′, prob, Σ′, iteration, sub_st′, u_buf, grad_buf)
+
+    if !isnothing(callback)
+        info′ = callback(; rng, iteration, q, info)
+        info = !isnothing(info′) ? merge(info′, info) : info
+    end
+    state, false, info
+end
+
+"""
+    estimate_objective([rng,] alg, q, prob; n_samples)
+
+Estimate the covariance-weighted Fisher divergence of the variational approximation `q` against the target log-density `prob`.
+
+# Arguments
+- `rng::Random.AbstractRNG`: Random number generator.
+- `alg::FisherMinBatchMatch`: Variational inference algorithm.
+- `q::MvLocationScale{<:Any,<:Normal,<:Any}`: Gaussian variational approximation.
+- `prob`: The target log-joint likelihood implementing the `LogDensityProblem` interface.
+
+# Keyword Arguments
+- `n_samples::Int`: Number of Monte Carlo samples for estimating the objective. (default: Same as the number of samples `alg.n_samples` used by the algorithm during optimization.)
+
+# Returns
+- `obj_est`: Estimate of the objective value.
+"""
+function estimate_objective(
+    rng::Random.AbstractRNG,
+    alg::FisherMinBatchMatch,
+    q::MvLocationScale{S,<:Normal,L},
+    prob;
+    n_samples::Int=alg.n_samples,
+) where {S,L}
+    _, _, _, fisher, _ = rand_batch_match_samples_with_objective!(rng, q, n_samples, prob)
+    return fisher
+end
diff --git a/test/algorithms/fisherminbatchmatch.jl b/test/algorithms/fisherminbatchmatch.jl
new file mode 100644
index 000000000..3880b1c9d
--- /dev/null
+++ b/test/algorithms/fisherminbatchmatch.jl
@@ -0,0 +1,142 @@
+
+@testset "FisherMinBatchMatch" begin
+    begin
+        modelstats = normal_meanfield(Random.default_rng(), Float64; capability=2)
+        (; model, n_dims, μ_true, L_true) = modelstats
+
+        alg = FisherMinBatchMatch()
+        L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims))
+        q0 = FullRankGaussian(zeros(Float64, n_dims), L0)
+
+        @testset "callback" begin
+            T = 10
+            callback(; iteration, kwargs...) = (iteration_check=iteration,)
+            _, info, _ = optimize(alg, T, model, q0; callback, show_progress=PROGRESS)
+            @test [i.iteration_check for i in info] == 1:T
+        end
+
+        @testset "estimate_objective" begin
+            q_true = FullRankGaussian(μ_true, LowerTriangular(Matrix(L_true)))
+
+            obj_est = estimate_objective(alg, q_true, model)
+            @test isfinite(obj_est)
+
+            obj_est = estimate_objective(alg, q_true, model; n_samples=10^6)
+            @test obj_est ≈ 0 atol=1e-2
+        end
+
+        @testset "determinism" begin
+            seed = (0x38bef07cf9cc549d)
+            rng = StableRNG(seed)
+            T = 10
+
+            q_avg, _, _ = optimize(rng, alg, T, model, q0; show_progress=PROGRESS)
+            μ = q_avg.location
+            L = q_avg.scale
+
+            rng_repl = StableRNG(seed)
+            q_avg, _, _ = optimize(rng_repl, alg, T, model, q0; show_progress=PROGRESS)
+            μ_repl = q_avg.location
+            L_repl = q_avg.scale
+            @test μ == μ_repl
+            @test L == L_repl
+        end
+    end
+
+    @testset "error low capability" begin
+        modelstats = normal_meanfield(Random.default_rng(), Float64; capability=0)
+        (; model, n_dims) = modelstats
+
+        alg = FisherMinBatchMatch()
+
+        L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims))
+        q0 = FullRankGaussian(zeros(Float64, n_dims), L0)
+        @test_throws "first-order" optimize(alg, 1, model, q0)
+    end
+
+    @testset "type stability type=$(realtype), capability=$(capability)" for realtype in [
+            Float64, Float32
+        ],
+        capability in [1, 2]
+
+        modelstats = normal_meanfield(Random.default_rng(), realtype; capability)
+        (; model, μ_true, L_true, n_dims, strong_convexity, is_meanfield) = modelstats
+
+        alg = FisherMinBatchMatch()
+        T = 10
+
+        L0 = LowerTriangular(Matrix{realtype}(I, n_dims, n_dims))
+        q0 = FullRankGaussian(zeros(realtype, n_dims), L0)
+
+        q, _, _ = optimize(alg, T, model, q0; show_progress=PROGRESS)
+
+        @test eltype(q.location) == eltype(μ_true)
+        @test eltype(q.scale) == eltype(L_true)
+    end
+
+    @testset "convergence" begin
+        modelstats = normal_meanfield(Random.default_rng(), Float64)
+        (; model, μ_true, L_true, n_dims, strong_convexity, is_meanfield) = modelstats
+
+        T = 1000
+        alg = FisherMinBatchMatch()
+
+        q_avg, _, _ = optimize(alg, T, model, q0; show_progress=PROGRESS)
+
+        Δλ0 = sum(abs2, q0.location - μ_true) + sum(abs2, q0.scale - L_true)
+        Δλ = sum(abs2, q_avg.location - μ_true) + sum(abs2, q_avg.scale - L_true)
+
+        @test Δλ ≤ Δλ0/2
+    end
+
+    @testset "subsampling" begin
+        n_data = 8
+
+        @testset "determinism" begin
+            seed = (0x38bef07cf9cc549d)
+            rng = StableRNG(seed)
+
+            modelstats = subsamplednormal(Random.default_rng(), n_data)
+            (; model, n_dims, μ_true, L_true) = modelstats
+
+            L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims))
+            q0 = FullRankGaussian(zeros(Float64, n_dims), L0)
+
+            T = 10
+            batchsize = 3
+            subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize)
+            alg_sub = FisherMinBatchMatch(; subsampling)
+
+            q, _, _ = optimize(rng, alg_sub, T, model, q0; show_progress=PROGRESS)
+            μ = q.location
+            L = q.scale
+
+            rng_repl = StableRNG(seed)
+            q, _, _ = optimize(rng_repl, alg_sub, T, model, q0; show_progress=PROGRESS)
+            μ_repl = q.location
+            L_repl = q.scale
+            @test μ == μ_repl
+            @test L == L_repl
+        end
+
+        @testset "convergence" begin
+            modelstats = subsamplednormal(Random.default_rng(), n_data)
+            (; model, n_dims, μ_true, L_true) = modelstats
+
+            L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims))
+            q0 = FullRankGaussian(zeros(Float64, n_dims), L0)
+
+            T = 1000
+            batchsize = 1
+            subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize)
+            alg_sub = FisherMinBatchMatch(; subsampling)
+
+            q, stats, _ = optimize(alg_sub, T, model, q0; show_progress=PROGRESS)
+
+            Δλ0 = sum(abs2, q0.location - μ_true) + sum(abs2, q0.scale - L_true)
+            Δλ = sum(abs2, q.location - μ_true) + sum(abs2, q.scale - L_true)
+
+            @test Δλ ≤ Δλ0/2
+        end
+    end
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index c8f5f66b2..64371ca47 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -69,6 +69,7 @@ if GROUP == "All" || GROUP == "GENERAL"
     include("algorithms/klminwassfwdbwd.jl")
     include("algorithms/klminsqrtnaturalgraddescent.jl")
     include("algorithms/klminnaturalgraddescent.jl")
+    include("algorithms/fisherminbatchmatch.jl")
 end
 
 if GROUP == "All" || GROUP == "AD"
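For reference, a minimal usage sketch of the new algorithm based on the tests added above (not part of the patch). The `IsoGauss` target is a made-up toy `LogDensityProblems` model used only to keep the snippet self-contained; any problem exposing gradients (first-order capability) is driven the same way.

```julia
using AdvancedVI, LinearAlgebra, LogDensityProblems, Random

# Hypothetical toy target: a standard multivariate normal implementing the
# LogDensityProblems interface with first-order (gradient) capability,
# which `FisherMinBatchMatch` requires.
struct IsoGauss
    d::Int
end
LogDensityProblems.dimension(p::IsoGauss) = p.d
function LogDensityProblems.capabilities(::Type{IsoGauss})
    return LogDensityProblems.LogDensityOrder{1}()
end
LogDensityProblems.logdensity(::IsoGauss, x) = -sum(abs2, x)/2
LogDensityProblems.logdensity_and_gradient(::IsoGauss, x) = (-sum(abs2, x)/2, -x)

d = 5
prob = IsoGauss(d)

# Full-rank Gaussian initialization, as in the tests.
q0 = FullRankGaussian(zeros(d), LowerTriangular(Matrix{Float64}(I, d, d)))

# Run batch-and-match; `n_samples` is the per-iteration Monte Carlo batch size.
alg = FisherMinBatchMatch(; n_samples=32)
q, info, _ = optimize(Random.default_rng(), alg, 100, prob, q0; show_progress=false)

# Estimate the covariance-weighted Fisher divergence of the final iterate.
estimate_objective(Random.default_rng(), alg, q, prob; n_samples=10^4)
```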