tests pass with latest versions
tomerarnon committed Jul 12, 2020
1 parent 1a10023 commit 0008e99
Showing 23 changed files with 231 additions and 165 deletions.
312 changes: 183 additions & 129 deletions Manifest.toml

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions src/NeuralVerification.jl
@@ -15,11 +15,11 @@ import LazySets: dim, HalfSpace # necessary to avoid conflict with Polyhedra

using Requires

-# abstract type Solver end # no longer needed
+abstract type Solver end

# For optimization methods:
import JuMP.MOI.OPTIMAL, JuMP.MOI.INFEASIBLE
-JuMP.Model(solver) = Model(with_optimizer(solver.optimizer))
+JuMP.Model(solver::Solver) = Model(solver.optimizer)
JuMP.value(vars::Vector{VariableRef}) = value.(vars)

include("utils/activation.jl")
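The two changes above track JuMP 0.21, which dropped `with_optimizer` in favor of passing the optimizer constructor to `Model` directly. Annotating the argument as `::Solver` (enabled by the new abstract type) also confines the overload to this package's solver types; without it, the untyped method would shadow JuMP's own `Model(optimizer)` constructor. A minimal sketch of the new construction pattern, using GLPK as a stand-in for any optimizer:

```julia
using JuMP, GLPK

# JuMP ≥ 0.21: pass the optimizer constructor directly.
model = Model(GLPK.Optimizer)
# Pre-0.21 equivalent, removed upstream:
# model = Model(with_optimizer(GLPK.Optimizer))

@variable(model, x >= 0)
@objective(model, Min, x)
optimize!(model)
```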
2 changes: 1 addition & 1 deletion src/adversarial/dlv.jl
@@ -31,7 +31,7 @@ in *International Conference on Computer Aided Verification*, 2017.](https://arx
[https://github.com/VeriDeep/DLV](https://github.com/VeriDeep/DLV)
"""
-@with_kw struct DLV
+@with_kw struct DLV <: Solver
optimizer = GLPK.Optimizer
ϵ::Float64 = 1.0
end
2 changes: 1 addition & 1 deletion src/adversarial/fastLin.jl
@@ -27,7 +27,7 @@ Sound but not complete.
"Towards Fast Computation of Certified Robustness for ReLU Networks,"
*ArXiv Preprint ArXiv:1804.09699*, 2018.](https://arxiv.org/abs/1804.09699)
"""
-@with_kw struct FastLin
+@with_kw struct FastLin <: Solver
maxIter::Int64 = 10
ϵ0::Float64 = 100.0
accuracy::Float64 = 0.1
2 changes: 1 addition & 1 deletion src/adversarial/fastLip.jl
@@ -25,7 +25,7 @@ Sound but not complete.
"Towards Fast Computation of Certified Robustness for ReLU Networks,"
*ArXiv Preprint ArXiv:1804.09699*, 2018.](https://arxiv.org/abs/1804.09699)
"""
-@with_kw struct FastLip
+@with_kw struct FastLip <: Solver
maxIter::Int64 = 10
ϵ0::Float64 = 100.0
accuracy::Float64 = 0.1
2 changes: 1 addition & 1 deletion src/adversarial/reluVal.jl
@@ -26,7 +26,7 @@ Sound but not complete.
[https://github.com/tcwangshiqi-columbia/ReluVal](https://github.com/tcwangshiqi-columbia/ReluVal)
"""
-@with_kw struct ReluVal
+@with_kw struct ReluVal <: Solver
max_iter::Int64 = 10
tree_search::Symbol = :DFS # only :DFS/:BFS allowed? If so, we should assert this.
end
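The inline comment above asks whether `tree_search` should be restricted to `:DFS`/`:BFS`. One way to enforce that, purely a sketch and not part of this commit, is Parameters.jl's support for `@assert` inside `@with_kw`, which is checked at construction time:

```julia
# Hypothetical sketch, not in this commit: reject anything but :DFS/:BFS.
@with_kw struct ReluVal <: Solver
    max_iter::Int64 = 10
    tree_search::Symbol = :DFS
    @assert tree_search in (:DFS, :BFS)
end
```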
2 changes: 1 addition & 1 deletion src/optimization/certify.jl
@@ -23,7 +23,7 @@ Sound but not complete.
"Certified Defenses against Adversarial Examples,"
*ArXiv Preprint ArXiv:1801.09344*, 2018.](https://arxiv.org/abs/1801.09344)
"""
-@with_kw struct Certify
+@with_kw struct Certify <: Solver
optimizer = SCS.Optimizer
end

2 changes: 1 addition & 1 deletion src/optimization/convDual.jl
@@ -23,7 +23,7 @@ Sound but not complete.
[https://github.com/locuslab/convex_adversarial](https://github.com/locuslab/convex_adversarial)
"""
-struct ConvDual end
+struct ConvDual <: Solver end

function solve(solver::ConvDual, problem::Problem)
o = dual_value(solver, problem.network, problem.input, problem.output)
2 changes: 1 addition & 1 deletion src/optimization/duality.jl
@@ -23,7 +23,7 @@ Sound but not complete.
"A Dual Approach to Scalable Verification of Deep Networks,"
*ArXiv Preprint ArXiv:1803.06567*, 2018.](https://arxiv.org/abs/1803.06567)
"""
-@with_kw struct Duality
+@with_kw struct Duality <: Solver
optimizer = GLPK.Optimizer
end

2 changes: 1 addition & 1 deletion src/optimization/iLP.jl
@@ -27,7 +27,7 @@ Sound but not complete.
"Measuring Neural Net Robustness with Constraints,"
in *Advances in Neural Information Processing Systems*, 2016.](https://arxiv.org/abs/1605.07262)
"""
-@with_kw struct ILP
+@with_kw struct ILP <: Solver
optimizer = GLPK.Optimizer
iterative::Bool = true
end
2 changes: 1 addition & 1 deletion src/optimization/mipVerify.jl
@@ -26,7 +26,7 @@ V. Tjeng, K. Xiao, and R. Tedrake,
[https://github.com/vtjeng/MIPVerify.jl](https://github.com/vtjeng/MIPVerify.jl)
"""
-@with_kw struct MIPVerify
+@with_kw struct MIPVerify <: Solver
optimizer = GLPK.Optimizer
end

2 changes: 1 addition & 1 deletion src/optimization/nsVerify.jl
@@ -23,7 +23,7 @@ Sound and complete.
"An Approach to Reachability Analysis for Feed-Forward Relu Neural Networks,"
*ArXiv Preprint ArXiv:1706.07351*, 2017.](https://arxiv.org/abs/1706.07351)
"""
-@with_kw struct NSVerify
+@with_kw struct NSVerify <: Solver
optimizer = GLPK.Optimizer
m::Float64 = 1000.0 # The big M in the linearization
end
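`NSVerify`'s `m` is the big-M constant used to linearize each ReLU with one binary variable. The textbook encoding of `y = max(0, x)`, assuming `|x| ≤ M`, looks like the following sketch (this mirrors the standard formulation, not necessarily the package's exact `encode_network!` code):

```julia
using JuMP, GLPK

M = 1000.0                  # plays the role of NSVerify's `m`
model = Model(GLPK.Optimizer)
@variable(model, x)
@variable(model, y)
@variable(model, δ, Bin)    # δ = 1 ⇔ the ReLU is active
@constraint(model, y >= x)
@constraint(model, y >= 0)
@constraint(model, y <= x + M * (1 - δ))  # δ = 1 pins y down to x
@constraint(model, y <= M * δ)            # δ = 0 pins y down to 0
```

Together with `y ≥ x` and `y ≥ 0`, the two upper bounds force `y = x` when `δ = 1` and `y = 0` when `δ = 0`, which is exact as long as `M` genuinely bounds `|x|`.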
27 changes: 17 additions & 10 deletions src/optimization/utils/variables.jl
@@ -27,15 +27,26 @@ function init_variables(model::Model, layers::Vector{Layer}; binary = false, inc
return vars
end


+_model(a::Number) = nothing
+_model(a::VariableRef) = a.model
+_model(a::Array) = _model(first(a))
+_model(a::GenericAffExpr) = isempty(a.terms) ? nothing : _model(first(keys(a.terms)))
+_model(as...) = something(_model.(as)..., missing)
+
+# These should only be hit if we're comparing 0 with 0 or if we somehow
+# hit a branch that is numbers only (no variables or expressions). Shouldn't happen
+symbolic_max(m::Missing, a, b) = @show zero(promote_type(typeof(a), typeof(b)))
+symbolic_abs(m::Missing, a, b) = @show zero(promote_type(typeof(a), typeof(b)))

function symbolic_max(m::Model, a, b)
aux = @variable(m)
@constraint(m, aux >= a)
@constraint(m, aux >= b)
return aux
end
-symbolic_max(a::VariableRef, b::VariableRef) = symbolic_max(a.model, a, b)
-symbolic_max(a::GenericAffExpr, b::GenericAffExpr) = symbolic_max(first(first(a.terms)).model, a, b)
-symbolic_max(a::Array{<:GenericAffExpr}, b::Array{<:GenericAffExpr}) = symbolic_max.(a, b)
+symbolic_max(a, b) = symbolic_max(_model(a, b), a, b)


function symbolic_abs(m::Model, v)
aux = @variable(m) #get an anonymous variable
@@ -44,9 +55,8 @@ function symbolic_abs(m::Model, v)
@constraint(m, aux >= -v)
return aux
end
-symbolic_abs(v::VariableRef) = symbolic_abs(v.m, v)
-symbolic_abs(v::GenericAffExpr) = symbolic_abs(first(first(v.terms)).model, v)
-symbolic_abs(v::Array{<:GenericAffExpr}) = symbolic_abs.(v)
+symbolic_abs(v) = symbolic_abs(_model(v), v)


function symbolic_infty_norm(m::Model, v::Array{<:GenericAffExpr})
aux = @variable(m)
@@ -55,7 +65,4 @@ function symbolic_infty_norm(m::Model, v::Array{<:GenericAffExpr})
@constraint(m, aux .>= -v)
return aux
end
-# # in general, default to symbolic_abs behavior:
-# symbolic_infty_norm(v) = symbolic_abs(v)
-# only Array{<:GenericAffExpr} is needed
-symbolic_infty_norm(v::Array{<:GenericAffExpr}) = symbolic_infty_norm(first(first(first(v).terms)).model, v)
+symbolic_infty_norm(v::Array{<:GenericAffExpr}) = symbolic_infty_norm(_model(v), v)
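The new `_model` helpers replace the per-type `symbolic_max`/`symbolic_abs` methods with one fallback that recovers the JuMP model from whatever mix of numbers, `VariableRef`s, affine expressions, or arrays it is handed; `something(_model.(as)..., missing)` yields `missing` when no argument carries a model, which routes to the `Missing` methods (the `@show` there looks like a leftover debugging aid). Note also that the `Missing` fallback for `symbolic_abs` expects two value arguments while `symbolic_abs(v)` forwards only one, so a numbers-only call would throw a `MethodError` rather than hit it. A usage sketch, assuming the definitions above are loaded:

```julia
using JuMP, GLPK

m = Model(GLPK.Optimizer)
@variable(m, x)
@variable(m, y)

symbolic_max(x, y)        # VariableRefs carry their model: adds aux ≥ x, aux ≥ y
symbolic_max(2x + 1, y)   # a GenericAffExpr resolves through its first term
symbolic_max(0.0, 1.0)    # no model anywhere → `missing` → zero(...) fallback
```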
2 changes: 1 addition & 1 deletion src/reachability/ai2.jl
@@ -22,7 +22,7 @@ T. Gehr, M. Mirman, D. Drashsler-Cohen, P. Tsankov, S. Chaudhuri, and M. Vechev,
"Ai2: Safety and Robustness Certification of Neural Networks with Abstract Interpretation,"
in *2018 IEEE Symposium on Security and Privacy (SP)*, 2018.
"""
-struct Ai2 end
+struct Ai2 <: Solver end

function solve(solver::Ai2, problem::Problem)
reach = forward_network(solver, problem.network, problem.input)
2 changes: 1 addition & 1 deletion src/reachability/exactReach.jl
@@ -22,7 +22,7 @@ Sound and complete.
"Reachable Set Computation and Safety Verification for Neural Networks with ReLU Activations,"
*ArXiv Preprint ArXiv:1712.08163*, 2017.](https://arxiv.org/abs/1712.08163)
"""
-struct ExactReach end
+struct ExactReach <: Solver end

function solve(solver::ExactReach, problem::Problem)
reach = forward_network(solver, problem.network, problem.input)
2 changes: 1 addition & 1 deletion src/reachability/maxSens.jl
@@ -25,7 +25,7 @@ Sound but not complete.
"Output Reachable Set Estimation and Verification for Multi-Layer Neural Networks,"
*ArXiv Preprint ArXiv:1708.03322*, 2017.](https://arxiv.org/abs/1708.03322)
"""
-@with_kw struct MaxSens
+@with_kw struct MaxSens <: Solver
resolution::Float64 = 1.0
tight::Bool = false
end
4 changes: 2 additions & 2 deletions src/satisfiability/bab.jl
@@ -26,7 +26,7 @@ Sound and complete.
"A Unified View of Piecewise Linear Neural Network Verification,"
*ArXiv Preprint ArXiv:1711.00455*, 2017.](https://arxiv.org/abs/1711.00455)
"""
-@with_kw struct BaB
+@with_kw struct BaB <: Solver
optimizer = GLPK.Optimizer
ϵ::Float64 = 0.1
end
@@ -109,7 +109,7 @@ end

function approx_bound(nnet::Network, dom::Hyperrectangle, optimizer, type::Symbol)
bounds = get_bounds(nnet, dom)
-model = Model(with_optimizer(optimizer))
+model = Model(optimizer)
neurons = init_neurons(model, nnet)
add_set_constraint!(model, dom, first(neurons))
encode_network!(model, nnet, neurons, bounds, TriangularRelaxedLP())
6 changes: 3 additions & 3 deletions src/satisfiability/planet.jl
@@ -25,7 +25,7 @@ in *International Symposium on Automated Technology for Verification and Analysi
[https://github.com/progirep/planet](https://github.com/progirep/planet)
"""
-@with_kw struct Planet
+@with_kw struct Planet <: Solver
optimizer = GLPK.Optimizer
eager::Bool = false
end
@@ -79,7 +79,7 @@ end


function elastic_filtering(problem::Problem, δ::Vector{Vector{Bool}}, bounds::Vector{Hyperrectangle}, optimizer)
-model = Model(with_optimizer(optimizer))
+model = Model(optimizer)
neurons = init_neurons(model, problem.network)
add_set_constraint!(model, problem.input, first(neurons))
add_complementary_set_constraint!(model, problem.output, last(neurons))
@@ -129,7 +129,7 @@ end
# Only use tighten_bounds for feasibility check
function tighten_bounds(problem::Problem, optimizer)
bounds = get_bounds(problem)
-model = Model(with_optimizer(optimizer))
+model = Model(optimizer)
neurons = init_neurons(model, problem.network)
add_set_constraint!(model, problem.input, first(neurons))
add_complementary_set_constraint!(model, problem.output, last(neurons))
2 changes: 1 addition & 1 deletion src/satisfiability/reluplex.jl
@@ -22,7 +22,7 @@ Sound and complete.
"Reluplex: An Efficient SMT Solver for Verifying Deep Neural Networks," in
*International Conference on Computer Aided Verification*, 2017.](https://arxiv.org/abs/1702.01135)
"""
-@with_kw struct Reluplex
+@with_kw struct Reluplex <: Solver
optimizer = GLPK.Optimizer
end

4 changes: 2 additions & 2 deletions src/satisfiability/sherlock.jl
@@ -27,7 +27,7 @@ Sound but not complete.
[https://github.com/souradeep-111/sherlock](https://github.com/souradeep-111/sherlock)
"""
-@with_kw struct Sherlock
+@with_kw struct Sherlock <: Solver
optimizer = GLPK.Optimizer
ϵ::Float64 = 0.1
end
@@ -63,7 +63,7 @@ function local_search(problem::Problem, x::Vector{Float64}, optimizer, type::Sym
nnet = problem.network
act_pattern = get_activation(nnet, x)
gradient = get_gradient(nnet, x)
-model = Model(with_optimizer(optimizer))
+model = Model(optimizer)
neurons = init_neurons(model, nnet)
add_set_constraint!(model, problem.input, first(neurons))
encode_network!(model, nnet, neurons, act_pattern, StandardLP())
6 changes: 5 additions & 1 deletion src/utils/flux.jl
@@ -9,7 +9,11 @@ activation(::typeof(relu)) = ReLU()

layer(x) = error("Can't use $x as a layer.")

-layer(d::Dense) = Layer(data(d.W), data(d.b), activation(d.σ))
+if isdefined(Flux, :Tracker)
+    layer(d::Dense) = Layer(Flux.Tracker.data(d.W), Flux.Tracker.data(d.b), activation(d.σ))
+else
+    layer(d::Dense) = Layer(d.W, d.b, activation(d.σ))
+end

network(c::Chain) = Network([layer.(c.layers)...])

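The guard above handles Flux's move from Tracker to Zygote: on Flux ≤ 0.9 a `Dense` layer's `W` and `b` are `TrackedArray`s that need `Flux.Tracker.data` to unwrap, while on Flux ≥ 0.10 they are plain arrays. Since `isdefined(Flux, :Tracker)` is evaluated once when the package loads, the right `layer` method is defined for whichever Flux is installed. A usage sketch, assuming a recent (Zygote-based) Flux:

```julia
using Flux

c = Chain(Dense(2, 3, relu), Dense(3, 1))
nnet = network(c)   # each Dense becomes a Layer(W, b, activation)
```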
1 change: 1 addition & 0 deletions src/utils/problem.jl
@@ -32,6 +32,7 @@ LazySets.tohrep(PC::PolytopeComplement) = PolytopeComplement(convert(HPolytope,
Base.in(pt, PC::PolytopeComplement) = pt ∉ PC.P
complement(PC::PolytopeComplement) = PC.P
complement(P::LazySet) = PolytopeComplement(P)
+Base.:(==)(pc1::PolytopeComplement, pc2::PolytopeComplement) = pc1.P == pc2.P
# etc.


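Without this method, `==` on two `PolytopeComplement`s falls back to `===`, which is sensitive to whether `complement` returns the stored set or wraps it in a fresh object; comparing the wrapped sets instead is what lets the tests below switch from `===` to `==`. A quick sketch of the behavior (the half-space here is illustrative, not necessarily the one in the test file):

```julia
using LazySets

HS = HalfSpace([1.0, 1.0], 4.0)   # hypothetical example set
PC = complement(HS)               # a fresh PolytopeComplement wrapping HS

complement(PC) == HS   # true: unwrapping returns the stored set
complement(HS) == PC   # true: a new wrapper, but == compares the wrapped sets
```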
4 changes: 2 additions & 2 deletions test/complements.jl
@@ -9,8 +9,8 @@
@test [5.0, 5.0] ∉ HS && [5.0, 5.0] ∈ PC
@test [-1.0, -1.0] ∈ HS && [-1.0, -1.0] ∉ PC

-@test complement(PC) === HS
-@test complement(HS) === PC
+@test complement(PC) == HS
+@test complement(HS) == PC

# Hyperrectangle contained in HS
hr = Hyperrectangle(low = [1.0, 1.0], high = [2.0, 2.0])