Commit d535f2d

Merge 21d9615 into 81f13af
Wei-TianHao committed Jul 15, 2020
2 parents 81f13af + 21d9615 commit d535f2d

Showing 10 changed files with 6,031 additions and 49 deletions.
3 changes: 2 additions & 1 deletion Project.toml
@@ -19,6 +19,7 @@ SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
[extras]
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"

[targets]
test = ["Test", "Flux"]
test = ["Test", "Flux", "JLD2"]
5,655 changes: 5,655 additions & 0 deletions examples/cars_workshop/.ipynb_checkpoints/4 visualverification-checkpoint.ipynb

Large diffs are not rendered by default.

15 changes: 15 additions & 0 deletions examples/networks/R2_R2.nnet
@@ -0,0 +1,15 @@
2
2,2,2
0
0
0
0
0
1,1
-1,1
-1
-1
1,0
0,1
0
0
128 changes: 128 additions & 0 deletions examples/networks/mnist_1000.nnet

Large diffs are not rendered by default.

147 changes: 102 additions & 45 deletions src/adversarial/neurify.jl
@@ -26,8 +26,19 @@ Sound but not complete.
"""

@with_kw struct Neurify
max_iter::Int64 = 1000
max_iter::Int64 = 10
tree_search::Symbol = :DFS # only :DFS/:BFS allowed? If so, we should assert this.

# Because of the over-approximation, a split may not bisect the input set.
# The gradient then remains unchanged (since the input set didn't change),
# and the same node would be chosen for splitting forever.
# To prevent this, we split each node only once if its gradient doesn't change.
# Each element in splits is a tuple (layer_index, node_index, influence_of_the_node).

splits = Set() # To prevent an infinite loop.

# But in some cases (no concrete example at hand, just a hunch),
# a node may genuinely need to be split twice with the same gradient.
end
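
To make the deduplication above concrete, here is a minimal sketch (not code from this commit; try_split! is a hypothetical helper) of how recording (layer_index, node_index, influence) tuples in a set prevents endless re-splitting:

splits = Set{Tuple{Int, Int, Float64}}()

# Record a split of node (layer_idx, node_idx) with the given influence score;
# return false if this exact split has already been performed.
function try_split!(splits, layer_idx::Int, node_idx::Int, influence::Float64)
    key = (layer_idx, node_idx, influence)
    key in splits && return false  # same node, same influence: refuse to split again
    push!(splits, key)
    return true
end

try_split!(splits, 2, 5, 0.37)  # true: first split of this node
try_split!(splits, 2, 5, 0.37)  # false: would otherwise loop forever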

struct SymbolicInterval{F<:AbstractPolytope}
@@ -41,74 +41,101 @@ SymbolicInterval(x::Matrix{Float64}, y::Matrix{Float64}) = SymbolicInterval(x, y
# Data to be passed during forward_layer
struct SymbolicIntervalGradient
sym::SymbolicInterval
LΛ::Vector{Vector{Float64}}
LΛ::Vector{Vector{Float64}} # mask for computing the gradient.
UΛ::Vector{Vector{Float64}}
r::Vector{Vector{Float64}} # range of an input interval (upper_bound - lower_bound)
end

function solve(solver::Neurify, problem::Problem)

while !isempty(solver.splits) pop!(solver.splits) end
problem = Problem(problem.network, convert(HPolytope, problem.input), convert(HPolytope, problem.output))

reach_lc = problem.input.constraints
output_lc = problem.output.constraints

n = size(reach_lc, 1)
m = size(reach_lc[1].a, 1)
model = Model(with_optimizer(GLPK.Optimizer))
@variable(model, x[1:m], base_name="x")
@constraint(model, [i in 1:n], reach_lc[i].a' * x <= reach_lc[i].b)

reach = forward_network(solver, problem.network, problem.input)
result = check_inclusion(reach.sym, problem.output, problem.network) # This called the check_inclusion function in ReluVal, because the constraints are Hyperrectangle
reach = forward_network(solver, problem.network, problem.input)
result, max_violation_con = check_inclusion(solver, reach.sym, problem.output, problem.network) # This calls the check_inclusion function in ReluVal, because the constraints are Hyperrectangle
result.status == :unknown || return result
reach_list = SymbolicIntervalGradient[reach]

reach_list = [(reach, max_violation_con)]

for i in 2:solver.max_iter
length(reach_list) > 0 || return BasicResult(:holds)
reach = pick_out!(reach_list, solver.tree_search)
intervals = constraint_refinement(solver, problem.network, reach)
reach, max_violation_con = pick_out!(reach_list, solver.tree_search)
intervals = constraint_refinement(solver, problem.network, reach, max_violation_con)
for interval in intervals
reach = forward_network(solver, problem.network, interval)
result = check_inclusion(reach.sym, problem.output, problem.network)
result, max_violation_con = check_inclusion(solver, reach.sym, problem.output, problem.network)
result.status == :violated && return result
result.status == :holds || (push!(reach_list, reach))
result.status == :holds || (push!(reach_list, (reach, max_violation_con)))
end
end
return BasicResult(:unknown)
end
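
pick_out! itself is not shown in this diff. Assuming reach_list is a plain Vector (as the literal [(reach, max_violation_con)] above suggests), a minimal sketch of the :DFS/:BFS dispatch could look like this; it is an illustration, not necessarily the repository's implementation:

# Sketch: DFS explores the most recently added interval first (LIFO),
# BFS the oldest one (FIFO).
function pick_out!(reach_list::Vector, tree_search::Symbol)
    tree_search == :DFS && return pop!(reach_list)
    tree_search == :BFS && return popfirst!(reach_list)
    error("unsupported tree_search: $tree_search")
end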

function check_inclusion(reach::SymbolicInterval{HPolytope{N}}, output::AbstractPolytope, nnet::Network) where N
function check_inclusion(solver, reach::SymbolicInterval{HPolytope{N}}, output::AbstractPolytope, nnet::Network) where N
# The output constraint is in the form A*x < b
# We try to maximize output constraint to find a violated case, or to verify the inclusion,
# suppose the output is [1, 0, -1] * x < 2, Then we are maximizing reach.Up[1] * 1 + reach.Low[3] * (-1)

# We try to maximize the output constraint to find a violated case, or else to verify the inclusion.
# Suppose the output constraint is [1, 0, -1] * x < 2; then we maximize reach.Up[1] * 1 + reach.Low[3] * (-1).

# Returns a result and the most violated constraint.
reach_lc = reach.interval.constraints
output_lc = output.constraints

n = size(reach_lc, 1)
m = size(reach_lc[1].a, 1)
model = Model(with_optimizer(GLPK.Optimizer))
@variable(model, x[1:m])
@constraint(model, [i in 1:n], reach_lc[i].a' * x <= reach_lc[i].b)

for i in 1:size(output.constraints, 1)
max_violation = -1e9
max_violation_con = nothing
for i in 1:size(output_lc, 1)
obj = zeros(size(reach.Low, 2))
for j in 1:size(reach.Low, 1)
if output.constraints[i].a[j] > 0
obj += output.constraints[i].a[j] * reach.Up[j,:]
if output_lc[i].a[j] > 0
obj += output_lc[i].a[j] * reach.Up[j,:]
else
obj += output.constraints[i].a[j] * reach.Low[j,:]
obj += output_lc[i].a[j] * reach.Low[j,:]
end
end
@objective(model, Min, obj' * [x; [1]])
obj = transpose(obj)
@objective(model, Max, obj * [x; [1]])
optimize!(model)
# println("termination_status")
# println(termination_status(model))
y = compute_output(nnet, value(x))
if !∈(y, output)
if termination_status(model) == MOI.OPTIMAL
y = compute_output(nnet, value(x))
if !∈(y, output)
return CounterExampleResult(:violated, value(x)), nothing
end
if objective_value(model) > output_lc[i].b
if objective_value(model) - output_lc[i].b > max_violation
max_violation = objective_value(model) - output_lc[i].b
max_violation_con = output_lc[i].a
end
end
else
if ∈(value(x), reach.interval)
return CounterExampleResult(:violated, value(x))
println("Not OPTIMAL, but x in the input set")
println("This is usually caused by an open input set.")
println("Please check your input constraints.")
exit()
end
end
if objective_value(model) > output.constraints[i].b
return BasicResult(:unknown)
println("No solution, please check the problem definition.")
exit()
end

end
return BasicResult(:holds)
max_violation > 0 && return BasicResult(:unknown), max_violation_con
return BasicResult(:holds), nothing
end
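
As a worked version of the comment at the top of this function: for the output constraint a = [1, 0, -1], b = 2, the LP objective is assembled from Up rows (positive coefficients) and Low rows (negative ones). A self-contained sketch with made-up symbolic bounds, where each row is [coefficients over the input variables..., constant]:

Up  = [1.0 0.0 3.0;   # symbolic upper bound of output 1
       0.0 1.0 0.0;   # output 2
       2.0 0.0 1.0]   # output 3
Low = [0.5 0.0 1.0;
       0.0 0.5 0.0;
       1.0 0.0 0.5]

a = [1.0, 0.0, -1.0]  # output constraint a' * y <= b
obj = zeros(size(Low, 2))
for j in 1:length(a)
    obj += a[j] > 0 ? a[j] * Up[j, :] : a[j] * Low[j, :]
end
# obj == 1 * Up[1,:] + (-1) * Low[3,:] == [0.0, 0.0, 2.5];
# maximizing obj' * [x; 1] over the input set then upper-bounds a' * y.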

function constraint_refinement(solver::Neurify, nnet::Network, reach::SymbolicIntervalGradient)
i, j, gradient = get_nodewise_gradient(nnet, reach.LΛ, reach.UΛ)
function constraint_refinement(solver::Neurify, nnet::Network, reach::SymbolicIntervalGradient, max_violation_con::Vector{Float64})
i, j, influence = get_nodewise_influence(solver, nnet, reach, max_violation_con)
# We can generate three more constraints.
# The symbolic representation of node (i, j) is Low[i][j,:] and Up[i][j,:].
nnet_new = Network(nnet.layers[1:i])
@@ -126,27 +164,41 @@ function constraint_refinement(solver::Neurify, nnet::Network, reach::SymbolicIn
return intervals
end
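
Conceptually, the three refined input subsets returned here correspond to the split node being provably active, provably inactive, or still unstable. A predicate sketch, assuming lo(x) and up(x) stand for the node's symbolic bounds Low[i][j,:] * [x; 1] and Up[i][j,:] * [x; 1] (the concrete expressions below are hypothetical):

lo(x) = x[1] - x[2]          # hypothetical symbolic lower bound
up(x) = x[1] + x[2] + 0.5    # hypothetical symbolic upper bound

subset1(x) = lo(x) >= 0              # node provably active here
subset2(x) = up(x) <= 0              # node provably inactive here
subset3(x) = lo(x) <= 0 <= up(x)     # still unstable, but on a smaller set

# Every point satisfies at least one predicate, so the union covers the input set:
@assert subset1([1.0, 0.0]) && subset2([-1.0, 0.0]) && subset3([0.0, 0.25])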

function get_nodewise_gradient(nnet::Network, LΛ::Vector{Vector{Float64}}, UΛ::Vector{Vector{Float64}})
function get_nodewise_influence(solver::Neurify, nnet::Network, reach::SymbolicIntervalGradient, max_violation_con::Vector{Float64})
n_output = size(nnet.layers[end].weights, 1)
n_length = length(nnet.layers)
LG = Matrix(1.0I, n_output, n_output)
UG = Matrix(1.0I, n_output, n_output)
# We want to find the node with the largest influence
# Influence is defined as gradient * interval width
# The gradient is with respect to a loss defined by the most violated constraint.
LG = transpose(copy(max_violation_con))
UG = transpose(copy(max_violation_con))
max_tuple = (0, 0, 0.0)
for (k, layer) in enumerate(reverse(nnet.layers))
i = n_length - k + 1
for j in size(layer.bias)
if LΛ[i][j] ∉ (0.0, 1.0) && UΛ[i][j] ∉ (0.0, 1.0)
max_gradient = max(abs(LG[j]), abs(UG[j]))
if max_gradient > max_tuple[3]
max_tuple = (i, j, max_gradient)
if layer.activation != Id()
# Only split ReLU nodes.
# A split on an Id node may not reduce the over-approximation (the input set may not be bisected).
for j in 1:size(layer.bias, 1)
if (0 < reach.LΛ[i][j] < 1) && (0 < reach.UΛ[i][j] < 1)
max_gradient = max(abs(LG[j]), abs(UG[j]))
influence = max_gradient * reach.r[i][j]
if in((i, j, influence), solver.splits) # To prevent an infinite loop.
continue
end
# If we used > here and the largest gradient were 0, this function would return (0, 0, 0).
if influence >= max_tuple[3]
max_tuple = (i, j, influence)
end
end
end
end

i >= 1 || break
LG_hat = max.(LG, 0.0) * Diagonal(LΛ[i]) + min.(LG, 0.0) * Diagonal(UΛ[i])
UG_hat = min.(UG, 0.0) * Diagonal(LΛ[i]) + max.(UG, 0.0) * Diagonal(UΛ[i])
LG_hat = max.(LG, 0.0) * Diagonal(reach.LΛ[i]) + min.(LG, 0.0) * Diagonal(reach.UΛ[i])
UG_hat = min.(UG, 0.0) * Diagonal(reach.LΛ[i]) + max.(UG, 0.0) * Diagonal(reach.UΛ[i])
LG, UG = interval_map_right(layer.weights, LG_hat, UG_hat)
end
push!(solver.splits, max_tuple)
return max_tuple
end
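
A small numeric illustration of the influence score used above (a toy sketch mirroring max_gradient = max(abs(LG[j]), abs(UG[j])) and influence = max_gradient * reach.r[i][j]):

LG_j, UG_j = -0.2, 0.5                     # gradient bounds of one node
r_j = 4.0                                  # its interval width (upper - lower bound)
max_gradient = max(abs(LG_j), abs(UG_j))   # 0.5
influence = max_gradient * r_j             # 2.0
# A node with a large gradient but a near-zero interval width is a poor split
# candidate: refining it would barely tighten the over-approximation.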

@@ -160,7 +212,8 @@ function forward_linear(solver::Neurify, input::AbstractPolytope, layer::Layer)
sym = SymbolicInterval(hcat(W, b), hcat(W, b), input)
LΛ = Vector{Vector{Int64}}(undef, 0)
UΛ = Vector{Vector{Int64}}(undef, 0)
return SymbolicIntervalGradient(sym, LΛ, UΛ)
r = Vector{Vector{Int64}}(undef, 0)
return SymbolicIntervalGradient(sym, LΛ, UΛ, r)
end

# Symbolic forward_linear
@@ -171,14 +224,15 @@ function forward_linear(solver::Neurify, input::SymbolicIntervalGradient, layer:
output_Up[:, end] += b
output_Low[:, end] += b
sym = SymbolicInterval(output_Low, output_Up, input.sym.interval)
return SymbolicIntervalGradient(sym, input.LΛ, input.UΛ)
return SymbolicIntervalGradient(sym, input.LΛ, input.UΛ, input.r)
end

# Symbolic forward_act
function forward_act(input::SymbolicIntervalGradient, layer::Layer{ReLU})
n_node, n_input = size(input.sym.Up)
output_Low, output_Up = input.sym.Low[:, :], input.sym.Up[:, :]
mask_lower, mask_upper = zeros(Float64, n_node), ones(Float64, n_node)
interval_width = zeros(Float64, n_node)
for i in 1:n_node
if upper_bound(input.sym.Up[i, :], input.sym.interval) <= 0.0
# Update to zero
@@ -201,20 +255,23 @@ function forward_act(input::SymbolicIntervalGradient, layer::Layer{ReLU})
output_Up[i, end] -= up_low
output_Up[i, :] = up_slop * output_Up[i, :]
mask_lower[i], mask_upper[i] = low_slop, up_slop
interval_width[i] = up_up - low_low
end
end
sym = SymbolicInterval(output_Low, output_Up, input.sym.interval)
LΛ = push!(input.LΛ, mask_lower)
UΛ = push!(input.UΛ, mask_upper)
return SymbolicIntervalGradient(sym, LΛ, UΛ)
r = push!(input.r, interval_width)
return SymbolicIntervalGradient(sym, LΛ, UΛ, r)
end
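
For reference, the over-approximation branch elided above relaxes an unstable ReLU linearly. Assuming the elided code computes up_slop = up_up / (up_up - up_low), as in the Neurify relaxation, the two visible update lines amount to v -> up_slop * (v - up_low), which a small sketch can sanity-check:

up_low, up_up = -1.0, 2.0                  # concrete bounds of the node's upper equation
up_slop = up_up / (up_up - up_low)         # 2/3

relaxed_upper(v) = up_slop * (v - up_low)  # shift by -up_low, then scale

@assert relaxed_upper(up_up) ≈ up_up       # tight at the upper end: 2.0
@assert relaxed_upper(up_low) ≈ 0.0        # matches ReLU at the lower end
@assert relaxed_upper(0.0) >= 0.0          # over-approximates ReLU(0) = 0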

function forward_act(input::SymbolicIntervalGradient, layer::Layer{Id})
sym = input.sym
n_node = size(input.sym.Up, 1)
LΛ = push!(input.LΛ, ones(Float64, n_node))
UΛ = push!(input.UΛ, ones(Float64, n_node))
return SymbolicIntervalGradient(sym, LΛ, UΛ)
r = push!(input.r, ones(Float64, n_node))
return SymbolicIntervalGradient(sym, LΛ, UΛ, r)
end


4 changes: 2 additions & 2 deletions src/utils/util.jl
@@ -253,13 +253,13 @@ Inputs:
Outputs:
- `(lbound, ubound)` (after the mapping)
"""
function interval_map(W::Matrix{N}, l::AbstractVecOrMat, u::AbstractVecOrMat) where N
function interval_map(W::AbstractMatrix{N}, l::AbstractVecOrMat, u::AbstractVecOrMat) where N
l_new = max.(W, zero(N)) * l + min.(W, zero(N)) * u
u_new = max.(W, zero(N)) * u + min.(W, zero(N)) * l
return (l_new, u_new)
end

function interval_map_right(W::Matrix{N}, l::AbstractVecOrMat, u::AbstractVecOrMat) where N
function interval_map_right(W::AbstractMatrix{N}, l::AbstractVecOrMat, u::AbstractVecOrMat) where N
l_new = l * max.(W, zero(N)) + u * min.(W, zero(N))
u_new = u * max.(W, zero(N)) + l * min.(W, zero(N))
return (l_new, u_new)
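A quick sanity check of interval_map (a usage sketch, not new repository code): propagate the box l <= x <= u through y = W * x.

W = [1.0 -2.0;
     3.0  1.0]
l = [0.0, 1.0]     # x1 in [0, 1], x2 in [1, 2]
u = [1.0, 2.0]

l_new = max.(W, 0.0) * l + min.(W, 0.0) * u   # == [-4.0, 1.0]
u_new = max.(W, 0.0) * u + min.(W, 0.0) * l   # == [-1.0, 5.0]
# e.g. y1 = x1 - 2*x2 indeed ranges over [0 - 4, 1 - 2] = [-4, -1].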
Binary file added test/MNIST_1000_data.jld2
Binary file not shown.
85 changes: 85 additions & 0 deletions test/mnist_1000.jl
@@ -0,0 +1,85 @@
using LazySets, Test, LinearAlgebra, GLPKMathProgInterface
using NeuralVerification
import NeuralVerification: ReLU, Id
using JLD2

function test_solver(solver, test_selected)

# index of all violated cases in the first 1000 images.
violation_idx = [25, 142, 179, 183, 283, 285, 387, 392, 612, 647, 737]
# index of selected holding cases in the first 1000 images.
satisfied_idx = [10, 100, 200, 300, 400, 500, 600, 700, 800, 900, 999, 213, 391, 660, 911]

if test_selected
test_idx = [violation_idx; satisfied_idx]
else
test_idx = 1:1000
end

@load "MNIST_1000_data.jld2" train_x train_y

net_file = "$(@__DIR__)/../examples/networks/mnist_1000.nnet"
mnist_net = read_nnet(net_file, last_layer_activation = Id())

for i = test_idx

input_center = reshape(train_x[:,:,i], 28*28)
label = train_y[i]
A = zeros(Float64, 10, 10)
A[diagind(A)] .= 1
A[:, label+1] = A[:, label+1] .- 1
A = A[1:end .!= label+1,:]

b = zeros(Float64, 9)

Y = HPolytope(A, b)
pred = argmax(NeuralVerification.compute_output(mnist_net, input_center))-1

if pred != label
continue
end

epsilon = 1.0/255.0
upper = input_center .+ epsilon
lower = input_center .- epsilon
clamp!(upper, 0.0, 1.0)
clamp!(lower, 0.0, 1.0)
X = Hyperrectangle(low=lower, high=upper)

problem_mnist = Problem(mnist_net, X, Y)

result = solve(solver, problem_mnist)

if result.status == :violated
@test i ∈ violation_idx
noisy = argmax(NeuralVerification.compute_output(mnist_net, result.counter_example))-1
@test noisy != pred
elseif result.status == :holds
@test !(i ∈ violation_idx)
else #result.status == :unknown

end
end
end

# If test_selected == true, only the selected indices are tested; otherwise, all 1000 images are tested.
test_selected = true

# Please note that the number of tests may differ between solvers,
# because which test cases run depends on each solver's results.

if test_selected
@testset "MNIST selected, ReluVal" begin
test_solver(ReluVal(), test_selected)
end
@testset "MNIST selected, Neurify" begin
test_solver(Neurify(), test_selected)
end
else
@testset "MNIST 1000, ReluVal" begin
test_solver(ReluVal(), test_selected)
end
@testset "MNIST 1000, Neurify" begin
test_solver(Neurify(), test_selected)
end
end
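
For clarity, here is the output-constraint construction from inside the loop above, replayed on a 4-class toy case (an illustrative sketch, not additional test code). The rows of A encode y_i - y_label <= 0 for every i != label, i.e. the true class must keep the highest score:

using LinearAlgebra

n_class, label = 4, 2                 # toy case; labels are 0-based as in MNIST
A = zeros(Float64, n_class, n_class)
A[diagind(A)] .= 1
A[:, label+1] = A[:, label+1] .- 1    # subtract 1 from the label column
A = A[1:end .!= label+1, :]           # drop the trivial all-zero row
# A == [1 0 -1 0;
#       0 1 -1 0;
#       0 0 -1 1]
b = zeros(n_class - 1)                # the "holds" property is A * y <= 0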
4 changes: 3 additions & 1 deletion test/runtests.jl
@@ -4,4 +4,6 @@ include("inactive_relus.jl")
if Base.find_package("Flux") != nothing
include("flux.jl")
end
include("complements.jl")
include("complements.jl")
include("splitting.jl")
include("mnist_1000.jl")
