merge calc_slope (Neurify) and relaxed_ReLU (ConvDual) into one function and put into util.jl
changliuliu committed Aug 2, 2020
1 parent 91fab42 commit 9f3a907
Showing 3 changed files with 25 additions and 16 deletions.
10 changes: 3 additions & 7 deletions src/adversarial/neurify.jl
@@ -239,11 +239,7 @@ function forward_linear(solver::Neurify, input::SymbolicIntervalGradient, layer:
sym = SymbolicInterval(output_Low, output_Up, input.sym.interval)
return SymbolicIntervalGradient(sym, input.LΛ, input.UΛ, input.r)
end
function calc_slop(up::Float64, low::Float64)
(up <= 0) && return 0
(low >= 0) && return 1
return up / (up - low)
end

# Symbolic forward_act
function forward_act(input::SymbolicIntervalGradient, layer::Layer{ReLU})
n_node, n_input = size(input.sym.Up)
@@ -260,8 +256,8 @@ function forward_act(input::SymbolicIntervalGradient, layer::Layer{ReLU})
low_low = lower_bound(input.sym.Low[i, :], input.sym.interval)
interval_width[i] = up_up - low_low

up_slop = calc_slop(up_up, up_low)
low_slop = calc_slop(low_up, low_low)
up_slop = act_gradient(up_low, up_up)
low_slop = act_gradient(low_low, low_up)

output_Up[i, :] = up_slop * output_Up[i, :]
output_Up[i, end] += up_slop * max(-up_low, 0)
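For context, a minimal standalone sketch of the relaxation step that forward_act performs after this change. The act_gradient definition is copied from the new two-argument method in util.jl; the numeric bounds and the row values are hypothetical.

# Two-argument slope helper, mirroring the new method added to util.jl in this commit.
act_gradient(l::Float64, u::Float64) = u <= 0.0 ? 0.0 : (l >= 0.0 ? 1.0 : u / (u - l))

# Hypothetical concrete bounds of one neuron's symbolic upper and lower equations,
# chosen so the neuron is undecided (both sets of bounds straddle zero).
up_low, up_up   = -1.0, 3.0
low_low, low_up = -2.0, 1.0

up_slop  = act_gradient(up_low, up_up)    # 3 / (3 - (-1)) = 0.75
low_slop = act_gradient(low_low, low_up)  # 1 / (1 - (-2)) = 0.333...

# The symbolic upper equation is scaled by its slope and its constant term is
# shifted by up_slop * max(-up_low, 0), as in the output_Up update above.
output_up_row = [2.0, -1.0, 0.5]          # hypothetical [coefficients..., constant]
output_up_row = up_slop * output_up_row
output_up_row[end] += up_slop * max(-up_low, 0)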
10 changes: 2 additions & 8 deletions src/optimization/convDual.jl
@@ -63,7 +63,7 @@ modifies v and returns o
function backprop!(v::Vector{Float64}, u::Vector{Float64}, l::Vector{Float64})
o = 0.0
for j in 1:length(v)
val = relaxed_ReLU(l[j], u[j])
val = act_gradient(l[j], u[j])
if val < 1.0 # if val is 1, it means ReLU result is identity so do not update (NOTE is that the right reasoning?)
v[j] = v[j] * val
o += v[j] * l[j]
@@ -94,7 +94,7 @@ function get_bounds(nnet::Network, input::Vector{Float64}, ϵ::Float64)
n_input = length(layers[i-1].bias)
n_output = length(layers[i].bias)

last_input_ReLU = relaxed_ReLU.(last(l), last(u))
last_input_ReLU = act_gradient.(last(l), last(u))
push!(input_ReLU, last_input_ReLU)
D = Diagonal(last_input_ReLU) # a matrix whose diagonal values are the relaxed_ReLU values (maybe should be sparse?)

@@ -161,9 +161,3 @@ function new_μ(n_input, n_output, input_ReLU, WD)
end
return sub_μ
end

function relaxed_ReLU(l::Float64, u::Float64)
u <= 0.0 && return 0.0
l >= 0.0 && return 1.0
return u / (u - l)
end
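As a rough illustration of the convDual call sites after the merge (a sketch with made-up bounds, not the repository's code), get_bounds now builds its diagonal relaxation matrix from the shared helper:

using LinearAlgebra

# Two-argument slope helper, as added to util.jl in this commit.
act_gradient(l::Float64, u::Float64) = u <= 0.0 ? 0.0 : (l >= 0.0 ? 1.0 : u / (u - l))

l = [-1.0, 0.5, -2.0]   # hypothetical pre-activation lower bounds for one layer
u = [ 2.0, 3.0, -0.5]   # hypothetical pre-activation upper bounds

last_input_ReLU = act_gradient.(l, u)   # broadcast over the layer, as in get_bounds
D = Diagonal(last_input_ReLU)           # diagonal of relaxed slopes: 2/3, 1.0, 0.0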
21 changes: 20 additions & 1 deletion src/utils/util.jl
@@ -229,6 +229,25 @@ Currently only support ReLU and Id.
act_gradient(act::ReLU, z_hat::Vector) = z_hat .>= 0.0
act_gradient(act::Id, z_hat::Vector) = trues(length(z_hat))

"""
act_gradient(act::ReLU, l::Float64, u::Float64)
Returns the slope of a ReLU activation based on its lower and upper bounds
Inputs:
- `l::Float64`: lower bound
- `u::Float64`: upper bound
Return:
- `slope::Float64`: 0 if u <= 0; 1 if l >= 0; u/(u-l) otherwise
"""
function act_gradient(act::ReLU, l::Float64, u::Float64)
u <= 0.0 && return 0.0
l >= 0.0 && return 1.0
return u / (u - l)
end

act_gradient(l::Float64, u::Float64) = act_gradient(ReLU(), l, u)

"""
get_gradient(nnet::Network, input::AbstractPolytope)
@@ -423,4 +442,4 @@ function split_interval(dom::Hyperrectangle, i::Int64)
input_upper[i] = dom.center[i] + dom.radius[i]
input_split_right = Hyperrectangle(low = input_lower, high = input_upper)
return (input_split_left, input_split_right)
end
end
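A quick behavioral check of the merged helper, as a standalone sketch with illustrative inputs covering the three regimes described in the docstring:

# Standalone copy of the merged helper from util.jl (the two-argument method defaults to ReLU).
act_gradient(l::Float64, u::Float64) = u <= 0.0 ? 0.0 : (l >= 0.0 ? 1.0 : u / (u - l))

act_gradient(-3.0, -1.0)   # 0.0,  upper bound <= 0: the ReLU is always inactive
act_gradient( 1.0,  4.0)   # 1.0,  lower bound >= 0: the ReLU acts as the identity
act_gradient(-1.0,  3.0)   # 0.75, undecided neuron: triangle-relaxation slope u / (u - l)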
