
Commit

Merge 1da61b5 into f466104
castrong committed Jun 30, 2020
2 parents f466104 + 1da61b5 commit 8acd05d
Showing 5 changed files with 136 additions and 2 deletions.
1 change: 1 addition & 0 deletions Project.toml
@@ -12,6 +12,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a"
PicoSAT = "ff2beb65-d7cd-5ff1-a187-74671133a339"
Polyhedra = "67491407-f73d-577b-9b50-8179a7c68029"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
2 changes: 2 additions & 0 deletions src/NeuralVerification.jl
@@ -14,6 +14,7 @@ using Interpolations # only for PiecewiseLinear
import LazySets: dim, HalfSpace # necessary to avoid conflict with Polyhedra

using Requires
using Printf # for write_nnet

# abstract type Solver end # no longer needed

@@ -50,6 +51,7 @@ export
AdversarialResult,
ReachabilityResult,
read_nnet,
write_nnet,
solve,
forward_network,
check_inclusion
102 changes: 101 additions & 1 deletion src/utils/util.jl
@@ -47,6 +47,106 @@ function read_layer(output_dim::Int64, f::IOStream, act = ReLU())
return Layer(weights, bias, act)
end

"""
write_nnet(fname::String, network::Network)
Write out a neural network in the .nnet format. Based on the code at
https://github.com/sisl/NNet/blob/master/utils/writeNNet.py.
The .nnet format is documented at https://github.com/sisl/NNet.
The Network object doesn't currently store upper and lower bounds on the input,
so we write the lower and upper bounds as -floatmax(Float16) and floatmax(Float16).
Floats are written with 10 digits past the decimal point.
"""
function write_nnet(fname::String, network::Network)
open(fname, "w") do f
#####################
# First, we write the header lines:
# The first line written is just a line of text
# The second line gives the four values:
# Number of fully connected layers in the network
# Number of inputs to the network
# Number of outputs from the network
# Maximum size of any hidden layer
# The third line gives the sizes of each layer, including the input and output layers
# The fourth line gives an outdated flag, so this can be ignored
# The fifth line specifies the minimum values each input can take
# The sixth line specifies the maximum values each input can take
# Inputs passed to the network are truncated to lie within this range
# The seventh line gives the mean value of each input and of all outputs
# The eighth line gives the range of each input and of all outputs
# These two lines are used to map raw inputs to the 0 mean, unit range of the inputs and outputs
# used during training
# The ninth line begins the network weights and biases
####################
layers = network.layers

# line 1
write(f, "// Neural Network File Format by Kyle Julian, Stanford 2016\n") # line 1

# line 2 (i)
write(f, string(length(network.layers), ",")) # number of layer connections (# of weight bias pairs)

num_inputs = size(network.layers[1].weights, 2)
write(f, string(num_inputs, ",")) # number of inputs

num_outputs = length(network.layers[end].bias)
write(f, string(num_outputs, ",")) # number of outputs

hidden_layer_sizes = [size(layer.weights, 1) for layer in network.layers][1:end-1] # chop off the output layer
# Mimicking https://github.com/sisl/NNet/blob/master/utils/writeNNet.py,
# we write the input size in place of the maximum hidden layer size
write(f, string(num_inputs), ",\n") # max size of any hidden layer

# line 3
layer_sizes = [num_inputs, hidden_layer_sizes..., num_outputs]
write(f, string(join(layer_sizes, ','), ","))
write(f, "\n")

# line 4
write(f, "0,\n") # outdated flag, ignore

# line 5, 6
write(f, string(join(fill(-floatmax(Float16), num_inputs), ","), ",\n"))
write(f, string(join(fill(floatmax(Float16), num_inputs), ","), ",\n"))

# lines 7, 8 - a mean of 0 and a range of 1 mean no rescaling is applied;
# this assumes the inputs are already scaled to be mean 0, range 1
write(f, string(join(fill(0.0, num_inputs), ","), ",\n"))
write(f, string(join(fill(1.0, num_inputs), ","), ",\n"))

##################
# Write weights and biases of neural network
# First, the weights from the input layer to the first hidden layer are written
# Then, the biases of the first hidden layer are written
# The pattern is repeated by next writing the weights from the first hidden layer to the second hidden layer,
# followed by the biases of the second hidden layer.
##################
for layer in layers
# each output of a layer gets a line for its corresponding weights
# and corresponding bias

# Write the current weight
weights = layer.weights
for i = 1:size(weights, 1)
for j = 1:size(weights, 2)
write(f, @sprintf("%.10e,", weights[i, j])) #five digits written. More can be used, but that requires more space.
end
write(f, "\n")
end

# Write the current bias
bias = layer.bias
for i = 1:length(bias)
write(f, @sprintf("%.10e,", bias[i])) # ten digits written. More can be used, but that requires more space.
write(f, "\n")
end

end

close(f)
end
end
"""
compute_output(nnet::Network, input::Vector{Float64})
@@ -350,4 +450,4 @@ function split_interval(dom::Hyperrectangle, i::Int64)
input_upper[i] = dom.center[i] + dom.radius[i]
input_split_right = Hyperrectangle(low = input_lower, high = input_upper)
return (input_split_left, input_split_right)
end
end
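
For reference, here is a minimal usage sketch of the new write_nnet (not part of this commit; the toy 2 --> 2 --> 1 network and the output path are made up for illustration, and the expected file header is only sketched from the format comments above):

using NeuralVerification

# A toy 2 --> 2 --> 1 network (hypothetical weights and biases)
l1 = NeuralVerification.Layer([1.0 2.0; 3.0 4.0], [0.0; 1.0], NeuralVerification.ReLU())
l2 = NeuralVerification.Layer([1.0 -1.0], [0.5], NeuralVerification.Id())
net = NeuralVerification.Network([l1, l2])

path = string(tempname(), ".nnet")   # temporary output path
write_nnet(path, net)                # exported by this commit

# The written file should start roughly like:
#   // Neural Network File Format by Kyle Julian, Stanford 2016
#   2,2,1,2,   <- number of layers, inputs, outputs, "max" hidden size (the input size here)
#   2,2,1,     <- layer sizes: input, hidden, output
#   0,         <- outdated flag
# followed by the input bounds, the normalization lines, and the weights and biases.
print(read(path, String))

# Round-trip sanity check, mirroring the new test below
@assert read_nnet(path).layers[2].bias == net.layers[2].bias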
3 changes: 2 additions & 1 deletion test/runtests.jl
@@ -20,4 +20,5 @@ include("inactive_relus.jl")
if Base.find_package("Flux") != nothing
include("flux.jl")
end
include("complements.jl")
include("complements.jl")
include("write_nnet_test.jl")
30 changes: 30 additions & 0 deletions test/write_nnet_test.jl
@@ -0,0 +1,30 @@
# Test by creating a network, writing it out, then reading it back in and making sure that all weights, biases, and activations match
@testset "Read nnet test" begin

# 3 --> 3 --> 2 --> 5
l1 = NeuralVerification.Layer([3.0 2.0 1.0; 5.0 6.0 7.0; 8.0 9.0 10.0], [0.8; 1.0; 1.2], NeuralVerification.ReLU())
l2 = NeuralVerification.Layer([1.5 2.5 3.5; 4.5 6.5 7.5], [-1.0; -3.0], NeuralVerification.ReLU())
# The .nnet format doesn't store an activation for each layer; read_nnet assumes ReLU
# for hidden layers and Id for the output layer, so the layers here match that convention.
l3 = NeuralVerification.Layer([10.0 -1.0; -2.0 3.0; 4.0 5.0; 10.0 7.0; -3.5 -4.5], [0.0; -1.0; 0.0; 10.0; -10.0], NeuralVerification.Id())

# Write out the network
network_file = string(tempname(), ".nnet")
nnet = NeuralVerification.Network([l1, l2, l3])
NeuralVerification.write_nnet(network_file, nnet)

# Read back in the network
new_nnet = NeuralVerification.read_nnet(network_file)

# Test that all weights, biases, and activations are the same
@test new_nnet.layers[1].weights == l1.weights;
@test new_nnet.layers[1].bias == l1.bias;
@test new_nnet.layers[1].activation == l1.activation;

@test new_nnet.layers[2].weights == l2.weights;
@test new_nnet.layers[2].bias == l2.bias;
@test new_nnet.layers[2].activation == l2.activation;

@test new_nnet.layers[3].weights == l3.weights;
@test new_nnet.layers[3].bias == l3.bias;
@test new_nnet.layers[3].activation == l3.activation;
end
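
To run just this new test locally, one hypothetical approach (assuming the Test stdlib and the package are available, run from the repository root) is:

using Test
using NeuralVerification
include("test/write_nnet_test.jl")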
