Commit

Merge pull request #71 from ArtLinkov/master
Reworked vanilla network layer logic, can now work without hidden layers (added linear regression example to specs)
ArtLinkov committed May 21, 2018
2 parents b914ea0 + ccbf3b4 commit c5454c2
Showing 6 changed files with 130 additions and 63 deletions.
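For context, the headline change is that a network no longer needs a hidden layer: an input layer can be wired directly to an output layer and trained as a plain linear regression, which is exactly what the new spec below does. A minimal sketch distilled from that spec (the require path and the sample height/weight values are illustrative assumptions, not taken from the repository):

    require "shainet" # assumed require path for the library

    # Toy height (m) -> weight (kg) pairs; values are illustrative only
    inputs  = [[1.47], [1.62], [1.83]]
    outputs = [[52.2], [61.5], [74.5]]
    training = SHAInet::TrainingData.new(inputs, outputs)

    model = SHAInet::Network.new
    model.add_layer(:input, 1, :memory, SHAInet.none)
    model.add_layer(:output, 1, :memory, SHAInet.none) # no hidden layer in between
    model.fully_connect # with this commit, wires the input layer straight to the output layer

    model.learning_rate = 0.01
    model.train(training.raw_data, :sgdm, :mse, 5000, 0.0, 100) # SGD with momentum, MSE cost, 5000 epochs
    puts model.run([1.47]).first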
Empty file removed a.out
Empty file.
1 change: 1 addition & 0 deletions spec/linear_data/.~lock.data.csv#
@@ -0,0 +1 @@
,art,art-ThinkPad-P51s-W10DG,21.05.2018 13:30,file:///home/art/.config/libreoffice/4;
File renamed without changes.
129 changes: 86 additions & 43 deletions spec/network_spec.cr
@@ -5,6 +5,49 @@ require "csv"
system("cd #{__DIR__}/test_data && tar xvf tests.tar.xz")

describe SHAInet::Network do
it "Test on a linear regression model" do
# data structures to hold the input and results
inputs = Array(Array(Float64)).new
outputs = Array(Array(Float64)).new

# read the file
raw = File.read("./spec/linear_data/data.csv")
csv = CSV.new(raw, headers: true)

# load the data structures
while (csv.next)
inputs << [csv.row["Height"].to_f64]
outputs << [csv.row["Weight"].to_f64]
end

# normalize the data
training = SHAInet::TrainingData.new(inputs, outputs)

# create a network
model = SHAInet::Network.new
model.add_layer(:input, 1, :memory, SHAInet.none)
# model.add_layer(:hidden, 1, :memory, SHAInet.none)
model.add_layer(:output, 1, :memory, SHAInet.none)
model.fully_connect

# Update learning rate (default is 0.005)
model.learning_rate = 0.01

# train the network using Stochastic Gradient Descent with momentum
model.train(training.raw_data, :sgdm, :mse, 5000, 0.0, 100)

# model.show

# Test model
output = model.run([1.47]).first
error = ((output - 51.008)/51.008).abs
(error < 0.05).should eq(true) # require less than 5% error

output = model.run([1.83]).first
error = ((output - 73.066)/73.066).abs
(error < 0.05).should eq(true) # require less than 5% error
end

it "Initialize" do
nn = SHAInet::Network.new
nn.should be_a(SHAInet::Network)
@@ -220,51 +263,51 @@ describe SHAInet::Network do
((result.first < 0.3) && (result[1] < 0.3) && (result.last > 0.9)).should eq(true)
end

it "trains , saves, loads, runs" do
puts "---"
puts "train, save, loads and run works (Adam, mini-batch_train, mse, sigmoid)"
label = {
"setosa" => [0.to_f64, 0.to_f64, 1.to_f64],
"versicolor" => [0.to_f64, 1.to_f64, 0.to_f64],
"virginica" => [1.to_f64, 0.to_f64, 0.to_f64],
}
iris = SHAInet::Network.new
iris.add_layer(:input, 4, :memory, SHAInet.sigmoid)
iris.add_layer(:hidden, 4, :memory, SHAInet.sigmoid)
iris.add_layer(:output, 3, :memory, SHAInet.sigmoid)
iris.fully_connect

iris.learning_rate = 0.7
iris.momentum = 0.3
# it "trains , saves, loads, runs" do
# puts "---"
# puts "train, save, loads and run works (Adam, mini-batch_train, mse, sigmoid)"
# label = {
# "setosa" => [0.to_f64, 0.to_f64, 1.to_f64],
# "versicolor" => [0.to_f64, 1.to_f64, 0.to_f64],
# "virginica" => [1.to_f64, 0.to_f64, 0.to_f64],
# }
# iris = SHAInet::Network.new
# iris.add_layer(:input, 4, :memory, SHAInet.sigmoid)
# iris.add_layer(:hidden, 4, :memory, SHAInet.sigmoid)
# iris.add_layer(:output, 3, :memory, SHAInet.sigmoid)
# iris.fully_connect

# iris.learning_rate = 0.7
# iris.momentum = 0.3

outputs = Array(Array(Float64)).new
inputs = Array(Array(Float64)).new
CSV.each_row(File.read(__DIR__ + "/test_data/iris.csv")) do |row|
row_arr = Array(Float64).new
row[0..-2].each do |num|
row_arr << num.to_f64
end
inputs << row_arr
outputs << label[row[-1]]
end
normalized = SHAInet::TrainingData.new(inputs, outputs)
normalized.normalize_min_max

iris.train_batch(
data: normalized.data.shuffle,
training_type: :adam,
cost_function: :mse,
epochs: 5000,
error_threshold: 0.000001,
mini_batch_size: 50,
log_each: 1000)
# outputs = Array(Array(Float64)).new
# inputs = Array(Array(Float64)).new
# CSV.each_row(File.read(__DIR__ + "/test_data/iris.csv")) do |row|
# row_arr = Array(Float64).new
# row[0..-2].each do |num|
# row_arr << num.to_f64
# end
# inputs << row_arr
# outputs << label[row[-1]]
# end
# normalized = SHAInet::TrainingData.new(inputs, outputs)
# normalized.normalize_min_max

iris.save_to_file("./my_net.nn")
nn = SHAInet::Network.new
nn.load_from_file("./my_net.nn")
result = nn.run(normalized.normalized_inputs.first)
((result.first < 0.3) && (result[1] < 0.3) && (result.last > 0.9)).should eq(true)
end
# iris.train_batch(
# data: normalized.data.shuffle,
# training_type: :adam,
# cost_function: :mse,
# epochs: 5000,
# error_threshold: 0.000001,
# mini_batch_size: 50,
# log_each: 1000)

# iris.save_to_file("./my_net.nn")
# nn = SHAInet::Network.new
# nn.load_from_file("./my_net.nn")
# result = nn.run(normalized.normalized_inputs.first)
# ((result.first < 0.3) && (result[1] < 0.3) && (result.last > 0.9)).should eq(true)
# end

# it "works on the mnist dataset using adam and batch" do
# mnist = SHAInet::Network.new
56 changes: 40 additions & 16 deletions src/shainet/basic/network.cr
@@ -109,32 +109,41 @@ module SHAInet
def verify_net_before_train
if @input_layers.empty?
raise NeuralNetRunError.new("No input layers defined")
elsif @hidden_layers.empty?
raise NeuralNetRunError.new("Need atleast one hidden layer")
# elsif @hidden_layers.empty?
# raise NeuralNetRunError.new("Need atleast one hidden layer")
elsif @output_layers.empty?
raise NeuralNetRunError.new("No output layers defined")
end
end

# Connect all the layers in order (input and output don't connect between themselves): input, hidden, output
def fully_connect
# Connect all input layers to the first hidden layer
@input_layers.each do |source|
connect_ltl(source, @hidden_layers.first, :full)
end
if @hidden_layers.empty?
# Connect all input layers to all output layers
@output_layers.each do |out_layer|
@input_layers.each do |in_layer|
connect_ltl(in_layer, out_layer, :full)
end
end
else
# Connect all input layers to the first hidden layer
@input_layers.each do |source|
connect_ltl(source, @hidden_layers.first, :full)
end

# Connect all hidden layers to each other hierarchically
@hidden_layers.size.times do |index|
next if index + 2 > @hidden_layers.size
connect_ltl(@hidden_layers[index], @hidden_layers[index + 1], :full)
end
# Connect all hidden layers to each other hierarchically
@hidden_layers.size.times do |index|
next if index + 2 > @hidden_layers.size
connect_ltl(@hidden_layers[index], @hidden_layers[index + 1], :full)
end

# Connect last hidden layer to all output layers
@output_layers.each do |layer|
connect_ltl(@hidden_layers.last, layer, :full)
# Connect last hidden layer to all output layers
@output_layers.each do |layer|
connect_ltl(@hidden_layers.last, layer, :full)
end
end
rescue e : Exception
raise NeuralNetRunError.new("Error fully connecting network: #{e}")
# rescue e : Exception
# raise NeuralNetRunError.new("Error fully connecting network: #{e}")
end

# Connect two specific layers with synapses
@@ -221,6 +230,10 @@ module SHAInet

actual_output = run(input_data, stealth = true)

# Stop scan if we have NaNs in the output
actual_output.each { |ar| raise NeuralNetRunError.new(
"Found a NaN value, run stopped.\noutput:#{actual_output}") if ar.nan? }

# Get the error signal for the final layer, based on the cost function (error gradient is stored in the output neurons)
@error_signal = [] of Float64 # Collect all the errors for current run

@@ -308,6 +321,11 @@ module SHAInet
l.neurons.each { |neuron| neuron.hidden_error_prop } # Update neuron error based on errors*weights of neurons from the next layer
end

# Propagate the errors backwards through the input layers
@input_layers.reverse_each do |l|
l.neurons.each { |neuron| neuron.hidden_error_prop } # Update neuron error based on errors*weights of neurons from the next layer
end

# Calculate MSE
if @error_signal.size == 1
error_avg = 0.0
@@ -376,11 +394,17 @@ module SHAInet
data_slice.each do |data_point|
evaluate(data_point[0], data_point[1], cost_function) # Get error gradient from output layer based on current input
all_errors << @total_error

# Propagate the errors backwards through the hidden layers
@hidden_layers.reverse_each do |l|
l.neurons.each { |neuron| neuron.hidden_error_prop } # Update neuron error based on errors*weights of neurons from the next layer
end

# Propagate the errors backwards through the input layers
@input_layers.reverse_each do |l|
l.neurons.each { |neuron| neuron.hidden_error_prop } # Update neuron error based on errors*weights of neurons from the next layer
end

# Sum all gradients from each data point for the batch update
@all_synapses.each_with_index { |synapse, i| @w_gradient[i] += (synapse.source_neuron.activation)*(synapse.dest_neuron.gradient) }
@all_neurons.each_with_index { |neuron, i| @b_gradient[i] += neuron.gradient }
7 changes: 3 additions & 4 deletions src/shainet/basic/neuron.cr
@@ -53,12 +53,11 @@ module SHAInet
# Allows the neuron to absorb the error from its own target neurons through the synapses
# Then, it sums the information and a derivative of the activation function is applied to normalize the data
def hidden_error_prop : Float64
new_errors = [] of Float64
weighted_error_sum = Float64.new(0)
@synapses_out.each do |synapse| # Sum the weighted error from each target neuron (each call returns a Float64)
new_errors << synapse.propagate_backward
weighted_error_sum += synapse.propagate_backward
end
weighted_error_sum = new_errors.reduce { |acc, i| acc + i } # Sum weighted error from target neurons (instead of using w_matrix*delta), returns Float64
@gradient = weighted_error_sum*@sigma_prime # New error of the neuron
@gradient = weighted_error_sum*@sigma_prime # New error of the neuron
end

def clone
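A note on the neuron.cr refactor above: hidden_error_prop still computes the standard backpropagated error, only without the intermediate array. Restated in generic notation (not library API), each neuron's gradient is the activation derivative times the weighted sum of its downstream gradients:

    gradient_j = sigma_prime_j * Σ_k (weight_jk * gradient_k)

with the sum taken over the neuron's outgoing synapses; accumulating the sum directly instead of building new_errors and reducing it gives the same result.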
