example: fix batchsize config in regression-example (#342)
iblislin authored and pluskid committed Nov 27, 2017
1 parent 45d6279 commit e0f625a
Showing 1 changed file with 34 additions and 10 deletions: examples/regression-example.jl
@@ -13,18 +13,31 @@ generate_inputs(mean, var, size) = rand(MvNormal(mean, var), size)
output(data) = sin.(data[1:1,:]).*sin.(data[2:2,:])./(data[1:1,:].*data[2:2,:])

# create training and evaluation data sets
-mean=[0.0;0.0]
-var=[1.0 0.0;0.0 1.0]
+mean=[0.0; 0.0]
+var=[1.0 0.0; 0.0 1.0]
samplesize = 5000
TrainInput = generate_inputs(mean, var, samplesize)
TrainOutput = output(TrainInput)
ValidationInput = generate_inputs(mean, var, samplesize)
ValidationOutput = output(ValidationInput)

# how to set up data providers using data in memory
-batchsize = 100 # can adjust this later, but must be defined now for next line
-trainprovider = mx.ArrayDataProvider(:data => TrainInput, batch_size=batchsize, shuffle=true, :label => TrainOutput)
-evalprovider = mx.ArrayDataProvider(:data => ValidationInput, batch_size=batchsize, shuffle=true, :label => ValidationOutput)
+function data_source(batchsize = 100)
+    train = mx.ArrayDataProvider(
+        :data => TrainInput,
+        :label => TrainOutput,
+        batch_size = batchsize,
+        shuffle = true,
+    )
+    valid = mx.ArrayDataProvider(
+        :data => ValidationInput,
+        :label => ValidationOutput,
+        batch_size = batchsize,
+        shuffle = true,
+    )
+
+    train, valid
+end

# create a two-hidden-layer MLP: try varying num_hidden, and change tanh to relu,
# or add/remove a layer
@@ -35,7 +48,7 @@ net = @mx.chain mx.Variable(:data) =>
      mx.Activation(act_type=:tanh) =>
      mx.FullyConnected(num_hidden=3) =>
      mx.Activation(act_type=:tanh) =>
-     mx.FullyConnected(num_hidden=1) =>
+     mx.FullyConnected(num_hidden=1) =>
      mx.LinearRegressionOutput(mx.Variable(:label))

# final model definition, don't change, except if using gpu
@@ -47,11 +60,22 @@ optimizer = mx.ADAM()

# train, reporting loss for training and evaluation sets
# initial training with small batch size, to get to a good neighborhood
-batchsize = 200
-mx.fit(model, optimizer, initializer=mx.NormalInitializer(0.0,0.1), eval_metric=mx.MSE(), trainprovider, eval_data=evalprovider, n_epoch = 20)
+trainprovider, evalprovider = data_source(#= batchsize =# 200)
+mx.fit(model, optimizer, trainprovider,
+       initializer = mx.NormalInitializer(0.0, 0.1),
+       eval_metric = mx.MSE{mx.NDArray{Float32,1}}(),
+       eval_data = evalprovider,
+       n_epoch = 20,
+       callbacks = [mx.speedometer()])
# more training with the full sample
-batchsize = samplesize
-mx.fit(model, optimizer, eval_metric=mx.MSE(), trainprovider, eval_data=evalprovider, n_epoch = 20)
+trainprovider, evalprovider = data_source(#= batchsize =# samplesize)
+mx.fit(model, optimizer, trainprovider,
+       initializer = mx.NormalInitializer(0.0, 0.1),
+       eval_metric = mx.MSE{mx.NDArray{Float32,1}}(),
+       eval_data = evalprovider,
+       n_epoch = 500,  # the previous fit used batchsize = 200 for 20 epochs,
+                       # i.e. (5000 / 200) * 20 = 500 parameter updates
+       callbacks = [mx.speedometer()])

# obtain predictions
plotprovider = mx.ArrayDataProvider(:data => ValidationInput, :label => ValidationOutput)
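Why the refactor: in the old version, `batchsize` was read once, at the moment each mx.ArrayDataProvider was constructed, so reassigning batchsize = 200 before the second fit had no effect on providers already built with batchsize = 100. Below is a minimal sketch of the failure mode and the fix, assuming the definitions from the diff above (TrainInput, TrainOutput, data_source); the get_batch_size accessor is used for illustration and should be treated as an assumption about MXNet.jl's data-provider API:

# Old pattern: the provider captures the batch size at construction time ...
batchsize = 100
trainprovider = mx.ArrayDataProvider(:data => TrainInput, :label => TrainOutput,
                                     batch_size = batchsize, shuffle = true)
batchsize = 200  # ... so this reassignment changes nothing:
mx.get_batch_size(trainprovider)  # still 100 (accessor name is an assumption)

# New pattern: request fresh providers at the batch size you actually want.
trainprovider, evalprovider = data_source(200)  # these really use 200

The same reasoning drives the n_epoch change: with batchsize = samplesize there is exactly one parameter update per epoch, so n_epoch = 500 keeps the update count of the second fit comparable to the 500 updates of the first.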
