Skip to content

Commit

Permalink
Merge 7e5fd21 into 1c45ace
Browse files Browse the repository at this point in the history
  • Loading branch information
JulienPascal committed Jan 24, 2019
2 parents 1c45ace + 7e5fd21 commit 838028a
Show file tree
Hide file tree
Showing 4 changed files with 129 additions and 28 deletions.
13 changes: 9 additions & 4 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,19 @@ os:
- osx
julia:
- 0.6
- nightly
- 0.7
- 1.0
notifications:
email: false
git:
depth: 99999999

## uncomment the following lines to allow failures on nightly julia
## (tests will run but not make your overall status red)
#matrix:
# allow_failures:
# - julia: nightly
matrix:
allow_failures:
- julia: 0.7
- julia: 1.0

## uncomment and modify the following lines to manually install system packages
#addons:
Expand All @@ -32,6 +34,9 @@ addons:
homebrew:
packages:
- hdf5
before_script:
- julia -e 'ENV["CONDA_JL_VERSION"]="2"; Pkg.add("Conda"); Pkg.build("Conda")'
- julia -e 'Pkg.add("PyCall"); ENV["PYTHON"] = ""; Pkg.build("PyCall")'
## uncomment the following lines to override the default test script
#script:
# - julia -e 'Pkg.clone(pwd()); Pkg.build("SModels"); Pkg.test("SModels"; coverage=true)'
Expand Down
66 changes: 44 additions & 22 deletions src/generic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -309,28 +309,6 @@ function calculate_mean_per_error(yTrue::Array{Float64,1}, yPredicted::Array{Flo

end

"""
    calculate_max_per_error(yTrue::Array{Float64,1}, yPredicted::Array{Float64,1})

Return the maximum (signed) percentage error of `yPredicted` relative to `yTrue`.

For each observation the error is `(yPredicted[i] - yTrue[i]) / yTrue[i]`.
When `yTrue[i] == 0` that denominator would be zero, so the mean of
`yPredicted` is used as the scale instead.
"""
function calculate_max_per_error(yTrue::Array{Float64,1}, yPredicted::Array{Float64,1})

    perError = zeros(length(yTrue))
    # fallback scale used when the true value is exactly zero
    meanPredicted = mean(yPredicted)

    for i in eachindex(yTrue)
        # Guard on yTrue (the denominator of the main branch), not yPredicted:
        # the original test `yPredicted[i] != 0` still allowed a division by
        # zero whenever yTrue[i] == 0 while yPredicted[i] != 0.
        if yTrue[i] != 0
            perError[i] = (yPredicted[i] - yTrue[i])/yTrue[i]
        else
            perError[i] = (yPredicted[i] - yTrue[i])/meanPredicted
        end
    end

    return maximum(perError)

end


"""
calculate_mean_per_error(yTrue::Array{Float64,2}, yPredicted::Array{Float64,2})
Expand Down Expand Up @@ -368,6 +346,17 @@ function calculate_mean_per_error(yTrue::Array{Array{Float64,1},1}, yPredicted::

end

"""
    calculate_mean_per_error(yTrue::Array{Array{Float64,1},1}, yPredicted::Array{Float64,1})

Convenience method: flatten `yTrue` with `convert_to_vector` and delegate to
the vector-vector method of `calculate_mean_per_error`.
"""
calculate_mean_per_error(yTrue::Array{Array{Float64,1},1}, yPredicted::Array{Float64,1}) =
    calculate_mean_per_error(convert_to_vector(yTrue), yPredicted)

"""
calculate_maximum_abs_per_error(yTrue::Array{Float64,1}, yPredicted::Array{Float64,1})
Expand Down Expand Up @@ -473,6 +462,39 @@ function calculate_median_abs_per_error(yTrue::Array{Array{Float64,1},1}, yPredi

end

"""
    calculate_median_abs_per_error(yTrue::Array{Float64,1}, yPredicted::Array{Float64,1})

Return the median of the absolute value of the percentage error of
`yPredicted` relative to `yTrue`.

For each observation the error is `abs((yPredicted[i] - yTrue[i]) / yTrue[i])`.
When `yTrue[i] == 0` that denominator would be zero, so the mean of
`yPredicted` is used as the scale instead.
"""
function calculate_median_abs_per_error(yTrue::Array{Float64,1}, yPredicted::Array{Float64,1})

    perError = zeros(length(yTrue))
    # fallback scale used when the true value is exactly zero
    meanPredicted = mean(yPredicted)

    for i in eachindex(yTrue)
        # Guard on yTrue (the denominator of the main branch), not yPredicted:
        # the original test `yPredicted[i] != 0` still allowed a division by
        # zero whenever yTrue[i] == 0 while yPredicted[i] != 0.
        if yTrue[i] != 0
            perError[i] = abs((yPredicted[i] - yTrue[i])/yTrue[i])
        else
            perError[i] = abs((yPredicted[i] - yTrue[i])/meanPredicted)
        end
    end

    return median(perError)

end

"""
    calculate_median_abs_per_error(yTrue::Array{Array{Float64,1},1}, yPredicted::Array{Float64,1})

Convenience method: flatten `yTrue` with `convert_to_vector` and delegate to
the vector-vector method of `calculate_median_abs_per_error`.
"""
calculate_median_abs_per_error(yTrue::Array{Array{Float64,1},1}, yPredicted::Array{Float64,1}) =
    calculate_median_abs_per_error(convert_to_vector(yTrue), yPredicted)

"""
date_now()
Expand Down
11 changes: 9 additions & 2 deletions src/train.jl
Original file line number Diff line number Diff line change
Expand Up @@ -278,11 +278,13 @@ function train_surrogate_model(sModelsProblem::SModelsProblem; verbose::Bool=fal
fit!(clfr, XTrainScaled, yTrain)
yPredicted = predict(clfr, XTrainScaled) # Train set
mean_per_regressorTrain = calculate_mean_per_error(yTest, yPredicted)
median_per_regressorTrain = calculate_median_abs_per_error(yTest, yPredicted)
max_abs_per_regressorTrain = calculate_maximum_abs_per_error(yTest, yPredicted)
# Test Set:
#-----------
yPredicted = predict(clfr, XTestScaled)
mean_per_regressor = calculate_mean_per_error(yTest, yPredicted)
median_per_regressor = calculate_median_abs_per_error(yTest, yPredicted)
max_abs_per_regressor = calculate_maximum_abs_per_error(yTest, yPredicted)

# If robust = true, use only the data for which convergence was reached
Expand All @@ -293,22 +295,27 @@ function train_surrogate_model(sModelsProblem::SModelsProblem; verbose::Bool=fal
fit!(clfr, XTrainScaledRobust, yTrainRobust)
yPredicted = predict(clfr, XTestScaledRobust) # Train set
mean_per_regressorTrain = calculate_mean_per_error(yTestRobust, yPredicted)
median_per_regressorTrain = calculate_median_abs_per_error(yTestRobust, yPredicted)
max_abs_per_regressorTrain = calculate_maximum_abs_per_error(yTestRobust, yPredicted)
# Test Set:
#-----------
yPredicted = predict(clfr, XTestScaledRobust)
mean_per_regressor = calculate_mean_per_error(yTestRobust, yPredicted)
median_per_regressor = calculate_median_abs_per_error(yTestRobust, yPredicted)
max_abs_per_regressor = calculate_maximum_abs_per_error(yTestRobust, yPredicted)
end

if verbose == true
info("Regressor:")
info("Train set:")
info("Mean Percentage Error Train Set = $(mean_per_regressorTrain)")
info("Median Percentage Error Train Set = $(median_per_regressorTrain)")
info("Maximum Abs Percentage Error Train Set = $(max_abs_per_regressorTrain)")
info("Test set:")
info("Mean Percentage Error Test Set = $(mean_per_regressor)")
info("Median Percentage Error Train Set = $(median_per_regressor)")
info("Maximum Abs Percentage Error Test Set = $(max_abs_per_regressor)")
info("Train sample size = $(size_trainSample)")
end

# If requested, save the model
Expand Down Expand Up @@ -346,7 +353,7 @@ function train_surrogate_model(sModelsProblem::SModelsProblem; verbose::Bool=fal
if verbose == true
info("Current max percentage error regressor = $(max_abs_per_regressor)")
info("Desired max percentage error regressor = $(sModelsProblem.options.desiredMaxPerErrorRegressor)")
info("Size of the train sample = $(size(XTrain,1))")
info("Size of the train sample = $(size_trainSample)")
end

# Test on the classifier
Expand Down Expand Up @@ -405,7 +412,7 @@ function evaluateModel!(sModelsProblem::SModelsProblem, XDistributed::Array, YDi
for i=1:size(XDistributed, 1)

# set penalty value by default:
YDistributed[i,:] = ones(sModelsProblem.dimX)*sModelsProblem.options.penaltyValue
YDistributed[i,:] = ones(sModelsProblem.dimY)*sModelsProblem.options.penaltyValue
YConvergenceDistributed[i] = sModelsProblem.options.nonConvergenceFlag

try
Expand Down
67 changes: 67 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -314,6 +314,73 @@ end
end


# Integration test: train the surrogate on a smooth 2d -> 1d function and check
# the regressor's error metrics and the classifier's convergence predictions.
# NOTE(review): the original header said "cross-validation to choose the
# hidden-layer done manually" — that looks copy-pasted from another testset.
#-----------------------------------------------------------
@testset "Testing package on 2d->1d function" begin

# scalar test function of two inputs: f(z) = cos(z1) * exp(z2)
function function_1d(z::Array{Float64,1})
return cos(z[1])*exp(z[2])
end

# create the output vector by evaluating function_1d at each input point
function create_y_1d(X::Array{Array{Float64,1},1})

y = zeros(size(X,1))

# Looping over X
for xIndex = 1:size(X,1)
y[xIndex] = function_1d([X[xIndex][1], X[xIndex][2]])
end

return y
end

# tolerances for the accuracy checks at the end of the testset
aTolMeanPerError = 0.05
aTolMedianPerError = 0.05
aTolMaxPerError = 0.5

# parameter-space box for the surrogate problem
upperBoundX = [1.0; 1.0]
lowerBoundX = [-1.0; -1.0]


opts = SModelsOptions(sModelType = :MLPRegressor, classifierType = :MLPClassifier, batchSizeWorker = 10, desiredMinObs = 80)

surrogatePb = SModelsProblem(lowerBound = lowerBoundX, #lower bound for the parameter space
upperBound = upperBoundX, #upper bound for the parameter space
dimX = 2, #dimension of the input parameter
dimY = 1, #dimension of the output vector
options = opts)


set_model_function!(surrogatePb, function_1d)

# training the surrogate model
surrogatem, classifier = train_surrogate_model(surrogatePb, verbose = true, saveToDisk = false, robust = true)

# input sample (createX is presumably defined earlier in this test file)
X = createX()
# output (true target values)
y = create_y_1d(X)

# non-robust: scale the inputs and predict with the trained regressor
XScaled = transform(surrogatePb.scaler, X)
yPredicted = predict(surrogatem, XScaled)
# classifier: round probabilities to 0/1 convergence flags
yConvergencePredicted = round.(Int64, predict(classifier, XScaled))

println("Regressor:")
println("Mean Percentage Different Set = $(calculate_mean_per_error(y, yPredicted))")
println("Median Abs Percentage Different Set = $(calculate_median_abs_per_error(y, yPredicted))")
println("Maximum Abs Percentage Error Different Set = $(calculate_maximum_abs_per_error(y, yPredicted))")

@test calculate_mean_per_error(y, yPredicted) < aTolMeanPerError
@test calculate_median_abs_per_error(y, yPredicted) < aTolMedianPerError
@test calculate_maximum_abs_per_error(y, yPredicted) < aTolMaxPerError
# The model never fails, so the classifier should predict convergence (1) everywhere:
@test sum(yConvergencePredicted) == length(yConvergencePredicted)

end

#cross-validation to choose the hidden-layer done "manually"
#-----------------------------------------------------------
@testset "Testing package on 2d->2d function" begin
Expand Down

0 comments on commit 838028a

Please sign in to comment.