From b0e51b6422a882bbcb6771a67791e843c992ca15 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 31 Mar 2017 19:26:15 +0200 Subject: [PATCH 01/57] Initial bits for nlopt integration. --- CMakeLists.txt | 20 ++++ cmake_modules/FindNLOPT.cmake | 43 +++++++++ include/pagmo/algorithms/bee_colony.hpp | 28 ++++++ include/pagmo/detail/nlopt_utils.hpp | 122 ++++++++++++++++++++++++ tests/CMakeLists.txt | 4 + tests/nlopt_utils.cpp | 38 ++++++++ 6 files changed, 255 insertions(+) create mode 100644 cmake_modules/FindNLOPT.cmake create mode 100644 include/pagmo/detail/nlopt_utils.hpp create mode 100644 tests/nlopt_utils.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 98e011686..d45499db6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,9 @@ option(PAGMO_BUILD_PYGMO "Build PyGMO." OFF) # Build option: enable features depending on Eigen3. option(PAGMO_WITH_EIGEN3 "Enable features depending on Eigen3 (such as CMAES). Requires Eigen3." OFF) +# Build option: enable NLopt. +option(PAGMO_WITH_NLOPT "Enable wrappers for the NLopt algorithms." OFF) + # Build option: install header. option(PAGMO_INSTALL_HEADERS "Enable the installation of PaGMO's header files." ON) @@ -66,6 +69,13 @@ if(PAGMO_WITH_EIGEN3) message(STATUS "Eigen version detected: ${EIGEN3_VERSION}") endif() +# NLopt setup +if(PAGMO_WITH_NLOPT) + find_package(NLOPT REQUIRED) + message(STATUS "NLopt include directory: ${NLOPT_INCLUDE_DIRS}") + message(STATUS "NLopt libraries: ${NLOPT_LIBRARIES}") +endif() + # Python setup. # NOTE: we do it here because we need to detect the Python bits *before* # looking for boost.python. @@ -91,7 +101,9 @@ target_link_libraries(pagmo INTERFACE Threads::Threads Boost::boost) target_include_directories(pagmo INTERFACE $ $) + if(PAGMO_WITH_EIGEN3) + # Link pagmo to eigen3. add_library(Eigen3::eigen3 INTERFACE IMPORTED) set_target_properties(Eigen3::eigen3 PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${EIGEN3_INCLUDE_DIR}") target_link_libraries(pagmo INTERFACE Eigen3::eigen3) @@ -99,6 +111,14 @@ if(PAGMO_WITH_EIGEN3) target_compile_definitions(pagmo INTERFACE PAGMO_WITH_EIGEN3) endif() +if(PAGMO_WITH_NLOPT) + # Link pagmo to NLopt. + add_library(NLOPT::nlopt UNKNOWN IMPORTED) + set_target_properties(NLOPT::nlopt PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${NLOPT_INCLUDE_DIRS}") + set_target_properties(NLOPT::nlopt PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C" IMPORTED_LOCATION "${NLOPT_LIBRARIES}") + target_link_libraries(pagmo INTERFACE NLOPT::nlopt) +endif() + if(PAGMO_BUILD_TESTS) add_subdirectory("${CMAKE_SOURCE_DIR}/tests") endif() diff --git a/cmake_modules/FindNLOPT.cmake b/cmake_modules/FindNLOPT.cmake new file mode 100644 index 000000000..f1d8f422e --- /dev/null +++ b/cmake_modules/FindNLOPT.cmake @@ -0,0 +1,43 @@ +# Copyright (c) 2015-2016, Humanoid Lab, Georgia Tech Research Corporation +# Copyright (c) 2015-2017, Graphics Lab, Georgia Tech Research Corporation +# Copyright (c) 2016-2017, Personal Robotics Lab, Carnegie Mellon University +# This file is provided under the "BSD-style" License + +# Find NLOPT +# +# This sets the following variables: +# NLOPT_FOUND +# NLOPT_INCLUDE_DIRS +# NLOPT_LIBRARIES +# NLOPT_DEFINITIONS +# NLOPT_VERSION + +find_package(PkgConfig QUIET) + +# Check to see if pkgconfig is installed. 
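+# If it is, query it for an installed 'nlopt' package: on success, this populates
+# the PC_NLOPT_* variables (PC_NLOPT_INCLUDEDIR, PC_NLOPT_LIBDIR, PC_NLOPT_VERSION,
+# PC_NLOPT_CFLAGS_OTHER) that are used as hints in the lookups below. Both checks
+# are QUIET, so nothing is reported if pkg-config or nlopt is not found.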
+pkg_check_modules(PC_NLOPT nlopt QUIET) + +# Definitions +set(NLOPT_DEFINITIONS ${PC_NLOPT_CFLAGS_OTHER}) + +# Include directories +find_path(NLOPT_INCLUDE_DIRS + NAMES nlopt.h + HINTS ${PC_NLOPT_INCLUDEDIR} + PATHS "${CMAKE_INSTALL_PREFIX}/include") + +# Libraries +find_library(NLOPT_LIBRARIES + NAMES nlopt nlopt_cxx + HINTS ${PC_NLOPT_LIBDIR}) + +# Version +set(NLOPT_VERSION ${PC_NLOPT_VERSION}) + +# Set (NAME)_FOUND if all the variables and the version are satisfied. +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NLOPT + FAIL_MESSAGE DEFAULT_MSG + REQUIRED_VARS NLOPT_INCLUDE_DIRS NLOPT_LIBRARIES + VERSION_VAR NLOPT_VERSION) + diff --git a/include/pagmo/algorithms/bee_colony.hpp b/include/pagmo/algorithms/bee_colony.hpp index 2146e2b32..a003aa945 100644 --- a/include/pagmo/algorithms/bee_colony.hpp +++ b/include/pagmo/algorithms/bee_colony.hpp @@ -1,3 +1,31 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + #ifndef PAGMO_ALGORITHMS_BEE_COLONY_HPP #define PAGMO_ALGORITHMS_BEE_COLONY_HPP diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp new file mode 100644 index 000000000..0c210e606 --- /dev/null +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -0,0 +1,122 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. 
*/ + +#ifndef PAGMO_DETAIL_NLOPT_UTILS_HPP +#define PAGMO_DETAIL_NLOPT_UTILS_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace pagmo +{ + +namespace detail +{ + +struct nlopt_obj { + explicit nlopt_obj(::nlopt_algorithm algo, problem &prob) + : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy) + { + // Extract and set problem dimension. + const auto n = boost::numeric_cast(prob.get_nx()); + m_value.reset(::nlopt_create(algo, n)); + // Init the nlopt_obj. + if (!m_value) { + pagmo_throw(std::invalid_argument, "the creation of an nlopt_opt object failed"); + } + if (prob.get_nobj() != 1u) { + // TODO + pagmo_throw(std::invalid_argument, "" + get_name()); + } + if (prob.get_nc()) { + // TODO + pagmo_throw(std::invalid_argument, "" + get_name()); + } + m_dv.resize(prob.get_nx()); + // Set the objfun + gradient. + auto res = ::nlopt_set_min_objective(m_value.get(), + [](unsigned dim, const double *x, double *grad, void *f_data) { + auto &nlo = *static_cast(f_data); + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + assert(dim == p.get_nx()); + if (grad && !p.has_gradient()) { + // TODO + pagmo_throw(std::invalid_argument, "" + nlo.get_name()); + } + assert(dv.size() == dim); + std::copy(x, x + dim, dv.begin()); + const auto fitness = p.fitness(dv); + const auto gradient = p.gradient(dv); + auto g_it = gradient.begin(); + const auto g_end = gradient.end(); + auto i = 0u; + for (auto sp_it = sp.begin(); i < dim && g_it != g_end; + ++i, ++g_it, ++sp_it) { + assert(sp_it->first == 0u); + } + return fitness[0]; + }, + static_cast(this)); + } + nlopt_obj(const nlopt_obj &other) + : m_prob(other.m_prob), m_sp(other.m_sp), m_value(::nlopt_copy(other.m_value.get()), ::nlopt_destroy), + m_dv(other.m_dv) + { + if (!m_value) { + pagmo_throw(std::invalid_argument, "the copy of an nlopt_opt object failed"); + } + } + nlopt_obj(nlopt_obj &&) = default; + nlopt_obj &operator=(const nlopt_obj &) = delete; + nlopt_obj &operator=(nlopt_obj &&) = delete; + std::string get_name() const + { + return ::nlopt_algorithm_name(::nlopt_get_algorithm(m_value.get())); + } + problem &m_prob; + sparsity_pattern m_sp; + std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; + vector_double m_dv; +}; +} +} + +#endif diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 49ef41d27..7b3bba025 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -62,6 +62,10 @@ if(PAGMO_WITH_EIGEN3) ADD_PAGMO_TESTCASE(eigen3_serialization) endif() +if(PAGMO_WITH_NLOPT) + ADD_PAGMO_TESTCASE(nlopt_utils) +endif() + # Here are problematic tests for MSVC. if(NOT YACMA_COMPILER_IS_MSVC) # This test compiles but MSVC seems to have troubles in diff --git a/tests/nlopt_utils.cpp b/tests/nlopt_utils.cpp new file mode 100644 index 000000000..2350a8d1f --- /dev/null +++ b/tests/nlopt_utils.cpp @@ -0,0 +1,38 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. 
+ +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#define BOOST_TEST_MODULE nlopt_utils_test +#include + +#include + +using namespace pagmo; + +BOOST_AUTO_TEST_CASE(nlopt_basic) +{ +} From 0fb2b4a80707c6c49f415e56b1df4662d0c500a1 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 1 Apr 2017 23:21:58 +0200 Subject: [PATCH 02/57] Remove safe_cast in favor of boost::numeric_cast, since now we depend on boost. --- doc/sphinx/docs/cpp/miscellanea/generic.rst | 4 --- include/pagmo/algorithms/moead.hpp | 2 +- include/pagmo/problem.hpp | 3 ++- include/pagmo/utils/generic.hpp | 27 --------------------- include/pagmo/utils/multi_objective.hpp | 5 ++-- tests/generic.cpp | 10 -------- 6 files changed, 6 insertions(+), 45 deletions(-) diff --git a/doc/sphinx/docs/cpp/miscellanea/generic.rst b/doc/sphinx/docs/cpp/miscellanea/generic.rst index 5b3c50876..12dfddb54 100644 --- a/doc/sphinx/docs/cpp/miscellanea/generic.rst +++ b/doc/sphinx/docs/cpp/miscellanea/generic.rst @@ -19,10 +19,6 @@ A number of utilities to compute quantities that are of general relevance. -------------------------------------------------------------------------- -.. doxygenfunction:: pagmo::safe_cast - --------------------------------------------------------------------------- - .. doxygenfunction:: pagmo::binomial_coefficient -------------------------------------------------------------------------- diff --git a/include/pagmo/algorithms/moead.hpp b/include/pagmo/algorithms/moead.hpp index 733757b71..a634e7b81 100644 --- a/include/pagmo/algorithms/moead.hpp +++ b/include/pagmo/algorithms/moead.hpp @@ -43,7 +43,7 @@ see https://www.gnu.org/licenses/. */ #include "../problem.hpp" #include "../problems/decompose.hpp" #include "../rng.hpp" -#include "../utils/generic.hpp" // safe_cast, kNN +#include "../utils/generic.hpp" // kNN #include "../utils/multi_objective.hpp" // ideal namespace pagmo diff --git a/include/pagmo/problem.hpp b/include/pagmo/problem.hpp index c2f3752dc..79de8200d 100644 --- a/include/pagmo/problem.hpp +++ b/include/pagmo/problem.hpp @@ -31,6 +31,7 @@ see https://www.gnu.org/licenses/. 
*/ #include #include +#include #include #include #include @@ -1118,7 +1119,7 @@ class problem } // We resize rather than push back here, so that an std::length_error is called quickly rather // than an std::bad_alloc after waiting the growth - m_hs_dim.resize(nf); + m_hs_dim.resize(boost::numeric_cast(nf)); for (vector_double::size_type i = 0u; i < nf; ++i) { m_hs_dim[i] = (nx * (nx - 1u) / 2u + nx); // lower triangular } diff --git a/include/pagmo/utils/generic.hpp b/include/pagmo/utils/generic.hpp index 2524c531c..7a9ea4d58 100644 --- a/include/pagmo/utils/generic.hpp +++ b/include/pagmo/utils/generic.hpp @@ -178,33 +178,6 @@ inline vector_double random_decision_vector(const vector_double &lb, const vecto return random_decision_vector({lb, ub}, r_engine); } -/// Safely cast between unsigned types -/** - * Performs a cast between unsigned types throwing if the input cannot be represented in the new type - * - * Example: - * @code{.unparsed} - * unsigned short s = std::numeric_limits::max(); - * unsigned long l = std::numeric_limits::max(); - * auto res1 = safe_cast(s); // Will always work - * auto res2 = safe_cast(l); // Will throw an std::overflow_error if precision is lost - * @endcode - * - * @param x an unsigned value \p x to be casted to \p T - * @return the input \p x safey casted to \p T - * @throws std::overflow_error if \p x cannot be represented by the new type - */ -template -inline T safe_cast(const U &x) -{ - static_assert(std::is_unsigned::value && std::is_unsigned::value, - "Safe cast can only be used on unsigned types"); - if (x > std::numeric_limits::max()) { - pagmo_throw(std::overflow_error, "Converting between unsigned types caused a loss"); - } - return static_cast(x); -} - /// Binomial coefficient /** * An implementation of the binomial coefficient using gamma functions diff --git a/include/pagmo/utils/multi_objective.hpp b/include/pagmo/utils/multi_objective.hpp index 99327956c..e35f8aed0 100644 --- a/include/pagmo/utils/multi_objective.hpp +++ b/include/pagmo/utils/multi_objective.hpp @@ -37,6 +37,7 @@ see https://www.gnu.org/licenses/. 
*/ */ #include +#include #include #include #include @@ -589,7 +590,7 @@ inline vector_double nadir(const std::vector &points) * @returns an std:vector containing the weight vectors * * @throws if the population size is not compatible with the selected weight generation method -**/ + */ inline std::vector decomposition_weights(vector_double::size_type n_f, vector_double::size_type n_w, const std::string &weight_generation, detail::random_engine_type &r_engine) @@ -652,7 +653,7 @@ inline std::vector decomposition_weights(vector_double::size_type retval[i][i] = 1.; } // Then we add points on the simplex randomly genrated using Halton low discrepancy sequence - halton ld_seq{safe_cast(n_f - 1u), safe_cast(n_f)}; + halton ld_seq{boost::numeric_cast(n_f - 1u), boost::numeric_cast(n_f)}; for (decltype(n_w) i = n_f; i < n_w; ++i) { retval.push_back(sample_from_simplex(ld_seq())); } diff --git a/tests/generic.cpp b/tests/generic.cpp index 354ba3b6c..5d43ced28 100644 --- a/tests/generic.cpp +++ b/tests/generic.cpp @@ -130,16 +130,6 @@ BOOST_AUTO_TEST_CASE(force_bounds_test) } } -BOOST_AUTO_TEST_CASE(safe_cast_test) -{ - unsigned short s = std::numeric_limits::max(); - unsigned long l = std::numeric_limits::max(); - BOOST_CHECK_NO_THROW(safe_cast(s)); - if (l > s) { - BOOST_CHECK_THROW(safe_cast(l), std::overflow_error); - } -} - BOOST_AUTO_TEST_CASE(binomial_coefficient_test) { BOOST_CHECK_EQUAL(binomial_coefficient(0u, 0u), 1u); From 14f8427491a8c9cd3eebd7133a39590947eefdd0 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sun, 2 Apr 2017 01:10:04 +0200 Subject: [PATCH 03/57] problem: various improvements in the sanity checking of sparsity and hessians. --- include/pagmo/problem.hpp | 61 ++++++++++++++---- pygmo/_problem_test.py | 53 ++++++++++++++++ pygmo/docstrings.cpp | 15 +++-- tests/problem.cpp | 126 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 237 insertions(+), 18 deletions(-) diff --git a/include/pagmo/problem.hpp b/include/pagmo/problem.hpp index 79de8200d..1d5aed37c 100644 --- a/include/pagmo/problem.hpp +++ b/include/pagmo/problem.hpp @@ -1120,9 +1120,7 @@ class problem // We resize rather than push back here, so that an std::length_error is called quickly rather // than an std::bad_alloc after waiting the growth m_hs_dim.resize(boost::numeric_cast(nf)); - for (vector_double::size_type i = 0u; i < nf; ++i) { - m_hs_dim[i] = (nx * (nx - 1u) / 2u + nx); // lower triangular - } + std::fill(m_hs_dim.begin(), m_hs_dim.end(), nx * (nx - 1u) / 2u + nx); // lower triangular } // 8 - Constraint tolerance m_c_tol.resize(m_nec + m_nic); @@ -1373,8 +1371,8 @@ class problem * @return the gradient sparsity pattern. * * @throws std::invalid_argument if the sparsity pattern returned by the UDP is invalid (specifically, if - * it contains duplicate pairs of indices or if the indices in the pattern are incompatible with the properties of - * the problem). + * it contains duplicate pairs of indices, or if the indices in the pattern are incompatible with the properties of + * the problem, or if the size of the returned pattern is different from the size recorded upon construction). * @throws unspecified memory errors in standard containers. */ sparsity_pattern gradient_sparsity() const @@ -1382,6 +1380,17 @@ class problem if (has_gradient_sparsity()) { auto retval = ptr()->gradient_sparsity(); check_gradient_sparsity(retval); + // Check the size is consistent with the stored size. 
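+            // (m_gs_dim is the size of the sparsity pattern that the UDP returned
+            // when the problem was constructed.)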
+ // NOTE: we need to do this check here, and not in check_gradient_sparsity(), + // because check_gradient_sparsity() is sometimes called when m_gs_dim has not been + // initialised yet (e.g., in the ctor). + if (retval.size() != m_gs_dim) { + pagmo_throw(std::invalid_argument, + "Invalid gradient sparsity pattern: the returned sparsity pattern has a size of " + + std::to_string(retval.size()) + + ", while the sparsity pattern size stored upon problem construction is " + + std::to_string(m_gs_dim)); + } return retval; } return detail::dense_gradient(get_nf(), get_nx()); @@ -1434,7 +1443,8 @@ class problem * * @throws std::invalid_argument if either: * - the length of \p dv differs from the output of get_nx(), or - * - the length of the returned hessians does not match the corresponding hessians sparsity pattern dimensions. + * - the length of the returned hessians does not match the corresponding hessians sparsity pattern dimensions, or + * - the size of the return value is not equal to the fitness dimension. * @throws not_implemented_error if the UDP does not satisfy pagmo::has_hessians. * @throws unspecified any exception thrown by the %hessians() method of the UDP. */ @@ -1443,7 +1453,7 @@ class problem // 1 - checks the decision vector check_decision_vector(dv); // 2 - computes the hessians - std::vector retval(ptr()->hessians(dv)); + auto retval(ptr()->hessians(dv)); // 3 - checks the hessians check_hessians_vector(retval); // 4 - increments hessians evaluation counter @@ -1484,8 +1494,9 @@ class problem * @return the hessians sparsity pattern. * * @throws std::invalid_argument if the sparsity pattern returned by the UDP is invalid (specifically, if - * its size is invalid, if it contains duplicate pairs of indices or if the returned indices do not correspond - * to a lower triangular representation of a symmetric matrix). + * its size is invalid, if it contains duplicate pairs of indices, if the returned indices do not correspond + * to a lower triangular representation of a symmetric matrix, or if the sizes of the components differ from + * the sizes recorded upon construction). * @throws not_implemented_error if the %hessians_sparsity() method of the UDP is invoked without being * available. This indicates in general an inconsistency in the implementation of the UDP. * @throws unspecified memory errors in standard containers. @@ -1495,6 +1506,25 @@ class problem if (m_has_hessians_sparsity) { auto retval = ptr()->hessians_sparsity(); check_hessians_sparsity(retval); + // Check the sizes are consistent with the stored sizes. + // NOTE: we need to do this check here, and not in check_hessians_sparsity(), + // because check_hessians_sparsity() is sometimes called when m_hs_dim has not been + // initialised yet (e.g., in the ctor). + // NOTE: in check_hessians_sparsity() we have already checked the size of retval. It has + // to be the same as the fitness dimension. The same check is run when m_hs_dim is originally + // created, hence they must be equal. 
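+            // The assertion below encodes this invariant; only the size of each
+            // individual component still needs to be checked against the stored dimensions.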
+ assert(retval.size() == m_hs_dim.size()); + auto r_it = retval.begin(); + for (const auto &dim : m_hs_dim) { + if (r_it->size() != dim) { + pagmo_throw(std::invalid_argument, + "Invalid hessian sparsity pattern: the returned sparsity pattern has a size of " + + std::to_string(r_it->size()) + + ", while the sparsity pattern size stored upon problem construction is " + + std::to_string(dim)); + } + ++r_it; + } return retval; } return detail::dense_hessians(get_nf(), get_nx()); @@ -1975,7 +2005,7 @@ class problem + std::to_string(hs.size()) + ", expected: " + std::to_string(nf)); } // 2 - We check that all hessian sparsity patterns have - // valid indexes. + // valid indices. for (const auto &one_hs : hs) { check_hessian_sparsity(one_hs); } @@ -1984,7 +2014,7 @@ class problem { const auto nx = get_nx(); // 1 - We check that the hessian sparsity pattern has - // valid indexes. Assuming a lower triangular representation of + // valid indices. Assuming a lower triangular representation of // a symmetric matrix. Example, for a 4x4 dense symmetric // [(0,0), (1,0), (1,1), (2,0), (2,1), (2,2), (3,0), (3,1), (3,2), (3,3)] for (const auto &pair : hs) { @@ -2034,8 +2064,15 @@ class problem void check_hessians_vector(const std::vector &hs) const { - // Checks that the hessians returned have the same dimensions of the + // 1 - Check that hs has size get_nf() + if (hs.size() != get_nf()) { + pagmo_throw(std::invalid_argument, "The hessians vector has a size of " + std::to_string(hs.size()) + + ", but the fitness dimension of the problem is " + + std::to_string(get_nf()) + ". The two values must be equal"); + } + // 2 - Check that the hessians returned have the same dimensions of the // corresponding sparsity patterns + // NOTE: the dimension of m_hs_dim is guaranteed to be get_nf() on construction. for (decltype(hs.size()) i = 0u; i < hs.size(); ++i) { if (hs[i].size() != m_hs_dim[i]) { pagmo_throw(std::invalid_argument, "On the hessian no. 
" + std::to_string(i) + ": Components returned: " diff --git a/pygmo/_problem_test.py b/pygmo/_problem_test.py index 6e32503df..eebe85135 100644 --- a/pygmo/_problem_test.py +++ b/pygmo/_problem_test.py @@ -1190,6 +1190,23 @@ def gradient_sparsity(self): self.assertRaises(TypeError, lambda: problem(p())) + class p(object): + counter = 0 + + def get_bounds(self): + return ([0], [1]) + + def fitness(self, a): + return [42] + + def gradient_sparsity(self): + if p.counter == 0: + p.counter = p.counter + 1 + return [] + return [(0, 0)] + + self.assertRaises(ValueError, lambda: problem(p()).gradient_sparsity()) + def run_has_hessians_tests(self): from .core import problem @@ -1342,6 +1359,22 @@ def hessians(self, a): problem(p()).hessians([1, 2])[1])) self.assertRaises(ValueError, lambda: problem(p()).hessians([1])) + class p(object): + + def get_bounds(self): + return ([0]*6, [1]*6) + + def fitness(self, a): + return [42, -42] + + def get_nobj(self): + return 2 + + def hessians(self, a): + return [] + + self.assertRaises(ValueError, lambda: problem(p()).hessians([1]*6)) + def run_has_hessians_sparsity_tests(self): from .core import problem @@ -1678,6 +1711,26 @@ def hessians_sparsity(self): self.assert_((problem(p()).hessians_sparsity()[1] == array([[0, 0], [1, 0]])).all()) + class p(object): + counter = 0 + + def get_bounds(self): + return ([0]*6, [1]*6) + + def fitness(self, a): + return [42, 42] + + def get_nobj(self): + return 2 + + def hessians_sparsity(self): + if p.counter == 0: + p.counter = p.counter + 1 + return [[(1, 0)], [(1, 0)]] + return [[(1, 0)], [(1, 0), (2, 0)]] + + self.assertRaises(ValueError, lambda: problem(p()).hessians_sparsity()) + def run_seed_tests(self): from .core import problem diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index b3394fdee..2f113032e 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -778,8 +778,9 @@ Python object of any kind. Specifically: shape, dimensions, etc.), * at least one element of the returned iterable Python object does not consist of a collection of exactly 2 elements, - * if the sparsity pattern returned by the UDP is invalid (specifically, if it contains duplicate pairs of indices - or if the indices in the pattern are incompatible with the properties of the problem) + * if the sparsity pattern returned by the UDP is invalid (specifically, if it contains duplicate pairs of indices, + or if the indices in the pattern are incompatible with the properties of the problem, or if the size of the + returned pattern is different from the size recorded upon construction) OverflowError: if the NumPy array returned by the UDP contains integer values which are negative or outside an implementation-defined range unspecified: any exception thrown by: @@ -847,8 +848,9 @@ array, and it must return the hessians vector as an iterable Python object (e.g. 
``list`` of 1D NumPy float array: the hessians of *dv* Raises: - ValueError: if either the length of *dv* differs from the value returned by :func:`~pygmo.core.problem.get_nx()`, or - the length of returned hessians does not match the corresponding hessians sparsity pattern dimensions + ValueError: if the length of *dv* differs from the value returned by :func:`~pygmo.core.problem.get_nx()`, or + the length of returned hessians does not match the corresponding hessians sparsity pattern dimensions, or + the size of the return value is not equal to the fitness dimension NotImplementedError: if the UDP does not provide a ``hessians()`` method unspecified: any exception thrown by the ``hessians()`` method of the UDP, or by failures at the intersection between C++ and Python (e.g., type conversion errors, mismatched function signatures, etc.) @@ -918,8 +920,9 @@ returned object will then be interpreted as a sparsity pattern in the same way a shape, dimensions, etc.), * at least one element of a returned iterable Python object does not consist of a collection of exactly 2 elements, - * if a sparsity pattern returned by the UDP is invalid (specifically, if it contains duplicate pairs of indices - or if the indices in the pattern are incompatible with the properties of the problem) + * if a sparsity pattern returned by the UDP is invalid (specifically, if it contains duplicate pairs of indices, + if the indices in the pattern are incompatible with the properties of the problem or if the size of the pattern + differs from the size recorded upon construction) OverflowError: if the NumPy arrays returned by the UDP contain integer values which are negative or outside an implementation-defined range unspecified: any exception thrown by: diff --git a/tests/problem.cpp b/tests/problem.cpp index cce9bfc2e..8e4dd1d92 100644 --- a/tests/problem.cpp +++ b/tests/problem.cpp @@ -1108,3 +1108,129 @@ BOOST_AUTO_TEST_CASE(thread_safety_test) BOOST_CHECK(problem{ts2{}}.get_thread_safety() == thread_safety::none); BOOST_CHECK(problem{ts3{}}.get_thread_safety() == thread_safety::basic); } + +struct gs1 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + sparsity_pattern gradient_sparsity() const + { + if (!n_grad_invs) { + ++n_grad_invs; + return {}; + } + return {{0, 0}}; + } + static int n_grad_invs; +}; + +int gs1::n_grad_invs = 0; + +struct gs2 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + sparsity_pattern gradient_sparsity() const + { + return {{0, 0}}; + } +}; + +BOOST_AUTO_TEST_CASE(custom_gs) +{ + // Test a gradient sparsity that changes after the first invocation of gradient_sparsity(). 
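+    // gs1 returns an empty pattern on the first invocation (which happens during problem
+    // construction, where the pattern size is recorded) and a one-element pattern afterwards,
+    // hence the size consistency check must fail.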
+ problem p{gs1{}}; + BOOST_CHECK_THROW(p.gradient_sparsity(), std::invalid_argument); + p = problem{gs2{}}; + BOOST_CHECK_NO_THROW(p.gradient_sparsity()); +} + +struct hs1 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + vector_double::size_type get_nobj() const + { + return 2; + } + std::vector hessians_sparsity() const + { + if (!n_hess_invs) { + ++n_hess_invs; + return {{{1, 0}}, {{1, 0}}}; + } + return {{{1, 0}}, {{1, 0}, {2, 0}}}; + } + static int n_hess_invs; +}; + +int hs1::n_hess_invs = 0; + +struct hs2 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + vector_double::size_type get_nobj() const + { + return 2; + } + std::vector hessians_sparsity() const + { + return {{{1, 0}}, {{1, 0}, {2, 0}}}; + } +}; + +BOOST_AUTO_TEST_CASE(custom_hs) +{ + // Test a hessians sparsity that changes after the first invocation of hessians_sparsity(). + problem p{hs1{}}; + BOOST_CHECK_THROW(p.hessians_sparsity(), std::invalid_argument); + p = problem{hs2{}}; + BOOST_CHECK_NO_THROW(p.hessians_sparsity()); +} + +struct hess1 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + vector_double::size_type get_nobj() const + { + return 2; + } + std::vector hessians(const vector_double &) const + { + return {{}}; + } +}; + +BOOST_AUTO_TEST_CASE(broken_hessian) +{ + // Test a hessians method that returns a number of vectors different from get_nf(). + problem p{hess1{}}; + BOOST_CHECK_THROW(p.hessians({1, 1, 1, 1, 1, 1}), std::invalid_argument); +} From 6b857978a5651a37009cef969ef573401f9115b1 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sun, 2 Apr 2017 14:41:31 +0200 Subject: [PATCH 04/57] Restrict the sparsity patterns to be always sorted. --- include/pagmo/problem.hpp | 89 ++++++++++++++++++--------------------- pygmo/_problem_test.py | 29 +++++++++++++ pygmo/docstrings.cpp | 8 ++-- tests/problem.cpp | 37 ++++++++++++++++ 4 files changed, 110 insertions(+), 53 deletions(-) diff --git a/include/pagmo/problem.hpp b/include/pagmo/problem.hpp index 1d5aed37c..9cdfb5bee 100644 --- a/include/pagmo/problem.hpp +++ b/include/pagmo/problem.hpp @@ -1360,7 +1360,7 @@ class problem /// Gradient sparsity pattern. /** * This method will return the gradient sparsity pattern of the problem. The gradient sparsity pattern is a - * collection of the indices \f$(i,j)\f$ of the non-zero elements of + * lexicographically sorted collection of the indices \f$(i,j)\f$ of the non-zero elements of * \f$ g_{ij} = \frac{\partial f_i}{\partial x_j}\f$. * * If problem::has_gradient_sparsity() returns \p true, @@ -1371,8 +1371,9 @@ class problem * @return the gradient sparsity pattern. * * @throws std::invalid_argument if the sparsity pattern returned by the UDP is invalid (specifically, if - * it contains duplicate pairs of indices, or if the indices in the pattern are incompatible with the properties of - * the problem, or if the size of the returned pattern is different from the size recorded upon construction). + * it is not strictly sorted lexicographically, or if the indices in the pattern are incompatible with the + * properties of the problem, or if the size of the returned pattern is different from the size recorded upon + * construction). 
* @throws unspecified memory errors in standard containers. */ sparsity_pattern gradient_sparsity() const @@ -1482,7 +1483,7 @@ class problem /// Hessians sparsity pattern. /** * This method will return the hessians sparsity pattern of the problem. Each component \f$ l\f$ of the hessians - * sparsity pattern is a collection of the indices \f$(i,j)\f$ of the non-zero elements of + * sparsity pattern is a lexicographically sorted collection of the indices \f$(i,j)\f$ of the non-zero elements of * \f$h^l_{ij} = \frac{\partial f^l}{\partial x_i\partial x_j}\f$. Since the Hessian matrix * is symmetric, only lower triangular elements are allowed. * @@ -1493,13 +1494,10 @@ class problem * * @return the hessians sparsity pattern. * - * @throws std::invalid_argument if the sparsity pattern returned by the UDP is invalid (specifically, if - * its size is invalid, if it contains duplicate pairs of indices, if the returned indices do not correspond - * to a lower triangular representation of a symmetric matrix, or if the sizes of the components differ from - * the sizes recorded upon construction). - * @throws not_implemented_error if the %hessians_sparsity() method of the UDP is invoked without being - * available. This indicates in general an inconsistency in the implementation of the UDP. - * @throws unspecified memory errors in standard containers. + * @throws std::invalid_argument if a sparsity pattern returned by the UDP is invalid (specifically, if + * if it is not strictly sorted lexicographically, if the returned indices do not + * correspond to a lower triangular representation of a symmetric matrix, or if the size of the pattern differs + * from the size recorded upon construction). */ std::vector hessians_sparsity() const { @@ -1956,43 +1954,31 @@ class problem return m_ptr.get(); } - // A small helper to check if a vector containes unique elements. - // This version for floating point types is also protected against possible nans - template = 0> - static bool all_unique(std::vector x) - { - std::sort(x.begin(), x.end(), detail::less_than_f); - auto it = std::unique(x.begin(), x.end(), detail::equal_to_f); - return it == x.end(); - } - // The version for non floating point types is not protected vs possible nans - // (e.g if used with std::pair it could be troublesome) - template = 0> - static bool all_unique(std::vector x) - { - std::sort(x.begin(), x.end()); - auto it = std::unique(x.begin(), x.end()); - return it == x.end(); - } - void check_gradient_sparsity(const sparsity_pattern &gs) const { + // Cache a couple of quantities. const auto nx = get_nx(); const auto nf = get_nf(); - // 1 - We check that the gradient sparsity pattern has - // valid indices. - for (const auto &pair : gs) { - if ((pair.first >= nf) || (pair.second >= nx)) { + + // Check the pattern. 
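+        // A single pass over the pattern verifies both that each index pair is within
+        // the (nf, nx) bounds and that the ordering is strictly ascending: every pair
+        // is compared to its predecessor via std::pair's lexicographic operator<.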
+ for (auto it = gs.begin(); it != gs.end(); ++it) { + if ((it->first >= nf) || (it->second >= nx)) { pagmo_throw(std::invalid_argument, "Invalid pair detected in the gradient sparsity pattern: (" - + std::to_string(pair.first) + ", " + std::to_string(pair.second) + + std::to_string(it->first) + ", " + std::to_string(it->second) + ")\nFitness dimension is: " + std::to_string(nf) + "\nDecision vector dimension is: " + std::to_string(nx)); } - } - // 2 - We check all pairs are unique - if (!all_unique(gs)) { - pagmo_throw(std::invalid_argument, - "Multiple entries of the same index pair was detected in the gradient sparsity pattern"); + if (it == gs.begin()) { + continue; + } + if (!(*(it - 1) < *it)) { + pagmo_throw( + std::invalid_argument, + "The gradient sparsity pattern is not strictly sorted in ascending order: the indices pair (" + + std::to_string((it - 1)->first) + ", " + std::to_string((it - 1)->second) + + ") is greater than or equal to the successive indices pair (" + std::to_string(it->first) + + ", " + std::to_string(it->second) + ")"); + } } } void check_hessians_sparsity(const std::vector &hs) const @@ -2013,23 +1999,28 @@ class problem void check_hessian_sparsity(const sparsity_pattern &hs) const { const auto nx = get_nx(); - // 1 - We check that the hessian sparsity pattern has + // We check that the hessian sparsity pattern has // valid indices. Assuming a lower triangular representation of // a symmetric matrix. Example, for a 4x4 dense symmetric // [(0,0), (1,0), (1,1), (2,0), (2,1), (2,2), (3,0), (3,1), (3,2), (3,3)] - for (const auto &pair : hs) { - if ((pair.first >= nx) || (pair.second > pair.first)) { + for (auto it = hs.begin(); it != hs.end(); ++it) { + if ((it->first >= nx) || (it->second > it->first)) { pagmo_throw(std::invalid_argument, "Invalid pair detected in the hessians sparsity pattern: (" - + std::to_string(pair.first) + ", " + std::to_string(pair.second) + + std::to_string(it->first) + ", " + std::to_string(it->second) + ")\nDecision vector dimension is: " + std::to_string(nx) + "\nNOTE: hessian is a symmetric matrix and PaGMO represents " "it as lower triangular: i.e (i,j) is not valid if j>i"); } - } - // 2 - We check all pairs are unique - if (!all_unique(hs)) { - pagmo_throw(std::invalid_argument, - "Multiple entries of the same index pair were detected in the hessian sparsity pattern"); + if (it == hs.begin()) { + continue; + } + if (!(*(it - 1) < *it)) { + pagmo_throw(std::invalid_argument, + "The hessian sparsity pattern is not strictly sorted in ascending order: the indices pair (" + + std::to_string((it - 1)->first) + ", " + std::to_string((it - 1)->second) + + ") is greater than or equal to the successive indices pair (" + + std::to_string(it->first) + ", " + std::to_string(it->second) + ")"); + } } } void check_decision_vector(const vector_double &dv) const diff --git a/pygmo/_problem_test.py b/pygmo/_problem_test.py index eebe85135..c98a000b0 100644 --- a/pygmo/_problem_test.py +++ b/pygmo/_problem_test.py @@ -1207,6 +1207,19 @@ def gradient_sparsity(self): self.assertRaises(ValueError, lambda: problem(p()).gradient_sparsity()) + class p(object): + + def get_bounds(self): + return ([0]*6, [1]*6) + + def fitness(self, a): + return [42] + + def gradient_sparsity(self): + return [(0, 0),(0,2),(0,1)] + + self.assertRaises(ValueError, lambda: problem(p())) + def run_has_hessians_tests(self): from .core import problem @@ -1731,6 +1744,22 @@ def hessians_sparsity(self): self.assertRaises(ValueError, lambda: problem(p()).hessians_sparsity()) + class 
p(object): + + def get_bounds(self): + return ([0]*6, [1]*6) + + def fitness(self, a): + return [42, 42] + + def get_nobj(self): + return 2 + + def hessians_sparsity(self): + return [[(1, 0)], [(1, 0), (2, 0), (1,1)]] + + self.assertRaises(ValueError, lambda: problem(p())) + def run_seed_tests(self): from .core import problem diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index 2f113032e..1457e2ec0 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -754,7 +754,7 @@ std::string problem_gradient_sparsity_docstring() Gradient sparsity pattern. -This method will return the gradient sparsity pattern of the problem. The gradient sparsity pattern is a +This method will return the gradient sparsity pattern of the problem. The gradient sparsity pattern is a lexicographically sorted collection of the indices :math:`(i,j)` of the non-zero elements of :math:`g_{ij} = \frac{\partial f_i}{\partial x_j}`. If :func:`~pygmo.core.problem.has_gradient_sparsity()` returns ``True``, then the ``gradient_sparsity()`` method of the @@ -778,7 +778,7 @@ Python object of any kind. Specifically: shape, dimensions, etc.), * at least one element of the returned iterable Python object does not consist of a collection of exactly 2 elements, - * if the sparsity pattern returned by the UDP is invalid (specifically, if it contains duplicate pairs of indices, + * if the sparsity pattern returned by the UDP is invalid (specifically, if it is not strictly sorted lexicographically, or if the indices in the pattern are incompatible with the properties of the problem, or if the size of the returned pattern is different from the size recorded upon construction) OverflowError: if the NumPy array returned by the UDP contains integer values which are negative or outside an @@ -894,7 +894,7 @@ std::string problem_hessians_sparsity_docstring() Hessians sparsity pattern. This method will return the hessians sparsity pattern of the problem. Each component :math:`l` of the hessians -sparsity pattern is a collection of the indices :math:`(i,j)` of the non-zero elements of +sparsity pattern is a lexicographically sorted collection of the indices :math:`(i,j)` of the non-zero elements of :math:`h^l_{ij} = \frac{\partial f^l}{\partial x_i\partial x_j}`. Since the Hessian matrix is symmetric, only lower triangular elements are allowed. 
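+For example, for a problem with 4 decision variables the dense hessians sparsity pattern of each
+component is ``[(0,0), (1,0), (1,1), (2,0), (2,1), (2,2), (3,0), (3,1), (3,2), (3,3)]``.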
@@ -920,7 +920,7 @@ returned object will then be interpreted as a sparsity pattern in the same way a shape, dimensions, etc.), * at least one element of a returned iterable Python object does not consist of a collection of exactly 2 elements, - * if a sparsity pattern returned by the UDP is invalid (specifically, if it contains duplicate pairs of indices, + * if a sparsity pattern returned by the UDP is invalid (specifically, if it is not strictly sorted lexicographically, if the indices in the pattern are incompatible with the properties of the problem or if the size of the pattern differs from the size recorded upon construction) OverflowError: if the NumPy arrays returned by the UDP contain integer values which are negative or outside an diff --git a/tests/problem.cpp b/tests/problem.cpp index 8e4dd1d92..603cfb6c2 100644 --- a/tests/problem.cpp +++ b/tests/problem.cpp @@ -1146,6 +1146,21 @@ struct gs2 { } }; +struct gs3 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + sparsity_pattern gradient_sparsity() const + { + return {{0, 0}, {0, 2}, {0, 1}}; + } +}; + BOOST_AUTO_TEST_CASE(custom_gs) { // Test a gradient sparsity that changes after the first invocation of gradient_sparsity(). @@ -1153,6 +1168,8 @@ BOOST_AUTO_TEST_CASE(custom_gs) BOOST_CHECK_THROW(p.gradient_sparsity(), std::invalid_argument); p = problem{gs2{}}; BOOST_CHECK_NO_THROW(p.gradient_sparsity()); + // Gradient sparsity not sorted. + BOOST_CHECK_THROW(p = problem{gs3{}}, std::invalid_argument); } struct hs1 { @@ -1200,6 +1217,25 @@ struct hs2 { } }; +struct hs3 { + vector_double fitness(const vector_double &) const + { + return {0, 0}; + } + std::pair get_bounds() const + { + return {{0, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1}}; + } + vector_double::size_type get_nobj() const + { + return 2; + } + std::vector hessians_sparsity() const + { + return {{{1, 0}, {2, 1}, {1, 1}}, {{1, 0}, {2, 0}}}; + } +}; + BOOST_AUTO_TEST_CASE(custom_hs) { // Test a hessians sparsity that changes after the first invocation of hessians_sparsity(). @@ -1207,6 +1243,7 @@ BOOST_AUTO_TEST_CASE(custom_hs) BOOST_CHECK_THROW(p.hessians_sparsity(), std::invalid_argument); p = problem{hs2{}}; BOOST_CHECK_NO_THROW(p.hessians_sparsity()); + BOOST_CHECK_THROW(p = problem{hs3{}}, std::invalid_argument); } struct hess1 { From b687dcec3bcd329337a19873b51912632db3b6fe Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sun, 2 Apr 2017 14:42:29 +0200 Subject: [PATCH 05/57] Some further WIP for the nlopt bits. --- include/pagmo/algorithms/nlopt.hpp | 79 +++++++++++++++++++++++++ include/pagmo/detail/nlopt_utils.hpp | 86 ++++++++++++++++++++++++---- tests/CMakeLists.txt | 1 + tests/nlopt.cpp | 44 ++++++++++++++ 4 files changed, 198 insertions(+), 12 deletions(-) create mode 100644 include/pagmo/algorithms/nlopt.hpp create mode 100644 tests/nlopt.cpp diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp new file mode 100644 index 000000000..8e885d7bd --- /dev/null +++ b/include/pagmo/algorithms/nlopt.hpp @@ -0,0 +1,79 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. 
+ +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#ifndef PAGMO_ALGORITHMS_DE_HPP +#define PAGMO_ALGORITHMS_DE_HPP + +#include +#include + +namespace pagmo +{ + +class nlopt +{ + using nlopt_obj = detail::nlopt_obj; + +public: + nlopt() : nlopt(NLOPT_LN_COBYLA) + { + } + explicit nlopt(::nlopt_algorithm algo) : m_algo(algo) + { + } + population evolve(population pop) const + { + if (!pop.size()) { + return pop; + } + auto &prob = pop.get_problem(); + nlopt_obj no(m_algo, prob); + auto initial_guess = pop.get_x()[pop.best_idx()]; + if (initial_guess.size() != prob.get_nx()) { + // TODO + throw; + } + double fitness; + const auto res = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &fitness); + if (res < 0) { + // TODO + print(initial_guess, '\n'); + std::cout << "failed!!\n"; + throw; + } + print("Res: ", res, "\n"); + pop.set_xf(pop.best_idx(), initial_guess, {fitness}); + return pop; + } + +private: + ::nlopt_algorithm m_algo; +}; +} + +#endif diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index 0c210e606..666306bbe 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -56,45 +56,107 @@ struct nlopt_obj { // Extract and set problem dimension. const auto n = boost::numeric_cast(prob.get_nx()); m_value.reset(::nlopt_create(algo, n)); - // Init the nlopt_obj. + // Try to init the nlopt_obj. if (!m_value) { pagmo_throw(std::invalid_argument, "the creation of an nlopt_opt object failed"); } + // NLopt does not handle MOO. if (prob.get_nobj() != 1u) { - // TODO + // TODO error message pagmo_throw(std::invalid_argument, "" + get_name()); } + // Constraints support will come later. if (prob.get_nc()) { - // TODO + // TODO error message pagmo_throw(std::invalid_argument, "" + get_name()); } + // This is just a vector_double that is re-used across objfun invocations. + // It will hold the current decision vector. m_dv.resize(prob.get_nx()); // Set the objfun + gradient. auto res = ::nlopt_set_min_objective(m_value.get(), [](unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. auto &nlo = *static_cast(f_data); + + // A few shortcuts. auto &p = nlo.m_prob; auto &dv = nlo.m_dv; auto &sp = nlo.m_sp; + + // A couple of sanity checks. assert(dim == p.get_nx()); + assert(dv.size() == dim); + if (grad && !p.has_gradient()) { - // TODO + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + // TODO error message pagmo_throw(std::invalid_argument, "" + nlo.get_name()); } - assert(dv.size() == dim); + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. std::copy(x, x + dim, dv.begin()); + + // Compute fitness and, if needed, gradient. 
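+                                              // NOTE: sparsity patterns are guaranteed to be
+                                              // lexicographically sorted, so the entries of the
+                                              // objfun gradient (first index 0) come first and
+                                              // the loop below can stop at the first pair whose
+                                              // first index is nonzero.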
const auto fitness = p.fitness(dv); - const auto gradient = p.gradient(dv); - auto g_it = gradient.begin(); - const auto g_end = gradient.end(); - auto i = 0u; - for (auto sp_it = sp.begin(); i < dim && g_it != g_end; - ++i, ++g_it, ++sp_it) { - assert(sp_it->first == 0u); + if (grad) { + const auto gradient = p.gradient(dv); + auto g_it = gradient.begin(); + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // First we fill the dense output gradient with zeroes. + std::fill(grad, grad + dim, 0.); + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (const auto &t : sp) { + if (t.first == 0u) { + // NOTE: we just need the gradient of the objfun, + // i.e., those (i,j) pairs in which i == 0. + grad[t.second] = *g_it; + ++g_it; + } else { + break; + } + } } + + // Return the objfun value. return fitness[0]; }, static_cast(this)); + if (res != NLOPT_SUCCESS) { + // TODO + throw; + } + + // Box bounds. + const auto bounds = prob.get_bounds(); + res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + if (res != NLOPT_SUCCESS) { + // TODO + throw; + } + res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + if (res != NLOPT_SUCCESS) { + // TODO + throw; + } + + // TODO hard-coded. + res = ::nlopt_set_ftol_abs(m_value.get(), 1E-12); + // res = ::nlopt_set_maxeval(m_value.get(), 10000); + if (res != NLOPT_SUCCESS) { + // TODO + throw; + } } nlopt_obj(const nlopt_obj &other) : m_prob(other.m_prob), m_sp(other.m_sp), m_value(::nlopt_copy(other.m_value.get()), ::nlopt_destroy), diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7b3bba025..5ada8f25b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -63,6 +63,7 @@ if(PAGMO_WITH_EIGEN3) endif() if(PAGMO_WITH_NLOPT) + ADD_PAGMO_TESTCASE(nlopt) ADD_PAGMO_TESTCASE(nlopt_utils) endif() diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp new file mode 100644 index 000000000..07babd86a --- /dev/null +++ b/tests/nlopt.cpp @@ -0,0 +1,44 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. 
*/ + +#define BOOST_TEST_MODULE nlopt_test +#include + +#include +#include +#include +#include + +using namespace pagmo; + +BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) +{ + population pop{ackley{10}, 20}; + algorithm algo{nlopt{NLOPT_LD_MMA}}; + std::cout << algo.evolve(pop) << '\n'; +} From 453a13c022b0951ad47a008b70924a9eb7513492 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sun, 2 Apr 2017 22:09:35 +0200 Subject: [PATCH 06/57] WIP. --- include/pagmo/algorithms/nlopt.hpp | 102 +++++++++++++++++++++++++++-- tests/nlopt.cpp | 2 +- 2 files changed, 98 insertions(+), 6 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 8e885d7bd..9ed756af7 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -29,31 +29,121 @@ see https://www.gnu.org/licenses/. */ #ifndef PAGMO_ALGORITHMS_DE_HPP #define PAGMO_ALGORITHMS_DE_HPP +#include +#include +#include +#include +#include +#include +#include +#include + #include +#include #include +#include +#include namespace pagmo { +namespace detail +{ + +template +struct nlopt_data { + using names_map_t = boost::bimap>; + static const names_map_t names; +}; + +inline typename nlopt_data<>::names_map_t nlopt_names_map() +{ + typename nlopt_data<>::names_map_t retval; + using value_type = typename nlopt_data<>::names_map_t::value_type; + retval.insert(value_type("cobyla", NLOPT_LN_COBYLA)); + retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA)); + retval.insert(value_type("praxis", NLOPT_LN_PRAXIS)); + retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD)); + retval.insert(value_type("sbplx", NLOPT_LN_SBPLX)); + return retval; +} + +template +const typename nlopt_data::names_map_t nlopt_data::names = nlopt_names_map(); +} + class nlopt { using nlopt_obj = detail::nlopt_obj; + using nlopt_data = detail::nlopt_data<>; public: - nlopt() : nlopt(NLOPT_LN_COBYLA) + nlopt() : nlopt("neldermead") { } - explicit nlopt(::nlopt_algorithm algo) : m_algo(algo) + explicit nlopt(const std::string &algo) + : m_algo(algo), m_select(std::string("best")), m_replace(std::string("best")) { + if (nlopt_data::names.left.find(m_algo) == nlopt_data::names.left.end()) { + std::ostringstream oss; + std::transform(nlopt_data::names.left.begin(), nlopt_data::names.left.end(), + std::ostream_iterator(oss, "\n"), + [](const uncvref_t &v) { return v.first; }); + pagmo_throw(std::invalid_argument, + "unknown/unsupported NLopt algorithm '" + algo + "'. The valid algorithms are:\n" + oss.str()); + } + } + void set_selection(const std::string &select) + { + if (select != "best" && select != "worst" && select != "random") { + pagmo_throw(std::invalid_argument, + "the individual selection policy must be one of ['best', 'worst', 'random'], but '" + select + + "' was provided instead"); + } + m_select = select; + } + void set_selection(population::size_type n) + { + m_select = n; + } + void set_replacement(const std::string &replace) + { + if (replace != "best" && replace != "worst" && replace != "random") { + pagmo_throw(std::invalid_argument, + "the individual replacement policy must be one of ['best', 'worst', 'random'], but '" + replace + + "' was provided instead"); + } + m_replace = replace; + } + void set_replacement(population::size_type n) + { + m_replace = n; } population evolve(population pop) const { if (!pop.size()) { + // In case of an empty pop, just return it. 
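+            // (There is no individual from which to take the initial guess.)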
return pop; } + auto &prob = pop.get_problem(); - nlopt_obj no(m_algo, prob); - auto initial_guess = pop.get_x()[pop.best_idx()]; + nlopt_obj no(nlopt_data::names.left.at(m_algo), prob); + + // Setup of the initial guess. + vector_double initial_guess; + if (boost::any_cast(&m_select)) { + const auto &s_select = boost::any_cast(m_select); + if (s_select == "best") { + initial_guess = pop.get_x()[pop.best_idx()]; + } else if (s_select == "worst") { + initial_guess = pop.get_x()[pop.worst_idx()]; + } else { + assert(s_select == "random"); + } + } else { + assert(boost::any_cast(&m_select)); + } + if (initial_guess.size() != prob.get_nx()) { // TODO throw; @@ -72,7 +162,9 @@ class nlopt } private: - ::nlopt_algorithm m_algo; + std::string m_algo; + boost::any m_select; + boost::any m_replace; }; } diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 07babd86a..0b137f4d1 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -39,6 +39,6 @@ using namespace pagmo; BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) { population pop{ackley{10}, 20}; - algorithm algo{nlopt{NLOPT_LD_MMA}}; + algorithm algo{nlopt{"cobylsa"}}; std::cout << algo.evolve(pop) << '\n'; } From 290cec3d97ad08496967ec5416bfac44806b4dc0 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Mon, 3 Apr 2017 01:42:02 +0200 Subject: [PATCH 07/57] More work. --- include/pagmo/algorithms/nlopt.hpp | 135 ++++++++----- include/pagmo/detail/nlopt_utils.hpp | 280 +++++++++++++++++++-------- tests/nlopt.cpp | 10 +- 3 files changed, 294 insertions(+), 131 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 9ed756af7..f0df6cdbf 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -31,15 +31,16 @@ see https://www.gnu.org/licenses/. */ #include #include -#include +#include #include +#include #include #include #include -#include #include #include +#include #include #include #include @@ -47,52 +48,42 @@ see https://www.gnu.org/licenses/. */ namespace pagmo { -namespace detail -{ - -template -struct nlopt_data { - using names_map_t = boost::bimap>; - static const names_map_t names; -}; - -inline typename nlopt_data<>::names_map_t nlopt_names_map() -{ - typename nlopt_data<>::names_map_t retval; - using value_type = typename nlopt_data<>::names_map_t::value_type; - retval.insert(value_type("cobyla", NLOPT_LN_COBYLA)); - retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA)); - retval.insert(value_type("praxis", NLOPT_LN_PRAXIS)); - retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD)); - retval.insert(value_type("sbplx", NLOPT_LN_SBPLX)); - return retval; -} - -template -const typename nlopt_data::names_map_t nlopt_data::names = nlopt_names_map(); -} - class nlopt { using nlopt_obj = detail::nlopt_obj; using nlopt_data = detail::nlopt_data<>; public: - nlopt() : nlopt("neldermead") + nlopt() : nlopt("sbplx") { } explicit nlopt(const std::string &algo) - : m_algo(algo), m_select(std::string("best")), m_replace(std::string("best")) + : m_algo(algo), m_select(std::string("best")), m_replace(std::string("best")), + m_rselect_seed(random_device::next()), m_e(static_cast(m_rselect_seed)) { + // Check version. + int major, minor, bugfix; + ::nlopt_version(&major, &minor, &bugfix); + if (major < 2) { + pagmo_throw(std::runtime_error, "Only NLopt version >= 2 is supported"); + } + + // Check the algorithm. if (nlopt_data::names.left.find(m_algo) == nlopt_data::names.left.end()) { + // The selected algorithm is unknown or not among the supported ones. 
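+            // Assemble a newline-separated list of the supported names by iterating
+            // over the string-keyed (left) view of the name/enum bimap.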
std::ostringstream oss; std::transform(nlopt_data::names.left.begin(), nlopt_data::names.left.end(), std::ostream_iterator(oss, "\n"), [](const uncvref_t &v) { return v.first; }); - pagmo_throw(std::invalid_argument, - "unknown/unsupported NLopt algorithm '" + algo + "'. The valid algorithms are:\n" + oss.str()); + pagmo_throw(std::invalid_argument, "unknown/unsupported NLopt algorithm '" + algo + + "'. The supported algorithms are:\n" + oss.str()); } } + void set_random_selection_seed(unsigned seed) + { + m_rselect_seed = seed; + m_e.seed(static_cast(m_rselect_seed)); + } void set_selection(const std::string &select) { if (select != "best" && select != "worst" && select != "random") { @@ -106,6 +97,10 @@ class nlopt { m_select = n; } + boost::any get_selection() const + { + return m_select; + } void set_replacement(const std::string &replace) { if (replace != "best" && replace != "worst" && replace != "random") { @@ -119,6 +114,10 @@ class nlopt { m_replace = n; } + boost::any get_replacement() const + { + return m_replace; + } population evolve(population pop) const { if (!pop.size()) { @@ -127,7 +126,8 @@ class nlopt } auto &prob = pop.get_problem(); - nlopt_obj no(nlopt_data::names.left.at(m_algo), prob); + nlopt_obj no(nlopt_data::names.left.at(m_algo), prob, m_sc_stopval, m_sc_ftol_rel, m_sc_ftol_abs, m_sc_xtol_rel, + m_sc_xtol_abs, m_sc_maxeval, m_sc_maxtime); // Setup of the initial guess. vector_double initial_guess; @@ -139,32 +139,79 @@ class nlopt initial_guess = pop.get_x()[pop.worst_idx()]; } else { assert(s_select == "random"); + std::uniform_int_distribution dist(0, pop.size() - 1u); + initial_guess = pop.get_x()[dist(m_e)]; } } else { - assert(boost::any_cast(&m_select)); + const auto idx = boost::any_cast(m_select); + if (idx >= pop.size()) { + pagmo_throw(std::out_of_range, "cannot select the individual at index " + std::to_string(idx) + + " for evolution: the population has a size of only " + + std::to_string(pop.size())); + } + initial_guess = pop.get_x()[idx]; } - + // Check the initial guess. if (initial_guess.size() != prob.get_nx()) { - // TODO - throw; + pagmo_throw(std::invalid_argument, "the size of the initial guess, " + std::to_string(initial_guess.size()) + + ", is different from the dimension of the problem, " + + std::to_string(prob.get_nx())); } - double fitness; - const auto res = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &fitness); - if (res < 0) { - // TODO - print(initial_guess, '\n'); - std::cout << "failed!!\n"; - throw; + const auto bounds = prob.get_bounds(); + for (decltype(bounds.first.size()) i = 0; i < bounds.first.size(); ++i) { + if (std::isnan(initial_guess[i])) { + pagmo_throw(std::invalid_argument, + "the value of the initial guess at index " + std::to_string(i) + " is NaN"); + } + if (initial_guess[i] < bounds.first[i] || initial_guess[i] > bounds.second[i]) { + pagmo_throw(std::invalid_argument, "the value of the initial guess at index " + std::to_string(i) + + " is outside the problem's bounds"); + } } - print("Res: ", res, "\n"); + + // Run the optimisation and store the status returned by NLopt. + double fitness; + m_last_opt_result = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &fitness); + + // Store the new individual into the population. 
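+        // NOTE: ::nlopt_optimize() writes the optimised decision vector back into initial_guess in place.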
pop.set_xf(pop.best_idx(), initial_guess, {fitness}); return pop; } + std::string get_name() const + { + return "NLopt - " + m_algo; + } + std::string get_extra_info() const + { + int major, minor, bugfix; + ::nlopt_version(&major, &minor, &bugfix); + return "\tNLopt version: " + std::to_string(major) + "." + std::to_string(minor) + "." + std::to_string(bugfix) + + "\n\tLast optimisation return code: " + detail::nlopt_res2string(m_last_opt_result) + + "\n\tStopping criteria:\n\t\tstopval: " + + (m_sc_stopval == -HUGE_VAL ? "disabled" : detail::to_string(m_sc_stopval)) + "\n\t\tftol_rel: " + + (m_sc_ftol_rel <= 0. ? "disabled" : detail::to_string(m_sc_ftol_rel)) + "\n\t\tftol_abs: " + + (m_sc_ftol_abs <= 0. ? "disabled" : detail::to_string(m_sc_ftol_abs)) + "\n\t\txtol_rel: " + + (m_sc_xtol_rel <= 0. ? "disabled" : detail::to_string(m_sc_xtol_rel)) + "\n\t\txtol_abs: " + + (m_sc_xtol_abs <= 0. ? "disabled" : detail::to_string(m_sc_xtol_abs)) + "\n\t\tmaxeval: " + + (m_sc_maxeval <= 0. ? "disabled" : detail::to_string(m_sc_maxeval)) + "\n\t\tmaxtime: " + + (m_sc_maxtime <= 0. ? "disabled" : detail::to_string(m_sc_maxtime)) + "\n"; + } private: std::string m_algo; boost::any m_select; boost::any m_replace; + unsigned m_rselect_seed; + mutable detail::random_engine_type m_e; + mutable ::nlopt_result m_last_opt_result = NLOPT_SUCCESS; + // Stopping criteria. + double m_sc_stopval = -HUGE_VAL; + double m_sc_ftol_rel = 1E-8; + double m_sc_ftol_abs = 0.; + double m_sc_xtol_rel = 0.; + double m_sc_xtol_abs = 0.; + int m_sc_maxeval = 0; + int m_sc_maxtime = 0; }; } diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index 666306bbe..97832c394 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -30,14 +30,17 @@ see https://www.gnu.org/licenses/. */ #define PAGMO_DETAIL_NLOPT_UTILS_HPP #include +#include #include #include +#include #include #include #include #include #include #include +#include #include #include @@ -49,8 +52,70 @@ namespace pagmo namespace detail { +// Usual trick with global read-only data useful to the NLopt wrapper. +template +struct nlopt_data { + // The idea here is to establish a bijection between string name (e.g., "cobyla") + // and the enums used in the NLopt C API to refer to the algos (e.g., NLOPT_LN_COBYLA). + // We use a bidirectional map so that we can map both string -> enum and enum -> string, + // depending on what is needed. + using names_map_t = boost::bimap; + static const names_map_t names; + // A map to link a human-readable description to NLopt return codes. + using result_map_t = std::unordered_map<::nlopt_result, std::string>; + static const result_map_t results; +}; + +// Static init. 
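+// NOTE: the (otherwise unused) template parameter allows these static data members to be
+// defined here in the header without running afoul of the one definition rule.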
+template <typename T>
+const typename nlopt_data<T>::result_map_t nlopt_data<T>::results = {
+    {NLOPT_SUCCESS, "NLOPT_SUCCESS (value = " + std::to_string(NLOPT_SUCCESS) + ", Generic success return value)"},
+    {NLOPT_STOPVAL_REACHED, "NLOPT_STOPVAL_REACHED (value = " + std::to_string(NLOPT_STOPVAL_REACHED)
+                                + ", Optimization stopped because stopval was reached)"},
+    {NLOPT_FTOL_REACHED, "NLOPT_FTOL_REACHED (value = " + std::to_string(NLOPT_FTOL_REACHED)
+                             + ", Optimization stopped because ftol_rel or ftol_abs was reached)"},
+    {NLOPT_XTOL_REACHED, "NLOPT_XTOL_REACHED (value = " + std::to_string(NLOPT_XTOL_REACHED)
+                             + ", Optimization stopped because xtol_rel or xtol_abs was reached)"},
+    {NLOPT_MAXEVAL_REACHED, "NLOPT_MAXEVAL_REACHED (value = " + std::to_string(NLOPT_MAXEVAL_REACHED)
+                                + ", Optimization stopped because maxeval was reached)"},
+    {NLOPT_MAXTIME_REACHED, "NLOPT_MAXTIME_REACHED (value = " + std::to_string(NLOPT_MAXTIME_REACHED)
+                                + ", Optimization stopped because maxtime was reached)"},
+    {NLOPT_FAILURE, "NLOPT_FAILURE (value = " + std::to_string(NLOPT_FAILURE) + ", Generic failure code)"},
+    {NLOPT_INVALID_ARGS, "NLOPT_INVALID_ARGS (value = " + std::to_string(NLOPT_INVALID_ARGS) + ", Invalid arguments)"},
+    {NLOPT_OUT_OF_MEMORY,
+     "NLOPT_OUT_OF_MEMORY (value = " + std::to_string(NLOPT_OUT_OF_MEMORY) + ", Ran out of memory)"},
+    {NLOPT_ROUNDOFF_LIMITED, "NLOPT_ROUNDOFF_LIMITED (value = " + std::to_string(NLOPT_ROUNDOFF_LIMITED)
+                                 + ", Halted because roundoff errors limited progress)"},
+    {NLOPT_FORCED_STOP,
+     "NLOPT_FORCED_STOP (value = " + std::to_string(NLOPT_FORCED_STOP) + ", Halted because of a forced termination)"}};
+
+// Initialise the mapping between algo names and enums for the supported algorithms.
+inline typename nlopt_data<>::names_map_t nlopt_names_map()
+{
+    typename nlopt_data<>::names_map_t retval;
+    using value_type = typename nlopt_data<>::names_map_t::value_type;
+    retval.insert(value_type("cobyla", NLOPT_LN_COBYLA));
+    retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA));
+    retval.insert(value_type("praxis", NLOPT_LN_PRAXIS));
+    retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD));
+    retval.insert(value_type("sbplx", NLOPT_LN_SBPLX));
+    return retval;
+}
+
+// Static init using the helper function above.
+template <typename T>
+const typename nlopt_data<T>::names_map_t nlopt_data<T>::names = nlopt_names_map();
+
+// Convert an NLopt result in a more descriptive string.
+inline std::string nlopt_res2string(::nlopt_result err)
+{
+    return (nlopt_data<>::results.find(err) == nlopt_data<>::results.end() ? "??" : nlopt_data<>::results.at(err));
+}
+
 struct nlopt_obj {
-    explicit nlopt_obj(::nlopt_algorithm algo, problem &prob)
+    using data = nlopt_data<>;
+    explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs,
+                       double xtol_rel, double xtol_abs, int maxeval, int maxtime)
         : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy)
     {
         // Extract and set problem dimension.
@@ -58,104 +123,155 @@ struct nlopt_obj {
         m_value.reset(::nlopt_create(algo, n));
         // Try to init the nlopt_obj.
         if (!m_value) {
-            pagmo_throw(std::invalid_argument, "the creation of an nlopt_opt object failed");
+            pagmo_throw(std::invalid_argument, "the creation of the nlopt_opt object failed");
         }
+
+        // NLopt does not handle MOO.
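+        // (Every algorithm exposed by this wrapper is a single-objective optimiser.)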
if (prob.get_nobj() != 1u) { - // TODO error message - pagmo_throw(std::invalid_argument, "" + get_name()); + pagmo_throw(std::invalid_argument, "NLopt algorithms cannot handle multi-objective optimization"); } + // Constraints support will come later. if (prob.get_nc()) { // TODO error message - pagmo_throw(std::invalid_argument, "" + get_name()); + pagmo_throw(std::invalid_argument, ""); + } + + ::nlopt_result res; + + // Box bounds. + const auto bounds = prob.get_bounds(); + res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); } + // This is just a vector_double that is re-used across objfun invocations. // It will hold the current decision vector. m_dv.resize(prob.get_nx()); // Set the objfun + gradient. - auto res = ::nlopt_set_min_objective(m_value.get(), - [](unsigned dim, const double *x, double *grad, void *f_data) { - // Get *this back from the function data. - auto &nlo = *static_cast(f_data); - - // A few shortcuts. - auto &p = nlo.m_prob; - auto &dv = nlo.m_dv; - auto &sp = nlo.m_sp; - - // A couple of sanity checks. - assert(dim == p.get_nx()); - assert(dv.size() == dim); - - if (grad && !p.has_gradient()) { - // If grad is not null, it means we are in an algorithm - // that needs the gradient. If the problem does not support it, - // we error out. - // TODO error message - pagmo_throw(std::invalid_argument, "" + nlo.get_name()); - } - - // Copy the decision vector in our temporary dv vector_double, - // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); - - // Compute fitness and, if needed, gradient. - const auto fitness = p.fitness(dv); - if (grad) { - const auto gradient = p.gradient(dv); - auto g_it = gradient.begin(); - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - - // First we fill the dense output gradient with zeroes. - std::fill(grad, grad + dim, 0.); - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. - for (const auto &t : sp) { - if (t.first == 0u) { - // NOTE: we just need the gradient of the objfun, - // i.e., those (i,j) pairs in which i == 0. - grad[t.second] = *g_it; - ++g_it; - } else { - break; - } - } - } - - // Return the objfun value. - return fitness[0]; - }, - static_cast(this)); + res = ::nlopt_set_min_objective( + m_value.get(), + [](unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. 
If the problem does not support it, + // we error out. + pagmo_throw(std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' a gradient was requested, but the optimisation problem '" + p.get_name() + + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and, if needed, gradient. + const auto fitness = p.fitness(dv); + if (grad) { + const auto gradient = p.gradient(dv); + auto g_it = gradient.begin(); + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // First we fill the dense output gradient with zeroes. + std::fill(grad, grad + dim, 0.); + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { + // NOTE: we just need the gradient of the objfun, + // i.e., those (i,j) pairs in which i == 0. We know that the gradient + // of the objfun, if present, starts at the beginning of sp, as sp is + // sorted in lexicographic fashion. + grad[it->second] = *g_it; + } + } + + // Return the objfun value. + return fitness[0]; + }, + static_cast(this)); if (res != NLOPT_SUCCESS) { - // TODO - throw; + pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); } - // Box bounds. - const auto bounds = prob.get_bounds(); - res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + // Handle the various stopping criteria. + res = ::nlopt_set_stopval(m_value.get(), stopval); if (res != NLOPT_SUCCESS) { - // TODO - throw; + pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " + + std::to_string(stopval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); } - res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); if (res != NLOPT_SUCCESS) { - // TODO - throw; + pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " + + std::to_string(ftol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); } - - // TODO hard-coded. 
- res = ::nlopt_set_ftol_abs(m_value.get(), 1E-12); - // res = ::nlopt_set_maxeval(m_value.get(), 10000); + res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); if (res != NLOPT_SUCCESS) { - // TODO - throw; + pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " + + std::to_string(ftol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " + + std::to_string(xtol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " + + std::to_string(xtol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_maxeval(m_value.get(), maxeval); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " + + std::to_string(maxeval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_maxtime(m_value.get(), maxtime); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " + + std::to_string(maxtime) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); } } nlopt_obj(const nlopt_obj &other) @@ -169,10 +285,8 @@ struct nlopt_obj { nlopt_obj(nlopt_obj &&) = default; nlopt_obj &operator=(const nlopt_obj &) = delete; nlopt_obj &operator=(nlopt_obj &&) = delete; - std::string get_name() const - { - return ::nlopt_algorithm_name(::nlopt_get_algorithm(m_value.get())); - } + + // Data members. problem &m_prob; sparsity_pattern m_sp; std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 0b137f4d1..79107be99 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -32,13 +32,15 @@ see https://www.gnu.org/licenses/. */ #include #include #include -#include +#include using namespace pagmo; BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) { - population pop{ackley{10}, 20}; - algorithm algo{nlopt{"cobylsa"}}; - std::cout << algo.evolve(pop) << '\n'; + population pop{rosenbrock{10}, 20}; + algorithm algo{nlopt{"cobyla"}}; + pop = algo.evolve(pop); + std::cout << algo << '\n'; + std::cout << pop << '\n'; } From fd0cdf82a64b09382655f1dbb94a51cddfd780d1 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Mon, 3 Apr 2017 02:01:12 +0200 Subject: [PATCH 08/57] Small simplification. 
--- include/pagmo/detail/nlopt_utils.hpp | 11 ++--------- tests/nlopt.cpp | 1 + 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index 97832c394..18ba26041 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -274,15 +274,8 @@ struct nlopt_obj { + nlopt_res2string(res)); } } - nlopt_obj(const nlopt_obj &other) - : m_prob(other.m_prob), m_sp(other.m_sp), m_value(::nlopt_copy(other.m_value.get()), ::nlopt_destroy), - m_dv(other.m_dv) - { - if (!m_value) { - pagmo_throw(std::invalid_argument, "the copy of an nlopt_opt object failed"); - } - } - nlopt_obj(nlopt_obj &&) = default; + nlopt_obj(const nlopt_obj &) = delete; + nlopt_obj(nlopt_obj &&) = delete; nlopt_obj &operator=(const nlopt_obj &) = delete; nlopt_obj &operator=(nlopt_obj &&) = delete; diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 79107be99..92d7d84ea 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -41,6 +41,7 @@ BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) population pop{rosenbrock{10}, 20}; algorithm algo{nlopt{"cobyla"}}; pop = algo.evolve(pop); + pop = algo.evolve(pop); std::cout << algo << '\n'; std::cout << pop << '\n'; } From 058afcff4c709105cf572a52a5085c84773da060 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Mon, 3 Apr 2017 22:04:50 +0200 Subject: [PATCH 09/57] Implement the gradient for the rosenbrock function. --- include/pagmo/problems/rosenbrock.hpp | 49 ++++++++++++++++----------- tests/rosenbrock.cpp | 11 ++++++ 2 files changed, 41 insertions(+), 19 deletions(-) diff --git a/include/pagmo/problems/rosenbrock.hpp b/include/pagmo/problems/rosenbrock.hpp index 23479d676..3e521eb8b 100644 --- a/include/pagmo/problems/rosenbrock.hpp +++ b/include/pagmo/problems/rosenbrock.hpp @@ -59,10 +59,11 @@ namespace pagmo struct rosenbrock { /// Constructor from dimension /** - * @param dim problem dimension - * @throw std::invalid_argument if \p dim is less than 2 + * @param dim problem dimension. + * + * @throw std::invalid_argument if \p dim is less than 2. */ - rosenbrock(unsigned int dim = 2u) : m_dim(dim) + rosenbrock(vector_double::size_type dim = 2u) : m_dim(dim) { if (dim < 2u) { pagmo_throw(std::invalid_argument, @@ -71,7 +72,7 @@ struct rosenbrock { }; /// Fitness computation /** - * Computes the fitness for this UDP + * Computes the fitness for this UDP. * * @param x the decision vector. * @@ -79,37 +80,47 @@ struct rosenbrock { */ vector_double fitness(const vector_double &x) const { - vector_double f(1, 0.); + double retval = 0.; for (decltype(m_dim) i = 0u; i < m_dim - 1u; ++i) { - f[0] += 100. * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1]) + (x[i] - 1) * (x[i] - 1); + retval += 100. * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1]) + (x[i] - 1) * (x[i] - 1); } - return f; + return {retval}; } /// Box-bounds /** - * - * It returns the box-bounds for this UDP. - * - * @return the lower and upper bounds for each of the decision vector components + * @return the lower (-5.) and upper (10.) bounds for each decision vector component. */ std::pair get_bounds() const { - vector_double lb(m_dim, -5.); - vector_double ub(m_dim, 10.); - return {lb, ub}; + return {vector_double(m_dim, -5.), vector_double(m_dim, 10.)}; } /// Problem name /** - * - * - * @return a string containing the problem name + * @return a string containing the problem name. 
*/ std::string get_name() const { return "Multidimensional Rosenbrock Function"; } - /// Optimal solution + /// Gradient. + /** + * @param x the input decision vector. + * + * @return the gradient of the fitness function in \p x. + */ + vector_double gradient(const vector_double &x) const + { + vector_double retval(m_dim); + retval[0] = -400. * x[0] * (x[1] - x[0] * x[0]) - 2. * (1 - x[0]); + for (unsigned i = 1; i < m_dim - 1u; ++i) { + retval[i] + = -400. * x[i] * (x[i + 1u] - x[i] * x[i]) - 2. * (1 - x[i]) + 200. * (x[i] - x[i - 1u] * x[i - 1u]); + } + retval[m_dim - 1u] = 200. * (x[m_dim - 1u] - x[m_dim - 2u] * x[m_dim - 2u]); + return retval; + } + /// Optimal solution. /** * @return the decision vector corresponding to the best solution for this problem. */ @@ -131,7 +142,7 @@ struct rosenbrock { ar(m_dim); } /// Problem dimensions - unsigned int m_dim; + vector_double::size_type m_dim; }; } // namespace pagmo diff --git a/tests/rosenbrock.cpp b/tests/rosenbrock.cpp index c80ef6a68..5e95b4976 100644 --- a/tests/rosenbrock.cpp +++ b/tests/rosenbrock.cpp @@ -30,6 +30,7 @@ see https://www.gnu.org/licenses/. */ #include #include +#include #include #include #include @@ -61,6 +62,16 @@ BOOST_AUTO_TEST_CASE(rosenbrock_test) // Best known test auto x_best = ros2.best_known(); BOOST_CHECK((x_best == vector_double{1., 1.})); + // Gradient test. + auto g2 = ros2.gradient({.1, .2}); + BOOST_CHECK(std::abs(g2[0] + 9.4) < 1E-8); + BOOST_CHECK(std::abs(g2[1] - 38.) < 1E-8); + auto g5 = ros5.gradient({.1, .2, .3, .4, .5}); + BOOST_CHECK(std::abs(g5[0] + 9.4) < 1E-8); + BOOST_CHECK(std::abs(g5[1] - 15.6) < 1E-8); + BOOST_CHECK(std::abs(g5[2] - 13.4) < 1E-8); + BOOST_CHECK(std::abs(g5[3] - 6.4) < 1E-8); + BOOST_CHECK(std::abs(g5[4] - 68.) < 1E-8); } BOOST_AUTO_TEST_CASE(rosenbrock_serialization_test) From 4d1af1689007213df66182701c50abc456ea7db4 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Mon, 3 Apr 2017 22:05:34 +0200 Subject: [PATCH 10/57] Add a few more NLopt algos. --- include/pagmo/detail/nlopt_utils.hpp | 4 ++++ tests/nlopt.cpp | 5 ++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index 18ba26041..d127c6de6 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -99,6 +99,10 @@ inline typename nlopt_data<>::names_map_t nlopt_names_map() retval.insert(value_type("praxis", NLOPT_LN_PRAXIS)); retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD)); retval.insert(value_type("sbplx", NLOPT_LN_SBPLX)); + retval.insert(value_type("mma", NLOPT_LD_MMA)); + retval.insert(value_type("ccsaq", NLOPT_LD_CCSAQ)); + retval.insert(value_type("slsqp", NLOPT_LD_SLSQP)); + retval.insert(value_type("lbfgs", NLOPT_LD_LBFGS)); return retval; } diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 92d7d84ea..826a79dee 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -38,9 +38,8 @@ using namespace pagmo; BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) { - population pop{rosenbrock{10}, 20}; - algorithm algo{nlopt{"cobyla"}}; - pop = algo.evolve(pop); + population pop{rosenbrock{100}, 20}; + algorithm algo{nlopt{"lbfgs"}}; pop = algo.evolve(pop); std::cout << algo << '\n'; std::cout << pop << '\n'; From 79d8240e8ec09897b7089fd1c8560051ff655dc7 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 01:05:21 +0200 Subject: [PATCH 11/57] More work on nlopt. 
--- include/pagmo/algorithms/nlopt.hpp | 75 ++++++++++++++++++++++++---- include/pagmo/detail/nlopt_utils.hpp | 36 ++++++++++++- tests/nlopt.cpp | 4 +- 3 files changed, 101 insertions(+), 14 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index f0df6cdbf..395fa22c1 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -26,17 +26,22 @@ You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with the PaGMO library. If not, see https://www.gnu.org/licenses/. */ -#ifndef PAGMO_ALGORITHMS_DE_HPP -#define PAGMO_ALGORITHMS_DE_HPP +#ifndef PAGMO_ALGORITHMS_NLOPT_HPP +#define PAGMO_ALGORITHMS_NLOPT_HPP #include #include +#include #include #include #include #include #include #include +#include +#include +#include +#include #include #include @@ -53,6 +58,13 @@ class nlopt using nlopt_obj = detail::nlopt_obj; using nlopt_data = detail::nlopt_data<>; +public: + using log_line_type = std::tuple; + using log_type = std::vector; + +private: + static_assert(std::is_same::value, "Invalid log line type."); + public: nlopt() : nlopt("sbplx") { @@ -126,8 +138,11 @@ class nlopt } auto &prob = pop.get_problem(); + + // Create the nlopt obj. + // NOTE: this will check also the problem's properties. nlopt_obj no(nlopt_data::names.left.at(m_algo), prob, m_sc_stopval, m_sc_ftol_rel, m_sc_ftol_abs, m_sc_xtol_rel, - m_sc_xtol_abs, m_sc_maxeval, m_sc_maxtime); + m_sc_xtol_abs, m_sc_maxeval, m_sc_maxtime, m_verbosity); // Setup of the initial guess. vector_double initial_guess; @@ -152,11 +167,8 @@ class nlopt initial_guess = pop.get_x()[idx]; } // Check the initial guess. - if (initial_guess.size() != prob.get_nx()) { - pagmo_throw(std::invalid_argument, "the size of the initial guess, " + std::to_string(initial_guess.size()) - + ", is different from the dimension of the problem, " - + std::to_string(prob.get_nx())); - } + // NOTE: this should be guaranteed by the population's invariants. + assert(initial_guess.size() == prob.get_nx()); const auto bounds = prob.get_bounds(); for (decltype(bounds.first.size()) i = 0; i < bounds.first.size(); ++i) { if (std::isnan(initial_guess[i])) { @@ -172,21 +184,61 @@ class nlopt // Run the optimisation and store the status returned by NLopt. double fitness; m_last_opt_result = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &fitness); + if (m_verbosity) { + // Print to screen the result of the optimisation, if we are being verbose. + std::cout << "\nOptimisation return status: " << detail::nlopt_res2string(m_last_opt_result) << '\n'; + } + + // Replace the log. + m_log = std::move(no.m_log); // Store the new individual into the population. 
- pop.set_xf(pop.best_idx(), initial_guess, {fitness}); + if (boost::any_cast(&m_replace)) { + const auto &s_replace = boost::any_cast(m_replace); + if (s_replace == "best") { + pop.set_xf(pop.best_idx(), initial_guess, {fitness}); + } else if (s_replace == "worst") { + pop.set_xf(pop.worst_idx(), initial_guess, {fitness}); + } else { + assert(s_replace == "random"); + std::uniform_int_distribution dist(0, pop.size() - 1u); + pop.set_xf(dist(m_e), initial_guess, {fitness}); + } + } else { + const auto idx = boost::any_cast(m_replace); + if (idx >= pop.size()) { + pagmo_throw(std::out_of_range, "cannot replace the individual at index " + std::to_string(idx) + + " after evolution: the population has a size of only " + + std::to_string(pop.size())); + } + pop.set_xf(idx, initial_guess, {fitness}); + } + + // Return the evolved pop. return pop; } std::string get_name() const { return "NLopt - " + m_algo; } + void set_verbosity(unsigned n) + { + m_verbosity = n; + } std::string get_extra_info() const { int major, minor, bugfix; ::nlopt_version(&major, &minor, &bugfix); return "\tNLopt version: " + std::to_string(major) + "." + std::to_string(minor) + "." + std::to_string(bugfix) - + "\n\tLast optimisation return code: " + detail::nlopt_res2string(m_last_opt_result) + + "\n\tLast optimisation return code: " + detail::nlopt_res2string(m_last_opt_result) + "\n\tVerbosity: " + + std::to_string(m_verbosity) + "\n\tIndividual selection " + + (boost::any_cast(&m_select) + ? "idx: " + std::to_string(boost::any_cast(m_select)) + : "policy: " + boost::any_cast(m_select)) + + "\n\tIndividual replacement " + + (boost::any_cast(&m_replace) + ? "idx: " + std::to_string(boost::any_cast(m_replace)) + : "policy: " + boost::any_cast(m_replace)) + "\n\tStopping criteria:\n\t\tstopval: " + (m_sc_stopval == -HUGE_VAL ? "disabled" : detail::to_string(m_sc_stopval)) + "\n\t\tftol_rel: " + (m_sc_ftol_rel <= 0. ? "disabled" : detail::to_string(m_sc_ftol_rel)) + "\n\t\tftol_abs: " @@ -212,6 +264,9 @@ class nlopt double m_sc_xtol_abs = 0.; int m_sc_maxeval = 0; int m_sc_maxtime = 0; + // Verbosity/log. + unsigned m_verbosity = 0; + mutable log_type m_log; }; } diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index d127c6de6..14d832280 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -34,6 +34,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -41,8 +42,10 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include +#include #include #include @@ -117,10 +120,15 @@ inline std::string nlopt_res2string(::nlopt_result err) } struct nlopt_obj { + // Single entry of the log (feval, fitness, dv). + using log_line_type = std::tuple; + // The log. + using log_type = std::vector; + // Shortcut to the static data. using data = nlopt_data<>; explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs, - double xtol_rel, double xtol_abs, int maxeval, int maxtime) - : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy) + double xtol_rel, double xtol_abs, int maxeval, int maxtime, unsigned verbosity) + : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity) { // Extract and set problem dimension. 
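+        // NOTE: boost::numeric_cast will throw if the problem dimension cannot be
+        // represented by the unsigned type used in the NLopt C API.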
const auto n = boost::numeric_cast(prob.get_nx()); @@ -141,6 +149,7 @@ struct nlopt_obj { pagmo_throw(std::invalid_argument, ""); } + // Variable to hold the result of various operations. ::nlopt_result res; // Box bounds. @@ -172,6 +181,9 @@ struct nlopt_obj { auto &p = nlo.m_prob; auto &dv = nlo.m_dv; auto &sp = nlo.m_sp; + const auto verb = nlo.m_verbosity; + auto &f_count = nlo.m_objfun_counter; + auto &log = nlo.m_log; // A couple of sanity checks. assert(dim == p.get_nx()); @@ -217,6 +229,21 @@ struct nlopt_obj { } } + // Update the log if requested. + if (verb && !(f_count % verb)) { + if (!(f_count / verb % 50u)) { + // Every 50 lines print the column names. + print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", '\n'); + } + // Print to screen the log line. + print(std::setw(10), f_count, std::setw(15), fitness[0], '\n'); + // Record the log. + log.emplace_back(f_count, fitness[0], dv); + } + + // Update the counter. + ++f_count; + // Return the objfun value. return fitness[0]; }, @@ -278,6 +305,8 @@ struct nlopt_obj { + nlopt_res2string(res)); } } + + // Delete all other ctors/assignment ops. nlopt_obj(const nlopt_obj &) = delete; nlopt_obj(nlopt_obj &&) = delete; nlopt_obj &operator=(const nlopt_obj &) = delete; @@ -288,6 +317,9 @@ struct nlopt_obj { sparsity_pattern m_sp; std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; vector_double m_dv; + unsigned m_verbosity; + unsigned long m_objfun_counter = 0; + log_type m_log; }; } } diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 826a79dee..58c0b6d92 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -40,7 +40,7 @@ BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) { population pop{rosenbrock{100}, 20}; algorithm algo{nlopt{"lbfgs"}}; + algo.set_verbosity(10); pop = algo.evolve(pop); - std::cout << algo << '\n'; - std::cout << pop << '\n'; + std::cout << '\n' << algo << '\n'; } From cc3610f5bd1da73d6c3a79fa63b27e97a3653f43 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 02:52:21 +0200 Subject: [PATCH 12/57] First version that implements constraints handling. --- include/pagmo/algorithms/nlopt.hpp | 33 ++++- include/pagmo/detail/nlopt_utils.hpp | 193 ++++++++++++++++++++++++++- tests/nlopt.cpp | 7 +- 3 files changed, 218 insertions(+), 15 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 395fa22c1..718357ca4 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -53,6 +53,10 @@ see https://www.gnu.org/licenses/. */ namespace pagmo { +// TODO +// - cache +// - optimisation for dense gradients +// - error messages mentioning some algos don't support constraints etc. class nlopt { using nlopt_obj = detail::nlopt_obj; @@ -138,6 +142,7 @@ class nlopt } auto &prob = pop.get_problem(); + const auto nc = prob.get_nc(); // Create the nlopt obj. // NOTE: this will check also the problem's properties. 
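+        // (For instance, the construction will fail if the problem is multi-objective.)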
@@ -196,13 +201,25 @@ class nlopt if (boost::any_cast(&m_replace)) { const auto &s_replace = boost::any_cast(m_replace); if (s_replace == "best") { - pop.set_xf(pop.best_idx(), initial_guess, {fitness}); + if (nc) { + pop.set_x(pop.best_idx(), initial_guess); + } else { + pop.set_xf(pop.best_idx(), initial_guess, {fitness}); + } } else if (s_replace == "worst") { - pop.set_xf(pop.worst_idx(), initial_guess, {fitness}); + if (nc) { + pop.set_x(pop.worst_idx(), initial_guess); + } else { + pop.set_xf(pop.worst_idx(), initial_guess, {fitness}); + } } else { assert(s_replace == "random"); std::uniform_int_distribution dist(0, pop.size() - 1u); - pop.set_xf(dist(m_e), initial_guess, {fitness}); + if (nc) { + pop.set_x(dist(m_e), initial_guess); + } else { + pop.set_xf(dist(m_e), initial_guess, {fitness}); + } } } else { const auto idx = boost::any_cast(m_replace); @@ -211,7 +228,11 @@ class nlopt + " after evolution: the population has a size of only " + std::to_string(pop.size())); } - pop.set_xf(idx, initial_guess, {fitness}); + if (nc) { + pop.set_x(idx, initial_guess); + } else { + pop.set_xf(idx, initial_guess, {fitness}); + } } // Return the evolved pop. @@ -258,9 +279,9 @@ class nlopt mutable ::nlopt_result m_last_opt_result = NLOPT_SUCCESS; // Stopping criteria. double m_sc_stopval = -HUGE_VAL; - double m_sc_ftol_rel = 1E-8; + double m_sc_ftol_rel = 0.; double m_sc_ftol_abs = 0.; - double m_sc_xtol_rel = 0.; + double m_sc_xtol_rel = 1E-8; double m_sc_xtol_abs = 0.; int m_sc_maxeval = 0; int m_sc_maxtime = 0; diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index 14d832280..73c4a067f 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -35,6 +35,8 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include +#include #include #include #include @@ -143,12 +145,6 @@ struct nlopt_obj { pagmo_throw(std::invalid_argument, "NLopt algorithms cannot handle multi-objective optimization"); } - // Constraints support will come later. - if (prob.get_nc()) { - // TODO error message - pagmo_throw(std::invalid_argument, ""); - } - // Variable to hold the result of various operations. ::nlopt_result res; @@ -173,7 +169,7 @@ struct nlopt_obj { // Set the objfun + gradient. res = ::nlopt_set_min_objective( m_value.get(), - [](unsigned dim, const double *x, double *grad, void *f_data) { + [](unsigned dim, const double *x, double *grad, void *f_data) -> double { // Get *this back from the function data. auto &nlo = *static_cast(f_data); @@ -254,6 +250,189 @@ struct nlopt_obj { + nlopt_res2string(res)); } + // Vector-valued constraints. + const auto nic = boost::numeric_cast(prob.get_nic()); + const auto nec = boost::numeric_cast(prob.get_nec()); + const auto c_tol = prob.get_c_tol(); + + // Inequality. + if (nic) { + res = ::nlopt_add_inequality_mconstraint( + m_value.get(), nic, + [](unsigned m, double *result, unsigned n, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + + // A couple of sanity checks. + assert(n == p.get_nx()); + assert(dv.size() == n); + assert(m == p.get_nic()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. 
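+                        // (NLopt asks for a constraints gradient only when a gradient-based
+                        // algorithm was selected.)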
+ pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an inequality constraints gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + n, dv.begin()); + + // Compute fitness and write it to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); + if (it_sp == sp.end()) { + // This means that there sparsity data for ineq constraints is empty. Just return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the ineq constraints start. + auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the ineq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (; it_sp != sp.end(); ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } + }, + static_cast(this), c_tol.data() + nec); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the inequality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + } + + // Equality. + if (nec) { + res = ::nlopt_add_equality_mconstraint( + m_value.get(), nec, + [](unsigned m, double *result, unsigned n, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + + // A couple of sanity checks. + assert(n == p.get_nx()); + assert(dv.size() == n); + assert(m == p.get_nec()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. 
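+                        // (Same check as for the objective and the inequality constraints.)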
+ pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an equality constraints gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + n, dv.begin()); + + // Compute fitness and write it to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); + if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { + // This means that there sparsity data for eq constraints is empty: either we went + // at the end of sp, or the first index pair found refers to inequality constraints. Just + // return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the eq constraints start. + auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the eq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } + }, + static_cast(this), c_tol.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the equality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + } + // Handle the various stopping criteria. res = ::nlopt_set_stopval(m_value.get(), stopval); if (res != NLOPT_SUCCESS) { diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 58c0b6d92..43d436b2d 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -32,15 +32,18 @@ see https://www.gnu.org/licenses/. 
*/ #include #include #include +#include #include using namespace pagmo; BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) { - population pop{rosenbrock{100}, 20}; - algorithm algo{nlopt{"lbfgs"}}; + population pop{hock_schittkowsky_71{}, 5}; + pop.get_problem().set_c_tol({1E-6, 1E-6}); + algorithm algo{nlopt{"slsqp"}}; algo.set_verbosity(10); pop = algo.evolve(pop); std::cout << '\n' << algo << '\n'; + std::cout << '\n' << pop << '\n'; } From 7f4282403cee55ed33c731b1c760d1f191b7b399 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:23:39 +0200 Subject: [PATCH 13/57] Don't provide a setter for the population's problem. --- pygmo/core.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/pygmo/core.cpp b/pygmo/core.cpp index de2e9940c..e1171206c 100644 --- a/pygmo/core.cpp +++ b/pygmo/core.cpp @@ -459,7 +459,6 @@ BOOST_PYTHON_MODULE(core) .add_property("problem", bp::make_function(lcast([](population &pop) -> problem & { return pop.get_problem(); }), bp::return_internal_reference<>()), - lcast([](population &pop, const problem &p) { pop.get_problem() = p; }), pygmo::population_problem_docstring().c_str()) .def("get_f", lcast([](const population &pop) { return pygmo::vv_to_a(pop.get_f()); }), pygmo::population_get_f_docstring().c_str()) From d1e944a99ab5eae7abc18b9dfd72c58f3192170b Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:24:09 +0200 Subject: [PATCH 14/57] Doc update. --- pygmo/docstrings.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index a2d5dac70..35a483dbc 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -228,15 +228,11 @@ std::string population_problem_docstring() { return R"(Population's problem. -This property gives direct access to the :class:`~pygmo.core.problem` stored within the population. +This read-only property gives direct access to the :class:`~pygmo.core.problem` stored within the population. Returns: :class:`~pygmo.core.problem`: a reference to the internal problem -Raises: - unspecified: any exception thrown by failures at the intersection between C++ and - Python (e.g., type conversion errors, mismatched function signatures, etc.) when setting the property - )"; } From 331f18260967f3bd1ac6d61b04e25be110670be6 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:24:24 +0200 Subject: [PATCH 15/57] More work on nlopt. --- include/pagmo/algorithms/nlopt.hpp | 2 +- include/pagmo/detail/nlopt_utils.hpp | 41 ++++++++++++++++++---------- tests/nlopt.cpp | 2 +- 3 files changed, 29 insertions(+), 16 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 718357ca4..4c1de6653 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -63,7 +63,7 @@ class nlopt using nlopt_data = detail::nlopt_data<>; public: - using log_line_type = std::tuple; + using log_line_type = std::tuple; using log_type = std::vector; private: diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp index 73c4a067f..0660e2482 100644 --- a/include/pagmo/detail/nlopt_utils.hpp +++ b/include/pagmo/detail/nlopt_utils.hpp @@ -50,6 +50,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include namespace pagmo { @@ -123,7 +124,7 @@ inline std::string nlopt_res2string(::nlopt_result err) struct nlopt_obj { // Single entry of the log (feval, fitness, dv). 
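+    // NOTE: the log line now records (feval, fitness, n. of violated constraints, total violation, feasibility).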
- using log_line_type = std::tuple; + using log_line_type = std::tuple; // The log. using log_type = std::vector; // Shortcut to the static data. @@ -192,8 +193,8 @@ struct nlopt_obj { pagmo_throw(std::invalid_argument, "during an optimization with the NLopt algorithm '" + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' a gradient was requested, but the optimisation problem '" + p.get_name() - + "' does not provide it"); + + "' a fitness gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); } // Copy the decision vector in our temporary dv vector_double, @@ -227,14 +228,26 @@ struct nlopt_obj { // Update the log if requested. if (verb && !(f_count % verb)) { + // Constraints bits. + const auto ctol = p.get_c_tol(); + auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), + ctol.data()); + auto c1ineq = detail::test_ineq_constraints( + fitness.data() + 1 + p.get_nec(), fitness.data() + fitness.size(), ctol.data() + p.get_nec()); + auto nv = p.get_nc() - c1eq.first - c1ineq.first; + auto l = c1eq.second + c1ineq.second; + const auto feas = p.feasibility_f(fitness); + if (!(f_count / verb % 50u)) { // Every 50 lines print the column names. - print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", '\n'); + print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", + std::setw(15), "viol. norm:", '\n'); } // Print to screen the log line. - print(std::setw(10), f_count, std::setw(15), fitness[0], '\n'); + print(std::setw(10), f_count, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), l, + feas ? "" : " i", '\n'); // Record the log. - log.emplace_back(f_count, fitness[0], dv); + log.emplace_back(f_count, fitness[0], nv, l, feas); } // Update the counter. @@ -259,7 +272,7 @@ struct nlopt_obj { if (nic) { res = ::nlopt_add_inequality_mconstraint( m_value.get(), nic, - [](unsigned m, double *result, unsigned n, const double *x, double *grad, void *f_data) { + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { // Get *this back from the function data. auto &nlo = *static_cast(f_data); @@ -269,8 +282,8 @@ struct nlopt_obj { auto &sp = nlo.m_sp; // A couple of sanity checks. - assert(n == p.get_nx()); - assert(dv.size() == n); + assert(dim == p.get_nx()); + assert(dv.size() == dim); assert(m == p.get_nic()); if (grad && !p.has_gradient()) { @@ -287,7 +300,7 @@ struct nlopt_obj { // Copy the decision vector in our temporary dv vector_double, // for use in the pagmo API. - std::copy(x, x + n, dv.begin()); + std::copy(x, x + dim, dv.begin()); // Compute fitness and write it to the output. // NOTE: fitness is nobj + nec + nic. @@ -347,7 +360,7 @@ struct nlopt_obj { if (nec) { res = ::nlopt_add_equality_mconstraint( m_value.get(), nec, - [](unsigned m, double *result, unsigned n, const double *x, double *grad, void *f_data) { + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { // Get *this back from the function data. auto &nlo = *static_cast(f_data); @@ -357,8 +370,8 @@ struct nlopt_obj { auto &sp = nlo.m_sp; // A couple of sanity checks. - assert(n == p.get_nx()); - assert(dv.size() == n); + assert(dim == p.get_nx()); + assert(dv.size() == dim); assert(m == p.get_nec()); if (grad && !p.has_gradient()) { @@ -375,7 +388,7 @@ struct nlopt_obj { // Copy the decision vector in our temporary dv vector_double, // for use in the pagmo API. 
- std::copy(x, x + n, dv.begin()); + std::copy(x, x + dim, dv.begin()); // Compute fitness and write it to the output. // NOTE: fitness is nobj + nec + nic. diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 43d436b2d..9a3f798a9 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -42,7 +42,7 @@ BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) population pop{hock_schittkowsky_71{}, 5}; pop.get_problem().set_c_tol({1E-6, 1E-6}); algorithm algo{nlopt{"slsqp"}}; - algo.set_verbosity(10); + algo.set_verbosity(1); pop = algo.evolve(pop); std::cout << '\n' << algo << '\n'; std::cout << '\n' << pop << '\n'; From 7ce85a00fc6a301af20e6a8f6d19fcf5722207ce Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:23:39 +0200 Subject: [PATCH 16/57] Don't provide a setter for the population's problem. --- pygmo/core.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/pygmo/core.cpp b/pygmo/core.cpp index de2e9940c..e1171206c 100644 --- a/pygmo/core.cpp +++ b/pygmo/core.cpp @@ -459,7 +459,6 @@ BOOST_PYTHON_MODULE(core) .add_property("problem", bp::make_function(lcast([](population &pop) -> problem & { return pop.get_problem(); }), bp::return_internal_reference<>()), - lcast([](population &pop, const problem &p) { pop.get_problem() = p; }), pygmo::population_problem_docstring().c_str()) .def("get_f", lcast([](const population &pop) { return pygmo::vv_to_a(pop.get_f()); }), pygmo::population_get_f_docstring().c_str()) From f7e9a58992230f160c5632b4b37b65b3a55c9705 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:24:09 +0200 Subject: [PATCH 17/57] Doc update. --- pygmo/docstrings.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index a2d5dac70..35a483dbc 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -228,15 +228,11 @@ std::string population_problem_docstring() { return R"(Population's problem. -This property gives direct access to the :class:`~pygmo.core.problem` stored within the population. +This read-only property gives direct access to the :class:`~pygmo.core.problem` stored within the population. Returns: :class:`~pygmo.core.problem`: a reference to the internal problem -Raises: - unspecified: any exception thrown by failures at the intersection between C++ and - Python (e.g., type conversion errors, mismatched function signatures, etc.) when setting the property - )"; } From 15c5aa24704e42714914910a9f70c7e59cb6496b Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:31:23 +0200 Subject: [PATCH 18/57] Small test addition. 
---
 pygmo/test.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pygmo/test.py b/pygmo/test.py
index 47d2ea71b..6cac06b1a 100644
--- a/pygmo/test.py
+++ b/pygmo/test.py
@@ -177,10 +177,10 @@ def run_problem_test(self):
         pop = population(rosenbrock(), size=10)
         self.assertTrue(pop.problem.extract(null_problem) is None)
         self.assertTrue(pop.problem.extract(rosenbrock) is not None)
-        pop.problem = problem(zdt(param=10))
-        self.assertRaises(ValueError, lambda: pop.best_idx())
-        self.assertTrue(pop.problem.extract(null_problem) is None)
-        self.assertTrue(pop.problem.extract(zdt) is not None)
+
+        def prob_setter():
+            pop.problem = problem(zdt(param=10))
+        self.assertRaises(AttributeError, prob_setter)

     def run_push_back_test(self):
         from .core import population, rosenbrock

From 01063c4e357feb855331d7e79f6223ab4325e3b2 Mon Sep 17 00:00:00 2001
From: Francesco Biscani
Date: Tue, 4 Apr 2017 21:41:47 +0200
Subject: [PATCH 19/57] Move all nlopt functionality in one file.

---
 include/pagmo/algorithms/nlopt.hpp   | 472 +++++++++++++++++++++++-
 include/pagmo/detail/nlopt_utils.hpp | 519 ---------------------------
 tests/CMakeLists.txt                 |   1 -
 tests/nlopt_utils.cpp                |  38 --
 4 files changed, 471 insertions(+), 559 deletions(-)
 delete mode 100644 include/pagmo/detail/nlopt_utils.hpp
 delete mode 100644 tests/nlopt_utils.cpp

diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp
index 4c1de6653..765a3d24c 100644
--- a/include/pagmo/algorithms/nlopt.hpp
+++ b/include/pagmo/algorithms/nlopt.hpp
@@ -31,28 +31,498 @@ see https://www.gnu.org/licenses/. */

 #include
 #include
+#include
+#include
 #include
 #include
+#include
+#include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
 #include
 #include
+#include
+#include

 namespace pagmo
 {

+namespace detail
+{
+
+// Usual trick with global read-only data useful to the NLopt wrapper.
+template <typename T = void>
+struct nlopt_data {
+    // The idea here is to establish a bijection between string name (e.g., "cobyla")
+    // and the enums used in the NLopt C API to refer to the algos (e.g., NLOPT_LN_COBYLA).
+    // We use a bidirectional map so that we can map both string -> enum and enum -> string,
+    // depending on what is needed.
+    using names_map_t = boost::bimap<std::string, ::nlopt_algorithm>;
+    static const names_map_t names;
+    // A map to link a human-readable description to NLopt return codes.
+    using result_map_t = std::unordered_map<::nlopt_result, std::string>;
+    static const result_map_t results;
+};
+
+// Static init.
+template <typename T>
+const typename nlopt_data<T>::result_map_t nlopt_data<T>::results = {
+    {NLOPT_SUCCESS, "NLOPT_SUCCESS (value = " + std::to_string(NLOPT_SUCCESS) + ", Generic success return value)"},
+    {NLOPT_STOPVAL_REACHED, "NLOPT_STOPVAL_REACHED (value = " + std::to_string(NLOPT_STOPVAL_REACHED)
+                                + ", Optimization stopped because stopval was reached)"},
+    {NLOPT_FTOL_REACHED, "NLOPT_FTOL_REACHED (value = " + std::to_string(NLOPT_FTOL_REACHED)
+                             + ", Optimization stopped because ftol_rel or ftol_abs was reached)"},
+    {NLOPT_XTOL_REACHED, "NLOPT_XTOL_REACHED (value = " + std::to_string(NLOPT_XTOL_REACHED)
+                             + ", Optimization stopped because xtol_rel or xtol_abs was reached)"},
+    {NLOPT_MAXEVAL_REACHED, "NLOPT_MAXEVAL_REACHED (value = " + std::to_string(NLOPT_MAXEVAL_REACHED)
+                                + ", Optimization stopped because maxeval was reached)"},
+    {NLOPT_MAXTIME_REACHED, "NLOPT_MAXTIME_REACHED (value = " + std::to_string(NLOPT_MAXTIME_REACHED)
+                                + ", Optimization stopped because maxtime was reached)"},
+    {NLOPT_FAILURE, "NLOPT_FAILURE (value = " + std::to_string(NLOPT_FAILURE) + ", Generic failure code)"},
+    {NLOPT_INVALID_ARGS, "NLOPT_INVALID_ARGS (value = " + std::to_string(NLOPT_INVALID_ARGS) + ", Invalid arguments)"},
+    {NLOPT_OUT_OF_MEMORY,
+     "NLOPT_OUT_OF_MEMORY (value = " + std::to_string(NLOPT_OUT_OF_MEMORY) + ", Ran out of memory)"},
+    {NLOPT_ROUNDOFF_LIMITED, "NLOPT_ROUNDOFF_LIMITED (value = " + std::to_string(NLOPT_ROUNDOFF_LIMITED)
+                                 + ", Halted because roundoff errors limited progress)"},
+    {NLOPT_FORCED_STOP,
+     "NLOPT_FORCED_STOP (value = " + std::to_string(NLOPT_FORCED_STOP) + ", Halted because of a forced termination)"}};
+
+// Initialise the mapping between algo names and enums for the supported algorithms.
+inline typename nlopt_data<>::names_map_t nlopt_names_map()
+{
+    typename nlopt_data<>::names_map_t retval;
+    using value_type = typename nlopt_data<>::names_map_t::value_type;
+    retval.insert(value_type("cobyla", NLOPT_LN_COBYLA));
+    retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA));
+    retval.insert(value_type("praxis", NLOPT_LN_PRAXIS));
+    retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD));
+    retval.insert(value_type("sbplx", NLOPT_LN_SBPLX));
+    retval.insert(value_type("mma", NLOPT_LD_MMA));
+    retval.insert(value_type("ccsaq", NLOPT_LD_CCSAQ));
+    retval.insert(value_type("slsqp", NLOPT_LD_SLSQP));
+    retval.insert(value_type("lbfgs", NLOPT_LD_LBFGS));
+    return retval;
+}
+
+// Static init using the helper function above.
+template <typename T>
+const typename nlopt_data<T>::names_map_t nlopt_data<T>::names = nlopt_names_map();
+
+// Convert an NLopt result in a more descriptive string.
+inline std::string nlopt_res2string(::nlopt_result err)
+{
+    return (nlopt_data<>::results.find(err) == nlopt_data<>::results.end() ? "??" : nlopt_data<>::results.at(err));
+}
+
+struct nlopt_obj {
+    // Single entry of the log (feval, fitness, n. of violated constraints, total violation, feasibility).
+    using log_line_type = std::tuple<unsigned long, double, vector_double::size_type, double, bool>;
+    // The log.
+    using log_type = std::vector<log_line_type>;
+    // Shortcut to the static data.
+    using data = nlopt_data<>;
+    explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs,
+                       double xtol_rel, double xtol_abs, int maxeval, int maxtime, unsigned verbosity)
+        : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity)
+    {
+        // Extract and set problem dimension.
+        const auto n = boost::numeric_cast<unsigned>(prob.get_nx());
+        m_value.reset(::nlopt_create(algo, n));
+        // Try to init the nlopt_obj.
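+        // NOTE: ::nlopt_create() signals failure (e.g., memory exhaustion) by returning a null pointer.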
+ if (!m_value) { + pagmo_throw(std::invalid_argument, "the creation of the nlopt_opt object failed"); + } + + // NLopt does not handle MOO. + if (prob.get_nobj() != 1u) { + pagmo_throw(std::invalid_argument, "NLopt algorithms cannot handle multi-objective optimization"); + } + + // Variable to hold the result of various operations. + ::nlopt_result res; + + // Box bounds. + const auto bounds = prob.get_bounds(); + res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + + // This is just a vector_double that is re-used across objfun invocations. + // It will hold the current decision vector. + m_dv.resize(prob.get_nx()); + // Set the objfun + gradient. + res = ::nlopt_set_min_objective( + m_value.get(), + [](unsigned dim, const double *x, double *grad, void *f_data) -> double { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + const auto verb = nlo.m_verbosity; + auto &f_count = nlo.m_objfun_counter; + auto &log = nlo.m_log; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw(std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' a fitness gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and, if needed, gradient. + const auto fitness = p.fitness(dv); + if (grad) { + const auto gradient = p.gradient(dv); + auto g_it = gradient.begin(); + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // First we fill the dense output gradient with zeroes. + std::fill(grad, grad + dim, 0.); + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { + // NOTE: we just need the gradient of the objfun, + // i.e., those (i,j) pairs in which i == 0. We know that the gradient + // of the objfun, if present, starts at the beginning of sp, as sp is + // sorted in lexicographic fashion. + grad[it->second] = *g_it; + } + } + + // Update the log if requested. + if (verb && !(f_count % verb)) { + // Constraints bits. 
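(The sparse-to-dense scatter a few lines above is the heart of the objective callback. A self-contained sketch with hypothetical data: a lexicographically sorted sparsity pattern in which the objective, row 0, depends on variables 0 and 2.)

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
        const std::vector<std::pair<std::size_t, std::size_t>> sp{{0u, 0u}, {0u, 2u}, {1u, 1u}};
        const std::vector<double> gradient{1.5, -2.5, 42.}; // one value per sparsity pair
        std::vector<double> grad(3u, 0.);                   // dense objfun gradient
        auto g_it = gradient.begin();
        // Stop at the first pair with row index != 0: those belong to constraints.
        for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) {
            grad[it->second] = *g_it;
        }
        for (auto x : grad) {
            std::cout << x << ' '; // prints: 1.5 0 -2.5
        }
        std::cout << '\n';
    }
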
+ const auto ctol = p.get_c_tol(); + auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), + ctol.data()); + auto c1ineq = detail::test_ineq_constraints( + fitness.data() + 1 + p.get_nec(), fitness.data() + fitness.size(), ctol.data() + p.get_nec()); + auto nv = p.get_nc() - c1eq.first - c1ineq.first; + auto l = c1eq.second + c1ineq.second; + const auto feas = p.feasibility_f(fitness); + + if (!(f_count / verb % 50u)) { + // Every 50 lines print the column names. + print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", + std::setw(15), "viol. norm:", '\n'); + } + // Print to screen the log line. + print(std::setw(10), f_count, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), l, + feas ? "" : " i", '\n'); + // Record the log. + log.emplace_back(f_count, fitness[0], nv, l, feas); + } + + // Update the counter. + ++f_count; + + // Return the objfun value. + return fitness[0]; + }, + static_cast(this)); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + + // Vector-valued constraints. + const auto nic = boost::numeric_cast(prob.get_nic()); + const auto nec = boost::numeric_cast(prob.get_nec()); + const auto c_tol = prob.get_c_tol(); + + // Inequality. + if (nic) { + res = ::nlopt_add_inequality_mconstraint( + m_value.get(), nic, + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + assert(m == p.get_nic()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an inequality constraints gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and write it to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. 
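(The result-filling copy in the inequality callback above leans on pagmo's fitness layout, [objective | equality | inequality]. A standalone sketch with assumed sizes nec = 2 and nic = 3.)

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        const unsigned nec = 2u, nic = 3u;
        const std::vector<double> fitness{10., 0.1, 0.2, -1., -2., -3.};
        std::vector<double> result(nic);
        // Skip the single objective and the nec equality values, as above.
        std::copy(fitness.data() + 1 + nec, fitness.data() + 1 + nec + nic, result.begin());
        for (auto c : result) {
            std::cout << c << ' '; // prints: -1 -2 -3
        }
        std::cout << '\n';
    }
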
+ using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); + if (it_sp == sp.end()) { + // This means that there sparsity data for ineq constraints is empty. Just return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the ineq constraints start. + auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the ineq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (; it_sp != sp.end(); ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } + }, + static_cast(this), c_tol.data() + nec); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the inequality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + } + + // Equality. + if (nec) { + res = ::nlopt_add_equality_mconstraint( + m_value.get(), nec, + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + auto &sp = nlo.m_sp; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + assert(m == p.get_nec()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an equality constraints gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and write it to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); + if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { + // This means that there sparsity data for eq constraints is empty: either we went + // at the end of sp, or the first index pair found refers to inequality constraints. 
Just + // return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the eq constraints start. + auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the eq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } + }, + static_cast(this), c_tol.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the equality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + } + + // Handle the various stopping criteria. + res = ::nlopt_set_stopval(m_value.get(), stopval); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " + + std::to_string(stopval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " + + std::to_string(ftol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " + + std::to_string(ftol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " + + std::to_string(xtol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " + + std::to_string(xtol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_maxeval(m_value.get(), maxeval); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " + + std::to_string(maxeval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + res = ::nlopt_set_maxtime(m_value.get(), maxtime); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " + + std::to_string(maxtime) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + } + + // Delete all other ctors/assignment ops. 
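(The block of setters above repeats the same check-every-return-code pattern once per stopping criterion. Compressed into a standalone sketch against the raw C API, with arbitrary tolerance values.)

    #include <iostream>

    #include <nlopt.h>

    int main()
    {
        ::nlopt_opt opt = ::nlopt_create(NLOPT_LN_COBYLA, 2u);
        // Every setter returns an ::nlopt_result that must be checked.
        if (::nlopt_set_ftol_rel(opt, 1e-8) != NLOPT_SUCCESS) {
            std::cerr << "could not set ftol_rel\n";
        }
        if (::nlopt_set_xtol_rel(opt, 1e-8) != NLOPT_SUCCESS) {
            std::cerr << "could not set xtol_rel\n";
        }
        if (::nlopt_set_maxeval(opt, 1000) != NLOPT_SUCCESS) {
            std::cerr << "could not set maxeval\n";
        }
        ::nlopt_destroy(opt);
    }
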
+ nlopt_obj(const nlopt_obj &) = delete; + nlopt_obj(nlopt_obj &&) = delete; + nlopt_obj &operator=(const nlopt_obj &) = delete; + nlopt_obj &operator=(nlopt_obj &&) = delete; + + // Data members. + problem &m_prob; + sparsity_pattern m_sp; + std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; + vector_double m_dv; + unsigned m_verbosity; + unsigned long m_objfun_counter = 0; + log_type m_log; +}; +} + // TODO // - cache // - optimisation for dense gradients diff --git a/include/pagmo/detail/nlopt_utils.hpp b/include/pagmo/detail/nlopt_utils.hpp deleted file mode 100644 index 0660e2482..000000000 --- a/include/pagmo/detail/nlopt_utils.hpp +++ /dev/null @@ -1,519 +0,0 @@ -/* Copyright 2017 PaGMO development team - -This file is part of the PaGMO library. - -The PaGMO library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 3 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The PaGMO library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the PaGMO library. If not, -see https://www.gnu.org/licenses/. */ - -#ifndef PAGMO_DETAIL_NLOPT_UTILS_HPP -#define PAGMO_DETAIL_NLOPT_UTILS_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace pagmo -{ - -namespace detail -{ - -// Usual trick with global read-only data useful to the NLopt wrapper. -template -struct nlopt_data { - // The idea here is to establish a bijection between string name (e.g., "cobyla") - // and the enums used in the NLopt C API to refer to the algos (e.g., NLOPT_LN_COBYLA). - // We use a bidirectional map so that we can map both string -> enum and enum -> string, - // depending on what is needed. - using names_map_t = boost::bimap; - static const names_map_t names; - // A map to link a human-readable description to NLopt return codes. - using result_map_t = std::unordered_map<::nlopt_result, std::string>; - static const result_map_t results; -}; - -// Static init. 
-template -const typename nlopt_data::result_map_t nlopt_data::results = { - {NLOPT_SUCCESS, "NLOPT_SUCCESS (value = " + std::to_string(NLOPT_SUCCESS) + ", Generic success return value)"}, - {NLOPT_STOPVAL_REACHED, "NLOPT_STOPVAL_REACHED (value = " + std::to_string(NLOPT_STOPVAL_REACHED) - + ", Optimization stopped because stopval was reached)"}, - {NLOPT_FTOL_REACHED, "NLOPT_FTOL_REACHED (value = " + std::to_string(NLOPT_FTOL_REACHED) - + ", Optimization stopped because ftol_rel or ftol_abs was reached)"}, - {NLOPT_XTOL_REACHED, "NLOPT_XTOL_REACHED (value = " + std::to_string(NLOPT_XTOL_REACHED) - + ", Optimization stopped because xtol_rel or xtol_abs was reached)"}, - {NLOPT_MAXEVAL_REACHED, "NLOPT_MAXEVAL_REACHED (value = " + std::to_string(NLOPT_MAXEVAL_REACHED) - + ", Optimization stopped because maxeval was reached)"}, - {NLOPT_MAXTIME_REACHED, "NLOPT_MAXTIME_REACHED (value = " + std::to_string(NLOPT_MAXTIME_REACHED) - + ", Optimization stopped because maxtime was reached)"}, - {NLOPT_FAILURE, "NLOPT_FAILURE (value = " + std::to_string(NLOPT_FAILURE) + ", Generic failure code)"}, - {NLOPT_INVALID_ARGS, "NLOPT_INVALID_ARGS (value = " + std::to_string(NLOPT_INVALID_ARGS) + ", Invalid arguments)"}, - {NLOPT_OUT_OF_MEMORY, - "NLOPT_OUT_OF_MEMORY (value = " + std::to_string(NLOPT_OUT_OF_MEMORY) + ", Ran out of memory)"}, - {NLOPT_ROUNDOFF_LIMITED, "NLOPT_ROUNDOFF_LIMITED (value = " + std::to_string(NLOPT_ROUNDOFF_LIMITED) - + ", Halted because roundoff errors limited progress)"}, - {NLOPT_FORCED_STOP, - "NLOPT_FORCED_STOP (value = " + std::to_string(NLOPT_FORCED_STOP) + ", Halted because of a forced termination)"}}; - -// Initialise the mapping between algo names and enums for the supported algorithms. -inline typename nlopt_data<>::names_map_t nlopt_names_map() -{ - typename nlopt_data<>::names_map_t retval; - using value_type = typename nlopt_data<>::names_map_t::value_type; - retval.insert(value_type("cobyla", NLOPT_LN_COBYLA)); - retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA)); - retval.insert(value_type("praxis", NLOPT_LN_PRAXIS)); - retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD)); - retval.insert(value_type("sbplx", NLOPT_LN_SBPLX)); - retval.insert(value_type("mma", NLOPT_LD_MMA)); - retval.insert(value_type("ccsaq", NLOPT_LD_CCSAQ)); - retval.insert(value_type("slsqp", NLOPT_LD_SLSQP)); - retval.insert(value_type("lbfgs", NLOPT_LD_LBFGS)); - return retval; -} - -// Static init using the helper function above. -template -const typename nlopt_data::names_map_t nlopt_data::names = nlopt_names_map(); - -// Convert an NLopt result in a more descriptive string. -inline std::string nlopt_res2string(::nlopt_result err) -{ - return (nlopt_data<>::results.find(err) == nlopt_data<>::results.end() ? "??" : nlopt_data<>::results.at(err)); -} - -struct nlopt_obj { - // Single entry of the log (feval, fitness, dv). - using log_line_type = std::tuple; - // The log. - using log_type = std::vector; - // Shortcut to the static data. - using data = nlopt_data<>; - explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs, - double xtol_rel, double xtol_abs, int maxeval, int maxtime, unsigned verbosity) - : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity) - { - // Extract and set problem dimension. - const auto n = boost::numeric_cast(prob.get_nx()); - m_value.reset(::nlopt_create(algo, n)); - // Try to init the nlopt_obj. 
- if (!m_value) { - pagmo_throw(std::invalid_argument, "the creation of the nlopt_opt object failed"); - } - - // NLopt does not handle MOO. - if (prob.get_nobj() != 1u) { - pagmo_throw(std::invalid_argument, "NLopt algorithms cannot handle multi-objective optimization"); - } - - // Variable to hold the result of various operations. - ::nlopt_result res; - - // Box bounds. - const auto bounds = prob.get_bounds(); - res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - - // This is just a vector_double that is re-used across objfun invocations. - // It will hold the current decision vector. - m_dv.resize(prob.get_nx()); - // Set the objfun + gradient. - res = ::nlopt_set_min_objective( - m_value.get(), - [](unsigned dim, const double *x, double *grad, void *f_data) -> double { - // Get *this back from the function data. - auto &nlo = *static_cast(f_data); - - // A few shortcuts. - auto &p = nlo.m_prob; - auto &dv = nlo.m_dv; - auto &sp = nlo.m_sp; - const auto verb = nlo.m_verbosity; - auto &f_count = nlo.m_objfun_counter; - auto &log = nlo.m_log; - - // A couple of sanity checks. - assert(dim == p.get_nx()); - assert(dv.size() == dim); - - if (grad && !p.has_gradient()) { - // If grad is not null, it means we are in an algorithm - // that needs the gradient. If the problem does not support it, - // we error out. - pagmo_throw(std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' a fitness gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); - } - - // Copy the decision vector in our temporary dv vector_double, - // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); - - // Compute fitness and, if needed, gradient. - const auto fitness = p.fitness(dv); - if (grad) { - const auto gradient = p.gradient(dv); - auto g_it = gradient.begin(); - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - - // First we fill the dense output gradient with zeroes. - std::fill(grad, grad + dim, 0.); - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. - for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { - // NOTE: we just need the gradient of the objfun, - // i.e., those (i,j) pairs in which i == 0. We know that the gradient - // of the objfun, if present, starts at the beginning of sp, as sp is - // sorted in lexicographic fashion. - grad[it->second] = *g_it; - } - } - - // Update the log if requested. - if (verb && !(f_count % verb)) { - // Constraints bits. 
- const auto ctol = p.get_c_tol(); - auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), - ctol.data()); - auto c1ineq = detail::test_ineq_constraints( - fitness.data() + 1 + p.get_nec(), fitness.data() + fitness.size(), ctol.data() + p.get_nec()); - auto nv = p.get_nc() - c1eq.first - c1ineq.first; - auto l = c1eq.second + c1ineq.second; - const auto feas = p.feasibility_f(fitness); - - if (!(f_count / verb % 50u)) { - // Every 50 lines print the column names. - print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", - std::setw(15), "viol. norm:", '\n'); - } - // Print to screen the log line. - print(std::setw(10), f_count, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), l, - feas ? "" : " i", '\n'); - // Record the log. - log.emplace_back(f_count, fitness[0], nv, l, feas); - } - - // Update the counter. - ++f_count; - - // Return the objfun value. - return fitness[0]; - }, - static_cast(this)); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - - // Vector-valued constraints. - const auto nic = boost::numeric_cast(prob.get_nic()); - const auto nec = boost::numeric_cast(prob.get_nec()); - const auto c_tol = prob.get_c_tol(); - - // Inequality. - if (nic) { - res = ::nlopt_add_inequality_mconstraint( - m_value.get(), nic, - [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { - // Get *this back from the function data. - auto &nlo = *static_cast(f_data); - - // A few shortcuts. - auto &p = nlo.m_prob; - auto &dv = nlo.m_dv; - auto &sp = nlo.m_sp; - - // A couple of sanity checks. - assert(dim == p.get_nx()); - assert(dv.size() == dim); - assert(m == p.get_nic()); - - if (grad && !p.has_gradient()) { - // If grad is not null, it means we are in an algorithm - // that needs the gradient. If the problem does not support it, - // we error out. - pagmo_throw( - std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' an inequality constraints gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); - } - - // Copy the decision vector in our temporary dv vector_double, - // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); - - // Compute fitness and write it to the output. - // NOTE: fitness is nobj + nec + nic. - const auto fitness = p.fitness(dv); - std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result); - - if (grad) { - // Handle gradient, if requested. - const auto gradient = p.gradient(dv); - - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - - // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); - - // Now we need to go into the sparsity pattern and find where - // the sparsity data for the constraints start. 
- using pair_t = sparsity_pattern::value_type; - auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); - if (it_sp == sp.end()) { - // This means that there sparsity data for ineq constraints is empty. Just return. - return; - } - - // Need to do a bit of horrid overflow checking :/. - using diff_type = std::iterator_traits::difference_type; - using udiff_type = std::make_unsigned::type; - if (sp.size() > static_cast(std::numeric_limits::max())) { - pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); - } - // This is the index at which the ineq constraints start. - auto idx = std::distance(sp.begin(), it_sp); - // Grab the start of the gradient data for the ineq constraints. - auto g_it = gradient.data() + idx; - - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. - for (; it_sp != sp.end(); ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; - } - } - }, - static_cast(this), c_tol.data() + nec); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the inequality constraints for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - } - - // Equality. - if (nec) { - res = ::nlopt_add_equality_mconstraint( - m_value.get(), nec, - [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { - // Get *this back from the function data. - auto &nlo = *static_cast(f_data); - - // A few shortcuts. - auto &p = nlo.m_prob; - auto &dv = nlo.m_dv; - auto &sp = nlo.m_sp; - - // A couple of sanity checks. - assert(dim == p.get_nx()); - assert(dv.size() == dim); - assert(m == p.get_nec()); - - if (grad && !p.has_gradient()) { - // If grad is not null, it means we are in an algorithm - // that needs the gradient. If the problem does not support it, - // we error out. - pagmo_throw( - std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' an equality constraints gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); - } - - // Copy the decision vector in our temporary dv vector_double, - // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); - - // Compute fitness and write it to the output. - // NOTE: fitness is nobj + nec + nic. - const auto fitness = p.fitness(dv); - std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); - - if (grad) { - // Handle gradient, if requested. - const auto gradient = p.gradient(dv); - - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - - // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); - - // Now we need to go into the sparsity pattern and find where - // the sparsity data for the constraints start. - using pair_t = sparsity_pattern::value_type; - auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); - if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { - // This means that there sparsity data for eq constraints is empty: either we went - // at the end of sp, or the first index pair found refers to inequality constraints. 
Just - // return. - return; - } - - // Need to do a bit of horrid overflow checking :/. - using diff_type = std::iterator_traits::difference_type; - using udiff_type = std::make_unsigned::type; - if (sp.size() > static_cast(std::numeric_limits::max())) { - pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); - } - // This is the index at which the eq constraints start. - auto idx = std::distance(sp.begin(), it_sp); - // Grab the start of the gradient data for the eq constraints. - auto g_it = gradient.data() + idx; - - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. - for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; - } - } - }, - static_cast(this), c_tol.data()); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the equality constraints for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - } - - // Handle the various stopping criteria. - res = ::nlopt_set_stopval(m_value.get(), stopval); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " - + std::to_string(stopval) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " - + std::to_string(ftol_rel) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " - + std::to_string(ftol_abs) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " - + std::to_string(xtol_rel) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " - + std::to_string(xtol_abs) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_maxeval(m_value.get(), maxeval); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " - + std::to_string(maxeval) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - res = ::nlopt_set_maxtime(m_value.get(), maxtime); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " - + std::to_string(maxtime) + " for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - } - - // Delete all other ctors/assignment ops. 
- nlopt_obj(const nlopt_obj &) = delete; - nlopt_obj(nlopt_obj &&) = delete; - nlopt_obj &operator=(const nlopt_obj &) = delete; - nlopt_obj &operator=(nlopt_obj &&) = delete; - - // Data members. - problem &m_prob; - sparsity_pattern m_sp; - std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; - vector_double m_dv; - unsigned m_verbosity; - unsigned long m_objfun_counter = 0; - log_type m_log; -}; -} -} - -#endif diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 5ada8f25b..7d7d60a37 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -64,7 +64,6 @@ endif() if(PAGMO_WITH_NLOPT) ADD_PAGMO_TESTCASE(nlopt) - ADD_PAGMO_TESTCASE(nlopt_utils) endif() # Here are problematic tests for MSVC. diff --git a/tests/nlopt_utils.cpp b/tests/nlopt_utils.cpp deleted file mode 100644 index 2350a8d1f..000000000 --- a/tests/nlopt_utils.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2017 PaGMO development team - -This file is part of the PaGMO library. - -The PaGMO library is free software; you can redistribute it and/or modify -it under the terms of either: - - * the GNU Lesser General Public License as published by the Free - Software Foundation; either version 3 of the License, or (at your - option) any later version. - -or - - * the GNU General Public License as published by the Free Software - Foundation; either version 3 of the License, or (at your option) any - later version. - -or both in parallel, as here. - -The PaGMO library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received copies of the GNU General Public License and the -GNU Lesser General Public License along with the PaGMO library. If not, -see https://www.gnu.org/licenses/. */ - -#define BOOST_TEST_MODULE nlopt_utils_test -#include - -#include - -using namespace pagmo; - -BOOST_AUTO_TEST_CASE(nlopt_basic) -{ -} From f1cb6515346255ca0a9cbf5cecce31abe5225651 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 21:59:33 +0200 Subject: [PATCH 20/57] Add missing NLopt algos and some cleanup work. 
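This patch extends the name map with NEWUOA, the truncated-Newton family and the var1/var2 limited-memory variable-metric methods (see the diff below), and clarifies the constraint-related error messages. For orientation, a self-contained sketch (not part of the patch) of driving one of the newly mapped algorithms, NLOPT_LD_TNEWTON, through the raw C API the wrapper builds on; the quadratic objective is purely illustrative.

    #include <iostream>

    #include <nlopt.h>

    // Objective in the nlopt_func signature: value plus optional gradient.
    double sphere(unsigned, const double *x, double *grad, void *)
    {
        if (grad) {
            grad[0] = 2. * (x[0] - 1.);
            grad[1] = 2. * x[1];
        }
        return (x[0] - 1.) * (x[0] - 1.) + x[1] * x[1];
    }

    int main()
    {
        ::nlopt_opt opt = ::nlopt_create(NLOPT_LD_TNEWTON, 2u);
        ::nlopt_set_min_objective(opt, sphere, nullptr);
        ::nlopt_set_ftol_rel(opt, 1e-10);
        double x[] = {5., 5.};
        double minf = 0.;
        if (::nlopt_optimize(opt, x, &minf) > 0) {
            std::cout << "minimum " << minf << " at (" << x[0] << ", " << x[1] << ")\n";
        }
        ::nlopt_destroy(opt);
    }
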
--- include/pagmo/algorithms/nlopt.hpp | 52 +++++++++++++++++++----------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 765a3d24c..9545908f5 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -110,6 +110,8 @@ inline typename nlopt_data<>::names_map_t nlopt_names_map() using value_type = typename nlopt_data<>::names_map_t::value_type; retval.insert(value_type("cobyla", NLOPT_LN_COBYLA)); retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA)); + retval.insert(value_type("newuoa", NLOPT_LN_NEWUOA)); + retval.insert(value_type("newuoa_bound", NLOPT_LN_NEWUOA_BOUND)); retval.insert(value_type("praxis", NLOPT_LN_PRAXIS)); retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD)); retval.insert(value_type("sbplx", NLOPT_LN_SBPLX)); @@ -117,6 +119,12 @@ inline typename nlopt_data<>::names_map_t nlopt_names_map() retval.insert(value_type("ccsaq", NLOPT_LD_CCSAQ)); retval.insert(value_type("slsqp", NLOPT_LD_SLSQP)); retval.insert(value_type("lbfgs", NLOPT_LD_LBFGS)); + retval.insert(value_type("tnewton_precond_restart", NLOPT_LD_TNEWTON_PRECOND_RESTART)); + retval.insert(value_type("tnewton_precond", NLOPT_LD_TNEWTON_PRECOND)); + retval.insert(value_type("tnewton_restart", NLOPT_LD_TNEWTON_RESTART)); + retval.insert(value_type("tnewton", NLOPT_LD_TNEWTON)); + retval.insert(value_type("var2", NLOPT_LD_VAR2)); + retval.insert(value_type("var1", NLOPT_LD_VAR1)); return retval; } @@ -238,12 +246,15 @@ struct nlopt_obj { if (verb && !(f_count % verb)) { // Constraints bits. const auto ctol = p.get_c_tol(); - auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), - ctol.data()); - auto c1ineq = detail::test_ineq_constraints( + const auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), + ctol.data()); + const auto c1ineq = detail::test_ineq_constraints( fitness.data() + 1 + p.get_nec(), fitness.data() + fitness.size(), ctol.data() + p.get_nec()); - auto nv = p.get_nc() - c1eq.first - c1ineq.first; - auto l = c1eq.second + c1ineq.second; + // This will be the total number of violated constraints. + const auto nv = p.get_nc() - c1eq.first - c1ineq.first; + // This will be the norm of the violation. + const auto l = c1eq.second + c1ineq.second; + // Test feasibility. const auto feas = p.feasibility_f(fitness); if (!(f_count / verb % 50u)) { @@ -310,7 +321,7 @@ struct nlopt_obj { // for use in the pagmo API. std::copy(x, x + dim, dv.begin()); - // Compute fitness and write it to the output. + // Compute fitness and write IC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result); @@ -334,7 +345,7 @@ struct nlopt_obj { using pair_t = sparsity_pattern::value_type; auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); if (it_sp == sp.end()) { - // This means that there sparsity data for ineq constraints is empty. Just return. + // This means that the sparsity data for ineq constraints is empty. Just return. return; } @@ -345,12 +356,13 @@ struct nlopt_obj { pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); } // This is the index at which the ineq constraints start. 
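(The new comments above pin down the log quantities: nv is the number of violated constraints and l the violation norm. A standalone illustration of that bookkeeping, with a hypothetical stand-in for detail::test_eq_constraints() that is assumed, per its use here, to return the pair {number satisfied, violation norm}.)

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for detail::test_eq_constraints().
    std::pair<std::size_t, double> test_eq(const std::vector<double> &c,
                                           const std::vector<double> &tol)
    {
        std::size_t n = 0;
        double norm2 = 0.;
        for (std::size_t i = 0; i < c.size(); ++i) {
            if (std::abs(c[i]) <= tol[i]) {
                ++n; // satisfied within tolerance
            } else {
                norm2 += c[i] * c[i];
            }
        }
        return {n, std::sqrt(norm2)};
    }

    int main()
    {
        const auto r = test_eq({1e-10, 0.3}, {1e-6, 1e-6});
        std::cout << "violated: " << 2u - r.first << ", viol. norm: " << r.second << '\n';
    }
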
- auto idx = std::distance(sp.begin(), it_sp); + const auto idx = std::distance(sp.begin(), it_sp); // Grab the start of the gradient data for the ineq constraints. auto g_it = gradient.data() + idx; // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. + // nonzero bits in grad. Run until sp.end() as the IC are at the + // end of the sparsity/gradient vector. for (; it_sp != sp.end(); ++it_sp, ++g_it) { grad[it_sp->second] = *g_it; } @@ -358,9 +370,10 @@ struct nlopt_obj { }, static_cast(this), c_tol.data() + nec); if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the inequality constraints for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); + pagmo_throw(std::invalid_argument, + "could not set the inequality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res) + + "\nThis usually means that the algorithm does not support inequality constraints"); } } @@ -398,7 +411,7 @@ struct nlopt_obj { // for use in the pagmo API. std::copy(x, x + dim, dv.begin()); - // Compute fitness and write it to the output. + // Compute fitness and write EC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); @@ -435,12 +448,13 @@ struct nlopt_obj { pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); } // This is the index at which the eq constraints start. - auto idx = std::distance(sp.begin(), it_sp); + const auto idx = std::distance(sp.begin(), it_sp); // Grab the start of the gradient data for the eq constraints. auto g_it = gradient.data() + idx; // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. + // nonzero bits in grad. We terminate either at the end of sp, or when + // we encounter the first inequality constraint. for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { grad[it_sp->second] = *g_it; } @@ -448,9 +462,10 @@ struct nlopt_obj { }, static_cast(this), c_tol.data()); if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the equality constraints for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); + pagmo_throw(std::invalid_argument, + "could not set the equality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res) + + "\nThis usually means that the algorithm does not support equality constraints"); } } @@ -526,7 +541,6 @@ struct nlopt_obj { // TODO // - cache // - optimisation for dense gradients -// - error messages mentioning some algos don't support constraints etc. class nlopt { using nlopt_obj = detail::nlopt_obj; From 900172ae89f34e246f1706430a55c85668dc9706 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 23:01:55 +0200 Subject: [PATCH 21/57] Implement optimized gradient handling in the dense case. --- include/pagmo/algorithms/nlopt.hpp | 160 ++++++++++++++++------------- 1 file changed, 91 insertions(+), 69 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 9545908f5..75250be8b 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -229,16 +229,22 @@ struct nlopt_obj { // vector has size m_gs_dim, so these two must have the same size. 
assert(gradient.size() == sp.size()); - // First we fill the dense output gradient with zeroes. - std::fill(grad, grad + dim, 0.); - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. - for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { - // NOTE: we just need the gradient of the objfun, - // i.e., those (i,j) pairs in which i == 0. We know that the gradient - // of the objfun, if present, starts at the beginning of sp, as sp is - // sorted in lexicographic fashion. - grad[it->second] = *g_it; + if (p.has_gradient_sparsity()) { + // Sparse gradient case. + // First we fill the dense output gradient with zeroes. + std::fill(grad, grad + dim, 0.); + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { + // NOTE: we just need the gradient of the objfun, + // i.e., those (i,j) pairs in which i == 0. We know that the gradient + // of the objfun, if present, starts at the beginning of sp, as sp is + // sorted in lexicographic fashion. + grad[it->second] = *g_it; + } + } else { + // Dense gradient case. + std::copy(gradient.data(), gradient.data() + p.get_nx(), grad); } } @@ -337,34 +343,42 @@ struct nlopt_obj { // vector has size m_gs_dim, so these two must have the same size. assert(gradient.size() == sp.size()); - // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); - - // Now we need to go into the sparsity pattern and find where - // the sparsity data for the constraints start. - using pair_t = sparsity_pattern::value_type; - auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); - if (it_sp == sp.end()) { - // This means that the sparsity data for ineq constraints is empty. Just return. - return; - } - - // Need to do a bit of horrid overflow checking :/. - using diff_type = std::iterator_traits::difference_type; - using udiff_type = std::make_unsigned::type; - if (sp.size() > static_cast(std::numeric_limits::max())) { - pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); - } - // This is the index at which the ineq constraints start. - const auto idx = std::distance(sp.begin(), it_sp); - // Grab the start of the gradient data for the ineq constraints. - auto g_it = gradient.data() + idx; - - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. Run until sp.end() as the IC are at the - // end of the sparsity/gradient vector. - for (; it_sp != sp.end(); ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; + if (p.has_gradient_sparsity()) { + // Sparse gradient. + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); + if (it_sp == sp.end()) { + // This means that the sparsity data for ineq constraints is empty. Just return. + return; + } + + // Need to do a bit of horrid overflow checking :/. 
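(The "horrid overflow checking" is a small reusable idiom: before using std::distance() or raw iterator arithmetic, make sure the container size fits in the iterator difference_type. A standalone sketch:)

    #include <iterator>
    #include <limits>
    #include <stdexcept>
    #include <type_traits>
    #include <utility>
    #include <vector>

    int main()
    {
        using sp_t = std::vector<std::pair<unsigned long, unsigned long>>;
        sp_t sp(10u);
        using diff_type = std::iterator_traits<sp_t::iterator>::difference_type;
        using udiff_type = std::make_unsigned<diff_type>::type;
        if (sp.size() > static_cast<udiff_type>(std::numeric_limits<diff_type>::max())) {
            throw std::overflow_error("the sparsity pattern size is too large");
        }
        // Safe to take std::distance(sp.begin(), it) for any valid it now.
    }
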
+ using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, + "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the ineq constraints start. + const auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the ineq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. Run until sp.end() as the IC are at the + // end of the sparsity/gradient vector. + for (; it_sp != sp.end(); ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } else { + // Dense gradient. + std::copy(gradient.data() + p.get_nx() * (1u + p.get_nec()), + gradient.data() + gradient.size(), grad); } } }, @@ -427,36 +441,45 @@ struct nlopt_obj { // vector has size m_gs_dim, so these two must have the same size. assert(gradient.size() == sp.size()); - // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); - - // Now we need to go into the sparsity pattern and find where - // the sparsity data for the constraints start. - using pair_t = sparsity_pattern::value_type; - auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); - if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { - // This means that there sparsity data for eq constraints is empty: either we went - // at the end of sp, or the first index pair found refers to inequality constraints. Just - // return. - return; - } - - // Need to do a bit of horrid overflow checking :/. - using diff_type = std::iterator_traits::difference_type; - using udiff_type = std::make_unsigned::type; - if (sp.size() > static_cast(std::numeric_limits::max())) { - pagmo_throw(std::overflow_error, "Overflow error, the sparsity pattern size is too large."); - } - // This is the index at which the eq constraints start. - const auto idx = std::distance(sp.begin(), it_sp); - // Grab the start of the gradient data for the eq constraints. - auto g_it = gradient.data() + idx; - - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. We terminate either at the end of sp, or when - // we encounter the first inequality constraint. - for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; + if (p.has_gradient_sparsity()) { + // Sparse gradient case. + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); + if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { + // This means that there sparsity data for eq constraints is empty: either we went + // at the end of sp, or the first index pair found refers to inequality constraints. + // Just + // return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, + "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the eq constraints start. 
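(The dense branches added in this patch rely on a fixed layout of pagmo's flattened dense gradient, [objective row | equality rows | inequality rows], each row of length nx, so every block can be sliced out with plain offset arithmetic. A sketch with assumed sizes nx = 3, nec = 1, nic = 1, mirroring the three copies in the diff.)

    #include <algorithm>
    #include <vector>

    int main()
    {
        const unsigned nx = 3u, nec = 1u, nic = 1u;
        const std::vector<double> gradient{1., 2., 3., 4., 5., 6., 7., 8., 9.};
        std::vector<double> obj_g(nx), ec_g(nx * nec), ic_g(nx * nic);
        std::copy(gradient.data(), gradient.data() + nx, obj_g.begin());
        std::copy(gradient.data() + nx, gradient.data() + nx * (1u + nec), ec_g.begin());
        std::copy(gradient.data() + nx * (1u + nec), gradient.data() + gradient.size(),
                  ic_g.begin());
        // obj_g == {1,2,3}, ec_g == {4,5,6}, ic_g == {7,8,9}.
    }
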
+ const auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the eq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. We terminate either at the end of sp, or when + // we encounter the first inequality constraint. + for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } else { + // Dense gradient. + std::copy(gradient.data() + p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), + grad); } } }, @@ -540,7 +563,6 @@ struct nlopt_obj { // TODO // - cache -// - optimisation for dense gradients class nlopt { using nlopt_obj = detail::nlopt_obj; From 0660f45d97768fad98b35c018ebf517d3c7abfcd Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Tue, 4 Apr 2017 23:47:38 +0200 Subject: [PATCH 22/57] Initial bits of support for the config file functionality. --- CMakeLists.txt | 15 ++++++++----- config.hpp.in | 40 +++++++++++++++++++++++++++++++++ include/pagmo/serialization.hpp | 2 ++ pygmo/expose_algorithms.cpp | 6 +++-- 4 files changed, 56 insertions(+), 7 deletions(-) create mode 100644 config.hpp.in diff --git a/CMakeLists.txt b/CMakeLists.txt index d45499db6..6cdbd5e11 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,8 +1,8 @@ -project(pagmo) +cmake_minimum_required(VERSION 3.2) -enable_testing() +project(pagmo VERSION 2.0) -cmake_minimum_required(VERSION 3.2) +enable_testing() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake_modules" "${CMAKE_SOURCE_DIR}/cmake_modules/yacma") @@ -100,6 +100,7 @@ add_library(pagmo INTERFACE) target_link_libraries(pagmo INTERFACE Threads::Threads Boost::boost) target_include_directories(pagmo INTERFACE $ + $ $) if(PAGMO_WITH_EIGEN3) @@ -107,8 +108,7 @@ if(PAGMO_WITH_EIGEN3) add_library(Eigen3::eigen3 INTERFACE IMPORTED) set_target_properties(Eigen3::eigen3 PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${EIGEN3_INCLUDE_DIR}") target_link_libraries(pagmo INTERFACE Eigen3::eigen3) - # FIXME: this needs to go into config.hpp, once implemented. - target_compile_definitions(pagmo INTERFACE PAGMO_WITH_EIGEN3) + set(PAGMO_ENABLE_EIGEN3 "#define PAGMO_WITH_EIGEN3") endif() if(PAGMO_WITH_NLOPT) @@ -117,8 +117,12 @@ if(PAGMO_WITH_NLOPT) set_target_properties(NLOPT::nlopt PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${NLOPT_INCLUDE_DIRS}") set_target_properties(NLOPT::nlopt PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C" IMPORTED_LOCATION "${NLOPT_LIBRARIES}") target_link_libraries(pagmo INTERFACE NLOPT::nlopt) + set(PAGMO_ENABLE_NLOPT "#define PAGMO_WITH_NLOPT") endif() +# Configure config.hpp. +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/config.hpp.in" "${CMAKE_CURRENT_BINARY_DIR}/include/pagmo/config.hpp") + if(PAGMO_BUILD_TESTS) add_subdirectory("${CMAKE_SOURCE_DIR}/tests") endif() @@ -133,4 +137,5 @@ endif() if(PAGMO_INSTALL_HEADERS) install(DIRECTORY include/ DESTINATION include) + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/include/pagmo/config.hpp" DESTINATION include/pagmo) endif() diff --git a/config.hpp.in b/config.hpp.in new file mode 100644 index 000000000..bf462415b --- /dev/null +++ b/config.hpp.in @@ -0,0 +1,40 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. 
+ +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#ifndef PAGMO_CONFIG_HPP +#define PAGMO_CONFIG_HPP + +// Start of defines instantiated by CMake. +// clang-format off +#define PAGMO_VERSION @pagmo_VERSION@ +@PAGMO_ENABLE_EIGEN3@ +@PAGMO_ENABLE_NLOPT@ +// clang-format on +// End of defines instantiated by CMake. + +#endif diff --git a/include/pagmo/serialization.hpp b/include/pagmo/serialization.hpp index 7d32981ad..4ed41e760 100644 --- a/include/pagmo/serialization.hpp +++ b/include/pagmo/serialization.hpp @@ -57,6 +57,8 @@ see https://www.gnu.org/licenses/. */ #pragma GCC diagnostic pop #endif +#include + #include #include #include diff --git a/pygmo/expose_algorithms.cpp b/pygmo/expose_algorithms.cpp index 4b236fb97..72d23ed52 100644 --- a/pygmo/expose_algorithms.cpp +++ b/pygmo/expose_algorithms.cpp @@ -56,7 +56,9 @@ see https://www.gnu.org/licenses/. */ #include #include -#ifdef PAGMO_WITH_EIGEN3 +#include + +#if defined(PAGMO_WITH_EIGEN3) #include #endif #include @@ -273,7 +275,7 @@ void expose_algorithms() expose_algo_log(de1220_, de1220_get_log_docstring().c_str()); de1220_.def("get_seed", &de1220::get_seed, generic_uda_get_seed_docstring().c_str()); // CMA-ES -#ifdef PAGMO_WITH_EIGEN3 +#if defined(PAGMO_WITH_EIGEN3) auto cmaes_ = expose_algorithm("cmaes", cmaes_docstring().c_str()); cmaes_.def(bp::init( (bp::arg("gen") = 1u, bp::arg("cc") = -1., bp::arg("cs") = -1., bp::arg("c1") = -1., bp::arg("cmu") = -1., From e908bde3bcd71760645ab6ff53df0149f5ae2f94 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Wed, 5 Apr 2017 00:03:45 +0200 Subject: [PATCH 23/57] Add global pagmo.hpp header. --- include/pagmo/pagmo.hpp | 93 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 include/pagmo/pagmo.hpp diff --git a/include/pagmo/pagmo.hpp b/include/pagmo/pagmo.hpp new file mode 100644 index 000000000..ec3c40370 --- /dev/null +++ b/include/pagmo/pagmo.hpp @@ -0,0 +1,93 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#ifndef PAGMO_PAGMO_HPP +#define PAGMO_PAGMO_HPP + +#include + +#include +#include +#if defined(PAGMO_WITH_EIGEN3) +#include +#endif +#include +#include +#include +#include +#include +#include +#if defined(PAGMO_WITH_NLOPT) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(_MSC_VER) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif From b836e714c3c64ca47f1fe210dac295a6688e6d69 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Wed, 5 Apr 2017 00:06:50 +0200 Subject: [PATCH 24/57] First attempt at enabling nlopt in the CI. --- appveyor.yml | 12 ++++++------ tools/install_deps.sh | 2 +- tools/install_travis.sh | 20 ++++++++++---------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 53456c1be..5cde0299b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -18,9 +18,9 @@ install: - if [%BUILD_TYPE%]==[Python35] set PATH=C:\Miniconda35-x64\Scripts;%PATH% - if [%BUILD_TYPE%]==[Python36] set PATH=C:\Miniconda36-x64\Scripts;%PATH% - conda config --add channels conda-forge --force -- if [%BUILD_TYPE%]==[Debug] conda create -y --name pagmo python=3.6 cmake boost eigen -- if [%BUILD_TYPE%]==[Python35] conda create -y --name pagmo python=3.5 cmake boost eigen -- if [%BUILD_TYPE%]==[Python36] conda create -y --name pagmo python=3.6 cmake boost eigen +- if [%BUILD_TYPE%]==[Debug] conda create -y --name pagmo python=3.6 cmake boost eigen nlopt +- if [%BUILD_TYPE%]==[Python35] conda create -y --name pagmo python=3.5 cmake boost eigen nlopt +- if [%BUILD_TYPE%]==[Python36] conda create -y --name pagmo python=3.6 cmake boost eigen nlopt - activate pagmo - if [%BUILD_TYPE%]==[Python35] conda install -y numpy dill ipyparallel - if [%BUILD_TYPE%]==[Python36] conda install -y numpy dill ipyparallel @@ -28,11 +28,11 @@ install: build_script: - mkdir build - cd build -- if [%BUILD_TYPE%]==[Debug] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=YES -DPAGMO_BUILD_TUTORIALS=YES -DPAGMO_WITH_EIGEN3=YES .. +- if [%BUILD_TYPE%]==[Debug] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=YES -DPAGMO_BUILD_TUTORIALS=YES -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes .. - if [%BUILD_TYPE%]==[Debug] cmake --build . --config Debug --target install -- if [%BUILD_TYPE%]==[Python35] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=YES -DPAGMO_BUILD_PYGMO=yes .. +- if [%BUILD_TYPE%]==[Python35] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_BUILD_PYGMO=yes .. - if [%BUILD_TYPE%]==[Python35] cmake --build . --config Release --target install -- if [%BUILD_TYPE%]==[Python36] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=YES -DPAGMO_BUILD_PYGMO=yes .. 
+- if [%BUILD_TYPE%]==[Python36] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_BUILD_PYGMO=yes .. - if [%BUILD_TYPE%]==[Python36] cmake --build . --config Release --target install test_script: diff --git a/tools/install_deps.sh b/tools/install_deps.sh index 7e5050652..f086183c1 100644 --- a/tools/install_deps.sh +++ b/tools/install_deps.sh @@ -15,7 +15,7 @@ bash miniconda.sh -b -p $HOME/miniconda export PATH="$HOME/miniconda/bin:$PATH" conda config --add channels conda-forge --force -conda_pkgs="boost>=1.55 cmake>=3.2 eigen" +conda_pkgs="boost>=1.55 cmake>=3.2 eigen nlopt" if [[ "${PAGMO_BUILD}" == "Python36" || "${PAGMO_BUILD}" == "OSXPython36" ]]; then conda_pkgs="$conda_pkgs python=3.6 numpy dill ipyparallel" diff --git a/tools/install_travis.sh b/tools/install_travis.sh index f9ce4bb23..960880862 100644 --- a/tools/install_travis.sh +++ b/tools/install_travis.sh @@ -8,40 +8,40 @@ set -x export PATH="$deps_dir/bin:$PATH" if [[ "${PAGMO_BUILD}" == "ReleaseGCC48" ]]; then - CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; + CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "DebugGCC48" ]]; then - CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-fsanitize=address -fuse-ld=gold" ../; + CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-fsanitize=address -fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "CoverageGCC5" ]]; then - CXX=g++-5 CC=gcc-5 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="--coverage -fuse-ld=gold" ../; + CXX=g++-5 CC=gcc-5 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="--coverage -fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; bash <(curl -s https://codecov.io/bash) -x gcov-5; elif [[ "${PAGMO_BUILD}" == "DebugGCC6" ]]; then - CXX=g++-6 CC=gcc-6 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; + CXX=g++-6 CC=gcc-6 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "DebugClang38" ]]; then - CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes ../; + CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes ../; make -j2 
VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "ReleaseClang38" ]]; then - CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes ../; + CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "OSXDebug" ]]; then - CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; + CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "OSXRelease" ]]; then - CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes ../; + CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "Python36" || "${PAGMO_BUILD}" == "Python27" ]]; then - CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes ../; + CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes ../; make install VERBOSE=1; ipcluster start --daemonize=True; # Give some time for the cluster to start up. @@ -110,7 +110,7 @@ elif [[ "${PAGMO_BUILD}" == "Python36" || "${PAGMO_BUILD}" == "Python27" ]]; the fi done elif [[ "${PAGMO_BUILD}" == "OSXPython36" || "${PAGMO_BUILD}" == "OSXPython27" ]]; then - CXX=clang++ CC=clang cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; + CXX=clang++ CC=clang cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; make install VERBOSE=1; ipcluster start --daemonize=True; # Give some time for the cluster to start up. From c5459e8b4d3071f718f9ffbb2e509b7dbb7713b9 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Wed, 5 Apr 2017 01:04:32 +0200 Subject: [PATCH 25/57] nlopt: some small improvements and CI debug. 
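
Context for the MSVC-specific changes below: MSVC emits warning C4996 when
standard algorithms such as std::copy or std::fill write through a raw
pointer, and the NLopt callbacks must do exactly that, since NLopt hands
them plain double * output buffers. A minimal stand-alone illustration of
the pattern wrapped by the new pagmo_disable_checked_iter() macro (a sketch
for illustration only, not part of the patch itself; the real calls are the
std::copy/std::fill invocations in the callbacks):

    #include <algorithm>
    #include <vector>

    // NLopt-style output: a raw buffer owned by the caller.
    void write_gradient(const std::vector<double> &src, double *out)
    {
        // Under MSVC at high warning levels this triggers C4996
        // ("unchecked iterator").
        std::copy(src.begin(), src.end(), out);
    }

The macro silences the warning locally via _Pragma, instead of defining
_SCL_SECURE_NO_WARNINGS for the whole translation unit.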
---
 appveyor.yml                       |  12 +--
 include/pagmo/algorithms/nlopt.hpp | 143 +++++++++++++++++------------
 2 files changed, 88 insertions(+), 67 deletions(-)

diff --git a/appveyor.yml b/appveyor.yml
index 5cde0299b..59bb83083 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -3,12 +3,12 @@ environment:
     PLATFORMTOOLSET: "v140"

   matrix:
-  - BUILD_TYPE: "Python35"
-    COMPILER: MSVC15
-    PLATFORM: "x64"
-  - BUILD_TYPE: "Python36"
-    COMPILER: MSVC15
-    PLATFORM: "x64"
+  # - BUILD_TYPE: "Python35"
+  #   COMPILER: MSVC15
+  #   PLATFORM: "x64"
+  # - BUILD_TYPE: "Python36"
+  #   COMPILER: MSVC15
+  #   PLATFORM: "x64"
   - BUILD_TYPE: "Debug"
     COMPILER: MSVC15
     PLATFORM: "x64"
diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp
index 75250be8b..8efdc8444 100644
--- a/include/pagmo/algorithms/nlopt.hpp
+++ b/include/pagmo/algorithms/nlopt.hpp
@@ -35,6 +35,7 @@ see https://www.gnu.org/licenses/. */
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -60,6 +61,18 @@ see https://www.gnu.org/licenses/. */
 #include
 #include

+#if defined(_MSC_VER)
+
+#define pagmo_disable_checked_iter(expr)                                                                               \
+    _Pragma("warning(push, 0)") _Pragma("warning(disable : 4996)") expr;                                               \
+    _Pragma("warning(pop)")
+
+#else
+
+#define pagmo_disable_checked_iter(expr) expr
+
+#endif
+
 namespace pagmo
 {

@@ -80,29 +93,6 @@ struct nlopt_data {
     static const result_map_t results;
 };

-// Static init.
-template <typename T>
-const typename nlopt_data<T>::result_map_t nlopt_data<T>::results = {
-    {NLOPT_SUCCESS, "NLOPT_SUCCESS (value = " + std::to_string(NLOPT_SUCCESS) + ", Generic success return value)"},
-    {NLOPT_STOPVAL_REACHED, "NLOPT_STOPVAL_REACHED (value = " + std::to_string(NLOPT_STOPVAL_REACHED)
-                                + ", Optimization stopped because stopval was reached)"},
-    {NLOPT_FTOL_REACHED, "NLOPT_FTOL_REACHED (value = " + std::to_string(NLOPT_FTOL_REACHED)
-                             + ", Optimization stopped because ftol_rel or ftol_abs was reached)"},
-    {NLOPT_XTOL_REACHED, "NLOPT_XTOL_REACHED (value = " + std::to_string(NLOPT_XTOL_REACHED)
-                             + ", Optimization stopped because xtol_rel or xtol_abs was reached)"},
-    {NLOPT_MAXEVAL_REACHED, "NLOPT_MAXEVAL_REACHED (value = " + std::to_string(NLOPT_MAXEVAL_REACHED)
-                                + ", Optimization stopped because maxeval was reached)"},
-    {NLOPT_MAXTIME_REACHED, "NLOPT_MAXTIME_REACHED (value = " + std::to_string(NLOPT_MAXTIME_REACHED)
-                                + ", Optimization stopped because maxtime was reached)"},
-    {NLOPT_FAILURE, "NLOPT_FAILURE (value = " + std::to_string(NLOPT_FAILURE) + ", Generic failure code)"},
-    {NLOPT_INVALID_ARGS, "NLOPT_INVALID_ARGS (value = " + std::to_string(NLOPT_INVALID_ARGS) + ", Invalid arguments)"},
-    {NLOPT_OUT_OF_MEMORY,
-     "NLOPT_OUT_OF_MEMORY (value = " + std::to_string(NLOPT_OUT_OF_MEMORY) + ", Ran out of memory)"},
-    {NLOPT_ROUNDOFF_LIMITED, "NLOPT_ROUNDOFF_LIMITED (value = " + std::to_string(NLOPT_ROUNDOFF_LIMITED)
-                                 + ", Halted because roundoff errors limited progress)"},
-    {NLOPT_FORCED_STOP,
-     "NLOPT_FORCED_STOP (value = " + std::to_string(NLOPT_FORCED_STOP) + ", Halted because of a forced termination)"}};
-
 // Initialise the mapping between algo names and enums for the supported algorithms.
 inline typename nlopt_data<>::names_map_t nlopt_names_map()
 {
@@ -132,6 +122,29 @@
 template <typename T>
 const typename nlopt_data<T>::names_map_t nlopt_data<T>::names = nlopt_names_map();

+// Other static init.
+template <typename T>
+const typename nlopt_data<T>::result_map_t nlopt_data<T>::results = {
+    {NLOPT_SUCCESS, "NLOPT_SUCCESS (value = " + std::to_string(NLOPT_SUCCESS) + ", Generic success return value)"},
+    {NLOPT_STOPVAL_REACHED, "NLOPT_STOPVAL_REACHED (value = " + std::to_string(NLOPT_STOPVAL_REACHED)
+                                + ", Optimization stopped because stopval was reached)"},
+    {NLOPT_FTOL_REACHED, "NLOPT_FTOL_REACHED (value = " + std::to_string(NLOPT_FTOL_REACHED)
+                             + ", Optimization stopped because ftol_rel or ftol_abs was reached)"},
+    {NLOPT_XTOL_REACHED, "NLOPT_XTOL_REACHED (value = " + std::to_string(NLOPT_XTOL_REACHED)
+                             + ", Optimization stopped because xtol_rel or xtol_abs was reached)"},
+    {NLOPT_MAXEVAL_REACHED, "NLOPT_MAXEVAL_REACHED (value = " + std::to_string(NLOPT_MAXEVAL_REACHED)
+                                + ", Optimization stopped because maxeval was reached)"},
+    {NLOPT_MAXTIME_REACHED, "NLOPT_MAXTIME_REACHED (value = " + std::to_string(NLOPT_MAXTIME_REACHED)
+                                + ", Optimization stopped because maxtime was reached)"},
+    {NLOPT_FAILURE, "NLOPT_FAILURE (value = " + std::to_string(NLOPT_FAILURE) + ", Generic failure code)"},
+    {NLOPT_INVALID_ARGS, "NLOPT_INVALID_ARGS (value = " + std::to_string(NLOPT_INVALID_ARGS) + ", Invalid arguments)"},
+    {NLOPT_OUT_OF_MEMORY,
+     "NLOPT_OUT_OF_MEMORY (value = " + std::to_string(NLOPT_OUT_OF_MEMORY) + ", Ran out of memory)"},
+    {NLOPT_ROUNDOFF_LIMITED, "NLOPT_ROUNDOFF_LIMITED (value = " + std::to_string(NLOPT_ROUNDOFF_LIMITED)
+                                 + ", Halted because roundoff errors limited progress)"},
+    {NLOPT_FORCED_STOP,
+     "NLOPT_FORCED_STOP (value = " + std::to_string(NLOPT_FORCED_STOP) + ", Halted because of a forced termination)"}};
+
 // Convert an NLopt result in a more descriptive string.
 inline std::string nlopt_res2string(::nlopt_result err)
 {
@@ -147,8 +160,13 @@ struct nlopt_obj {
     using data = nlopt_data<>;
     explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs,
                        double xtol_rel, double xtol_abs, int maxeval, int maxtime, unsigned verbosity)
-        : m_prob(prob), m_sp(prob.gradient_sparsity()), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity)
+        : m_prob(prob), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity)
     {
+        // If needed, init the sparsity pattern.
+        if (prob.has_gradient_sparsity()) {
+            m_sp = prob.gradient_sparsity();
+        }
+
         // Extract and set problem dimension.
         const auto n = boost::numeric_cast<unsigned>(prob.get_nx());
         m_value.reset(::nlopt_create(algo, n));
@@ -183,6 +201,7 @@ struct nlopt_obj {
         // This is just a vector_double that is re-used across objfun invocations.
         // It will hold the current decision vector.
         m_dv.resize(prob.get_nx());
+        // Set the objfun + gradient.
         res = ::nlopt_set_min_objective(
             m_value.get(),
@@ -193,7 +212,6 @@ struct nlopt_obj {
                 // A few shortcuts.
                 auto &p = nlo.m_prob;
                 auto &dv = nlo.m_dv;
-                auto &sp = nlo.m_sp;
                 const auto verb = nlo.m_verbosity;
                 auto &f_count = nlo.m_objfun_counter;
                 auto &log = nlo.m_log;
@@ -215,24 +233,26 @@ struct nlopt_obj {

                 // Copy the decision vector in our temporary dv vector_double,
                 // for use in the pagmo API.
-                std::copy(x, x + dim, dv.begin());
+                pagmo_disable_checked_iter(std::copy(x, x + dim, dv.begin()));

                 // Compute fitness and, if needed, gradient.
                 const auto fitness = p.fitness(dv);
                 if (grad) {
                     const auto gradient = p.gradient(dv);
-                    auto g_it = gradient.begin();
-                    // NOTE: problem::gradient() has already checked that
-                    // the returned vector has size m_gs_dim, i.e., the stored
-                    // size of the sparsity pattern.
On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); if (p.has_gradient_sparsity()) { // Sparse gradient case. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + auto g_it = gradient.begin(); + // First we fill the dense output gradient with zeroes. - std::fill(grad, grad + dim, 0.); + pagmo_disable_checked_iter(std::fill(grad, grad + dim, 0.)); // Then we iterate over the sparsity pattern, and fill in the // nonzero bits in grad. for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { @@ -244,7 +264,7 @@ struct nlopt_obj { } } else { // Dense gradient case. - std::copy(gradient.data(), gradient.data() + p.get_nx(), grad); + pagmo_disable_checked_iter(std::copy(gradient.data(), gradient.data() + p.get_nx(), grad)); } } @@ -304,7 +324,6 @@ struct nlopt_obj { // A few shortcuts. auto &p = nlo.m_prob; auto &dv = nlo.m_dv; - auto &sp = nlo.m_sp; // A couple of sanity checks. assert(dim == p.get_nx()); @@ -325,28 +344,30 @@ struct nlopt_obj { // Copy the decision vector in our temporary dv vector_double, // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); + pagmo_disable_checked_iter(std::copy(x, x + dim, dv.begin())); // Compute fitness and write IC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result); + pagmo_disable_checked_iter( + std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result)); if (grad) { // Handle gradient, if requested. const auto gradient = p.gradient(dv); - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - if (p.has_gradient_sparsity()) { // Sparse gradient. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); + pagmo_disable_checked_iter(std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.)); // Now we need to go into the sparsity pattern and find where // the sparsity data for the constraints start. @@ -377,8 +398,8 @@ struct nlopt_obj { } } else { // Dense gradient. - std::copy(gradient.data() + p.get_nx() * (1u + p.get_nec()), - gradient.data() + gradient.size(), grad); + pagmo_disable_checked_iter(std::copy(gradient.data() + p.get_nx() * (1u + p.get_nec()), + gradient.data() + gradient.size(), grad)); } } }, @@ -402,7 +423,6 @@ struct nlopt_obj { // A few shortcuts. 
auto &p = nlo.m_prob; auto &dv = nlo.m_dv; - auto &sp = nlo.m_sp; // A couple of sanity checks. assert(dim == p.get_nx()); @@ -423,28 +443,29 @@ struct nlopt_obj { // Copy the decision vector in our temporary dv vector_double, // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); + pagmo_disable_checked_iter(std::copy(x, x + dim, dv.begin())); // Compute fitness and write EC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); + pagmo_disable_checked_iter(std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result)); if (grad) { // Handle gradient, if requested. const auto gradient = p.gradient(dv); - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - if (p.has_gradient_sparsity()) { // Sparse gradient case. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); + pagmo_disable_checked_iter(std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.)); // Now we need to go into the sparsity pattern and find where // the sparsity data for the constraints start. @@ -478,8 +499,8 @@ struct nlopt_obj { } } else { // Dense gradient. - std::copy(gradient.data() + p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), - grad); + pagmo_disable_checked_iter(std::copy( + gradient.data() + p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), grad)); } } }, From 0ef7d66855d4ff135ca5f19ad49e43edf5eb2091 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Wed, 5 Apr 2017 02:21:32 +0200 Subject: [PATCH 26/57] More fixing. --- include/pagmo/algorithms/nlopt.hpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 8efdc8444..99723f86b 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -35,6 +35,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -64,8 +65,8 @@ see https://www.gnu.org/licenses/. */ #if defined(_MSC_VER) #define pagmo_disable_checked_iter(expr) \ - _Pragma("warning(push, 0)") _Pragma("warning(disable : 4996)") expr; \ - _Pragma("warning(pop)") + __pragma(warning(push)) __pragma(warning(disable : 4996)) expr; \ + __pragma(warning(pop)) #else @@ -89,7 +90,14 @@ struct nlopt_data { using names_map_t = boost::bimap; static const names_map_t names; // A map to link a human-readable description to NLopt return codes. - using result_map_t = std::unordered_map<::nlopt_result, std::string>; + // NOTE: in C++11 hashing of enums might not be available. Provide our own. 
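+    // (Background note: std::hash for enumeration types was only mandated via
+    // LWG issue 2148, adopted for C++14, so some older standard library
+    // implementations do not provide std::hash<::nlopt_result>. Hashing the
+    // enum's underlying integer value, as done below, works everywhere.)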
+    struct res_hasher {
+        std::size_t operator()(::nlopt_result res) const
+        {
+            return std::hash<int>{}(static_cast<int>(res));
+        }
+    };
+    using result_map_t = std::unordered_map<::nlopt_result, std::string, res_hasher>;
     static const result_map_t results;
 };

From 1f7860cdafe45ea7016df7d2db89f46e9a5cae51 Mon Sep 17 00:00:00 2001
From: Francesco Biscani 
Date: Wed, 5 Apr 2017 02:43:52 +0200
Subject: [PATCH 27/57] Another attempt at MSVC.

---
 include/pagmo/algorithms/nlopt.hpp | 43 +++++++++++++++---------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp
index 99723f86b..308d9ac5a 100644
--- a/include/pagmo/algorithms/nlopt.hpp
+++ b/include/pagmo/algorithms/nlopt.hpp
@@ -64,13 +64,9 @@ see https://www.gnu.org/licenses/. */

 #if defined(_MSC_VER)

-#define pagmo_disable_checked_iter(expr)                                                                               \
-    __pragma(warning(push)) __pragma(warning(disable : 4996)) expr;                                                    \
-    __pragma(warning(pop))
-
-#else
-
-#define pagmo_disable_checked_iter(expr) expr
+// Disable a warning from MSVC.
+#pragma warning(push, 0)
+#pragma warning(disable : 4996)

 #endif

@@ -241,7 +237,7 @@ struct nlopt_obj {

                 // Copy the decision vector in our temporary dv vector_double,
                 // for use in the pagmo API.
- pagmo_disable_checked_iter(std::copy(x, x + dim, dv.begin())); + std::copy(x, x + dim, dv.begin()); // Compute fitness and write EC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - pagmo_disable_checked_iter(std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result)); + std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); if (grad) { // Handle gradient, if requested. @@ -473,7 +468,7 @@ struct nlopt_obj { assert(gradient.size() == sp.size()); // Let's first fill it with zeroes. - pagmo_disable_checked_iter(std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.)); + std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); // Now we need to go into the sparsity pattern and find where // the sparsity data for the constraints start. @@ -507,8 +502,8 @@ struct nlopt_obj { } } else { // Dense gradient. - pagmo_disable_checked_iter(std::copy( - gradient.data() + p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), grad)); + std::copy(gradient.data() + p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), + grad); } } }, @@ -826,4 +821,10 @@ class nlopt }; } +#if defined(_MSC_VER) + +#pragma warning(pop) + +#endif + #endif From f68a536ea0172706771d4e933296b3d7de3fe881 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Wed, 5 Apr 2017 16:55:10 +0200 Subject: [PATCH 28/57] Some initial doc bits. --- doc/doxygen/Doxyfile | 4 +- doc/doxygen/images/nlopt.png | Bin 0 -> 13886 bytes doc/sphinx/docs/cpp/algorithms/nlopt.rst | 5 +++ doc/sphinx/docs/cpp/cpp_docs.rst | 1 + include/pagmo/algorithms/cmaes.hpp | 23 +++++++---- include/pagmo/algorithms/nlopt.hpp | 49 +++++++++++++++++++++-- 6 files changed, 71 insertions(+), 11 deletions(-) create mode 100644 doc/doxygen/images/nlopt.png create mode 100644 doc/sphinx/docs/cpp/algorithms/nlopt.rst diff --git a/doc/doxygen/Doxyfile b/doc/doxygen/Doxyfile index b09a9d8c7..0e590ab06 100644 --- a/doc/doxygen/Doxyfile +++ b/doc/doxygen/Doxyfile @@ -2058,7 +2058,9 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -PREDEFINED = PAGMO_DOXYGEN_INVOKED +PREDEFINED = PAGMO_DOXYGEN_INVOKED \ + PAGMO_WITH_EIGEN3 \ + PAGMO_WITH_NLOPT # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. 
The

diff --git a/doc/doxygen/images/nlopt.png b/doc/doxygen/images/nlopt.png
new file mode 100644
index 0000000000000000000000000000000000000000..733c02b40ebff7bc5352c509273b026e4e6406b7
GIT binary patch
literal 13886
[13886 bytes of base85-encoded PNG data omitted: the new doc/doxygen/images/nlopt.png is the NLopt logo image referenced by the Doxygen documentation]
diff --git a/doc/sphinx/docs/cpp/algorithms/nlopt.rst b/doc/sphinx/docs/cpp/algorithms/nlopt.rst
new file mode 100644
index 000000000..30cd4408e
--- /dev/null
+++ b/doc/sphinx/docs/cpp/algorithms/nlopt.rst
@@ -0,0 +1,5 @@
+NLopt solvers
+=============
+
+.. doxygenclass:: pagmo::nlopt
+   :members:
\ No newline at end of file
diff --git a/doc/sphinx/docs/cpp/cpp_docs.rst b/doc/sphinx/docs/cpp/cpp_docs.rst
index b497404ee..f8703f296 100644
--- a/doc/sphinx/docs/cpp/cpp_docs.rst
+++ b/doc/sphinx/docs/cpp/cpp_docs.rst
@@ -32,6 +32,7 @@ Implemented algorithms
    algorithms/moead
    algorithms/mbh
    algorithms/cstrs_self_adaptive
+   algorithms/nlopt
    algorithms/nsga2
    algorithms/pso
    algorithms/sade
diff --git a/include/pagmo/algorithms/cmaes.hpp b/include/pagmo/algorithms/cmaes.hpp
index 1c34651ad..b71c811eb 100644
--- a/include/pagmo/algorithms/cmaes.hpp
+++ b/include/pagmo/algorithms/cmaes.hpp
@@ -29,6 +29,10 @@ see https://www.gnu.org/licenses/. */
 #ifndef PAGMO_ALGORITHMS_CMAES_HPP
 #define PAGMO_ALGORITHMS_CMAES_HPP

+#include
+
+#if defined(PAGMO_WITH_EIGEN3)
+
 #include
 #include
 #include
@@ -51,18 +55,17 @@ namespace pagmo
  * \image html cmaes.png "CMA-ES logic." width=3cm
  *
  * CMA-ES is one of the most successful algorithms, classified as an Evolutionary Strategy, for derivative-free global
- * optimization.
- * The version implemented in PaGMO is the "classic" version described in the 2006 paper titled
+ * optimization. The version implemented in PaGMO is the "classic" version described in the 2006 paper titled
  * "The CMA evolution strategy: a comparing review.".
  *
  * **NOTE** Since at each generation all newly generated individuals sampled from the adapted distribution are
- * reinserted
- * into the population, CMA-ES may not preserve the best individual (not elitist). As a consequence the plot of the
- * population best fitness may not be perfectly monotonically decreasing
+ * reinserted into the population, CMA-ES may not preserve the best individual (not elitist). As a consequence the plot
+ * of the population best fitness may not be perfectly monotonically decreasing
  *
  * **NOTE** The cmaes::evolve method cannot be called concurrently by different threads even if it is marked as const.
- * The
- * mutable members make such an operation result in an undefined behaviour in terms of algorithmic convergence.
+ * The mutable members make such an operation result in an undefined behaviour in terms of algorithmic convergence.
+ *
+ * **NOTE** This algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_EIGEN3`` option enabled.
  *
  * See: Hansen, Nikolaus. "The CMA evolution strategy: a comparing review." Towards a new evolutionary computation.
  * Springer Berlin Heidelberg, 2006. 75-102.
@@ -611,4 +614,10 @@ class cmaes

 PAGMO_REGISTER_ALGORITHM(pagmo::cmaes)

+#else // PAGMO_WITH_EIGEN3
+
+#error The cmaes.hpp header was included, but pagmo was not compiled with eigen3 support
+
+#endif // PAGMO_WITH_EIGEN3
+
 #endif
diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp
index 308d9ac5a..56da2ad53 100644
--- a/include/pagmo/algorithms/nlopt.hpp
+++ b/include/pagmo/algorithms/nlopt.hpp
@@ -29,6 +29,10 @@ see https://www.gnu.org/licenses/. */
 #ifndef PAGMO_ALGORITHMS_NLOPT_HPP
 #define PAGMO_ALGORITHMS_NLOPT_HPP

+#include
+
+#if defined(PAGMO_WITH_NLOPT)
+
 #include
 #include
 #include
@@ -156,7 +160,7 @@ see https://www.gnu.org/licenses/. */
 }

 struct nlopt_obj {
-    // Single entry of the log (feval, fitness, dv).
+    // Single entry of the log (feval, fitness, n of unsatisfied const, constr. violation, feasibility).
     using log_line_type = std::tuple<unsigned long, double, vector_double::size_type, double, bool>;
     // The log.
     using log_type = std::vector<log_line_type>;
@@ -585,8 +589,41 @@ struct nlopt_obj {
 };
 }

-// TODO
-// - cache
+/// NLopt algorithms.
+/**
+ * \image html nlopt.png "NLopt logo." width=3cm
+ *
+ * This user-defined algorithm wraps a selection of solvers from the NLopt library, focusing on
+ * local optimisation (both gradient-based and derivative-free). The complete list of supported
+ * NLopt algorithms is:
+ * - COBYLA,
+ * - BOBYQA,
+ * - NEWUOA + bound constraints,
+ * - PRAXIS,
+ * - Nelder-Mead simplex,
+ * - sbplx,
+ * - MMA (Method of Moving Asymptotes),
+ * - CCSA,
+ * - SLSQP,
+ * - low-storage BFGS,
+ * - preconditioned truncated Newton,
+ * - shifted limited-memory variable-metric.
+ *
+ * The desired NLopt solver is selected upon construction of a pagmo::nlopt algorithm. Various properties
+ * of the solver (e.g., the stopping criteria) can be configured after construction via methods provided
+ * by this class.
+ *
+ * All NLopt solvers support only single-objective optimisation, and, as usual in pagmo, minimisation
+ * is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient
+ * (otherwise a runtime error during the optimisation will be raised). Some solvers support equality and/or
+ * inequality constraints. Trying to solve a constrained problem with a solver which does not support
+ * constraints will raise a runtime error during the optimisation.
+ *
+ * This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_NLOPT`` option
+ * enabled.
+ */
+// TODO:
+// - investigate the use of a fitness cache, after we have good perf testing in place.
 class nlopt
 {
     using nlopt_obj = detail::nlopt_obj;
@@ -827,4 +864,10 @@ class nlopt

 #endif

+#else // PAGMO_WITH_NLOPT
+
+#error The nlopt.hpp header was included, but pagmo was not compiled with NLopt support
+
+#endif // PAGMO_WITH_NLOPT
+
 #endif

From c7359eff5c80876a47c364bc4b5135935de4d7bd Mon Sep 17 00:00:00 2001
From: Francesco Biscani 
Date: Wed, 5 Apr 2017 23:22:45 +0200
Subject: [PATCH 29/57] A few doc changes.
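
These changes also document the extra link flag needed when the optional
NLopt wrapper is enabled. For reference, a minimal program that the
``-lnlopt`` command line shown in the quickstart diff below would build (a
sketch relying only on components introduced earlier in this series: the
global pagmo.hpp header, the pagmo::nlopt UDA and the stock rosenbrock
problem):

    #include <iostream>
    #include <pagmo/pagmo.hpp>

    int main()
    {
        // A 10-dimensional Rosenbrock problem and a small random population.
        pagmo::problem prob{pagmo::rosenbrock{10u}};
        pagmo::population pop{prob, 20u};
        // Local optimisation via the derivative-free COBYLA wrapper.
        pagmo::algorithm algo{pagmo::nlopt{"cobyla"}};
        pop = algo.evolve(pop);
        std::cout << "Best fitness: " << pop.get_f()[pop.best_idx()][0] << '\n';
    }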
--- doc/sphinx/install.rst | 25 ++++++++++++++----------- doc/sphinx/quickstart.rst | 15 ++++++++++++--- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/doc/sphinx/install.rst b/doc/sphinx/install.rst index 7dde80f0e..c100ee54d 100644 --- a/doc/sphinx/install.rst +++ b/doc/sphinx/install.rst @@ -11,11 +11,13 @@ C++ Pagmo is a header-only library which has the following third party dependencies: -* `Boost `_, headers only (needs the libraries only if you intend to compile the python bindings) -* `Eigen `_, headers only (optional) +* `Boost `_, **mandatory**, header-only (needs the libraries only if you + intend to compile the python bindings) +* `Eigen `_, optional, header-only +* `NLopt `_, optional, requires linking -After making sure the dependencies above are installed in your system and their headers visible to your compiler, you can download +After making sure the dependencies above are installed in your system, you can download the pagmo source via the ``git`` command .. code-block:: bash @@ -28,7 +30,7 @@ and configure your build using ``cmake``. When done, type (in your build directo make install -The headers will be installed in the ``CMAKE_INSTALL_PREFIX/include directory``. To check that all went well +The headers will be installed in the ``CMAKE_INSTALL_PREFIX/include`` directory. To check that all went well compile the :ref:`quick-start example `. ----------------------------------------------------------------------- @@ -38,8 +40,8 @@ Python The python module correponding to pagmo is called pygmo It can be installed either directly from ``conda`` or ``pip`` or by building the module from source. -Installing with pip -^^^^^^^^^^^^^^^^^^^ +Installation with pip/conda +^^^^^^^^^^^^^^^^^^^^^^^^^^^ The python package pygmo (python binding of the C++ code) can be installed using ``pip`` or ``conda``: .. code-block:: bash @@ -53,13 +55,14 @@ or conda config --add channels conda-forge conda install pygmo -Building the module -^^^^^^^^^^^^^^^^^^^ +Installation from source +^^^^^^^^^^^^^^^^^^^^^^^^ -To build the module you need to have the boost python libraries installed and to activate the ``BUILD_PYGMO`` option from within ``cmake``. +To build the module from source you need to have the Boost.Python libraries installed and to activate the cmake +``PAGMO_BUILD_PYGMO`` option. -Check carefully what python version is detected and what libraries are linked to. In particular select the correct boost_python -according to the python version (2 or 3) you want to compile the module for. +Check carefully what python version is detected and what libraries are linked to. In particular, select the correct Boost.Python +version according to the python version (2 or 3) you want to compile the module for. The ``CMAKE_INSTALL_PREFIX`` will be used to construct the final location of headers and Python module after install. diff --git a/doc/sphinx/quickstart.rst b/doc/sphinx/quickstart.rst index 6bf20638c..f1e399096 100644 --- a/doc/sphinx/quickstart.rst +++ b/doc/sphinx/quickstart.rst @@ -17,11 +17,20 @@ After following the :ref:`install` you will be able to compile and run your firs :language: c++ :linenos: -Place it into a getting_started.cpp text file and compile it (for eaxmple) with: +Place it into a ``getting_started.cpp`` text file and compile it (for example) with: .. 
code-block:: bash

-    g++ -std=c++11 getting_started.cpp -pthread
+    g++ -O2 -DNDEBUG -std=c++11 getting_started.cpp -pthread
+
+If you installed pagmo with support for optional 3rd party libraries, you might need to
+add additional switches to the command-line invocation of the compiler. For instance,
+if you enabled the optional NLopt support, you will have to link your executable to the
+``nlopt`` library:
+
+.. code-block:: bash
+
+   g++ -O2 -DNDEBUG -std=c++11 getting_started.cpp -pthread -lnlopt

 -----------------------------------------------------------------------

@@ -36,7 +45,7 @@ If you have successfully installed pygmo following the :ref:`install` you can tr
    :language: python
    :linenos:

-Place it into a getting_started.py text file and run it with:
+Place it into a ``getting_started.py`` text file and run it with:

 .. code-block:: bash

From a469d01d17dddc79844e8aa5c1440a8689840fed Mon Sep 17 00:00:00 2001
From: Francesco Biscani 
Date: Wed, 5 Apr 2017 23:23:00 +0200
Subject: [PATCH 30/57] nlopt documentation.

---
 include/pagmo/algorithms/nlopt.hpp | 256 +++++++++++++++++++++++++++--
 1 file changed, 241 insertions(+), 15 deletions(-)

diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp
index 56da2ad53..5a088ce50 100644
--- a/include/pagmo/algorithms/nlopt.hpp
+++ b/include/pagmo/algorithms/nlopt.hpp
@@ -180,7 +180,7 @@ struct nlopt_obj {
         m_value.reset(::nlopt_create(algo, n));
         // Try to init the nlopt_obj.
         if (!m_value) {
-            pagmo_throw(std::invalid_argument, "the creation of the nlopt_opt object failed");
+            pagmo_throw(std::runtime_error, "the creation of the nlopt_opt object failed");
         }

         // NLopt does not handle MOO.
@@ -593,7 +593,8 @@
 /**
  * \image html nlopt.png "NLopt logo." width=3cm
  *
- * This user-defined algorithm wraps a selection of solvers from the NLopt library, focusing on
+ * This user-defined algorithm wraps a selection of solvers from the NLopt library, focusing on
  * local optimisation (both gradient-based and derivative-free). The complete list of supported
  * NLopt algorithms is:
  * - COBYLA,
@@ -614,13 +615,28 @@
  * by this class.
  *
  * All NLopt solvers support only single-objective optimisation, and, as usual in pagmo, minimisation
- * is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient
- * (otherwise a runtime error during the optimisation will be raised). Some solvers support equality and/or
- * inequality constraints. Trying to solve a constrained problem with a solver which does not support
- * constraints will raise a runtime error during the optimisation.
+ * is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient.
+ * Some solvers support equality and/or inequality constraints.
  *
- * This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_NLOPT`` option
- * enabled.
+ * In order to support pagmo's population-based optimisation model, nlopt::evolve() will select
+ * a single individual from the input pagmo::population to be optimised by the NLopt solver.
+ * The optimised individual will then be inserted back into the population at the end of the optimisation.
+ * The selection and replacement strategies can be configured via set_selection(const std::string &),
+ * set_selection(population::size_type), set_replacement(const std::string &) and
+ * set_replacement(population::size_type).
+ *
+ * \verbatim embed:rst:leading-asterisk
+ * .. note::
+ *
+ *    This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_NLOPT`` option
+ *    enabled (see the :ref:`installation instructions <install>`).
+ *
+ * .. seealso::
+ *
+ *    The `NLopt website `_ contains a detailed description
+ *    of each supported solver.
+ *
+ * \endverbatim
  */
 // TODO:
 // - investigate the use of a fitness cache, after we have good perf testing in place.
 class nlopt
 {
     using nlopt_obj = detail::nlopt_obj;
@@ -630,25 +646,91 @@ class nlopt
     using nlopt_data = detail::nlopt_data<>;

 public:
+    /// Single data line for the algorithm's log.
+    /**
+     * A log data line is a tuple consisting of:
+     * - the number of objective function evaluations made so far,
+     * - the objective function value for the current decision vector,
+     * - the number of constraints violated by the current decision vector,
+     * - the constraints violation norm for the current decision vector,
+     * - a boolean flag signalling the feasibility of the current decision vector.
+     */
     using log_line_type = std::tuple<unsigned long, double, vector_double::size_type, double, bool>;
+    /// Log type.
+    /**
+     * The algorithm log is a collection of nlopt::log_line_type data lines, stored in chronological order
+     * during the optimisation if the verbosity of the algorithm is set to a nonzero value
+     * (see nlopt::set_verbosity()).
+     */
     using log_type = std::vector<log_line_type>;

 private:
     static_assert(std::is_same<log_line_type, detail::nlopt_obj::log_line_type>::value, "Invalid log line type.");

 public:
+    /// Default constructor.
+    /**
+     * The default constructor initialises the pagmo::nlopt algorithm with the ``cobyla`` solver,
+     * the ``"best"`` individual selection strategy and the ``"worst"`` individual replacement strategy.
+     *
+     * @throws unspecified any exception thrown by pagmo::nlopt(const std::string &).
+     */
-    nlopt() : nlopt("sbplx")
+    nlopt() : nlopt("cobyla")
     {
     }
+    /// Constructor from solver name.
+    /**
+     * This constructor will initialise a pagmo::nlopt object which will use the NLopt algorithm specified by
+     * the input string \p algo, the ``"best"`` individual selection strategy and the ``"worst"`` individual
+     * replacement strategy. \p algo is translated to an NLopt algorithm type according to the following
+     * translation table:
+     * \verbatim embed:rst:leading-asterisk
+     * ================================  ====================================
+     * ``algo`` string                   NLopt algorithm
+     * ================================  ====================================
+     * ``"cobyla"``                      ``NLOPT_LN_COBYLA``
+     * ``"bobyqa"``                      ``NLOPT_LN_BOBYQA``
+     * ``"newuoa"``                      ``NLOPT_LN_NEWUOA``
+     * ``"newuoa_bound"``                ``NLOPT_LN_NEWUOA_BOUND``
+     * ``"praxis"``                      ``NLOPT_LN_PRAXIS``
+     * ``"neldermead"``                  ``NLOPT_LN_NELDERMEAD``
+     * ``"sbplx"``                       ``NLOPT_LN_SBPLX``
+     * ``"mma"``                         ``NLOPT_LD_MMA``
+     * ``"ccsaq"``                       ``NLOPT_LD_CCSAQ``
+     * ``"slsqp"``                       ``NLOPT_LD_SLSQP``
+     * ``"lbfgs"``                       ``NLOPT_LD_LBFGS``
+     * ``"tnewton_precond_restart"``     ``NLOPT_LD_TNEWTON_PRECOND_RESTART``
+     * ``"tnewton_precond"``             ``NLOPT_LD_TNEWTON_PRECOND``
+     * ``"tnewton_restart"``             ``NLOPT_LD_TNEWTON_RESTART``
+     * ``"tnewton"``                     ``NLOPT_LD_TNEWTON``
+     * ``"var2"``                        ``NLOPT_LD_VAR2``
+     * ``"var1"``                        ``NLOPT_LD_VAR1``
+     * ================================  ====================================
+     * \endverbatim
+     * The parameters of the selected algorithm can be specified via the methods of this class.
+     *
+     * \verbatim embed:rst:leading-asterisk
+     * .. seealso::
+     *
+     *    The `NLopt website `_ contains a detailed
+     *    description of each supported solver.
+     *
+     * \endverbatim
+     *
+     * @param algo the name of the NLopt algorithm that will be used by this pagmo::nlopt object.
+ * + * @throws std::runtime_error if the NLopt version is not at least 2. + * @throws std::invalid_argument if \p algo is not one of the allowed algorithm names. + */ explicit nlopt(const std::string &algo) - : m_algo(algo), m_select(std::string("best")), m_replace(std::string("best")), + : m_algo(algo), m_select(std::string("best")), m_replace(std::string("worst")), m_rselect_seed(random_device::next()), m_e(static_cast(m_rselect_seed)) { // Check version. int major, minor, bugfix; ::nlopt_version(&major, &minor, &bugfix); if (major < 2) { - pagmo_throw(std::runtime_error, "Only NLopt version >= 2 is supported"); + pagmo_throw(std::runtime_error, "Only NLopt version >= 2 is supported"); // LCOV_EXCL_LINE } // Check the algorithm. @@ -662,11 +744,35 @@ class nlopt + "'. The supported algorithms are:\n" + oss.str()); } } - void set_random_selection_seed(unsigned seed) + /// Set the seed for the ``"random"`` selection/replacement policies. + /** + * @param seed the value that will be used to seed the random number generator used by the ``"random"`` + * selection/replacement policies. + */ + void set_random_sr_seed(unsigned seed) { m_rselect_seed = seed; m_e.seed(static_cast(m_rselect_seed)); } + /// Set the individual selection policy. + /** + * This method will set the policy that is used in evolve() to select the individual + * that will be optimised. + * + * The input string must be one of ``"best"``, ``"worst"`` and ``"random"``: + * - ``"best"`` will select the best individual in the population, + * - ``"worst"`` will select the worst individual in the population, + * - ``"random"`` will randomly choose one individual in the population. + * + * set_random_sr_seed() can be used to seed the random number generator used by the ``"random"`` policy. + * + * Instead of a selection policy, a specific individual in the population can be selected via + * set_selection(population::size_type). + * + * @param select the selection policy. + * + * @throws std::invalid_argument if \p select is not one of ``"best"``, ``"worst"`` or ``"random"``. + */ void set_selection(const std::string &select) { if (select != "best" && select != "worst" && select != "random") { @@ -676,14 +782,48 @@ class nlopt } m_select = select; } + /// Set the individual selection index. + /** + * This method will set the index of the individual that is selected for optimisation + * in evolve(). + * + * @param n the index in the population of the individual to be selected for optimisation. + */ void set_selection(population::size_type n) { m_select = n; } + /// Get the individual selection policy or index. + /** + * This method will return a \p boost::any containing either the individual selection policy (as an \p std::string) + * or the individual selection index (as a population::size_type). The selection policy or index is set via + * set_selection(const std::string &) and set_selection(population::size_type). + * + * @return the individual selection policy or index. + */ boost::any get_selection() const { return m_select; } + /// Set the individual replacement policy. + /** + * This method will set the policy that is used in evolve() to select the individual + * that will be replaced by the optimised individual. + * + * The input string must be one of ``"best"``, ``"worst"`` and ``"random"``: + * - ``"best"`` will select the best individual in the population, + * - ``"worst"`` will select the worst individual in the population, + * - ``"random"`` will randomly choose one individual in the population. 
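+     *
+     * For example (a minimal usage sketch):
+     * @code{.unparsed}
+     * nlopt n{"cobyla"};
+     * n.set_selection("random"); // optimise a randomly-chosen individual
+     * @endcode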
+     *
+     * set_random_sr_seed() can be used to seed the random number generator used by the ``"random"`` policy.
+     *
+     * Instead of a selection policy, a specific individual in the population can be selected via
+     * set_selection(population::size_type).
+     *
+     * @param select the selection policy.
+     *
+     * @throws std::invalid_argument if \p select is not one of ``"best"``, ``"worst"`` or ``"random"``.
+     */
     void set_selection(const std::string &select)
     {
         if (select != "best" && select != "worst" && select != "random") {
@@ -676,14 +782,48 @@ class nlopt
         }
         m_select = select;
     }
+    /// Set the individual selection index.
+    /**
+     * This method will set the index of the individual that is selected for optimisation
+     * in evolve().
+     *
+     * @param n the index in the population of the individual to be selected for optimisation.
+     */
     void set_selection(population::size_type n)
     {
         m_select = n;
     }
+    /// Get the individual selection policy or index.
+    /**
+     * This method will return a \p boost::any containing either the individual selection policy (as an \p
+     * std::string) or the individual selection index (as a population::size_type). The selection policy or index is
+     * set via set_selection(const std::string &) and set_selection(population::size_type).
+     *
+     * @return the individual selection policy or index.
+     */
     boost::any get_selection() const
     {
         return m_select;
     }
+    /// Set the individual replacement policy.
+    /**
+     * This method will set the policy that is used in evolve() to select the individual
+     * that will be replaced by the optimised individual.
+     *
+     * The input string must be one of ``"best"``, ``"worst"`` and ``"random"``:
+     * - ``"best"`` will select the best individual in the population,
+     * - ``"worst"`` will select the worst individual in the population,
+     * - ``"random"`` will randomly choose one individual in the population.
+     *
+     * set_random_sr_seed() can be used to seed the random number generator used by the ``"random"`` policy.
+     *
+     * Instead of a replacement policy, a specific individual in the population can be selected via
+     * set_replacement(population::size_type).
+     *
+     * @param replace the replacement policy.
+     *
+     * @throws std::invalid_argument if \p replace is not one of ``"best"``, ``"worst"`` or ``"random"``.
+     */
     void set_replacement(const std::string &replace)
     {
         if (replace != "best" && replace != "worst" && replace != "random") {
@@ -693,14 +833,53 @@ class nlopt
         }
         m_replace = replace;
     }
+    /// Set the individual replacement index.
+    /**
+     * This method will set the index of the individual that is replaced after the optimisation
+     * in evolve().
+     *
+     * @param n the index in the population of the individual to be replaced after the optimisation.
+     */
     void set_replacement(population::size_type n)
     {
         m_replace = n;
     }
+    /// Get the individual replacement policy or index.
+    /**
+     * This method will return a \p boost::any containing either the individual replacement policy (as an \p
+     * std::string) or the individual replacement index (as a population::size_type). The replacement policy or index
+     * is set via set_replacement(const std::string &) and set_replacement(population::size_type).
+     *
+     * @return the individual replacement policy or index.
+     */
     boost::any get_replacement() const
     {
         return m_replace;
     }
+    /// Evolve population.
+    /**
+     * This method will select an individual from \p pop, optimise it with the NLopt algorithm specified upon
+     * construction, replace an individual in \p pop with the optimised individual, and finally return \p pop.
+     * The individual selection and replacement criteria can be set via set_selection(const std::string &),
+     * set_selection(population::size_type), set_replacement(const std::string &) and
+     * set_replacement(population::size_type).
+     *
+     * @param pop the population to be optimised.
+     *
+     * @return the optimised population.
+     *
+     * @throws std::invalid_argument in the following cases:
+     * - the population's problem is multi-objective,
+     * - the setup of the NLopt algorithm fails (e.g., if the problem is constrained but the selected
+     *   NLopt solver does not support constrained optimisation),
+     * - the selected NLopt solver needs gradients but they are not provided by the population's
+     *   problem,
+     * - the components of the individual selected for optimisation contain NaNs or they are outside
+     *   the problem's bounds,
+     * - the individual selection/replacement index is not smaller
+     *   than the population's size.
+     * @throws unspecified any exception thrown by the public interface of pagmo::problem.
+     */
     population evolve(population pop) const
     {
         if (!pop.size()) {
@@ -732,9 +911,9 @@ class nlopt
         } else {
             const auto idx = boost::any_cast<population::size_type>(m_select);
             if (idx >= pop.size()) {
-                pagmo_throw(std::out_of_range, "cannot select the individual at index " + std::to_string(idx)
-                                                   + " for evolution: the population has a size of only "
-                                                   + std::to_string(pop.size()));
+                pagmo_throw(std::invalid_argument, "cannot select the individual at index " + std::to_string(idx)
+                                                       + " for evolution: the population has a size of only "
+                                                       + std::to_string(pop.size()));
             }
             initial_guess = pop.get_x()[idx];
         }
@@ -805,14 +984,61 @@ class nlopt
         // Return the evolved pop.
         return pop;
     }
+    /// Algorithm's name.
+    /**
+     * @return a human-readable name for the algorithm.
+     */
     std::string get_name() const
     {
         return "NLopt - " + m_algo;
     }
+    /// Set verbosity.
+ /** + * This method will set the algorithm's verbosity. If \p n is zero, no output is produced during the optimisation + * and no logging is performed. If \p n is nonzero, then every \p n objective function evaluations the status + * of the optimisation will be both printed to screen and recorded internally. See nlopt::log_line_type and + * nlopt::log_type for information on the logging format. + * + * Example (verbosity 1): + * @code{.unparsed} + * fevals: fitness: violated: viol. norm: + * 0 68.6966 1 0.252343 i + * 1 29.3926 1 15.1127 i + * 2 54.2992 1 2.05694 i + * 3 54.2992 1 2.05694 i + * 4 15.4544 2 9.56984 i + * 5 16.6126 2 1.80223 i + * 6 16.8454 2 0.414897 i + * 7 16.9794 2 0.0818469 i + * 8 17.0132 2 0.00243968 i + * 9 17.014 2 2.58628e-05 i + * 10 17.014 0 0 + * 11 17.014 0 0 + * 12 17.014 0 0 + * 13 17.014 0 0 + * 14 17.014 0 0 + * 15 17.014 0 0 + * 16 17.014 0 0 + * 17 17.014 0 0 + * 18 17.014 0 0 + * 19 17.014 0 0 + * @endcode + * The little ``i`` at the end of some rows indicates that the decision vector + * is infeasible. + * + * By default, the verbosity level is zero. + * + * @param n the desired verbosity level. + */ void set_verbosity(unsigned n) { m_verbosity = n; } + /// Get extra information about the algorithm. + /** + * @return a human-readable string containing useful information about the algorithm's properties + * (e.g., the stopping criteria, the selection/replacement policies, etc.). + */ std::string get_extra_info() const { int major, minor, bugfix; From 449e98314fe9fa6e1260739ae06a9d70038cec3e Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 00:10:33 +0200 Subject: [PATCH 31/57] Small printing change. --- include/pagmo/problem.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/pagmo/problem.hpp b/include/pagmo/problem.hpp index 9cdfb5bee..9b81b162a 100644 --- a/include/pagmo/problem.hpp +++ b/include/pagmo/problem.hpp @@ -1864,7 +1864,7 @@ class problem os << "\tEquality constraints dimension:\t\t" << p.get_nec() << '\n'; os << "\tInequality constraints dimension:\t" << p.get_nic() << '\n'; if (p.get_nec() + p.get_nic() > 0u) { - stream(os, "\tTolerances on constraints:\t", p.get_c_tol(), '\n'); + stream(os, "\tTolerances on constraints: ", p.get_c_tol(), '\n'); } os << "\tLower bounds: "; stream(os, p.get_bounds().first, '\n'); From c2be478e1c472930dc348856dbf3a6b76af41d30 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 00:10:57 +0200 Subject: [PATCH 32/57] First fully featured fully documented version of nlopt. --- include/pagmo/algorithms/nlopt.hpp | 199 ++++++++++++++++++++++++++++- 1 file changed, 195 insertions(+), 4 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 5a088ce50..11da4c955 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -670,8 +670,8 @@ class nlopt public: /// Default constructor. /** - * The default constructor initialises the pagmo::nlopt algorithm with the ``cobyla`` solver, - * the ``"best"`` individual selection strategy and the ``"worst"`` individual replacement strategy. + * The default constructor initialises the pagmo::nlopt algorithm with the ``"cobyla"`` solver, + * the ``"best"`` individual selection strategy and the ``"best"`` individual replacement strategy. * * @throws unspecified any exception thrown by pagmo::nlopt(const std::string &). */ @@ -681,7 +681,7 @@ class nlopt /// Constructor from solver name. 
/** * This constructor will initialise a pagmo::nlopt object which will use the NLopt algorithm specified by - * the input string \p algo, the ``"best"`` individual selection strategy and the ``"worst"`` individual + * the input string \p algo, the ``"best"`` individual selection strategy and the ``"best"`` individual * replacement strategy. \p algo is translated to an NLopt algorithm type according to the following * translation table: * \verbatim embed:rst:leading-asterisk @@ -723,7 +723,7 @@ class nlopt * @throws std::invalid_argument if \p algo is not one of the allowed algorithm names. */ explicit nlopt(const std::string &algo) - : m_algo(algo), m_select(std::string("best")), m_replace(std::string("worst")), + : m_algo(algo), m_select(std::string("best")), m_replace(std::string("best")), m_rselect_seed(random_device::next()), m_e(static_cast(m_rselect_seed)) { // Check version. @@ -1062,6 +1062,197 @@ class nlopt + (m_sc_maxeval <= 0. ? "disabled" : detail::to_string(m_sc_maxeval)) + "\n\t\tmaxtime: " + (m_sc_maxtime <= 0. ? "disabled" : detail::to_string(m_sc_maxtime)) + "\n"; } + /// Get the optimisation log. + /** + * See nlopt::log_type for a description of the optimisation log. Logging is turned on/off via + * set_verbosity(). + * + * @return a const reference to the log. + */ + const log_type &get_log() const + { + return m_log; + } + /// Get the name of the solver that was used to construct this pagmo::nlopt algorithm. + /** + * @return the name of the NLopt solver used upon construction. + */ + std::string solver_name() const + { + return m_algo; + } + /// Get the result of the last optimisation. + /** + * @return the result of the last evolve() call, or ``NLOPT_SUCCESS`` if no optimisations have been + * run yet. + */ + ::nlopt_result last_opt_result() const + { + return m_last_opt_result; + } + /// Get the ``stopval`` stopping criterion. + /** + * The ``stopval`` stopping criterion instructs the solver to stop when an objective value less than + * or equal to ``stopval`` is found. Defaults to ``-HUGE_VAL`` (that is, this stopping criterion + * is disabled by default). + * + * @return the ``stopval`` stopping criterion for this pagmo::nlopt. + */ + double get_stopval() const + { + return m_sc_stopval; + } + /// Set the ``stopval`` stopping criterion. + /** + * @param stopval the desired value for the ``stopval`` stopping criterion (see get_stopval()). + * + * @throws std::invalid_argument if \p stopval is NaN. + */ + void set_stopval(double stopval) + { + if (std::isnan(stopval)) { + pagmo_throw(std::invalid_argument, "The 'stopval' stopping criterion cannot be NaN"); + } + m_sc_stopval = stopval; + } + /// Get the ``ftol_rel`` stopping criterion. + /** + * The ``ftol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the + * optimum) changes the objective function value by less than ``ftol_rel`` multiplied by the absolute value of the + * function value. Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``ftol_rel`` stopping criterion for this pagmo::nlopt. + */ + double get_ftol_rel() const + { + return m_sc_ftol_rel; + } + /// Set the ``ftol_rel`` stopping criterion. + /** + * @param ftol_rel the desired value for the ``ftol_rel`` stopping criterion (see get_ftol_rel()). + * + * @throws std::invalid_argument if \p ftol_rel is NaN. 
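+     *
+     * A short configuration sketch (hypothetical values; the criteria can be freely
+     * combined, and the first one to be satisfied stops the solver):
+     * @code{.unparsed}
+     * pagmo::nlopt n{"lbfgs"};
+     * n.set_ftol_rel(1E-9); // stop on small relative changes of the objective,
+     * n.set_maxeval(1000);  // or after at most 1000 objective evaluations.
+     * @endcode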
+ */ + void set_ftol_rel(double ftol_rel) + { + if (std::isnan(ftol_rel)) { + pagmo_throw(std::invalid_argument, "The 'ftol_rel' stopping criterion cannot be NaN"); + } + m_sc_ftol_rel = ftol_rel; + } + /// Get the ``ftol_abs`` stopping criterion. + /** + * The ``ftol_abs`` stopping criterion instructs the solver to stop when an optimization step + * (or an estimate of the optimum) changes the function value by less than ``ftol_abs``. + * Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``ftol_abs`` stopping criterion for this pagmo::nlopt. + */ + double get_ftol_abs() const + { + return m_sc_ftol_abs; + } + /// Set the ``ftol_abs`` stopping criterion. + /** + * @param ftol_abs the desired value for the ``ftol_abs`` stopping criterion (see get_ftol_abs()). + * + * @throws std::invalid_argument if \p ftol_abs is NaN. + */ + void set_ftol_abs(double ftol_abs) + { + if (std::isnan(ftol_abs)) { + pagmo_throw(std::invalid_argument, "The 'ftol_abs' stopping criterion cannot be NaN"); + } + m_sc_ftol_abs = ftol_abs; + } + /// Get the ``xtol_rel`` stopping criterion. + /** + * The ``xtol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the + * optimum) changes every parameter by less than ``xtol_rel`` multiplied by the absolute value of the parameter. + * Defaults to 1E-8. + * + * @return the ``xtol_rel`` stopping criterion for this pagmo::nlopt. + */ + double get_xtol_rel() const + { + return m_sc_xtol_rel; + } + /// Set the ``xtol_rel`` stopping criterion. + /** + * @param xtol_rel the desired value for the ``xtol_rel`` stopping criterion (see get_xtol_rel()). + * + * @throws std::invalid_argument if \p xtol_rel is NaN. + */ + void set_xtol_rel(double xtol_rel) + { + if (std::isnan(xtol_rel)) { + pagmo_throw(std::invalid_argument, "The 'xtol_rel' stopping criterion cannot be NaN"); + } + m_sc_xtol_rel = xtol_rel; + } + /// Get the ``xtol_abs`` stopping criterion. + /** + * The ``xtol_abs`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the + * optimum) changes every parameter by less than ``xtol_abs``. + * Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``xtol_abs`` stopping criterion for this pagmo::nlopt. + */ + double get_xtol_abs() const + { + return m_sc_xtol_abs; + } + /// Set the ``xtol_abs`` stopping criterion. + /** + * @param xtol_abs the desired value for the ``xtol_abs`` stopping criterion (see get_xtol_abs()). + * + * @throws std::invalid_argument if \p xtol_abs is NaN. + */ + void set_xtol_abs(double xtol_abs) + { + if (std::isnan(xtol_abs)) { + pagmo_throw(std::invalid_argument, "The 'xtol_abs' stopping criterion cannot be NaN"); + } + m_sc_xtol_abs = xtol_abs; + } + /// Get the ``maxeval`` stopping criterion. + /** + * The ``maxeval`` stopping criterion instructs the solver to stop when the number of function evaluations exceeds + * ``maxeval``. Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``maxeval`` stopping criterion for this pagmo::nlopt. + */ + int get_maxeval() const + { + return m_sc_maxeval; + } + /// Set the ``maxeval`` stopping criterion. + /** + * @param n the desired value for the ``maxeval`` stopping criterion (see get_maxeval()). + */ + void set_maxeval(int n) + { + m_sc_maxeval = n; + } + /// Get the ``maxtime`` stopping criterion. 
+ /** + * The ``maxtime`` stopping criterion instructs the solver to stop when the optimization time (in seconds) exceeds + * ``maxtime``. Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``maxtime`` stopping criterion for this pagmo::nlopt. + */ + int get_maxtime() const + { + return m_sc_maxtime; + } + /// Set the ``maxtime`` stopping criterion. + /** + * @param n the desired value for the ``maxtime`` stopping criterion (see get_maxtime()). + */ + void set_maxtime(int n) + { + m_sc_maxtime = n; + } private: std::string m_algo; From 1a7413ce9e8ee667553ed08b065c95f88eb4286a Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 00:28:39 +0200 Subject: [PATCH 33/57] Various small changes. [skip ci] --- include/pagmo/algorithms/nlopt.hpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 11da4c955..8c502e17f 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -612,7 +612,9 @@ struct nlopt_obj { * * The desired NLopt solver is selected upon construction of a pagmo::nlopt algorithm. Various properties * of the solver (e.g., the stopping criteria) can be configured after construction via methods provided - * by this class. + * by this class. Note that multiple stopping criteria can be active at the same time: the optimisation will + * stop as soon as at least one stopping criterion is satisfied. By default, only the ``xtol_rel`` stopping + * criterion is active (see get_xtol_rel()). * * All NLopt solvers support only single-objective optimisation, and, as usual in pagmo, minimisation * is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient. @@ -862,7 +864,9 @@ class nlopt * construction, replace an individual in \p pop with the optimised individual, and finally return \p pop. * The individual selection and replacement criteria can be set via set_selection(const std::string &), * set_selection(population::size_type), set_replacement(const std::string &) and - * set_replacement(population::size_type). + * set_replacement(population::size_type). The NLopt solver will run until one of the stopping criteria + * is satisfied, and the return status of the NLopt solver will be recorded (it can be fetched with + * get_last_opt_result()). * * @param pop the population to be optimised. * @@ -876,8 +880,7 @@ class nlopt * problem, * - the components of the individual selected for optimisation contain NaNs or they are outside * the problem's bounds, - * - the individual selection/replacement index if not smaller - * than the population's size + * - the individual selection/replacement index is not smaller than the population's size. * @throws unspecified any exception thrown by the public interface of pagmo::problem. */ population evolve(population pop) const @@ -997,7 +1000,7 @@ class nlopt * This method will set the algorithm's verbosity. If \p n is zero, no output is produced during the optimisation * and no logging is performed. If \p n is nonzero, then every \p n objective function evaluations the status * of the optimisation will be both printed to screen and recorded internally. See nlopt::log_line_type and - * nlopt::log_type for information on the logging format. + * nlopt::log_type for information on the logging format. The internal log can be fetched via get_log(). 
* * Example (verbosity 1): * @code{.unparsed} @@ -1077,7 +1080,7 @@ class nlopt /** * @return the name of the NLopt solver used upon construction. */ - std::string solver_name() const + std::string get_solver_name() const { return m_algo; } @@ -1086,14 +1089,14 @@ class nlopt * @return the result of the last evolve() call, or ``NLOPT_SUCCESS`` if no optimisations have been * run yet. */ - ::nlopt_result last_opt_result() const + ::nlopt_result get_last_opt_result() const { return m_last_opt_result; } /// Get the ``stopval`` stopping criterion. /** * The ``stopval`` stopping criterion instructs the solver to stop when an objective value less than - * or equal to ``stopval`` is found. Defaults to ``-HUGE_VAL`` (that is, this stopping criterion + * or equal to ``stopval`` is found. Defaults to the C constant ``-HUGE_VAL`` (that is, this stopping criterion * is disabled by default). * * @return the ``stopval`` stopping criterion for this pagmo::nlopt. From fba4b3bcf656482917ac24df7178d4f0208b656f Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 14:43:56 +0200 Subject: [PATCH 34/57] Tentative MSVC fix. --- include/pagmo/algorithms/nlopt.hpp | 42 ++++++++++++++++-------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 8c502e17f..35cf5f22a 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -66,19 +66,29 @@ see https://www.gnu.org/licenses/. */ #include #include -#if defined(_MSC_VER) +namespace pagmo +{ -// Disable a warning from MSVC. -#pragma warning(push, 0) -#pragma warning(disable : 4996) +namespace detail +{ -#endif +#if defined(_MSC_VER) -namespace pagmo +template +void inline unchecked_copy(Int size, const T *begin, T *dest) { + std::copy(stdext::make_checked_array_iterator(begin,size), stdext::make_checked_array_iterator(begin,size,size), stdext::make_checked_array_iterator(dest,size)); +} -namespace detail +#else + +template +void inline unchecked_copy(Int size, const T *begin, T *dest) { + std::copy(begin, begin + size, dest); +} + +#endif // Usual trick with global read-only data useful to the NLopt wrapper. template @@ -272,7 +282,7 @@ struct nlopt_obj { } } else { // Dense gradient case. - std::copy(gradient.data(), gradient.data() + p.get_nx(), grad); + detail::unchecked_copy(p.get_nx(), gradient.data(), grad); } } @@ -357,7 +367,7 @@ struct nlopt_obj { // Compute fitness and write IC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - std::copy(fitness.data() + 1 + p.get_nec(), fitness.data() + 1 + p.get_nec() + m, result); + detail::unchecked_copy(p.get_nic(), fitness.data() + 1 + p.get_nec(), result); if (grad) { // Handle gradient, if requested. @@ -405,8 +415,7 @@ struct nlopt_obj { } } else { // Dense gradient. - std::copy(gradient.data() + p.get_nx() * (1u + p.get_nec()), - gradient.data() + gradient.size(), grad); + detail::unchecked_copy(p.get_nic() * p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); } } }, @@ -455,7 +464,7 @@ struct nlopt_obj { // Compute fitness and write EC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - std::copy(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), result); + detail::unchecked_copy(p.get_nec(), fitness.data() + 1, result); if (grad) { // Handle gradient, if requested. @@ -506,8 +515,7 @@ struct nlopt_obj { } } else { // Dense gradient. 
- std::copy(gradient.data() + p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), - grad); + detail::unchecked_copy(p.get_nx() * p.get_nec(), gradient.data() + p.get_nx(), grad); } } }, @@ -1278,12 +1286,6 @@ class nlopt }; } -#if defined(_MSC_VER) - -#pragma warning(pop) - -#endif - #else // PAGMO_WITH_NLOPT #error The nlopt.hpp header was included, but pagmo was not compiled with NLopt support From fdfc917449786c99943bd8974c8aaee2a8af8bd3 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 20:57:50 +0200 Subject: [PATCH 35/57] Minor bits. --- include/pagmo/algorithms/nlopt.hpp | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 35cf5f22a..1dd5411db 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -74,10 +74,13 @@ namespace detail #if defined(_MSC_VER) +// NOTE: this is a wrapper around std::copy() for use in MSVC in conjunction with raw pointers. +// In debug mode, MSVC will complain about unchecked iterators unless instructed otherwise. template void inline unchecked_copy(Int size, const T *begin, T *dest) { - std::copy(stdext::make_checked_array_iterator(begin,size), stdext::make_checked_array_iterator(begin,size,size), stdext::make_checked_array_iterator(dest,size)); + std::copy(stdext::make_checked_array_iterator(begin, size), stdext::make_checked_array_iterator(begin, size, size), + stdext::make_checked_array_iterator(dest, size)); } #else @@ -85,7 +88,7 @@ void inline unchecked_copy(Int size, const T *begin, T *dest) template void inline unchecked_copy(Int size, const T *begin, T *dest) { - std::copy(begin, begin + size, dest); + std::copy(begin, begin + size, dest); } #endif @@ -415,7 +418,8 @@ struct nlopt_obj { } } else { // Dense gradient. - detail::unchecked_copy(p.get_nic() * p.get_nx(), gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); + detail::unchecked_copy(p.get_nic() * p.get_nx(), + gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); } } }, @@ -1025,17 +1029,9 @@ class nlopt * 9 17.014 2 2.58628e-05 i * 10 17.014 0 0 * 11 17.014 0 0 - * 12 17.014 0 0 - * 13 17.014 0 0 - * 14 17.014 0 0 - * 15 17.014 0 0 - * 16 17.014 0 0 - * 17 17.014 0 0 - * 18 17.014 0 0 - * 19 17.014 0 0 * @endcode - * The little ``i`` at the end of some rows indicates that the decision vector - * is infeasible. + * The ``i`` at the end of some rows indicates that the decision vector is infeasible. Feasibility + * is checked against the problem's tolerance. * * By default, the verbosity level is zero. * From faa4fc3713266038c8dfe67e00c830d89e92ea7a Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 22:33:40 +0200 Subject: [PATCH 36/57] testing/doc bits. --- include/pagmo/algorithms/nlopt.hpp | 3 + tests/nlopt.cpp | 115 +++++++++++++++++++++++++++-- 2 files changed, 110 insertions(+), 8 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 1dd5411db..2cba9bfdd 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -651,6 +651,9 @@ struct nlopt_obj { * of each supported solver. * * \endverbatim + * + * **NOTE**: a moved-from pagmo::nlopt is destructible and assignable. Any other operation will result + * in undefined behaviour. */ // TODO: // - investiagate the use of a fitness cache, after we have good perf testing in place. 
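
The moved-from guarantee in the NOTE above is deliberately minimal: only destruction
and assignment are allowed. A rough illustration of the intended contract (a
hypothetical snippet, not part of this diff):

    pagmo::nlopt a{"slsqp"};
    pagmo::nlopt b{std::move(a)}; // a is now moved-from.
    a = pagmo::nlopt{"cobyla"};   // OK: assignment "revives" a.
    // Calling anything else on a moved-from instance (e.g. a.evolve(pop))
    // before re-assignment would be undefined behaviour.

The tests added below exercise exactly this revive-by-assignment pattern.
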
diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 9a3f798a9..2e5d6bf27 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -29,6 +29,13 @@ see https://www.gnu.org/licenses/. */ #define BOOST_TEST_MODULE nlopt_test #include +#include +#include +#include +#include +#include +#include + #include #include #include @@ -37,13 +44,105 @@ see https://www.gnu.org/licenses/. */ using namespace pagmo; -BOOST_AUTO_TEST_CASE(nlopt_algorithm_construction) +BOOST_AUTO_TEST_CASE(nlopt_construction) { - population pop{hock_schittkowsky_71{}, 5}; - pop.get_problem().set_c_tol({1E-6, 1E-6}); - algorithm algo{nlopt{"slsqp"}}; - algo.set_verbosity(1); - pop = algo.evolve(pop); - std::cout << '\n' << algo << '\n'; - std::cout << '\n' << pop << '\n'; + algorithm a{nlopt{}}; + BOOST_CHECK_EQUAL(a.extract()->get_solver_name(), "cobyla"); + // Check params of default-constructed instance. + BOOST_CHECK_EQUAL(boost::any_cast(a.extract()->get_selection()), "best"); + BOOST_CHECK_EQUAL(boost::any_cast(a.extract()->get_replacement()), "best"); + BOOST_CHECK(a.extract()->get_name() != ""); + BOOST_CHECK(a.extract()->get_extra_info() != ""); + BOOST_CHECK(a.extract()->get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(a.extract()->get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(a.extract()->get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(a.extract()->get_ftol_abs(), 0.); + BOOST_CHECK_EQUAL(a.extract()->get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(a.extract()->get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(a.extract()->get_maxeval(), 0); + BOOST_CHECK_EQUAL(a.extract()->get_maxtime(), 0); + // Change a few params and copy. + a.extract()->set_selection(12u); + a.extract()->set_replacement("random"); + a.extract()->set_ftol_abs(1E-5); + a.extract()->set_maxeval(123); + // Copy. + auto b{a}; + BOOST_CHECK_EQUAL(boost::any_cast(b.extract()->get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(b.extract()->get_replacement()), "random"); + BOOST_CHECK(b.extract()->get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(b.extract()->get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(b.extract()->get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(b.extract()->get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(b.extract()->get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(b.extract()->get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(b.extract()->get_maxeval(), 123); + BOOST_CHECK_EQUAL(b.extract()->get_maxtime(), 0); + algorithm c; + c = b; + BOOST_CHECK_EQUAL(boost::any_cast(c.extract()->get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(c.extract()->get_replacement()), "random"); + BOOST_CHECK(c.extract()->get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(c.extract()->get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(c.extract()->get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(c.extract()->get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(c.extract()->get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(c.extract()->get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(c.extract()->get_maxeval(), 123); + BOOST_CHECK_EQUAL(c.extract()->get_maxtime(), 0); + // Move. 
+ auto tmp{*a.extract()}; + auto d{std::move(tmp)}; + BOOST_CHECK_EQUAL(boost::any_cast(d.get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_replacement()), "random"); + BOOST_CHECK(d.get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(d.get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(d.get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(d.get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(d.get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(d.get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(d.get_maxeval(), 123); + BOOST_CHECK_EQUAL(d.get_maxtime(), 0); + nlopt e; + e = std::move(d); + BOOST_CHECK_EQUAL(boost::any_cast(e.get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(e.get_replacement()), "random"); + BOOST_CHECK(e.get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(e.get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(e.get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(e.get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(e.get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(e.get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(e.get_maxeval(), 123); + BOOST_CHECK_EQUAL(e.get_maxtime(), 0); + // Revive moved-from. + d = std::move(e); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_replacement()), "random"); + BOOST_CHECK(d.get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(d.get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(d.get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(d.get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(d.get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(d.get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(d.get_maxeval(), 123); + BOOST_CHECK_EQUAL(d.get_maxtime(), 0); + // Check exception throwing on ctor. + BOOST_CHECK_THROW(nlopt{""}, std::invalid_argument); } + +BOOST_AUTO_TEST_CASE(nlopt_selection_replacement) +{ + nlopt a; + a.set_selection("worst"); + BOOST_CHECK_EQUAL(boost::any_cast(a.get_selection()), "worst"); + BOOST_CHECK_THROW(a.set_selection("worstee"), std::invalid_argument); + a.set_selection(0); + BOOST_CHECK_EQUAL(boost::any_cast(a.get_selection()), 0u); + a.set_replacement("worst"); + BOOST_CHECK_EQUAL(boost::any_cast(a.get_replacement()), "worst"); + BOOST_CHECK_THROW(a.set_replacement("worstee"), std::invalid_argument); + a.set_replacement(0); + BOOST_CHECK_EQUAL(boost::any_cast(a.get_replacement()), 0u); + a.set_random_sr_seed(123); +} \ No newline at end of file From 20e74b6a51f065fc79d99fe5e97293041bd2f943 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Thu, 6 Apr 2017 22:45:47 +0200 Subject: [PATCH 37/57] Restore appveyor builds. --- appveyor.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 59bb83083..5cde0299b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -3,12 +3,12 @@ environment: PLATFORMTOOLSET: "v140" matrix: - # - BUILD_TYPE: "Python35" - # COMPILER: MSVC15 - # PLATFORM: "x64" - # - BUILD_TYPE: "Python36" - # COMPILER: MSVC15 - # PLATFORM: "x64" + - BUILD_TYPE: "Python35" + COMPILER: MSVC15 + PLATFORM: "x64" + - BUILD_TYPE: "Python36" + COMPILER: MSVC15 + PLATFORM: "x64" - BUILD_TYPE: "Debug" COMPILER: MSVC15 PLATFORM: "x64" From d2209438f6461089467c6abd755ea043394477a7 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 00:46:16 +0200 Subject: [PATCH 38/57] Testing bits and an important bugfix about not throwing exceptions from C. 
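
The heart of the bugfix deserves a note: the NLopt C core sits between evolve() and the
C++ objective/constraint callbacks, so letting a C++ exception unwind through the C
stack frames is undefined behaviour. The callbacks therefore trap everything, stash the
exception in an std::exception_ptr, request a stop, and evolve() rethrows once the C API
has returned. A distilled, self-contained sketch of the pattern (the names cb_data and
objfun_cb are illustrative, not the literal code in the diff below):

    #include <cmath>
    #include <exception>
    #include <stdexcept>

    struct cb_data {
        std::exception_ptr m_eptr; // Written by the callback, checked by the caller.
    };

    extern "C" double objfun_cb(unsigned dim, const double *x, double *, void *data)
    {
        auto &d = *static_cast<cb_data *>(data);
        try {
            if (!x || !dim) {
                throw std::invalid_argument("empty decision vector");
            }
            return x[0]; // Placeholder for the actual fitness computation.
        } catch (...) {
            // Stash the error for later (the real code also calls
            // ::nlopt_force_stop() here to halt the optimisation)...
            d.m_eptr = std::current_exception();
            // ...and hand NLopt a harmless dummy value.
            return HUGE_VAL;
        }
    }

    // Back on the C++ side, after ::nlopt_optimize() has returned:
    //     if (d.m_eptr) { std::rethrow_exception(d.m_eptr); }
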
--- include/pagmo/algorithms/nlopt.hpp | 508 +++++++++++++++-------------- tests/nlopt.cpp | 20 +- 2 files changed, 291 insertions(+), 237 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 2cba9bfdd..440fc82eb 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -40,6 +40,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -190,10 +191,10 @@ struct nlopt_obj { // Extract and set problem dimension. const auto n = boost::numeric_cast(prob.get_nx()); - m_value.reset(::nlopt_create(algo, n)); // Try to init the nlopt_obj. + m_value.reset(::nlopt_create(algo, n)); if (!m_value) { - pagmo_throw(std::runtime_error, "the creation of the nlopt_opt object failed"); + pagmo_throw(std::runtime_error, "the creation of the nlopt_opt object failed"); // LCOV_EXCL_LINE } // NLopt does not handle MOO. @@ -208,15 +209,19 @@ struct nlopt_obj { const auto bounds = prob.get_bounds(); res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } // This is just a vector_double that is re-used across objfun invocations. @@ -230,154 +235,45 @@ struct nlopt_obj { // Get *this back from the function data. auto &nlo = *static_cast(f_data); - // A few shortcuts. - auto &p = nlo.m_prob; - auto &dv = nlo.m_dv; - const auto verb = nlo.m_verbosity; - auto &f_count = nlo.m_objfun_counter; - auto &log = nlo.m_log; - - // A couple of sanity checks. - assert(dim == p.get_nx()); - assert(dv.size() == dim); - - if (grad && !p.has_gradient()) { - // If grad is not null, it means we are in an algorithm - // that needs the gradient. If the problem does not support it, - // we error out. - pagmo_throw(std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' a fitness gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); - } - - // Copy the decision vector in our temporary dv vector_double, - // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); - - // Compute fitness and, if needed, gradient. - const auto fitness = p.fitness(dv); - if (grad) { - const auto gradient = p.gradient(dv); - - if (p.has_gradient_sparsity()) { - // Sparse gradient case. - auto &sp = nlo.m_sp; - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. - assert(gradient.size() == sp.size()); - auto g_it = gradient.begin(); - - // First we fill the dense output gradient with zeroes. - std::fill(grad, grad + dim, 0.); - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. 
- for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { - // NOTE: we just need the gradient of the objfun, - // i.e., those (i,j) pairs in which i == 0. We know that the gradient - // of the objfun, if present, starts at the beginning of sp, as sp is - // sorted in lexicographic fashion. - grad[it->second] = *g_it; - } - } else { - // Dense gradient case. - detail::unchecked_copy(p.get_nx(), gradient.data(), grad); - } - } - - // Update the log if requested. - if (verb && !(f_count % verb)) { - // Constraints bits. - const auto ctol = p.get_c_tol(); - const auto c1eq = detail::test_eq_constraints(fitness.data() + 1, fitness.data() + 1 + p.get_nec(), - ctol.data()); - const auto c1ineq = detail::test_ineq_constraints( - fitness.data() + 1 + p.get_nec(), fitness.data() + fitness.size(), ctol.data() + p.get_nec()); - // This will be the total number of violated constraints. - const auto nv = p.get_nc() - c1eq.first - c1ineq.first; - // This will be the norm of the violation. - const auto l = c1eq.second + c1ineq.second; - // Test feasibility. - const auto feas = p.feasibility_f(fitness); - - if (!(f_count / verb % 50u)) { - // Every 50 lines print the column names. - print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", - std::setw(15), "viol. norm:", '\n'); - } - // Print to screen the log line. - print(std::setw(10), f_count, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), l, - feas ? "" : " i", '\n'); - // Record the log. - log.emplace_back(f_count, fitness[0], nv, l, feas); - } - - // Update the counter. - ++f_count; - - // Return the objfun value. - return fitness[0]; - }, - static_cast(this)); - if (res != NLOPT_SUCCESS) { - pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" - + data::names.right.at(algo) + "', the error is: " - + nlopt_res2string(res)); - } - - // Vector-valued constraints. - const auto nic = boost::numeric_cast(prob.get_nic()); - const auto nec = boost::numeric_cast(prob.get_nec()); - const auto c_tol = prob.get_c_tol(); - - // Inequality. - if (nic) { - res = ::nlopt_add_inequality_mconstraint( - m_value.get(), nic, - [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { - // Get *this back from the function data. - auto &nlo = *static_cast(f_data); - + // NOTE: the idea here is that we wrap everything in a try/catch block, + // and, if any exception is thrown, we record it into the nlo object + // and re-throw it later. We do this because we are using the NLopt C API, + // and if we let exceptions out of here we run in undefined behaviour. + // We do the same for the constraints functions. + try { // A few shortcuts. auto &p = nlo.m_prob; auto &dv = nlo.m_dv; + const auto verb = nlo.m_verbosity; + auto &f_count = nlo.m_objfun_counter; + auto &log = nlo.m_log; // A couple of sanity checks. assert(dim == p.get_nx()); assert(dv.size() == dim); - assert(m == p.get_nic()); if (grad && !p.has_gradient()) { // If grad is not null, it means we are in an algorithm // that needs the gradient. If the problem does not support it, // we error out. 
- pagmo_throw( - std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' an inequality constraints gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); + pagmo_throw(std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' a fitness gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); } // Copy the decision vector in our temporary dv vector_double, // for use in the pagmo API. std::copy(x, x + dim, dv.begin()); - // Compute fitness and write IC to the output. - // NOTE: fitness is nobj + nec + nic. + // Compute fitness and, if needed, gradient. const auto fitness = p.fitness(dv); - detail::unchecked_copy(p.get_nic(), fitness.data() + 1 + p.get_nec(), result); - if (grad) { - // Handle gradient, if requested. const auto gradient = p.gradient(dv); if (p.has_gradient_sparsity()) { - // Sparse gradient. + // Sparse gradient case. auto &sp = nlo.m_sp; // NOTE: problem::gradient() has already checked that // the returned vector has size m_gs_dim, i.e., the stored @@ -385,42 +281,168 @@ struct nlopt_obj { // problem::gradient_sparsity() also checks that the returned // vector has size m_gs_dim, so these two must have the same size. assert(gradient.size() == sp.size()); + auto g_it = gradient.begin(); - // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); - - // Now we need to go into the sparsity pattern and find where - // the sparsity data for the constraints start. - using pair_t = sparsity_pattern::value_type; - auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); - if (it_sp == sp.end()) { - // This means that the sparsity data for ineq constraints is empty. Just return. - return; + // First we fill the dense output gradient with zeroes. + std::fill(grad, grad + dim, 0.); + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { + // NOTE: we just need the gradient of the objfun, + // i.e., those (i,j) pairs in which i == 0. We know that the gradient + // of the objfun, if present, starts at the beginning of sp, as sp is + // sorted in lexicographic fashion. + grad[it->second] = *g_it; } + } else { + // Dense gradient case. + detail::unchecked_copy(p.get_nx(), gradient.data(), grad); + } + } - // Need to do a bit of horrid overflow checking :/. - using diff_type = std::iterator_traits::difference_type; - using udiff_type = std::make_unsigned::type; - if (sp.size() > static_cast(std::numeric_limits::max())) { - pagmo_throw(std::overflow_error, - "Overflow error, the sparsity pattern size is too large."); - } - // This is the index at which the ineq constraints start. - const auto idx = std::distance(sp.begin(), it_sp); - // Grab the start of the gradient data for the ineq constraints. - auto g_it = gradient.data() + idx; + // Update the log if requested. + if (verb && !(f_count % verb)) { + // Constraints bits. 
+ const auto ctol = p.get_c_tol(); + const auto c1eq = detail::test_eq_constraints(fitness.data() + 1, + fitness.data() + 1 + p.get_nec(), ctol.data()); + const auto c1ineq + = detail::test_ineq_constraints(fitness.data() + 1 + p.get_nec(), + fitness.data() + fitness.size(), ctol.data() + p.get_nec()); + // This will be the total number of violated constraints. + const auto nv = p.get_nc() - c1eq.first - c1ineq.first; + // This will be the norm of the violation. + const auto l = c1eq.second + c1ineq.second; + // Test feasibility. + const auto feas = p.feasibility_f(fitness); + + if (!(f_count / verb % 50u)) { + // Every 50 lines print the column names. + print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", + std::setw(15), "viol. norm:", '\n'); + } + // Print to screen the log line. + print(std::setw(10), f_count, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), l, + feas ? "" : " i", '\n'); + // Record the log. + log.emplace_back(f_count, fitness[0], nv, l, feas); + } - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. Run until sp.end() as the IC are at the - // end of the sparsity/gradient vector. - for (; it_sp != sp.end(); ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; + // Update the counter. + ++f_count; + + // Return the objfun value. + return fitness[0]; + } catch (...) { + nlo.m_eptr = std::current_exception(); + ::nlopt_force_stop(nlo.m_value.get()); + return HUGE_VAL; + } + }, + static_cast(this)); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + } + + // Vector-valued constraints. + const auto nic = boost::numeric_cast(prob.get_nic()); + const auto nec = boost::numeric_cast(prob.get_nec()); + const auto c_tol = prob.get_c_tol(); + + // Inequality. + if (nic) { + res = ::nlopt_add_inequality_mconstraint( + m_value.get(), nic, + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + try { + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + assert(m == p.get_nic()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw(std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an inequality constraints gradient was requested, but the " + "optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and write IC to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + detail::unchecked_copy(p.get_nic(), fitness.data() + 1 + p.get_nec(), result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + if (p.has_gradient_sparsity()) { + // Sparse gradient. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. 
On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); + if (it_sp == sp.end()) { + // This means that the sparsity data for ineq constraints is empty. Just return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, + "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the ineq constraints start. + const auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the ineq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. Run until sp.end() as the IC are at the + // end of the sparsity/gradient vector. + for (; it_sp != sp.end(); ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } else { + // Dense gradient. + detail::unchecked_copy(p.get_nic() * p.get_nx(), + gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); } - } else { - // Dense gradient. - detail::unchecked_copy(p.get_nic() * p.get_nx(), - gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); } + } catch (...) { + nlo.m_eptr = std::current_exception(); + ::nlopt_force_stop(nlo.m_value.get()); } }, static_cast(this), c_tol.data() + nec); @@ -440,87 +462,92 @@ struct nlopt_obj { // Get *this back from the function data. auto &nlo = *static_cast(f_data); - // A few shortcuts. - auto &p = nlo.m_prob; - auto &dv = nlo.m_dv; - - // A couple of sanity checks. - assert(dim == p.get_nx()); - assert(dv.size() == dim); - assert(m == p.get_nec()); - - if (grad && !p.has_gradient()) { - // If grad is not null, it means we are in an algorithm - // that needs the gradient. If the problem does not support it, - // we error out. - pagmo_throw( - std::invalid_argument, - "during an optimization with the NLopt algorithm '" - + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) - + "' an equality constraints gradient was requested, but the optimisation problem '" - + p.get_name() + "' does not provide it"); - } - - // Copy the decision vector in our temporary dv vector_double, - // for use in the pagmo API. - std::copy(x, x + dim, dv.begin()); - - // Compute fitness and write EC to the output. - // NOTE: fitness is nobj + nec + nic. - const auto fitness = p.fitness(dv); - detail::unchecked_copy(p.get_nec(), fitness.data() + 1, result); - - if (grad) { - // Handle gradient, if requested. - const auto gradient = p.gradient(dv); - - if (p.has_gradient_sparsity()) { - // Sparse gradient case. - auto &sp = nlo.m_sp; - // NOTE: problem::gradient() has already checked that - // the returned vector has size m_gs_dim, i.e., the stored - // size of the sparsity pattern. On the other hand, - // problem::gradient_sparsity() also checks that the returned - // vector has size m_gs_dim, so these two must have the same size. 
- assert(gradient.size() == sp.size()); - - // Let's first fill it with zeroes. - std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); - - // Now we need to go into the sparsity pattern and find where - // the sparsity data for the constraints start. - using pair_t = sparsity_pattern::value_type; - auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); - if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { - // This means that there sparsity data for eq constraints is empty: either we went - // at the end of sp, or the first index pair found refers to inequality constraints. - // Just - // return. - return; - } - - // Need to do a bit of horrid overflow checking :/. - using diff_type = std::iterator_traits::difference_type; - using udiff_type = std::make_unsigned::type; - if (sp.size() > static_cast(std::numeric_limits::max())) { - pagmo_throw(std::overflow_error, - "Overflow error, the sparsity pattern size is too large."); - } - // This is the index at which the eq constraints start. - const auto idx = std::distance(sp.begin(), it_sp); - // Grab the start of the gradient data for the eq constraints. - auto g_it = gradient.data() + idx; + try { + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + assert(m == p.get_nec()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an equality constraints gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } - // Then we iterate over the sparsity pattern, and fill in the - // nonzero bits in grad. We terminate either at the end of sp, or when - // we encounter the first inequality constraint. - for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and write EC to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + detail::unchecked_copy(p.get_nec(), fitness.data() + 1, result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + if (p.has_gradient_sparsity()) { + // Sparse gradient case. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. 
+ using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); + if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { + // This means that there sparsity data for eq constraints is empty: either we went + // at the end of sp, or the first index pair found refers to inequality constraints. + // Just + // return. + return; + } + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, + "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the eq constraints start. + const auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the eq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. We terminate either at the end of sp, or when + // we encounter the first inequality constraint. + for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { + grad[it_sp->second] = *g_it; + } + } else { + // Dense gradient. + detail::unchecked_copy(p.get_nx() * p.get_nec(), gradient.data() + p.get_nx(), grad); } - } else { - // Dense gradient. - detail::unchecked_copy(p.get_nx() * p.get_nec(), gradient.data() + p.get_nx(), grad); } + } catch (...) { + nlo.m_eptr = std::current_exception(); + ::nlopt_force_stop(nlo.m_value.get()); } }, static_cast(this), c_tol.data()); @@ -598,6 +625,11 @@ struct nlopt_obj { unsigned m_verbosity; unsigned long m_objfun_counter = 0; log_type m_log; + // This exception pointer will be null, unless + // an error is raised during the computation of the objfun + // or constraints. If not null, it will be re-thrown + // in the evolve() method. + std::exception_ptr m_eptr; }; } @@ -957,10 +989,14 @@ class nlopt // Print to screen the result of the optimisation, if we are being verbose. std::cout << "\nOptimisation return status: " << detail::nlopt_res2string(m_last_opt_result) << '\n'; } - // Replace the log. m_log = std::move(no.m_log); + // Handle any exception that might've been thrown. + if (no.m_eptr) { + std::rethrow_exception(no.m_eptr); + } + // Store the new individual into the population. if (boost::any_cast(&m_replace)) { const auto &s_replace = boost::any_cast(m_replace); diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 2e5d6bf27..93cdcb171 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -41,11 +41,15 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include +#include using namespace pagmo; BOOST_AUTO_TEST_CASE(nlopt_construction) { + random_device::set_seed(42); + algorithm a{nlopt{}}; BOOST_CHECK_EQUAL(a.extract()->get_solver_name(), "cobyla"); // Check params of default-constructed instance. @@ -145,4 +149,18 @@ BOOST_AUTO_TEST_CASE(nlopt_selection_replacement) a.set_replacement(0); BOOST_CHECK_EQUAL(boost::any_cast(a.get_replacement()), 0u); a.set_random_sr_seed(123); -} \ No newline at end of file +} + +BOOST_AUTO_TEST_CASE(nlopt_evolve) +{ + algorithm a{nlopt{"lbfgs"}}; + population pop(rosenbrock{10}, 20); + a.evolve(pop); + BOOST_CHECK(a.extract()->get_last_opt_result() >= 0); + pop = population{zdt{}, 20}; + // MOO not supported by NLopt. + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + // Solver wants gradient, but problem does not provide it. 
+ pop = population{null_problem{}, 20}; + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); +} From 19957585284e355c5795941c42909f03730f60dd Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 00:59:37 +0200 Subject: [PATCH 39/57] Small fix for the log. --- include/pagmo/algorithms/nlopt.hpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 440fc82eb..27423a440 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -267,8 +267,10 @@ struct nlopt_obj { // for use in the pagmo API. std::copy(x, x + dim, dv.begin()); - // Compute fitness and, if needed, gradient. + // Compute fitness. const auto fitness = p.fitness(dv); + + // Compute gradient, if needed. if (grad) { const auto gradient = p.gradient(dv); @@ -322,10 +324,10 @@ struct nlopt_obj { std::setw(15), "viol. norm:", '\n'); } // Print to screen the log line. - print(std::setw(10), f_count, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), l, - feas ? "" : " i", '\n'); + print(std::setw(10), f_count + 1u, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), + l, feas ? "" : " i", '\n'); // Record the log. - log.emplace_back(f_count, fitness[0], nv, l, feas); + log.emplace_back(f_count + 1u, fitness[0], nv, l, feas); } // Update the counter. @@ -689,6 +691,8 @@ struct nlopt_obj { */ // TODO: // - investiagate the use of a fitness cache, after we have good perf testing in place. +// - move unchecked copy into class +// - update the log example, after recent change. class nlopt { using nlopt_obj = detail::nlopt_obj; From 644c3b37438fc5db163104980d0df40d2f0c5329 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 21:26:09 +0200 Subject: [PATCH 40/57] small doc add. --- doc/sphinx/docs/python/tutorials/coding_udp_simple.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst b/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst index 4479156fe..573a92e40 100644 --- a/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst +++ b/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst @@ -164,6 +164,10 @@ be revealed only when calling the malformed method: ... AttributeError: 'numpy.float64' object has no attribute '__iter__' +In this case, the issue is that the ``fitness()`` method returns a scalar instead of an array-like object (remember that pygmo is also +able to solve multi-objective and constrained problems, thus the fitness value will be, in general, a vector). pygmo will complain +about the wrong return type the first time the ``fitness()`` method is invoked. + Notes on computational speed ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 7e4e116fc74a2e86b0cff0c5e294a2efd38351ca Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 21:26:43 +0200 Subject: [PATCH 41/57] nlopt: bits of work and a test fix. --- include/pagmo/algorithms/nlopt.hpp | 19 +++++------ tests/nlopt.cpp | 55 ++++++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 14 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 440fc82eb..8ff74aa1b 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -334,6 +334,8 @@ struct nlopt_obj { // Return the objfun value. return fitness[0]; } catch (...) { + // Store exception, force the stop of the optimisation, + // and return a useless value. 
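+ // NOTE: HUGE_VAL is only a placeholder: once NLopt has unwound back into
+ // evolve(), the stored exception is rethrown on the C++ side.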
nlo.m_eptr = std::current_exception(); ::nlopt_force_stop(nlo.m_value.get()); return HUGE_VAL; @@ -411,10 +413,6 @@ struct nlopt_obj { // the sparsity data for the constraints start. using pair_t = sparsity_pattern::value_type; auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); - if (it_sp == sp.end()) { - // This means that the sparsity data for ineq constraints is empty. Just return. - return; - } // Need to do a bit of horrid overflow checking :/. using diff_type = std::iterator_traits::difference_type; @@ -441,6 +439,7 @@ struct nlopt_obj { } } } catch (...) { + // Store exception, stop optimisation. nlo.m_eptr = std::current_exception(); ::nlopt_force_stop(nlo.m_value.get()); } @@ -513,14 +512,11 @@ struct nlopt_obj { // Now we need to go into the sparsity pattern and find where // the sparsity data for the constraints start. using pair_t = sparsity_pattern::value_type; + // NOTE: it_sp could be end() or point to ineq constraints. This should + // be fine: it_sp is a valid iterator in sp, sp has the same + // size as gradient and we do the proper checks below before accessing + // the values pointed to by it_sp/g_it. auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); - if (it_sp == sp.end() || it_sp->first >= p.get_nec() + 1u) { - // This means that there sparsity data for eq constraints is empty: either we went - // at the end of sp, or the first index pair found refers to inequality constraints. - // Just - // return. - return; - } // Need to do a bit of horrid overflow checking :/. using diff_type = std::iterator_traits::difference_type; @@ -546,6 +542,7 @@ struct nlopt_obj { } } } catch (...) { + // Store exception, stop optimisation. nlo.m_eptr = std::current_exception(); ::nlopt_force_stop(nlo.m_value.get()); } diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 93cdcb171..eb736f057 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -31,6 +31,7 @@ see https://www.gnu.org/licenses/. */ #include #include +#include #include #include #include @@ -39,6 +40,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -46,6 +48,8 @@ see https://www.gnu.org/licenses/. */ using namespace pagmo; +using hs71 = hock_schittkowsky_71; + BOOST_AUTO_TEST_CASE(nlopt_construction) { random_device::set_seed(42); @@ -71,7 +75,7 @@ BOOST_AUTO_TEST_CASE(nlopt_construction) a.extract()->set_ftol_abs(1E-5); a.extract()->set_maxeval(123); // Copy. - auto b{a}; + auto b(a); BOOST_CHECK_EQUAL(boost::any_cast(b.extract()->get_selection()), 12u); BOOST_CHECK_EQUAL(boost::any_cast(b.extract()->get_replacement()), "random"); BOOST_CHECK(b.extract()->get_last_opt_result() == NLOPT_SUCCESS); @@ -95,8 +99,8 @@ BOOST_AUTO_TEST_CASE(nlopt_construction) BOOST_CHECK_EQUAL(c.extract()->get_maxeval(), 123); BOOST_CHECK_EQUAL(c.extract()->get_maxtime(), 0); // Move. - auto tmp{*a.extract()}; - auto d{std::move(tmp)}; + auto tmp(*a.extract()); + auto d(std::move(tmp)); BOOST_CHECK_EQUAL(boost::any_cast(d.get_selection()), 12u); BOOST_CHECK_EQUAL(boost::any_cast(d.get_replacement()), "random"); BOOST_CHECK(d.get_last_opt_result() == NLOPT_SUCCESS); @@ -151,6 +155,14 @@ BOOST_AUTO_TEST_CASE(nlopt_selection_replacement) a.set_random_sr_seed(123); } +// A version of hs71 which provides the sparsity pattern. 
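+// (By overriding gradient_sparsity(), hs71a makes pagmo report user-implemented
+// sparsity, routing the NLopt wrapper through its sparse-gradient code path;
+// plain hs71 goes through the dense path.)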
+struct hs71a : hs71 { + sparsity_pattern gradient_sparsity() const + { + return detail::dense_gradient(3, 4); + } +}; + BOOST_AUTO_TEST_CASE(nlopt_evolve) { algorithm a{nlopt{"lbfgs"}}; @@ -163,4 +175,41 @@ BOOST_AUTO_TEST_CASE(nlopt_evolve) // Solver wants gradient, but problem does not provide it. pop = population{null_problem{}, 20}; BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + pop = population{hs71{}, 20}; + // lbfgs does not support ineq constraints. + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + // mma supports ineq constraints but not eq constraints. + BOOST_CHECK_THROW(algorithm{nlopt{"mma"}}.evolve(pop), std::invalid_argument); + a = algorithm{nlopt{"slsqp"}}; + a.extract()->set_verbosity(5); + for (auto s : {"best", "worst", "random"}) { + for (auto r : {"best", "worst", "random"}) { + a.extract()->set_selection(s); + a.extract()->set_replacement(r); + pop = population(rosenbrock{10}, 20); + a.evolve(pop); + pop = population{hs71{}, 20}; + pop.get_problem().set_c_tol({1E-6, 1E-6}); + a.evolve(pop); + pop = population{hs71a{}, 20}; + pop.get_problem().set_c_tol({1E-6, 1E-6}); + a.evolve(pop); + } + } + for (auto s : {0, 2, 15}) { + for (auto r : {1, 3, 16}) { + a.extract()->set_selection(s); + a.extract()->set_replacement(r); + pop = population(rosenbrock{10}, 20); + a.evolve(pop); + pop = population{hs71{}, 20}; + pop.get_problem().set_c_tol({1E-6, 1E-6}); + a.evolve(pop); + pop = population{hs71a{}, 20}; + pop.get_problem().set_c_tol({1E-6, 1E-6}); + a.evolve(pop); + } + } + // Empty evolve. + a.evolve(population{}); } From 48d0e82780ac4fc63bfd6e633c123a145c83abf8 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 21:27:25 +0200 Subject: [PATCH 42/57] Small change. --- pygmo/py_islands.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pygmo/py_islands.py b/pygmo/py_islands.py index af3581274..a6f767588 100644 --- a/pygmo/py_islands.py +++ b/pygmo/py_islands.py @@ -45,10 +45,9 @@ class _temp_disable_sigint(object): def __enter__(self): import signal - # Store the previous sigint handler. - self._prev_signal = signal.getsignal(signal.SIGINT) - # Assign the new sig handler (i.e., ignore SIGINT). - signal.signal(signal.SIGINT, signal.SIG_IGN) + # Store the previous sigint handler and assign the new sig handler + # (i.e., ignore SIGINT). + self._prev_signal = signal.signal(signal.SIGINT, signal.SIG_IGN) def __exit__(self, type, value, traceback): import signal From 3d6ebd3b42a6595802199dea770dab57d27dbb41 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 21:55:08 +0200 Subject: [PATCH 43/57] Various small improvements and fixes. 
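
Among the doc improvements, the using_problem tutorial output now records that
the Rosenbrock UDP ships a gradient. Roughly, from Python (a doctest-style
sketch, assuming the usual pygmo problem API; has_gradient() is not part of
this patch):

    >>> import pygmo as pg
    >>> prob = pg.problem(pg.rosenbrock(5))
    >>> prob.has_gradient()
    True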
--- .../python/tutorials/coding_udp_simple.rst | 8 ++- .../docs/python/tutorials/using_problem.rst | 4 +- include/pagmo/algorithms/nlopt.hpp | 70 ++++++++----------- tests/nlopt.cpp | 4 +- 4 files changed, 38 insertions(+), 48 deletions(-) diff --git a/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst b/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst index 573a92e40..7e48def86 100644 --- a/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst +++ b/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst @@ -165,7 +165,7 @@ be revealed only when calling the malformed method: AttributeError: 'numpy.float64' object has no attribute '__iter__' In this case, the issue is that the ``fitness()`` method returns a scalar instead of an array-like object (remember that pygmo is also -able to solve multi-objective and constrained problems, thus the fitness value will be, in general, a vector). pygmo will complain +able to solve multi-objective and constrained problems, thus the fitness value must be, in general, a vector). pygmo will complain about the wrong return type the first time the ``fitness()`` method is invoked. Notes on computational speed @@ -231,6 +231,8 @@ our fitness method into C code. >>> start_time = time.time(); [prob_jit.fitness(dummy_x) for i in range(1000)]; print(time.time() - start_time) #doctest: +SKIP 0.03771... +With a bit more elbow grease, we can further improve performance: + .. doctest:: >>> from numba import jit, float64 @@ -254,9 +256,9 @@ our fitness method into C code. 0.01687... -much better right? +Much better, right? -.. note:: For more information on using Numba to speed up your python code see `Numba documentation pages `_. +.. note:: For more information on using Numba to speed up your python code see the `Numba documentation pages `_. In particular, note that only a limited part of NumPy and the python language in general is supported by this use. diff --git a/doc/sphinx/docs/python/tutorials/using_problem.rst b/doc/sphinx/docs/python/tutorials/using_problem.rst index 66d90ea17..4f50211a5 100644 --- a/doc/sphinx/docs/python/tutorials/using_problem.rst +++ b/doc/sphinx/docs/python/tutorials/using_problem.rst @@ -33,12 +33,14 @@ Let us start: Lower bounds: [-5, -5, -5, -5, -5] Upper bounds: [10, 10, 10, 10, 10] - Has gradient: false + Has gradient: true User implemented gradient sparsity: false + Expected gradients: 5 Has hessians: false User implemented hessians sparsity: false Function evaluations: 0 + Gradient evaluations: 0 Thread safety: basic diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index 0c00cf02b..d3d3f4243 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -73,27 +73,6 @@ namespace pagmo namespace detail { -#if defined(_MSC_VER) - -// NOTE: this is a wrapper around std::copy() for use in MSVC in conjunction with raw pointers. -// In debug mode, MSVC will complain about unchecked iterators unless instructed otherwise. -template -void inline unchecked_copy(Int size, const T *begin, T *dest) -{ - std::copy(stdext::make_checked_array_iterator(begin, size), stdext::make_checked_array_iterator(begin, size, size), - stdext::make_checked_array_iterator(dest, size)); -} - -#else - -template -void inline unchecked_copy(Int size, const T *begin, T *dest) -{ - std::copy(begin, begin + size, dest); -} - -#endif - // Usual trick with global read-only data useful to the NLopt wrapper. 
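+// NOTE: among other things, this static data presumably includes the two-way
+// mapping between the human-readable solver names (e.g., "slsqp") and the
+// corresponding ::nlopt_algorithm enum values, which is what the data::names
+// lookups in the callbacks below rely on.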
template struct nlopt_data { @@ -178,6 +157,23 @@ struct nlopt_obj { using log_line_type = std::tuple; // The log. using log_type = std::vector; +#if defined(_MSC_VER) + // NOTE: this is a wrapper around std::copy() for use in MSVC in conjunction with raw pointers. + // In debug mode, MSVC will complain about unchecked iterators unless instructed otherwise. + template + static void unchecked_copy(Int size, const T *begin, T *dest) + { + std::copy(stdext::make_checked_array_iterator(begin, size), + stdext::make_checked_array_iterator(begin, size, size), + stdext::make_checked_array_iterator(dest, size)); + } +#else + template + static void unchecked_copy(Int size, const T *begin, T *dest) + { + std::copy(begin, begin + size, dest); + } +#endif // Shortcut to the static data. using data = nlopt_data<>; explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs, @@ -298,7 +294,7 @@ struct nlopt_obj { } } else { // Dense gradient case. - detail::unchecked_copy(p.get_nx(), gradient.data(), grad); + unchecked_copy(p.get_nx(), gradient.data(), grad); } } @@ -392,7 +388,7 @@ struct nlopt_obj { // Compute fitness and write IC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - detail::unchecked_copy(p.get_nic(), fitness.data() + 1 + p.get_nec(), result); + unchecked_copy(p.get_nic(), fitness.data() + 1 + p.get_nec(), result); if (grad) { // Handle gradient, if requested. @@ -436,8 +432,8 @@ struct nlopt_obj { } } else { // Dense gradient. - detail::unchecked_copy(p.get_nic() * p.get_nx(), - gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); + unchecked_copy(p.get_nic() * p.get_nx(), + gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); } } } catch (...) { @@ -492,7 +488,7 @@ struct nlopt_obj { // Compute fitness and write EC to the output. // NOTE: fitness is nobj + nec + nic. const auto fitness = p.fitness(dv); - detail::unchecked_copy(p.get_nec(), fitness.data() + 1, result); + unchecked_copy(p.get_nec(), fitness.data() + 1, result); if (grad) { // Handle gradient, if requested. @@ -540,7 +536,7 @@ struct nlopt_obj { } } else { // Dense gradient. - detail::unchecked_copy(p.get_nx() * p.get_nec(), gradient.data() + p.get_nx(), grad); + unchecked_copy(p.get_nx() * p.get_nec(), gradient.data() + p.get_nx(), grad); } } } catch (...) { @@ -688,8 +684,6 @@ struct nlopt_obj { */ // TODO: // - investiagate the use of a fitness cache, after we have good perf testing in place. -// - move unchecked copy into class -// - update the log example, after recent change. class nlopt { using nlopt_obj = detail::nlopt_obj; @@ -1054,21 +1048,13 @@ class nlopt * of the optimisation will be both printed to screen and recorded internally. See nlopt::log_line_type and * nlopt::log_type for information on the logging format. The internal log can be fetched via get_log(). * - * Example (verbosity 1): + * Example (verbosity 5): * @code{.unparsed} * fevals: fitness: violated: viol. norm: - * 0 68.6966 1 0.252343 i - * 1 29.3926 1 15.1127 i - * 2 54.2992 1 2.05694 i - * 3 54.2992 1 2.05694 i - * 4 15.4544 2 9.56984 i - * 5 16.6126 2 1.80223 i - * 6 16.8454 2 0.414897 i - * 7 16.9794 2 0.0818469 i - * 8 17.0132 2 0.00243968 i - * 9 17.014 2 2.58628e-05 i - * 10 17.014 0 0 - * 11 17.014 0 0 + * 1 47.9474 1 2.07944 i + * 6 17.1986 2 0.150557 i + * 11 17.014 0 0 + * 16 17.014 0 0 * @endcode * The ``i`` at the end of some rows indicates that the decision vector is infeasible. Feasibility * is checked against the problem's tolerance. 
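+ * Note that a verbosity of ``n`` implies that a log line is printed every ``n``
+ * objective function evaluations, which is why the ``fevals`` column in the
+ * example above advances in steps of 5.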
diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index eb736f057..7f9aa6238 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -196,8 +196,8 @@ BOOST_AUTO_TEST_CASE(nlopt_evolve) a.evolve(pop); } } - for (auto s : {0, 2, 15}) { - for (auto r : {1, 3, 16}) { + for (auto s : {0u, 2u, 15u}) { + for (auto r : {1u, 3u, 16u}) { a.extract()->set_selection(s); a.extract()->set_replacement(r); pop = population(rosenbrock{10}, 20); From e72f86b789e0a5963a5d0a19ecef79eb019922e3 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Fri, 7 Apr 2017 22:03:19 +0200 Subject: [PATCH 44/57] Small udp exposition fixes. --- pygmo/expose_problems.cpp | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/pygmo/expose_problems.cpp b/pygmo/expose_problems.cpp index 7860fe4db..b2b2dd104 100644 --- a/pygmo/expose_problems.cpp +++ b/pygmo/expose_problems.cpp @@ -152,7 +152,7 @@ void expose_problems() (bp::arg("nobj") = 1, bp::arg("nec") = 0, bp::arg("nic") = 0))); // Rosenbrock. auto rb = expose_problem("rosenbrock", rosenbrock_docstring().c_str()); - rb.def(bp::init((bp::arg("dim")))); + rb.def(bp::init((bp::arg("dim")))); rb.def("best_known", &best_known_wrapper, problem_get_best_docstring("Rosenbrock").c_str()); // Hock-Schittkowsky 71 auto hs71 = expose_problem("hock_schittkowsky_71", @@ -211,7 +211,7 @@ void expose_problems() // CEC 2006 auto cec2006_ = expose_problem("cec2006", cec2006_docstring().c_str()); - cec2006_.def(bp::init((bp::arg("prob_id") = 1))); + cec2006_.def(bp::init((bp::arg("prob_id")))); cec2006_.def("best_known", &best_known_wrapper, problem_get_best_docstring("CEC 2006").c_str()); // CEC 2009 @@ -221,7 +221,7 @@ void expose_problems() // Luksan Vlcek 1 auto lv_ = expose_problem("luksan_vlcek1", luksan_vlcek1_docstring().c_str()); - lv_.def(bp::init(bp::arg("dim") = 3u)); + lv_.def(bp::init(bp::arg("dim"))); // Translate meta-problem auto translate_ = expose_problem("translate", translate_docstring().c_str()); @@ -234,9 +234,8 @@ void expose_problems() translate_.add_property("translation", lcast([](const translate &t) { return v_to_a(t.get_translation()); }), translate_translation_docstring().c_str()); translate_.add_property( - "inner_problem", - bp::make_function(lcast([](translate &udp) -> problem & { return udp.get_inner_problem(); }), - bp::return_internal_reference<>()), + "inner_problem", bp::make_function(lcast([](translate &udp) -> problem & { return udp.get_inner_problem(); }), + bp::return_internal_reference<>()), generic_udp_inner_problem_docstring().c_str()); // Unconstrain meta-problem. auto unconstrain_ = expose_problem("unconstrain", unconstrain_docstring().c_str()); @@ -248,9 +247,8 @@ void expose_problems() }), bp::default_call_policies())); unconstrain_.add_property( - "inner_problem", - bp::make_function(lcast([](unconstrain &udp) -> problem & { return udp.get_inner_problem(); }), - bp::return_internal_reference<>()), + "inner_problem", bp::make_function(lcast([](unconstrain &udp) -> problem & { return udp.get_inner_problem(); }), + bp::return_internal_reference<>()), generic_udp_inner_problem_docstring().c_str()); // Decompose meta-problem. 
     auto decompose_ = expose_problem("decompose", decompose_docstring().c_str());
@@ -269,9 +267,8 @@ void expose_problems()
     decompose_.add_property("z", lcast([](const pagmo::decompose &p) { return v_to_a(p.get_z()); }),
                             decompose_z_docstring().c_str());
     decompose_.add_property(
-        "inner_problem",
-        bp::make_function(lcast([](decompose &udp) -> problem & { return udp.get_inner_problem(); }),
-                          bp::return_internal_reference<>()),
+        "inner_problem", bp::make_function(lcast([](decompose &udp) -> problem & { return udp.get_inner_problem(); }),
+                                           bp::return_internal_reference<>()),
         generic_udp_inner_problem_docstring().c_str());
 }
 }

From 86989ee558040d727c7421bdb03df811eb3adee0 Mon Sep 17 00:00:00 2001
From: Francesco Biscani
Date: Fri, 7 Apr 2017 22:18:52 +0200
Subject: [PATCH 45/57] Small piece of internal doc.

---
 pygmo/py_islands.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pygmo/py_islands.py b/pygmo/py_islands.py
index a6f767588..0fea8307a 100644
--- a/pygmo/py_islands.py
+++ b/pygmo/py_islands.py
@@ -122,6 +122,9 @@ def run_evolve(self, algo, pop):
         # we need to make sure we are not trying to touch
         # the pool while we are sending tasks to it.
         res = mp_island._pool.apply_async(_evolve_func, (algo, pop))
+        # NOTE: there might be a bug in need of a workaround lurking in here:
+        # http://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
+        # Just keep it in mind.
         return res.get()

     def get_name(self):

From 85e1511a6ec0c6cf9b69bdee2aceafb2d90e4c32 Mon Sep 17 00:00:00 2001
From: Francesco Biscani
Date: Sat, 8 Apr 2017 01:27:23 +0200
Subject: [PATCH 46/57] Fix missing link to lv1 docs.

---
 doc/sphinx/docs/cpp/cpp_docs.rst               | 1 +
 doc/sphinx/docs/cpp/problems/luksan_vlcek1.rst | 5 +++++
 2 files changed, 6 insertions(+)
 create mode 100644 doc/sphinx/docs/cpp/problems/luksan_vlcek1.rst

diff --git a/doc/sphinx/docs/cpp/cpp_docs.rst b/doc/sphinx/docs/cpp/cpp_docs.rst
index a67d75f04..2aa568ccd 100644
--- a/doc/sphinx/docs/cpp/cpp_docs.rst
+++ b/doc/sphinx/docs/cpp/cpp_docs.rst
@@ -55,6 +55,7 @@ Implemented problems
    problems/dtlz
    problems/hock_schittkowsky_71
    problems/inventory
+   problems/luksan_vlcek1.rst
    problems/translate
    problems/decompose
    problems/cec2006
diff --git a/doc/sphinx/docs/cpp/problems/luksan_vlcek1.rst b/doc/sphinx/docs/cpp/problems/luksan_vlcek1.rst
new file mode 100644
index 000000000..a58e8b346
--- /dev/null
+++ b/doc/sphinx/docs/cpp/problems/luksan_vlcek1.rst
@@ -0,0 +1,5 @@
+Luksan-Vlcek 1
+==============
+
+.. doxygenstruct:: pagmo::luksan_vlcek1
+   :members:
\ No newline at end of file

From b1e6f87070a59092d48caeee19aee11941248a2f Mon Sep 17 00:00:00 2001
From: Francesco Biscani
Date: Sat, 8 Apr 2017 01:30:39 +0200
Subject: [PATCH 47/57] nlopt: fix handling of sparse gradients.

---
 include/pagmo/algorithms/nlopt.hpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp
index d3d3f4243..a7ef5f145 100644
--- a/include/pagmo/algorithms/nlopt.hpp
+++ b/include/pagmo/algorithms/nlopt.hpp
@@ -428,7 +428,7 @@ struct nlopt_obj {
                         // nonzero bits in grad. Run until sp.end() as the IC are at the
                         // end of the sparsity/gradient vector.
                         for (; it_sp != sp.end(); ++it_sp, ++g_it) {
-                            grad[it_sp->second] = *g_it;
+                            grad[(it_sp->first - 1u - p.get_nec()) * p.get_nx() + it_sp->second] = *g_it;
                         }
                     } else {
                         // Dense gradient.
@@ -532,7 +532,7 @@ struct nlopt_obj {
                         // nonzero bits in grad.
We terminate either at the end of sp, or when // we encounter the first inequality constraint. for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { - grad[it_sp->second] = *g_it; + grad[(it_sp->first - 1u) * p.get_nx() + it_sp->second] = *g_it; } } else { // Dense gradient. From a91dd5cd87ab2fa935a0dcb6bdc4d3229b4840a7 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 01:33:59 +0200 Subject: [PATCH 48/57] vl1: fixes. --- include/pagmo/problems/luksan_vlcek1.hpp | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/include/pagmo/problems/luksan_vlcek1.hpp b/include/pagmo/problems/luksan_vlcek1.hpp index 084e35123..b333f5a1b 100644 --- a/include/pagmo/problems/luksan_vlcek1.hpp +++ b/include/pagmo/problems/luksan_vlcek1.hpp @@ -55,12 +55,10 @@ namespace pagmo * * \f[ * \begin{array}{rl} - * \mbox{find:} & -5 \le \mathbf x_i \le 5, \forall i=1..n\\ + * \mbox{find:} & -5 \le \mathbf{x}_i \le 5, \forall i=1..n\\ * \mbox{to minimize: } & \sum_{i=1}^{n-1}\left[100\left(x_i^2-x_{i+1}\right)^2 + \left(x_i-1\right)^2\right]\\ - * \mbox{subject to:} & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2}})\sin(x_{k+1}+x_{k+2}}) - * +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 \le UB, \forall k=1..n-2 \\ - * & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2}})\sin(x_{k+1}+x_{k+2}}) - * +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 \ge LB, \forall k=1..n-2 \\ + * \mbox{subject to:} & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2})\sin(x_{k+1}+x_{k+2}) + * +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 = 0, \forall k=1..n-2 \\ * \end{array} * \f] * @@ -119,20 +117,15 @@ struct luksan_vlcek1 { */ std::pair get_bounds() const { - vector_double lb(m_dim, -5.); - vector_double ub(m_dim, 5.); - return {lb, ub}; + return std::make_pair(vector_double(m_dim, -5.), vector_double(m_dim, 5.)); } - /// Inequality constraint dimension + /// Equality constraint dimension /** - * - * It returns the number of inequality constraints. - * - * @return the number of inequality constraints. + * @return the number of equality constraints. */ vector_double::size_type get_nec() const { - return (m_dim - 2); + return m_dim - 2u; } /// Gradients /** @@ -184,7 +177,7 @@ struct luksan_vlcek1 { for (decltype(m_dim) i = 0u; i < m_dim; ++i) { retval.emplace_back(0, i); } - // The part relative to the inequality constraints is sparse as each + // The part relative to the equality constraints is sparse as each // constraint c_k depends on x_k, x_{k+1} and x_{k+2} for (decltype(m_dim) i = 0u; i < m_dim - 2u; ++i) { retval.emplace_back(i + 1, i); From 0e5537ea93348825ce93b2fe3959eee30f6458c1 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 02:40:54 +0200 Subject: [PATCH 49/57] Another missing doc bit. --- doc/sphinx/docs/python/problems/py_problems.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/sphinx/docs/python/problems/py_problems.rst b/doc/sphinx/docs/python/problems/py_problems.rst index df702c80e..889780867 100644 --- a/doc/sphinx/docs/python/problems/py_problems.rst +++ b/doc/sphinx/docs/python/problems/py_problems.rst @@ -71,6 +71,11 @@ Problems exposed from C++ ------------------------------------------------------------- +.. autoclass:: pygmo.core.luksan_vlcek1 + :members: + +------------------------------------------------------------- + .. autoclass:: pygmo.core.translate :members: From 11691bef88c391b3bdc32bc49d6f1afd5487f89d Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 15:44:50 +0200 Subject: [PATCH 50/57] More testing bits. 
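
Besides the LCOV exclusion markers, an out-of-range individual index for the
selection/replacement policies now raises std::invalid_argument rather than
std::out_of_range. Once the Python exposition lands later in this series, the
failure should surface roughly as follows (a sketch, assuming pygmo's usual
exception translation):

    >>> import pygmo as pg
    >>> n = pg.nlopt("slsqp")
    >>> n.replacement = 100
    >>> pop = pg.population(pg.rosenbrock(10), 20)
    >>> pg.algorithm(n).evolve(pop)  # doctest: +SKIP
    ValueError: cannot replace the individual at index 100 after evolution: the population has a size of only 20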
--- include/pagmo/algorithms/nlopt.hpp | 24 ++++++++++--- tests/nlopt.cpp | 54 ++++++++++++++++++++++++++++-- 2 files changed, 72 insertions(+), 6 deletions(-) diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp index a7ef5f145..dc81ae351 100644 --- a/include/pagmo/algorithms/nlopt.hpp +++ b/include/pagmo/algorithms/nlopt.hpp @@ -341,9 +341,11 @@ struct nlopt_obj { }, static_cast(this)); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } // Vector-valued constraints. @@ -557,52 +559,66 @@ struct nlopt_obj { // Handle the various stopping criteria. res = ::nlopt_set_stopval(m_value.get(), stopval); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " + std::to_string(stopval) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " + std::to_string(ftol_rel) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " + std::to_string(ftol_abs) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " + std::to_string(xtol_rel) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " + std::to_string(xtol_abs) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_maxeval(m_value.get(), maxeval); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " + std::to_string(maxeval) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } res = ::nlopt_set_maxtime(m_value.get(), maxtime); if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " + std::to_string(maxtime) + " for the NLopt algorithm '" + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res)); + // LCOV_EXCL_STOP } } @@ -683,7 +699,7 @@ struct nlopt_obj { * in undefined behaviour. */ // TODO: -// - investiagate the use of a fitness cache, after we have good perf testing in place. +// - investigate the use of a fitness cache, after we have good perf testing in place. 
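+// (a cache could pay off because, for constrained problems, the objective and
+// the constraint callbacks above each invoke problem::fitness() on the same
+// decision vector, so every trial point currently costs multiple fitness
+// evaluations.)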
class nlopt { using nlopt_obj = detail::nlopt_obj; @@ -1019,9 +1035,9 @@ class nlopt } else { const auto idx = boost::any_cast(m_replace); if (idx >= pop.size()) { - pagmo_throw(std::out_of_range, "cannot replace the individual at index " + std::to_string(idx) - + " after evolution: the population has a size of only " - + std::to_string(pop.size())); + pagmo_throw(std::invalid_argument, "cannot replace the individual at index " + std::to_string(idx) + + " after evolution: the population has a size of only " + + std::to_string(pop.size())); } if (nc) { pop.set_x(idx, initial_guess); diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp index 7f9aa6238..2b3aba2d2 100644 --- a/tests/nlopt.cpp +++ b/tests/nlopt.cpp @@ -32,6 +32,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -42,6 +43,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -196,11 +198,16 @@ BOOST_AUTO_TEST_CASE(nlopt_evolve) a.evolve(pop); } } - for (auto s : {0u, 2u, 15u}) { - for (auto r : {1u, 3u, 16u}) { + BOOST_CHECK(!a.extract()->get_log().empty()); + for (auto s : {0u, 2u, 15u, 25u}) { + for (auto r : {1u, 3u, 16u, 25u}) { a.extract()->set_selection(s); a.extract()->set_replacement(r); pop = population(rosenbrock{10}, 20); + if (s >= 20u || r >= 20u) { + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + continue; + } a.evolve(pop); pop = population{hs71{}, 20}; pop.get_problem().set_c_tol({1E-6, 1E-6}); @@ -212,4 +219,47 @@ BOOST_AUTO_TEST_CASE(nlopt_evolve) } // Empty evolve. a.evolve(population{}); + // Invalid initial guesses. + a = algorithm{nlopt{"slsqp"}}; + pop = population{hs71{}, 1}; + pop.set_x(0, {-123., -123., -123., -123.}); + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + pop.set_x(0, {123., 123., 123., 123.}); + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + if (std::numeric_limits::has_quiet_NaN) { + pop.set_x(0, {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}); + BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument); + } } + +BOOST_AUTO_TEST_CASE(nlopt_set_sc) +{ + auto a = nlopt{"slsqp"}; + a.set_stopval(-1.23); + BOOST_CHECK_EQUAL(a.get_stopval(), -1.23); + if (std::numeric_limits::has_quiet_NaN) { + BOOST_CHECK_THROW(a.set_stopval(std::numeric_limits::quiet_NaN()), std::invalid_argument); + } + a.set_ftol_rel(-1.23); + BOOST_CHECK_EQUAL(a.get_ftol_rel(), -1.23); + if (std::numeric_limits::has_quiet_NaN) { + BOOST_CHECK_THROW(a.set_ftol_rel(std::numeric_limits::quiet_NaN()), std::invalid_argument); + } + a.set_ftol_abs(-1.23); + BOOST_CHECK_EQUAL(a.get_ftol_abs(), -1.23); + if (std::numeric_limits::has_quiet_NaN) { + BOOST_CHECK_THROW(a.set_ftol_abs(std::numeric_limits::quiet_NaN()), std::invalid_argument); + } + a.set_xtol_rel(-1.23); + BOOST_CHECK_EQUAL(a.get_xtol_rel(), -1.23); + if (std::numeric_limits::has_quiet_NaN) { + BOOST_CHECK_THROW(a.set_xtol_rel(std::numeric_limits::quiet_NaN()), std::invalid_argument); + } + a.set_xtol_abs(-1.23); + BOOST_CHECK_EQUAL(a.get_xtol_abs(), -1.23); + if (std::numeric_limits::has_quiet_NaN) { + BOOST_CHECK_THROW(a.set_xtol_abs(std::numeric_limits::quiet_NaN()), std::invalid_argument); + } + a.set_maxtime(123); +} \ No newline at end of file From 4a3f2e89472d351d9c534a103f4166a0ff3e4ed1 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 23:09:13 +0200 Subject: [PATCH 51/57] nlopt: python exposition and docs. 
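
A minimal sketch of the Python API introduced here (attribute and method names
as exposed in this patch; the values are illustrative):

    >>> from pygmo import nlopt, algorithm
    >>> n = nlopt("cobyla")     # derivative-free, the default solver
    >>> n.maxeval = 200         # stop after 200 objective evaluations
    >>> n.selection = "worst"   # optimise the worst individual...
    >>> n.replacement = 0       # ...and store the result at index 0
    >>> algo = algorithm(n)
    >>> algo.extract(nlopt).get_solver_name()
    'cobyla'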
--- .../docs/python/algorithms/py_algorithms.rst | 7 +- pygmo/docstrings.cpp | 420 +++++++++++++++++- pygmo/docstrings.hpp | 14 + pygmo/expose_algorithms.cpp | 76 ++++ 4 files changed, 511 insertions(+), 6 deletions(-) diff --git a/doc/sphinx/docs/python/algorithms/py_algorithms.rst b/doc/sphinx/docs/python/algorithms/py_algorithms.rst index a74b8d296..559b995d6 100644 --- a/doc/sphinx/docs/python/algorithms/py_algorithms.rst +++ b/doc/sphinx/docs/python/algorithms/py_algorithms.rst @@ -72,4 +72,9 @@ Algorithms exposed from C++ ------------------------------------------------------------- .. autoclass:: pygmo.core.cstrs_self_adaptive - :members: \ No newline at end of file + :members: + +------------------------------------------------------------- + +.. autoclass:: pygmo.core.nlopt + :members: diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index 3dce2bd2c..34835f27e 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -1364,7 +1364,7 @@ std::string generic_uda_inner_algorithm_docstring() return R"(Inner algorithm of the meta-algorithm -This property gives direct access to the :class:`~pygmo.core.algorithm` stored within the meta-algorithm. +This read-only property gives direct access to the :class:`~pygmo.core.algorithm` stored within the meta-algorithm. Returns: :class:`~pygmo.core.algorithm`: a reference to the inner algorithm @@ -1377,7 +1377,7 @@ std::string generic_udp_inner_problem_docstring() return R"(Inner problem of the meta-problem -This property gives direct access to the :class:`~pygmo.core.problem` stored within the meta-problem. +This read-only property gives direct access to the :class:`~pygmo.core.problem` stored within the meta-problem. Returns: :class:`~pygmo.core.problem`: a reference to the inner problem @@ -1563,7 +1563,7 @@ objective function. Using the above definitions the overall pseudo code can be s **NOTE** Self-adaptive constraints handling implements an internal cache to avoid the re-evaluation of the fitness for decision vectors already evaluated. This makes the final counter of function evaluations somehow unpredictable. -The number of function evaluation will be bounded to \p iters times the fevals made by one call to the inner UDA. The +The number of function evaluation will be bounded to *iters* times the fevals made by one call to the inner UDA. The internal cache is reset at each iteration, but its size will grow unlimited during each call to the inner UDA evolve method. @@ -2887,7 +2887,7 @@ The numerical approximation of each derivative is made by central difference, ac .. math:: \frac{df}{dx} \approx \frac{f(x+dx) - f(x-dx)}{2dx} + O(dx^2) -The overall cost, in terms of calls to \p f will thus be :math:`n` where :math:`n` is the size of \p x. +The overall cost, in terms of calls to *callable* will thus be :math:`n` where :math:`n` is the size of *x*. Args: callable (a callable object): The function we want to estimate sparsity (typically a fitness). @@ -2930,7 +2930,7 @@ The numerical approximation of each derivative is made by central difference, ac .. math:: m_i = \frac{f(x + i dx) - f(x-i dx)}{2i dx} -The overall cost, in terms of calls to \p f will thus be 6:math:`n` where :math:`n` is the size of \p x. +The overall cost, in terms of calls to *callable* will thus be 6:math:`n` where :math:`n` is the size of *x*. Args: callable (a callable object): The function we want to estimate sparsity (typically a fitness). @@ -3597,4 +3597,414 @@ inserted via :func:`~pygmo.core.archipelago.push_back()`). 
)"; } +std::string nlopt_docstring() +{ + return R"(__init__(solver = "cobyla") + +NLopt algorithms. + +This user-defined algorithm wraps a selection of solvers from the +`NLopt `_ library, focusing on +local optimisation (both gradient-based and derivative-free). The complete list of supported +NLopt algorithms is: + +* COBYLA, +* BOBYQA, +* NEWUOA + bound constraints, +* PRAXIS, +* Nelder-Mead simplex, +* sbplx, +* MMA (Method of Moving Asymptotes), +* CCSA, +* SLSQP, +* low-storage BFGS, +* preconditioned truncated Newton, +* shifted limited-memory variable-metric. + +The desired NLopt solver is selected upon construction of an :class:`~pygmo.core.nlopt` algorithm. Various properties +of the solver (e.g., the stopping criteria) can be configured during construction or afterwards. Note that multiple +stopping criteria can be active at the same time: the optimisation will stop as soon as at least one stopping criterion +is satisfied. By default, only the ``xtol_rel`` stopping criterion is active (see :attr:`~pygmo.core.nlopt.xtol_rel`). + +All NLopt solvers support only single-objective optimisation, and, as usual in pagmo, minimisation +is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient. +Some solvers support equality and/or inequality constaints. + +In order to support pagmo's population-based optimisation model, the ``evolve()`` method will select +a single individual from the input :class:`~pygmo.core.population` to be optimised by the NLopt solver. +The optimised individual will then be inserted back into the population at the end of the optimisation. +The selection and replacement strategies can be configured via the :attr:`~pygmo.core.nlopt.selection` +and :attr:`~pygmo.core.nlopt.replacement` attributes. + +.. note:: + + This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_NLOPT`` option + enabled (see the :ref:`installation instructions `). + +.. seealso:: + + The `NLopt website `_ contains a detailed description + of each supported solver. + +This constructor will initialise an :class:`~pygmo.core.nlopt` object which will use the NLopt algorithm specified by +the input string *solver*, the ``"best"`` individual selection strategy and the ``"best"`` individual +replacement strategy. *solver* is translated to an NLopt algorithm type according to the following +translation table: + +================================ ==================================== +*solver* string NLopt algorithm +================================ ==================================== +``"cobyla"`` ``NLOPT_LN_COBYLA`` +``"bobyqa"`` ``NLOPT_LN_BOBYQA`` +``"newuoa"`` ``NLOPT_LN_NEWUOA`` +``"newuoa_bound"`` ``NLOPT_LN_NEWUOA_BOUND`` +``"praxis"`` ``NLOPT_LN_PRAXIS`` +``"neldermead"`` ``NLOPT_LN_NELDERMEAD`` +``"sbplx"`` ``NLOPT_LN_SBPLX`` +``"mma"`` ``NLOPT_LD_MMA`` +``"ccsaq"`` ``NLOPT_LD_CCSAQ`` +``"slsqp"`` ``NLOPT_LD_SLSQP`` +``"lbfgs"`` ``NLOPT_LD_LBFGS`` +``"tnewton_precond_restart"`` ``NLOPT_LD_TNEWTON_PRECOND_RESTART`` +``"tnewton_precond"`` ``NLOPT_LD_TNEWTON_PRECOND`` +``"tnewton_restart"`` ``NLOPT_LD_TNEWTON_RESTART`` +``"tnewton"`` ``NLOPT_LD_TNEWTON`` +``"var2"`` ``NLOPT_LD_VAR2`` +``"var1"`` ``NLOPT_LD_VAR1`` +================================ ==================================== + +The parameters of the selected solver can be configured via the attributes of this class. + +See also the docs of the C++ class :cpp:class:`pagmo::nlopt`. + +.. 
seealso:: + + The `NLopt website `_ contains a detailed + description of each supported solver. + +Args: + solver (``str``): the name of the NLopt algorithm that will be used by this :class:`~pygmo.core.nlopt` object + +Raises: + RuntimeError: if the NLopt version is not at least 2 + ValueError: if *solver* is not one of the allowed algorithm names + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +Examples: + >>> from pygmo import * + >>> nl = nlopt('slsqp') + >>> nl.xtol_rel = 1E-6 # Change the default value of the xtol_rel stopping criterion + >>> nl.xtol_rel # doctest: +SKIP + 1E-6 + >>> algo = algorithm(nl) + >>> algo.set_verbosity(1) + >>> prob = problem(luksan_vlcek1(20)) + >>> prob.c_tol = [1E-6] * 18 # Set constraints tolerance to 1E-6 + >>> pop = population(prob, 20) + >>> pop = algo.evolve(pop) # doctest: +SKIP + fevals: fitness: violated: viol. norm: + 1 95959.4 18 538.227 i + 2 89282.7 18 5177.42 i + 3 75580 18 464.206 i + 4 75580 18 464.206 i + 5 77737.6 18 1095.94 i + 6 41162 18 350.446 i + 7 41162 18 350.446 i + 8 67881 18 362.454 i + 9 30502.2 18 249.762 i + 10 30502.2 18 249.762 i + 11 7266.73 18 95.5946 i + 12 4510.3 18 42.2385 i + 13 2400.66 18 35.2507 i + 14 34051.9 18 749.355 i + 15 1657.41 18 32.1575 i + 16 1657.41 18 32.1575 i + 17 1564.44 18 12.5042 i + 18 275.987 14 6.22676 i + 19 232.765 12 12.442 i + 20 161.892 15 4.00744 i + 21 161.892 15 4.00744 i + 22 17.6821 11 1.78909 i + 23 7.71103 5 0.130386 i + 24 6.24758 4 0.00736759 i + 25 6.23325 1 5.12547e-05 i + 26 6.2325 0 0 + 27 6.23246 0 0 + 28 6.23246 0 0 + 29 6.23246 0 0 + 30 6.23246 0 0 + + Optimisation return status: NLOPT_XTOL_REACHED (value = 4, Optimization stopped because xtol_rel or xtol_abs was reached) + + +)"; +} + +std::string nlopt_stopval_docstring() +{ + return R"(``stopval`` stopping criterion. + +The ``stopval`` stopping criterion instructs the solver to stop when an objective value less than +or equal to ``stopval`` is found. Defaults to the C constant ``-HUGE_VAL`` (that is, this stopping criterion +is disabled by default). + +Returns: + ``float``: the value of the ``stopval`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_ftol_rel_docstring() +{ + return R"(``ftol_rel`` stopping criterion. + +The ``ftol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the +optimum) changes the objective function value by less than ``ftol_rel`` multiplied by the absolute value of the +function value. Defaults to 0 (that is, this stopping criterion is disabled by default). + +Returns: + ``float``: the value of the ``ftol_rel`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_ftol_abs_docstring() +{ + return R"(``ftol_abs`` stopping criterion. + +The ``ftol_abs`` stopping criterion instructs the solver to stop when an optimization step +(or an estimate of the optimum) changes the function value by less than ``ftol_abs``. 
+Defaults to 0 (that is, this stopping criterion is disabled by default).
+
+Returns:
+    ``float``: the value of the ``ftol_abs`` stopping criterion
+
+Raises:
+    ValueError: if, when setting this property, a ``NaN`` is passed
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_xtol_rel_docstring()
+{
+    return R"(``xtol_rel`` stopping criterion.
+
+The ``xtol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the
+optimum) changes every parameter by less than ``xtol_rel`` multiplied by the absolute value of the parameter.
+Defaults to 1E-8.
+
+Returns:
+    ``float``: the value of the ``xtol_rel`` stopping criterion
+
+Raises:
+    ValueError: if, when setting this property, a ``NaN`` is passed
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_xtol_abs_docstring()
+{
+    return R"(``xtol_abs`` stopping criterion.
+
+The ``xtol_abs`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the
+optimum) changes every parameter by less than ``xtol_abs``. Defaults to 0 (that is, this stopping criterion is disabled
+by default).
+
+Returns:
+    ``float``: the value of the ``xtol_abs`` stopping criterion
+
+Raises:
+    ValueError: if, when setting this property, a ``NaN`` is passed
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_maxeval_docstring()
+{
+    return R"(``maxeval`` stopping criterion.
+
+The ``maxeval`` stopping criterion instructs the solver to stop when the number of function evaluations exceeds
+``maxeval``. Defaults to 0 (that is, this stopping criterion is disabled by default).
+
+Returns:
+    ``int``: the value of the ``maxeval`` stopping criterion
+
+Raises:
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_maxtime_docstring()
+{
+    return R"(``maxtime`` stopping criterion.
+
+The ``maxtime`` stopping criterion instructs the solver to stop when the optimization time (in seconds) exceeds
+``maxtime``. Defaults to 0 (that is, this stopping criterion is disabled by default).
+
+Returns:
+    ``int``: the value of the ``maxtime`` stopping criterion
+
+Raises:
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_selection_docstring()
+{
+    return R"(Individual selection policy.
+
+This attribute represents the policy that is used in the ``evolve()`` method to select the individual
+that will be optimised. The attribute can be either a string or an integer.
+
+If the attribute is a string, it must be one of ``"best"``, ``"worst"`` and ``"random"``:
+
+* ``"best"`` will select the best individual in the population,
+* ``"worst"`` will select the worst individual in the population,
+* ``"random"`` will randomly choose one individual in the population.
+
+:func:`~pygmo.core.nlopt.set_random_sr_seed()` can be used to seed the random number generator
+used by the ``"random"`` policy.
+
+If the attribute is an integer, it represents the index (in the population) of the individual that is selected
+for optimisation.
+
+Returns:
+    ``int`` or ``str``: the individual selection policy or index
+
+Raises:
+    OverflowError: if the attribute is set to an integer which is negative or too large
+    ValueError: if the attribute is set to an invalid string
+    TypeError: if the attribute is set to a value of an invalid type
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_replacement_docstring()
+{
+    return R"(Individual replacement policy.
+
+This attribute represents the policy that is used in the ``evolve()`` method to select the individual
+that will be replaced by the optimised individual. The attribute can be either a string or an integer.
+
+If the attribute is a string, it must be one of ``"best"``, ``"worst"`` and ``"random"``:
+
+* ``"best"`` will select the best individual in the population,
+* ``"worst"`` will select the worst individual in the population,
+* ``"random"`` will randomly choose one individual in the population.
+
+:func:`~pygmo.core.nlopt.set_random_sr_seed()` can be used to seed the random number generator
+used by the ``"random"`` policy.
+
+If the attribute is an integer, it represents the index (in the population) of the individual that will be
+replaced by the optimised individual.
+
+Returns:
+    ``int`` or ``str``: the individual replacement policy or index
+
+Raises:
+    OverflowError: if the attribute is set to an integer which is negative or too large
+    ValueError: if the attribute is set to an invalid string
+    TypeError: if the attribute is set to a value of an invalid type
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_set_random_sr_seed_docstring()
+{
+    return R"(set_random_sr_seed(seed)
+
+Set the seed for the ``"random"`` selection/replacement policies.
+
+Args:
+    seed (``int``): the value that will be used to seed the random number generator used by the ``"random"``
+      selection/replacement policies (see :attr:`~pygmo.core.nlopt.selection` and
+      :attr:`~pygmo.core.nlopt.replacement`)
+
+Raises:
+    OverflowError: if *seed* is negative or too large
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_get_log_docstring()
+{
+    return R"(Optimisation log.
+
+The optimisation log is a collection of log data lines. A log data line is a tuple consisting of:
+
+* the number of objective function evaluations made so far,
+* the objective function value for the current decision vector,
+* the number of constraints violated by the current decision vector,
+* the constraints violation norm for the current decision vector,
+* a boolean flag signalling the feasibility of the current decision vector.
+
+Returns:
+    ``list``: the optimisation log
+
+Raises:
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_get_last_opt_result_docstring()
+{
+    return R"(get_last_opt_result()
+
+Get the result of the last optimisation.
+ +Returns: + ``int``: the NLopt return code for the last optimisation run, or ``NLOPT_SUCCESS`` if no optimisations have been run yet + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_get_solver_name_docstring() +{ + return R"(get_solver_name() + +Get the name of the NLopt solver used during construction. + +Returns: + ``str``: the name of the NLopt solver used during construction + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + } // namespace diff --git a/pygmo/docstrings.hpp b/pygmo/docstrings.hpp index 173b10ba5..3dbc821dc 100644 --- a/pygmo/docstrings.hpp +++ b/pygmo/docstrings.hpp @@ -143,6 +143,20 @@ std::string mbh_get_log_docstring(); std::string mbh_get_perturb_docstring(); std::string generic_uda_get_seed_docstring(); std::string generic_uda_inner_algorithm_docstring(); +std::string nlopt_docstring(); +std::string nlopt_stopval_docstring(); +std::string nlopt_ftol_rel_docstring(); +std::string nlopt_ftol_abs_docstring(); +std::string nlopt_xtol_rel_docstring(); +std::string nlopt_xtol_abs_docstring(); +std::string nlopt_maxeval_docstring(); +std::string nlopt_maxtime_docstring(); +std::string nlopt_selection_docstring(); +std::string nlopt_replacement_docstring(); +std::string nlopt_set_random_sr_seed_docstring(); +std::string nlopt_get_log_docstring(); +std::string nlopt_get_last_opt_result_docstring(); +std::string nlopt_get_solver_name_docstring(); // utilities // hypervolume diff --git a/pygmo/expose_algorithms.cpp b/pygmo/expose_algorithms.cpp index ef5238f67..3603c6b26 100644 --- a/pygmo/expose_algorithms.cpp +++ b/pygmo/expose_algorithms.cpp @@ -44,6 +44,7 @@ see https://www.gnu.org/licenses/. */ #endif +#include #include #include #include @@ -52,6 +53,7 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include @@ -68,6 +70,9 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#if defined(PAGMO_WITH_NLOPT) +#include +#endif #include #include #include @@ -329,5 +334,76 @@ void expose_algorithms() nsga2_get_log_docstring().c_str()); nsga2_.def("get_seed", &nsga2::get_seed, generic_uda_get_seed_docstring().c_str()); + +#if defined(PAGMO_WITH_NLOPT) + // NLopt. + auto nlopt_ = expose_algorithm("nlopt", nlopt_docstring().c_str()); + nlopt_.def(bp::init()); + // Properties for the stopping criteria. + nlopt_.add_property("stopval", &nlopt::get_stopval, &nlopt::set_stopval, nlopt_stopval_docstring().c_str()); + nlopt_.add_property("ftol_rel", &nlopt::get_ftol_rel, &nlopt::set_ftol_rel, nlopt_ftol_rel_docstring().c_str()); + nlopt_.add_property("ftol_abs", &nlopt::get_ftol_abs, &nlopt::set_ftol_abs, nlopt_ftol_abs_docstring().c_str()); + nlopt_.add_property("xtol_rel", &nlopt::get_xtol_rel, &nlopt::set_xtol_rel, nlopt_xtol_rel_docstring().c_str()); + nlopt_.add_property("xtol_abs", &nlopt::get_xtol_abs, &nlopt::set_xtol_abs, nlopt_xtol_abs_docstring().c_str()); + nlopt_.add_property("maxeval", &nlopt::get_maxeval, &nlopt::set_maxeval, nlopt_maxeval_docstring().c_str()); + nlopt_.add_property("maxtime", &nlopt::get_maxtime, &nlopt::set_maxtime, nlopt_maxtime_docstring().c_str()); + // Selection/replacement. 
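+    // NOTE: these two properties accept either a string policy ("best", "worst",
+    // "random") or a non-negative integral index, hence the boost::any-based
+    // getters and the two-step extraction in the setters below.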
+ nlopt_.add_property( + "selection", lcast([](const nlopt &n) -> bp::object { + auto s = n.get_selection(); + if (boost::any_cast(&s)) { + return bp::str(boost::any_cast(s)); + } + return bp::object(boost::any_cast(s)); + }), + lcast([](nlopt &n, const bp::object &o) { + bp::extract e_str(o); + if (e_str.check()) { + n.set_selection(e_str()); + return; + } + bp::extract e_idx(o); + if (e_idx.check()) { + n.set_selection(e_idx()); + return; + } + pygmo_throw(::PyExc_TypeError, + ("cannot convert the input object '" + str(o) + "' of type '" + str(type(o)) + + "' to either a selection policy (one of ['best', 'worst', 'random']) or an individual index") + .c_str()); + }), + nlopt_selection_docstring().c_str()); + nlopt_.add_property( + "replacement", lcast([](const nlopt &n) -> bp::object { + auto s = n.get_replacement(); + if (boost::any_cast(&s)) { + return bp::str(boost::any_cast(s)); + } + return bp::object(boost::any_cast(s)); + }), + lcast([](nlopt &n, const bp::object &o) { + bp::extract e_str(o); + if (e_str.check()) { + n.set_replacement(e_str()); + return; + } + bp::extract e_idx(o); + if (e_idx.check()) { + n.set_replacement(e_idx()); + return; + } + pygmo_throw( + ::PyExc_TypeError, + ("cannot convert the input object '" + str(o) + "' of type '" + str(type(o)) + + "' to either a replacement policy (one of ['best', 'worst', 'random']) or an individual index") + .c_str()); + }), + nlopt_replacement_docstring().c_str()); + nlopt_.def("set_random_sr_seed", &nlopt::set_random_sr_seed, nlopt_set_random_sr_seed_docstring().c_str()); + expose_algo_log(nlopt_, nlopt_get_log_docstring().c_str()); + nlopt_.def("get_last_opt_result", lcast([](const nlopt &n) { return static_cast(n.get_last_opt_result()); }), + nlopt_get_last_opt_result_docstring().c_str()); + nlopt_.def("get_solver_name", &nlopt::get_solver_name, nlopt_get_solver_name_docstring().c_str()); +#endif } } From cc608a6b17046f6709f140b643ca5c354b49775f Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 23:12:26 +0200 Subject: [PATCH 52/57] Minor. --- doc/sphinx/docs/cpp/cpp_docs.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/sphinx/docs/cpp/cpp_docs.rst b/doc/sphinx/docs/cpp/cpp_docs.rst index c05383e19..121a8dc80 100644 --- a/doc/sphinx/docs/cpp/cpp_docs.rst +++ b/doc/sphinx/docs/cpp/cpp_docs.rst @@ -55,14 +55,13 @@ Implemented problems problems/dtlz problems/hock_schittkowsky_71 problems/inventory - problems/luksan_vlcek1.rst + problems/luksan_vlcek1 problems/translate problems/decompose problems/cec2006 problems/cec2009 problems/cec2013 problems/unconstrain - problems/luksan_vlcek1 Implemented islands ^^^^^^^^^^^^^^^^^^^ From 922abdecfca6c8195353144e37aaf5c9fba13c2a Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 23:14:23 +0200 Subject: [PATCH 53/57] Another small bit. --- doc/sphinx/docs/cpp/algorithms/nlopt.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/sphinx/docs/cpp/algorithms/nlopt.rst b/doc/sphinx/docs/cpp/algorithms/nlopt.rst index 30cd4408e..5f46e4b2e 100644 --- a/doc/sphinx/docs/cpp/algorithms/nlopt.rst +++ b/doc/sphinx/docs/cpp/algorithms/nlopt.rst @@ -2,4 +2,4 @@ NLopt solvers ============= .. doxygenclass:: pagmo::nlopt - :members: \ No newline at end of file + :members: From 268187bcd3f4ad23e9341e25a908490b95ad7cd8 Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 23:17:01 +0200 Subject: [PATCH 54/57] lv1 doc fix in Python. 
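
The docstring now matches the C++ implementation, in which the n - 2 constraints
are equalities. A quick sanity check from Python (a sketch, assuming the usual
pygmo problem API):

    >>> import pygmo as pg
    >>> prob = pg.problem(pg.luksan_vlcek1(20))
    >>> prob.get_nec()  # n - 2 equality constraints
    18
    >>> prob.get_nic()  # no inequality constraints
    0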
--- pygmo/docstrings.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index 34835f27e..282fdff35 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -1812,12 +1812,11 @@ Its formulation in pagmo can be written as: .. math:: \begin{array}{rl} - \mbox{find:} & -5 \le \mathbf x_i \le 5, \forall i=1..n\\ - \mbox{to minimize: } & \sum_{i=1}^{n-1}\left[100\left(x_i^2-x_{i+1}\right)^2 + \left(x_i-1\right)^2\right]\\ - \mbox{subject to:} & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2}})\sin(x_{k+1}+x_{k+2}}) - +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 \le UB, \forall k=1..n-2 \\ - & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2}})\sin(x_{k+1}+x_{k+2}}) - +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 \ge LB, \forall k=1..n-2 \\ + \mbox{find:} & -5 \le x_i \le 5, \forall i=1..n \\ + \mbox{to minimize: } & \sum_{i=1}^{n-1}\left[100\left(x_i^2-x_{i+1}\right)^2 + \left(x_i-1\right)^2\right] \\ + \mbox{subject to:} & + 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2})\sin(x_{k+1}+x_{k+2}) + \\ + & +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 = 0, \forall k=1..n-2 \end{array} See: Luksan, L., and Jan Vlcek. "Sparse and partially separable test problems for unconstrained and equality From b08633225d73fa40224c14dbb15c134bdefd77bf Mon Sep 17 00:00:00 2001 From: Francesco Biscani Date: Sat, 8 Apr 2017 23:37:36 +0200 Subject: [PATCH 55/57] nlopt: python testing. --- pygmo/test.py | 137 ++++++++++++++++++++++++++++++++++++++++++++---- tests/nlopt.cpp | 2 +- 2 files changed, 129 insertions(+), 10 deletions(-) diff --git a/pygmo/test.py b/pygmo/test.py index 84009e308..e84604d3d 100644 --- a/pygmo/test.py +++ b/pygmo/test.py @@ -371,6 +371,110 @@ def runTest(self): seed = uda.get_seed() +class nlopt_test_case(_ut.TestCase): + """Test case for the UDA nlopt + + """ + + def runTest(self): + from .core import nlopt, algorithm, luksan_vlcek1, problem, population + n = nlopt() + self.assertEqual(n.get_solver_name(), "cobyla") + n = nlopt("slsqp") + self.assertEqual(n.get_solver_name(), "slsqp") + self.assertRaises(ValueError, lambda: nlopt("dsadsa")) + + self.assertEqual(n.get_last_opt_result(), 1) + + self.assertEqual(n.ftol_abs, 0.) + n.ftol_abs = 1E-6 + self.assertEqual(n.ftol_abs, 1E-6) + + def _(): + n.ftol_abs = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.ftol_rel, 0.) 
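+        # (like ftol_abs above, ftol_rel defaults to 0, i.e., the criterion
+        # starts out disabled)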
+ n.ftol_rel = 1E-6 + self.assertEqual(n.ftol_rel, 1E-6) + + def _(): + n.ftol_rel = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.maxeval, 0) + n.maxeval = 42 + self.assertEqual(n.maxeval, 42) + + self.assertEqual(n.maxtime, 0) + n.maxtime = 43 + self.assertEqual(n.maxtime, 43) + + self.assertEqual(n.replacement, "best") + n.replacement = "worst" + self.assertEqual(n.replacement, "worst") + + def _(): + n.replacement = "rr" + self.assertRaises(ValueError, _) + n.replacement = 12 + self.assertEqual(n.replacement, 12) + + def _(): + n.replacement = -1 + self.assertRaises(OverflowError, _) + + self.assertEqual(n.selection, "best") + n.selection = "worst" + self.assertEqual(n.selection, "worst") + + def _(): + n.selection = "rr" + self.assertRaises(ValueError, _) + n.selection = 12 + self.assertEqual(n.selection, 12) + + def _(): + n.selection = -1 + self.assertRaises(OverflowError, _) + + n.set_random_sr_seed(12) + self.assertRaises(OverflowError, lambda: n.set_random_sr_seed(-1)) + + self.assertEqual(n.stopval, -float('inf')) + n.stopval = 1E-6 + self.assertEqual(n.stopval, 1E-6) + + def _(): + n.stopval = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.xtol_abs, 0.) + n.xtol_abs = 1E-6 + self.assertEqual(n.xtol_abs, 1E-6) + + def _(): + n.xtol_abs = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.xtol_rel, 1E-8) + n.xtol_rel = 1E-6 + self.assertEqual(n.xtol_rel, 1E-6) + + def _(): + n.xtol_rel = float('nan') + self.assertRaises(ValueError, _) + + n = nlopt("slsqp") + algo = algorithm(n) + algo.set_verbosity(5) + prob = problem(luksan_vlcek1(20)) + prob.c_tol = [1E-6] * 18 + pop = population(prob, 20) + pop = algo.evolve(pop) + self.assertTrue(len(algo.extract(nlopt).get_log()) != 0) + + class null_problem_test_case(_ut.TestCase): """Test case for the null problem @@ -387,32 +491,42 @@ def runTest(self): self.assertTrue(problem(np()).get_nobj() == 1) self.assertTrue(problem(np(23)).get_nobj() == 23) + class estimate_sparsity_test_case(_ut.TestCase): """Test case for the hypervolume utilities """ + def runTest(self): import pygmo as pg import numpy as np + def my_fun(x): - return [x[0]+x[3], x[2], x[1]] - res = pg.estimate_sparsity(callable = my_fun, x = [0.1,0.1,0.1,0.1], dx = 1e-8) - self.assertTrue((res==np.array([[0, 0],[0, 3],[1, 2],[2, 1]])).all()) + return [x[0] + x[3], x[2], x[1]] + res = pg.estimate_sparsity( + callable=my_fun, x=[0.1, 0.1, 0.1, 0.1], dx=1e-8) + self.assertTrue( + (res == np.array([[0, 0], [0, 3], [1, 2], [2, 1]])).all()) + class estimate_gradient_test_case(_ut.TestCase): """Test case for the hypervolume utilities """ + def runTest(self): import pygmo as pg import numpy as np + def my_fun(x): - return [x[0]+x[3], x[2], x[1]] - out = pg.estimate_gradient(callable = my_fun, x = [0]*4, dx = 1e-8) - res = np.array([ 1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0.]) - self.assertTrue((abs(out-res)<1e-8).all()) - out = pg.estimate_gradient_h(callable = my_fun, x = [0]*4, dx = 1e-8) - self.assertTrue((abs(out-res)<1e-8).all()) + return [x[0] + x[3], x[2], x[1]] + out = pg.estimate_gradient(callable=my_fun, x=[0] * 4, dx=1e-8) + res = np.array([1., 0., 0., 1., 0., 0., + 1., 0., 0., 1., 0., 0.]) + self.assertTrue((abs(out - res) < 1e-8).all()) + out = pg.estimate_gradient_h(callable=my_fun, x=[0] * 4, dx=1e-8) + self.assertTrue((abs(out - res) < 1e-8).all()) + class hypervolume_test_case(_ut.TestCase): """Test case for the hypervolume utilities @@ -1049,6 +1163,11 @@ def run_test_suite(): 
     suite.addTest(unconstrain_test_case())
     suite.addTest(mbh_test_case())
     suite.addTest(cstrs_self_adaptive_test_case())
+    try:
+        from .core import nlopt
+        suite.addTest(nlopt_test_case())
+    except ImportError:
+        pass
     test_result = _ut.TextTestRunner(verbosity=2).run(suite)
     if len(test_result.failures) > 0 or len(test_result.errors) > 0:
         retval = 1
diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp
index 2b3aba2d2..e093bdcce 100644
--- a/tests/nlopt.cpp
+++ b/tests/nlopt.cpp
@@ -262,4 +262,4 @@ BOOST_AUTO_TEST_CASE(nlopt_set_sc)
         BOOST_CHECK_THROW(a.set_xtol_abs(std::numeric_limits<double>::quiet_NaN()), std::invalid_argument);
     }
     a.set_maxtime(123);
-}
\ No newline at end of file
+}

From 8be038297aa552abf16fb2d251ee488f8b4d53e0 Mon Sep 17 00:00:00 2001
From: Francesco Biscani
Date: Sun, 9 Apr 2017 00:08:17 +0200
Subject: [PATCH 56/57] Python 2.7 fix. [skip appveyor]

---
 pygmo/docstrings.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp
index 282fdff35..09f686b5d 100644
--- a/pygmo/docstrings.cpp
+++ b/pygmo/docstrings.cpp
@@ -3954,7 +3954,9 @@ Set the seed for the ``"random"`` selection/replacement policies.
 
 std::string nlopt_get_log_docstring()
 {
-    return R"(Optimisation log.
+    return R"(get_log()
+
+Optimisation log.
 
 The optimisation log is a collection of log data lines. A log data line is a tuple consisting of:

From 9637411211913973cb70eccd193b4dae32b079f9 Mon Sep 17 00:00:00 2001
From: Francesco Biscani
Date: Sun, 9 Apr 2017 00:36:29 +0200
Subject: [PATCH 57/57] Last minute fixes. [skip ci]

---
 pygmo/docstrings.cpp        | 2 +-
 pygmo/expose_algorithms.cpp | 2 +-
 pygmo/test.py               | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp
index 09f686b5d..e2fdd4c69 100644
--- a/pygmo/docstrings.cpp
+++ b/pygmo/docstrings.cpp
@@ -3621,7 +3621,7 @@ NLopt algorithms is:
 * shifted limited-memory variable-metric.
 
 The desired NLopt solver is selected upon construction of an :class:`~pygmo.core.nlopt` algorithm. Various properties
-of the solver (e.g., the stopping criteria) can be configured during construction or afterwards. Note that multiple
+of the solver (e.g., the stopping criteria) can be configured via class attributes. Note that multiple
 stopping criteria can be active at the same time: the optimisation will stop as soon as at least one stopping
 criterion is satisfied. By default, only the ``xtol_rel`` stopping criterion is active (see :attr:`~pygmo.core.nlopt.xtol_rel`).
diff --git a/pygmo/expose_algorithms.cpp b/pygmo/expose_algorithms.cpp
index 3603c6b26..30052c903 100644
--- a/pygmo/expose_algorithms.cpp
+++ b/pygmo/expose_algorithms.cpp
@@ -338,7 +338,7 @@ void expose_algorithms()
 #if defined(PAGMO_WITH_NLOPT)
     // NLopt.
     auto nlopt_ = expose_algorithm<nlopt>("nlopt", nlopt_docstring().c_str());
-    nlopt_.def(bp::init<const std::string &>());
+    nlopt_.def(bp::init<const std::string &>((bp::arg("solver"))));
     // Properties for the stopping criteria.
nlopt_.add_property("stopval", &nlopt::get_stopval, &nlopt::set_stopval, nlopt_stopval_docstring().c_str()); nlopt_.add_property("ftol_rel", &nlopt::get_ftol_rel, &nlopt::set_ftol_rel, nlopt_ftol_rel_docstring().c_str()); diff --git a/pygmo/test.py b/pygmo/test.py index e84604d3d..ee0ca0f51 100644 --- a/pygmo/test.py +++ b/pygmo/test.py @@ -380,7 +380,7 @@ def runTest(self): from .core import nlopt, algorithm, luksan_vlcek1, problem, population n = nlopt() self.assertEqual(n.get_solver_name(), "cobyla") - n = nlopt("slsqp") + n = nlopt(solver = "slsqp") self.assertEqual(n.get_solver_name(), "slsqp") self.assertRaises(ValueError, lambda: nlopt("dsadsa"))