diff --git a/CMakeLists.txt b/CMakeLists.txt index 98e011686..6cdbd5e11 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,8 +1,8 @@ -project(pagmo) +cmake_minimum_required(VERSION 3.2) -enable_testing() +project(pagmo VERSION 2.0) -cmake_minimum_required(VERSION 3.2) +enable_testing() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake_modules" "${CMAKE_SOURCE_DIR}/cmake_modules/yacma") @@ -27,6 +27,9 @@ option(PAGMO_BUILD_PYGMO "Build PyGMO." OFF) # Build option: enable features depending on Eigen3. option(PAGMO_WITH_EIGEN3 "Enable features depending on Eigen3 (such as CMAES). Requires Eigen3." OFF) +# Build option: enable NLopt. +option(PAGMO_WITH_NLOPT "Enable wrappers for the NLopt algorithms." OFF) + # Build option: install header. option(PAGMO_INSTALL_HEADERS "Enable the installation of PaGMO's header files." ON) @@ -66,6 +69,13 @@ if(PAGMO_WITH_EIGEN3) message(STATUS "Eigen version detected: ${EIGEN3_VERSION}") endif() +# NLopt setup +if(PAGMO_WITH_NLOPT) + find_package(NLOPT REQUIRED) + message(STATUS "NLopt include directory: ${NLOPT_INCLUDE_DIRS}") + message(STATUS "NLopt libraries: ${NLOPT_LIBRARIES}") +endif() + # Python setup. # NOTE: we do it here because we need to detect the Python bits *before* # looking for boost.python. @@ -90,15 +100,29 @@ add_library(pagmo INTERFACE) target_link_libraries(pagmo INTERFACE Threads::Threads Boost::boost) target_include_directories(pagmo INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> + $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}/include> $<INSTALL_INTERFACE:include>) + if(PAGMO_WITH_EIGEN3) + # Link pagmo to eigen3. add_library(Eigen3::eigen3 INTERFACE IMPORTED) set_target_properties(Eigen3::eigen3 PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${EIGEN3_INCLUDE_DIR}") target_link_libraries(pagmo INTERFACE Eigen3::eigen3) - # FIXME: this needs to go into config.hpp, once implemented. - target_compile_definitions(pagmo INTERFACE PAGMO_WITH_EIGEN3) + set(PAGMO_ENABLE_EIGEN3 "#define PAGMO_WITH_EIGEN3") endif() +if(PAGMO_WITH_NLOPT) + # Link pagmo to NLopt. + add_library(NLOPT::nlopt UNKNOWN IMPORTED) + set_target_properties(NLOPT::nlopt PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${NLOPT_INCLUDE_DIRS}") + set_target_properties(NLOPT::nlopt PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C" IMPORTED_LOCATION "${NLOPT_LIBRARIES}") + target_link_libraries(pagmo INTERFACE NLOPT::nlopt) + set(PAGMO_ENABLE_NLOPT "#define PAGMO_WITH_NLOPT") +endif() + +# Configure config.hpp.
+configure_file("${CMAKE_CURRENT_SOURCE_DIR}/config.hpp.in" "${CMAKE_CURRENT_BINARY_DIR}/include/pagmo/config.hpp") + if(PAGMO_BUILD_TESTS) add_subdirectory("${CMAKE_SOURCE_DIR}/tests") endif() @@ -113,4 +137,5 @@ endif() if(PAGMO_INSTALL_HEADERS) install(DIRECTORY include/ DESTINATION include) + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/include/pagmo/config.hpp" DESTINATION include/pagmo) endif() diff --git a/appveyor.yml b/appveyor.yml index 53456c1be..5cde0299b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -18,9 +18,9 @@ install: - if [%BUILD_TYPE%]==[Python35] set PATH=C:\Miniconda35-x64\Scripts;%PATH% - if [%BUILD_TYPE%]==[Python36] set PATH=C:\Miniconda36-x64\Scripts;%PATH% - conda config --add channels conda-forge --force -- if [%BUILD_TYPE%]==[Debug] conda create -y --name pagmo python=3.6 cmake boost eigen -- if [%BUILD_TYPE%]==[Python35] conda create -y --name pagmo python=3.5 cmake boost eigen -- if [%BUILD_TYPE%]==[Python36] conda create -y --name pagmo python=3.6 cmake boost eigen +- if [%BUILD_TYPE%]==[Debug] conda create -y --name pagmo python=3.6 cmake boost eigen nlopt +- if [%BUILD_TYPE%]==[Python35] conda create -y --name pagmo python=3.5 cmake boost eigen nlopt +- if [%BUILD_TYPE%]==[Python36] conda create -y --name pagmo python=3.6 cmake boost eigen nlopt - activate pagmo - if [%BUILD_TYPE%]==[Python35] conda install -y numpy dill ipyparallel - if [%BUILD_TYPE%]==[Python36] conda install -y numpy dill ipyparallel @@ -28,11 +28,11 @@ install: build_script: - mkdir build - cd build -- if [%BUILD_TYPE%]==[Debug] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=YES -DPAGMO_BUILD_TUTORIALS=YES -DPAGMO_WITH_EIGEN3=YES .. +- if [%BUILD_TYPE%]==[Debug] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=YES -DPAGMO_BUILD_TUTORIALS=YES -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes .. - if [%BUILD_TYPE%]==[Debug] cmake --build . --config Debug --target install -- if [%BUILD_TYPE%]==[Python35] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=YES -DPAGMO_BUILD_PYGMO=yes .. +- if [%BUILD_TYPE%]==[Python35] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_BUILD_PYGMO=yes .. - if [%BUILD_TYPE%]==[Python35] cmake --build . --config Release --target install -- if [%BUILD_TYPE%]==[Python36] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=YES -DPAGMO_BUILD_PYGMO=yes .. +- if [%BUILD_TYPE%]==[Python36] cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_BUILD_PYGMO=yes .. - if [%BUILD_TYPE%]==[Python36] cmake --build . --config Release --target install test_script: diff --git a/cmake_modules/FindNLOPT.cmake b/cmake_modules/FindNLOPT.cmake new file mode 100644 index 000000000..f1d8f422e --- /dev/null +++ b/cmake_modules/FindNLOPT.cmake @@ -0,0 +1,43 @@ +# Copyright (c) 2015-2016, Humanoid Lab, Georgia Tech Research Corporation +# Copyright (c) 2015-2017, Graphics Lab, Georgia Tech Research Corporation +# Copyright (c) 2016-2017, Personal Robotics Lab, Carnegie Mellon University +# This file is provided under the "BSD-style" License + +# Find NLOPT +# +# This sets the following variables: +# NLOPT_FOUND +# NLOPT_INCLUDE_DIRS +# NLOPT_LIBRARIES +# NLOPT_DEFINITIONS +# NLOPT_VERSION + +find_package(PkgConfig QUIET) + +# Check to see if pkgconfig is installed. 
+pkg_check_modules(PC_NLOPT nlopt QUIET) + +# Definitions +set(NLOPT_DEFINITIONS ${PC_NLOPT_CFLAGS_OTHER}) + +# Include directories +find_path(NLOPT_INCLUDE_DIRS + NAMES nlopt.h + HINTS ${PC_NLOPT_INCLUDEDIR} + PATHS "${CMAKE_INSTALL_PREFIX}/include") + +# Libraries +find_library(NLOPT_LIBRARIES + NAMES nlopt nlopt_cxx + HINTS ${PC_NLOPT_LIBDIR}) + +# Version +set(NLOPT_VERSION ${PC_NLOPT_VERSION}) + +# Set (NAME)_FOUND if all the variables and the version are satisfied. +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NLOPT + FAIL_MESSAGE DEFAULT_MSG + REQUIRED_VARS NLOPT_INCLUDE_DIRS NLOPT_LIBRARIES + VERSION_VAR NLOPT_VERSION) + diff --git a/config.hpp.in b/config.hpp.in new file mode 100644 index 000000000..bf462415b --- /dev/null +++ b/config.hpp.in @@ -0,0 +1,40 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#ifndef PAGMO_CONFIG_HPP +#define PAGMO_CONFIG_HPP + +// Start of defines instantiated by CMake. +// clang-format off +#define PAGMO_VERSION @pagmo_VERSION@ +@PAGMO_ENABLE_EIGEN3@ +@PAGMO_ENABLE_NLOPT@ +// clang-format on +// End of defines instantiated by CMake. + +#endif diff --git a/doc/doxygen/Doxyfile b/doc/doxygen/Doxyfile index b09a9d8c7..0e590ab06 100644 --- a/doc/doxygen/Doxyfile +++ b/doc/doxygen/Doxyfile @@ -2058,7 +2058,9 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -PREDEFINED = PAGMO_DOXYGEN_INVOKED +PREDEFINED = PAGMO_DOXYGEN_INVOKED \ + PAGMO_WITH_EIGEN3 \ + PAGMO_WITH_NLOPT # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The diff --git a/doc/doxygen/images/nlopt.png b/doc/doxygen/images/nlopt.png new file mode 100644 index 000000000..733c02b40 Binary files /dev/null and b/doc/doxygen/images/nlopt.png differ diff --git a/doc/sphinx/docs/cpp/algorithms/nlopt.rst b/doc/sphinx/docs/cpp/algorithms/nlopt.rst new file mode 100644 index 000000000..5f46e4b2e --- /dev/null +++ b/doc/sphinx/docs/cpp/algorithms/nlopt.rst @@ -0,0 +1,5 @@ +NLopt solvers +============= + +.. 
doxygenclass:: pagmo::nlopt + :members: diff --git a/doc/sphinx/docs/cpp/cpp_docs.rst b/doc/sphinx/docs/cpp/cpp_docs.rst index 2f0303b5e..121a8dc80 100644 --- a/doc/sphinx/docs/cpp/cpp_docs.rst +++ b/doc/sphinx/docs/cpp/cpp_docs.rst @@ -32,6 +32,7 @@ Implemented algorithms algorithms/moead algorithms/mbh algorithms/cstrs_self_adaptive + algorithms/nlopt algorithms/nsga2 algorithms/pso algorithms/sade @@ -54,13 +55,13 @@ Implemented problems problems/dtlz problems/hock_schittkowsky_71 problems/inventory + problems/luksan_vlcek1 problems/translate problems/decompose problems/cec2006 problems/cec2009 problems/cec2013 problems/unconstrain - problems/luksan_vlcek1 Implemented islands ^^^^^^^^^^^^^^^^^^^ diff --git a/doc/sphinx/docs/python/algorithms/py_algorithms.rst b/doc/sphinx/docs/python/algorithms/py_algorithms.rst index a74b8d296..559b995d6 100644 --- a/doc/sphinx/docs/python/algorithms/py_algorithms.rst +++ b/doc/sphinx/docs/python/algorithms/py_algorithms.rst @@ -72,4 +72,9 @@ Algorithms exposed from C++ ------------------------------------------------------------- .. autoclass:: pygmo.core.cstrs_self_adaptive - :members: \ No newline at end of file + :members: + +------------------------------------------------------------- + +.. autoclass:: pygmo.core.nlopt + :members: diff --git a/doc/sphinx/docs/python/problems/py_problems.rst b/doc/sphinx/docs/python/problems/py_problems.rst index df702c80e..889780867 100644 --- a/doc/sphinx/docs/python/problems/py_problems.rst +++ b/doc/sphinx/docs/python/problems/py_problems.rst @@ -71,6 +71,11 @@ Problems exposed from C++ ------------------------------------------------------------- +.. autoclass:: pygmo.core.luksan_vlcek1 + :members: + +------------------------------------------------------------- + .. autoclass:: pygmo.core.translate :members: diff --git a/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst b/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst index 4479156fe..7e48def86 100644 --- a/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst +++ b/doc/sphinx/docs/python/tutorials/coding_udp_simple.rst @@ -164,6 +164,10 @@ be revealed only when calling the malformed method: ... AttributeError: 'numpy.float64' object has no attribute '__iter__' +In this case, the issue is that the ``fitness()`` method returns a scalar instead of an array-like object (remember that pygmo is also +able to solve multi-objective and constrained problems, thus the fitness value must be, in general, a vector). pygmo will complain +about the wrong return type the first time the ``fitness()`` method is invoked. + Notes on computational speed ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -227,6 +231,8 @@ our fitness method into C code. >>> start_time = time.time(); [prob_jit.fitness(dummy_x) for i in range(1000)]; print(time.time() - start_time) #doctest: +SKIP 0.03771... +With a bit more elbow grease, we can further improve performance: + .. doctest:: >>> from numba import jit, float64 @@ -250,9 +256,9 @@ our fitness method into C code. 0.01687... -much better right? +Much better, right? -.. note:: For more information on using Numba to speed up your python code see `Numba documentation pages `_. +.. note:: For more information on using Numba to speed up your python code see the `Numba documentation pages `_. In particular, note that only a limited part of NumPy and the python language in general is supported by this use. 
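To make the point in the paragraph added above concrete, here is a minimal sketch of a UDP whose ``fitness()`` returns an array-like object; the class name and values are purely illustrative and not part of the patch. Even a single-objective, unconstrained problem must return its fitness as a vector of length one, never a bare scalar:

.. code-block:: python

    import pygmo as pg

    class sphere_1d:
        def get_bounds(self):
            # A single decision variable in [-1, 1].
            return ([-1.], [1.])

        def fitness(self, x):
            # Return a length-1 array-like object, not a bare scalar:
            # pygmo always expects a fitness *vector*, even for
            # single-objective, unconstrained problems.
            return [x[0] ** 2]

    prob = pg.problem(sphere_1d())
    print(prob.fitness([0.5]))  # a length-1 array holding 0.25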
diff --git a/doc/sphinx/docs/python/tutorials/using_problem.rst b/doc/sphinx/docs/python/tutorials/using_problem.rst index 66d90ea17..4f50211a5 100644 --- a/doc/sphinx/docs/python/tutorials/using_problem.rst +++ b/doc/sphinx/docs/python/tutorials/using_problem.rst @@ -33,12 +33,14 @@ Let us start: Lower bounds: [-5, -5, -5, -5, -5] Upper bounds: [10, 10, 10, 10, 10] - Has gradient: false + Has gradient: true User implemented gradient sparsity: false + Expected gradients: 5 Has hessians: false User implemented hessians sparsity: false Function evaluations: 0 + Gradient evaluations: 0 Thread safety: basic diff --git a/doc/sphinx/install.rst b/doc/sphinx/install.rst index 7dde80f0e..c100ee54d 100644 --- a/doc/sphinx/install.rst +++ b/doc/sphinx/install.rst @@ -11,11 +11,13 @@ C++ Pagmo is a header-only library which has the following third party dependencies: -* `Boost `_, headers only (needs the libraries only if you intend to compile the python bindings) -* `Eigen `_, headers only (optional) +* `Boost `_, **mandatory**, header-only (needs the libraries only if you + intend to compile the python bindings) +* `Eigen `_, optional, header-only +* `NLopt `_, optional, requires linking -After making sure the dependencies above are installed in your system and their headers visible to your compiler, you can download +After making sure the dependencies above are installed in your system, you can download the pagmo source via the ``git`` command .. code-block:: bash @@ -28,7 +30,7 @@ and configure your build using ``cmake``. When done, type (in your build directo make install -The headers will be installed in the ``CMAKE_INSTALL_PREFIX/include directory``. To check that all went well +The headers will be installed in the ``CMAKE_INSTALL_PREFIX/include`` directory. To check that all went well compile the :ref:`quick-start example `. ----------------------------------------------------------------------- @@ -38,8 +40,8 @@ Python The python module correponding to pagmo is called pygmo It can be installed either directly from ``conda`` or ``pip`` or by building the module from source. -Installing with pip -^^^^^^^^^^^^^^^^^^^ +Installation with pip/conda +^^^^^^^^^^^^^^^^^^^^^^^^^^^ The python package pygmo (python binding of the C++ code) can be installed using ``pip`` or ``conda``: .. code-block:: bash @@ -53,13 +55,14 @@ or conda config --add channels conda-forge conda install pygmo -Building the module -^^^^^^^^^^^^^^^^^^^ +Installation from source +^^^^^^^^^^^^^^^^^^^^^^^^ -To build the module you need to have the boost python libraries installed and to activate the ``BUILD_PYGMO`` option from within ``cmake``. +To build the module from source you need to have the Boost.Python libraries installed and to activate the cmake +``PAGMO_BUILD_PYGMO`` option. -Check carefully what python version is detected and what libraries are linked to. In particular select the correct boost_python -according to the python version (2 or 3) you want to compile the module for. +Check carefully what python version is detected and what libraries are linked to. In particular, select the correct Boost.Python +version according to the python version (2 or 3) you want to compile the module for. The ``CMAKE_INSTALL_PREFIX`` will be used to construct the final location of headers and Python module after install. 
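After building and installing pygmo with NLopt support enabled, a quick sanity check is to run one of the newly wrapped solvers from Python. The following is only a sketch, assuming the bindings expose the wrapper as ``pygmo.nlopt`` with the same string-based constructor as the C++ class; the solver name and test problem are illustrative:

.. code-block:: python

    import pygmo as pg

    # Wrap NLopt's derivative-free COBYLA solver in a pygmo algorithm.
    algo = pg.algorithm(pg.nlopt("cobyla"))
    algo.set_verbosity(100)  # log every 100 objective function evaluations

    # Any single-objective problem will do as a smoke test.
    pop = pg.population(pg.rosenbrock(5), size=20)
    pop = algo.evolve(pop)   # optimises one selected individual of the population
    print(pop.get_f()[pop.best_idx()])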
diff --git a/doc/sphinx/quickstart.rst b/doc/sphinx/quickstart.rst index 7c4a8186d..f1e399096 100644 --- a/doc/sphinx/quickstart.rst +++ b/doc/sphinx/quickstart.rst @@ -17,11 +17,20 @@ After following the :ref:`install` you will be able to compile and run your firs :language: c++ :linenos: -Place it into a getting_started.cpp text file and compile it (for example) with: +Place it into a ``getting_started.cpp`` text file and compile it (for example) with: .. code-block:: bash - g++ -std=c++11 getting_started.cpp -pthread + g++ -O2 -DNDEBUG -std=c++11 getting_started.cpp -pthread + +If you installed pagmo with support for optional 3rd party libraries, you might need to +add additional switches to the command-line invocation of the compiler. For instance, +if you enabled the optional NLopt support, you will have to link your executable to the +``nlopt`` library: + +.. code-block:: bash + + g++ -O2 -DNDEBUG -std=c++11 getting_started.cpp -pthread -lnlopt ----------------------------------------------------------------------- @@ -36,7 +45,7 @@ If you have successfully installed pygmo following the :ref:`install` you can tr :language: python :linenos: -Place it into a getting_started.py text file and run it with: +Place it into a ``getting_started.py`` text file and run it with: .. code-block:: bash diff --git a/include/pagmo/algorithms/bee_colony.hpp b/include/pagmo/algorithms/bee_colony.hpp index c4a4cc0e7..43fc7fb70 100644 --- a/include/pagmo/algorithms/bee_colony.hpp +++ b/include/pagmo/algorithms/bee_colony.hpp @@ -1,3 +1,31 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + #ifndef PAGMO_ALGORITHMS_BEE_COLONY_HPP #define PAGMO_ALGORITHMS_BEE_COLONY_HPP diff --git a/include/pagmo/algorithms/cmaes.hpp b/include/pagmo/algorithms/cmaes.hpp index 1c34651ad..b71c811eb 100644 --- a/include/pagmo/algorithms/cmaes.hpp +++ b/include/pagmo/algorithms/cmaes.hpp @@ -29,6 +29,10 @@ see https://www.gnu.org/licenses/. */ #ifndef PAGMO_ALGORITHMS_CMAES_HPP #define PAGMO_ALGORITHMS_CMAES_HPP +#include + +#if defined(PAGMO_WITH_EIGEN3) + #include #include #include @@ -51,18 +55,17 @@ namespace pagmo * \image html cmaes.png "CMA-ES logic." width=3cm * * CMA-ES is one of the most successful algorithm, classified as an Evolutionary Strategy, for derivative-free global - * optimization. - * The version implemented in PaGMO is the "classic" version described in the 2006 paper titled + * optimization. The version implemented in PaGMO is the "classic" version described in the 2006 paper titled * "The CMA evolution strategy: a comparing review.". 
* * **NOTE** Since at each generation all newly generated individuals sampled from the adapted distribution are - * reinserted - * into the population, CMA-ES may not preserve the best individual (not elitist). As a consequence the plot of the - * population best fitness may not be perfectly monotonically decreasing + * reinserted into the population, CMA-ES may not preserve the best individual (not elitist). As a consequence the plot + * of the population best fitness may not be perfectly monotonically decreasing * * **NOTE** The cmaes::evolve method cannot be called concurrently by different threads even if it is marked as const. - * The - * mutable members make such an operation result in an undefined behaviour in terms of algorithmic convergence. + * The mutable members make such an operation result in an undefined behaviour in terms of algorithmic convergence. + * + * **NOTE** This algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_EIGEN3`` option enabled. * * See: Hansen, Nikolaus. "The CMA evolution strategy: a comparing review." Towards a new evolutionary computation. * Springer Berlin Heidelberg, 2006. 75-102. @@ -611,4 +614,10 @@ class cmaes PAGMO_REGISTER_ALGORITHM(pagmo::cmaes) +#else // PAGMO_WITH_EIGEN3 + +#error The cmaes.hpp header was included, but pagmo was not compiled with eigen3 support + +#endif // PAGMO_WITH_EIGEN3 + #endif diff --git a/include/pagmo/algorithms/nlopt.hpp b/include/pagmo/algorithms/nlopt.hpp new file mode 100644 index 000000000..dc81ae351 --- /dev/null +++ b/include/pagmo/algorithms/nlopt.hpp @@ -0,0 +1,1333 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#ifndef PAGMO_ALGORITHMS_NLOPT_HPP +#define PAGMO_ALGORITHMS_NLOPT_HPP + +#include + +#if defined(PAGMO_WITH_NLOPT) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace pagmo +{ + +namespace detail +{ + +// Usual trick with global read-only data useful to the NLopt wrapper. +template +struct nlopt_data { + // The idea here is to establish a bijection between string name (e.g., "cobyla") + // and the enums used in the NLopt C API to refer to the algos (e.g., NLOPT_LN_COBYLA). + // We use a bidirectional map so that we can map both string -> enum and enum -> string, + // depending on what is needed. 
+ using names_map_t = boost::bimap; + static const names_map_t names; + // A map to link a human-readable description to NLopt return codes. + // NOTE: in C++11 hashing of enums might not be available. Provide our own. + struct res_hasher { + std::size_t operator()(::nlopt_result res) const + { + return std::hash{}(static_cast(res)); + } + }; + using result_map_t = std::unordered_map<::nlopt_result, std::string, res_hasher>; + static const result_map_t results; +}; + +// Initialise the mapping between algo names and enums for the supported algorithms. +inline typename nlopt_data<>::names_map_t nlopt_names_map() +{ + typename nlopt_data<>::names_map_t retval; + using value_type = typename nlopt_data<>::names_map_t::value_type; + retval.insert(value_type("cobyla", NLOPT_LN_COBYLA)); + retval.insert(value_type("bobyqa", NLOPT_LN_BOBYQA)); + retval.insert(value_type("newuoa", NLOPT_LN_NEWUOA)); + retval.insert(value_type("newuoa_bound", NLOPT_LN_NEWUOA_BOUND)); + retval.insert(value_type("praxis", NLOPT_LN_PRAXIS)); + retval.insert(value_type("neldermead", NLOPT_LN_NELDERMEAD)); + retval.insert(value_type("sbplx", NLOPT_LN_SBPLX)); + retval.insert(value_type("mma", NLOPT_LD_MMA)); + retval.insert(value_type("ccsaq", NLOPT_LD_CCSAQ)); + retval.insert(value_type("slsqp", NLOPT_LD_SLSQP)); + retval.insert(value_type("lbfgs", NLOPT_LD_LBFGS)); + retval.insert(value_type("tnewton_precond_restart", NLOPT_LD_TNEWTON_PRECOND_RESTART)); + retval.insert(value_type("tnewton_precond", NLOPT_LD_TNEWTON_PRECOND)); + retval.insert(value_type("tnewton_restart", NLOPT_LD_TNEWTON_RESTART)); + retval.insert(value_type("tnewton", NLOPT_LD_TNEWTON)); + retval.insert(value_type("var2", NLOPT_LD_VAR2)); + retval.insert(value_type("var1", NLOPT_LD_VAR1)); + return retval; +} + +// Static init using the helper function above. +template +const typename nlopt_data::names_map_t nlopt_data::names = nlopt_names_map(); + +// Other static init. 
+template +const typename nlopt_data::result_map_t nlopt_data::results = { + {NLOPT_SUCCESS, "NLOPT_SUCCESS (value = " + std::to_string(NLOPT_SUCCESS) + ", Generic success return value)"}, + {NLOPT_STOPVAL_REACHED, "NLOPT_STOPVAL_REACHED (value = " + std::to_string(NLOPT_STOPVAL_REACHED) + + ", Optimization stopped because stopval was reached)"}, + {NLOPT_FTOL_REACHED, "NLOPT_FTOL_REACHED (value = " + std::to_string(NLOPT_FTOL_REACHED) + + ", Optimization stopped because ftol_rel or ftol_abs was reached)"}, + {NLOPT_XTOL_REACHED, "NLOPT_XTOL_REACHED (value = " + std::to_string(NLOPT_XTOL_REACHED) + + ", Optimization stopped because xtol_rel or xtol_abs was reached)"}, + {NLOPT_MAXEVAL_REACHED, "NLOPT_MAXEVAL_REACHED (value = " + std::to_string(NLOPT_MAXEVAL_REACHED) + + ", Optimization stopped because maxeval was reached)"}, + {NLOPT_MAXTIME_REACHED, "NLOPT_MAXTIME_REACHED (value = " + std::to_string(NLOPT_MAXTIME_REACHED) + + ", Optimization stopped because maxtime was reached)"}, + {NLOPT_FAILURE, "NLOPT_FAILURE (value = " + std::to_string(NLOPT_FAILURE) + ", Generic failure code)"}, + {NLOPT_INVALID_ARGS, "NLOPT_INVALID_ARGS (value = " + std::to_string(NLOPT_INVALID_ARGS) + ", Invalid arguments)"}, + {NLOPT_OUT_OF_MEMORY, + "NLOPT_OUT_OF_MEMORY (value = " + std::to_string(NLOPT_OUT_OF_MEMORY) + ", Ran out of memory)"}, + {NLOPT_ROUNDOFF_LIMITED, "NLOPT_ROUNDOFF_LIMITED (value = " + std::to_string(NLOPT_ROUNDOFF_LIMITED) + + ", Halted because roundoff errors limited progress)"}, + {NLOPT_FORCED_STOP, + "NLOPT_FORCED_STOP (value = " + std::to_string(NLOPT_FORCED_STOP) + ", Halted because of a forced termination)"}}; + +// Convert an NLopt result in a more descriptive string. +inline std::string nlopt_res2string(::nlopt_result err) +{ + return (nlopt_data<>::results.find(err) == nlopt_data<>::results.end() ? "??" : nlopt_data<>::results.at(err)); +} + +struct nlopt_obj { + // Single entry of the log (feval, fitness, n of unsatisfied const, constr. violation, feasibility). + using log_line_type = std::tuple; + // The log. + using log_type = std::vector; +#if defined(_MSC_VER) + // NOTE: this is a wrapper around std::copy() for use in MSVC in conjunction with raw pointers. + // In debug mode, MSVC will complain about unchecked iterators unless instructed otherwise. + template + static void unchecked_copy(Int size, const T *begin, T *dest) + { + std::copy(stdext::make_checked_array_iterator(begin, size), + stdext::make_checked_array_iterator(begin, size, size), + stdext::make_checked_array_iterator(dest, size)); + } +#else + template + static void unchecked_copy(Int size, const T *begin, T *dest) + { + std::copy(begin, begin + size, dest); + } +#endif + // Shortcut to the static data. + using data = nlopt_data<>; + explicit nlopt_obj(::nlopt_algorithm algo, problem &prob, double stopval, double ftol_rel, double ftol_abs, + double xtol_rel, double xtol_abs, int maxeval, int maxtime, unsigned verbosity) + : m_prob(prob), m_value(nullptr, ::nlopt_destroy), m_verbosity(verbosity) + { + // If needed, init the sparsity pattern. + if (prob.has_gradient_sparsity()) { + m_sp = prob.gradient_sparsity(); + } + + // Extract and set problem dimension. + const auto n = boost::numeric_cast(prob.get_nx()); + // Try to init the nlopt_obj. + m_value.reset(::nlopt_create(algo, n)); + if (!m_value) { + pagmo_throw(std::runtime_error, "the creation of the nlopt_opt object failed"); // LCOV_EXCL_LINE + } + + // NLopt does not handle MOO. 
+ if (prob.get_nobj() != 1u) { + pagmo_throw(std::invalid_argument, "NLopt algorithms cannot handle multi-objective optimization"); + } + + // Variable to hold the result of various operations. + ::nlopt_result res; + + // Box bounds. + const auto bounds = prob.get_bounds(); + res = ::nlopt_set_lower_bounds(m_value.get(), bounds.first.data()); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the lower bounds for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_upper_bounds(m_value.get(), bounds.second.data()); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the upper bounds for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + + // This is just a vector_double that is re-used across objfun invocations. + // It will hold the current decision vector. + m_dv.resize(prob.get_nx()); + + // Set the objfun + gradient. + res = ::nlopt_set_min_objective( + m_value.get(), + [](unsigned dim, const double *x, double *grad, void *f_data) -> double { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + // NOTE: the idea here is that we wrap everything in a try/catch block, + // and, if any exception is thrown, we record it into the nlo object + // and re-throw it later. We do this because we are using the NLopt C API, + // and if we let exceptions out of here we run in undefined behaviour. + // We do the same for the constraints functions. + try { + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + const auto verb = nlo.m_verbosity; + auto &f_count = nlo.m_objfun_counter; + auto &log = nlo.m_log; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw(std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' a fitness gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness. + const auto fitness = p.fitness(dv); + + // Compute gradient, if needed. + if (grad) { + const auto gradient = p.gradient(dv); + + if (p.has_gradient_sparsity()) { + // Sparse gradient case. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + auto g_it = gradient.begin(); + + // First we fill the dense output gradient with zeroes. + std::fill(grad, grad + dim, 0.); + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. + for (auto it = sp.begin(); it != sp.end() && it->first == 0u; ++it, ++g_it) { + // NOTE: we just need the gradient of the objfun, + // i.e., those (i,j) pairs in which i == 0. 
We know that the gradient + // of the objfun, if present, starts at the beginning of sp, as sp is + // sorted in lexicographic fashion. + grad[it->second] = *g_it; + } + } else { + // Dense gradient case. + unchecked_copy(p.get_nx(), gradient.data(), grad); + } + } + + // Update the log if requested. + if (verb && !(f_count % verb)) { + // Constraints bits. + const auto ctol = p.get_c_tol(); + const auto c1eq = detail::test_eq_constraints(fitness.data() + 1, + fitness.data() + 1 + p.get_nec(), ctol.data()); + const auto c1ineq + = detail::test_ineq_constraints(fitness.data() + 1 + p.get_nec(), + fitness.data() + fitness.size(), ctol.data() + p.get_nec()); + // This will be the total number of violated constraints. + const auto nv = p.get_nc() - c1eq.first - c1ineq.first; + // This will be the norm of the violation. + const auto l = c1eq.second + c1ineq.second; + // Test feasibility. + const auto feas = p.feasibility_f(fitness); + + if (!(f_count / verb % 50u)) { + // Every 50 lines print the column names. + print("\n", std::setw(10), "fevals:", std::setw(15), "fitness:", std::setw(15), "violated:", + std::setw(15), "viol. norm:", '\n'); + } + // Print to screen the log line. + print(std::setw(10), f_count + 1u, std::setw(15), fitness[0], std::setw(15), nv, std::setw(15), + l, feas ? "" : " i", '\n'); + // Record the log. + log.emplace_back(f_count + 1u, fitness[0], nv, l, feas); + } + + // Update the counter. + ++f_count; + + // Return the objfun value. + return fitness[0]; + } catch (...) { + // Store exception, force the stop of the optimisation, + // and return a useless value. + nlo.m_eptr = std::current_exception(); + ::nlopt_force_stop(nlo.m_value.get()); + return HUGE_VAL; + } + }, + static_cast(this)); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the objective function for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + + // Vector-valued constraints. + const auto nic = boost::numeric_cast(prob.get_nic()); + const auto nec = boost::numeric_cast(prob.get_nec()); + const auto c_tol = prob.get_c_tol(); + + // Inequality. + if (nic) { + res = ::nlopt_add_inequality_mconstraint( + m_value.get(), nic, + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + try { + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + assert(m == p.get_nic()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw(std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an inequality constraints gradient was requested, but the " + "optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and write IC to the output. + // NOTE: fitness is nobj + nec + nic. + const auto fitness = p.fitness(dv); + unchecked_copy(p.get_nic(), fitness.data() + 1 + p.get_nec(), result); + + if (grad) { + // Handle gradient, if requested. 
+ const auto gradient = p.gradient(dv); + + if (p.has_gradient_sparsity()) { + // Sparse gradient. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nic(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(p.get_nec() + 1u, 0u)); + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, + "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the ineq constraints start. + const auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the ineq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. Run until sp.end() as the IC are at the + // end of the sparsity/gradient vector. + for (; it_sp != sp.end(); ++it_sp, ++g_it) { + grad[(it_sp->first - 1u - p.get_nec()) * p.get_nx() + it_sp->second] = *g_it; + } + } else { + // Dense gradient. + unchecked_copy(p.get_nic() * p.get_nx(), + gradient.data() + p.get_nx() * (1u + p.get_nec()), grad); + } + } + } catch (...) { + // Store exception, stop optimisation. + nlo.m_eptr = std::current_exception(); + ::nlopt_force_stop(nlo.m_value.get()); + } + }, + static_cast(this), c_tol.data() + nec); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, + "could not set the inequality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res) + + "\nThis usually means that the algorithm does not support inequality constraints"); + } + } + + // Equality. + if (nec) { + res = ::nlopt_add_equality_mconstraint( + m_value.get(), nec, + [](unsigned m, double *result, unsigned dim, const double *x, double *grad, void *f_data) { + // Get *this back from the function data. + auto &nlo = *static_cast(f_data); + + try { + // A few shortcuts. + auto &p = nlo.m_prob; + auto &dv = nlo.m_dv; + + // A couple of sanity checks. + assert(dim == p.get_nx()); + assert(dv.size() == dim); + assert(m == p.get_nec()); + + if (grad && !p.has_gradient()) { + // If grad is not null, it means we are in an algorithm + // that needs the gradient. If the problem does not support it, + // we error out. + pagmo_throw( + std::invalid_argument, + "during an optimization with the NLopt algorithm '" + + data::names.right.at(::nlopt_get_algorithm(nlo.m_value.get())) + + "' an equality constraints gradient was requested, but the optimisation problem '" + + p.get_name() + "' does not provide it"); + } + + // Copy the decision vector in our temporary dv vector_double, + // for use in the pagmo API. + std::copy(x, x + dim, dv.begin()); + + // Compute fitness and write EC to the output. + // NOTE: fitness is nobj + nec + nic. 
+ const auto fitness = p.fitness(dv); + unchecked_copy(p.get_nec(), fitness.data() + 1, result); + + if (grad) { + // Handle gradient, if requested. + const auto gradient = p.gradient(dv); + + if (p.has_gradient_sparsity()) { + // Sparse gradient case. + auto &sp = nlo.m_sp; + // NOTE: problem::gradient() has already checked that + // the returned vector has size m_gs_dim, i.e., the stored + // size of the sparsity pattern. On the other hand, + // problem::gradient_sparsity() also checks that the returned + // vector has size m_gs_dim, so these two must have the same size. + assert(gradient.size() == sp.size()); + + // Let's first fill it with zeroes. + std::fill(grad, grad + p.get_nx() * p.get_nec(), 0.); + + // Now we need to go into the sparsity pattern and find where + // the sparsity data for the constraints start. + using pair_t = sparsity_pattern::value_type; + // NOTE: it_sp could be end() or point to ineq constraints. This should + // be fine: it_sp is a valid iterator in sp, sp has the same + // size as gradient and we do the proper checks below before accessing + // the values pointed to by it_sp/g_it. + auto it_sp = std::lower_bound(sp.begin(), sp.end(), pair_t(1u, 0u)); + + // Need to do a bit of horrid overflow checking :/. + using diff_type = std::iterator_traits::difference_type; + using udiff_type = std::make_unsigned::type; + if (sp.size() > static_cast(std::numeric_limits::max())) { + pagmo_throw(std::overflow_error, + "Overflow error, the sparsity pattern size is too large."); + } + // This is the index at which the eq constraints start. + const auto idx = std::distance(sp.begin(), it_sp); + // Grab the start of the gradient data for the eq constraints. + auto g_it = gradient.data() + idx; + + // Then we iterate over the sparsity pattern, and fill in the + // nonzero bits in grad. We terminate either at the end of sp, or when + // we encounter the first inequality constraint. + for (; it_sp != sp.end() && it_sp->first < p.get_nec() + 1u; ++it_sp, ++g_it) { + grad[(it_sp->first - 1u) * p.get_nx() + it_sp->second] = *g_it; + } + } else { + // Dense gradient. + unchecked_copy(p.get_nx() * p.get_nec(), gradient.data() + p.get_nx(), grad); + } + } + } catch (...) { + // Store exception, stop optimisation. + nlo.m_eptr = std::current_exception(); + ::nlopt_force_stop(nlo.m_value.get()); + } + }, + static_cast(this), c_tol.data()); + if (res != NLOPT_SUCCESS) { + pagmo_throw(std::invalid_argument, + "could not set the equality constraints for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + nlopt_res2string(res) + + "\nThis usually means that the algorithm does not support equality constraints"); + } + } + + // Handle the various stopping criteria. 
+ res = ::nlopt_set_stopval(m_value.get(), stopval); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'stopval' stopping criterion to " + + std::to_string(stopval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_ftol_rel(m_value.get(), ftol_rel); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'ftol_rel' stopping criterion to " + + std::to_string(ftol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_ftol_abs(m_value.get(), ftol_abs); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'ftol_abs' stopping criterion to " + + std::to_string(ftol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_xtol_rel(m_value.get(), xtol_rel); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'xtol_rel' stopping criterion to " + + std::to_string(xtol_rel) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_xtol_abs1(m_value.get(), xtol_abs); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'xtol_abs' stopping criterion to " + + std::to_string(xtol_abs) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_maxeval(m_value.get(), maxeval); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'maxeval' stopping criterion to " + + std::to_string(maxeval) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + res = ::nlopt_set_maxtime(m_value.get(), maxtime); + if (res != NLOPT_SUCCESS) { + // LCOV_EXCL_START + pagmo_throw(std::invalid_argument, "could not set the 'maxtime' stopping criterion to " + + std::to_string(maxtime) + " for the NLopt algorithm '" + + data::names.right.at(algo) + "', the error is: " + + nlopt_res2string(res)); + // LCOV_EXCL_STOP + } + } + + // Delete all other ctors/assignment ops. + nlopt_obj(const nlopt_obj &) = delete; + nlopt_obj(nlopt_obj &&) = delete; + nlopt_obj &operator=(const nlopt_obj &) = delete; + nlopt_obj &operator=(nlopt_obj &&) = delete; + + // Data members. + problem &m_prob; + sparsity_pattern m_sp; + std::unique_ptr::type, void (*)(::nlopt_opt)> m_value; + vector_double m_dv; + unsigned m_verbosity; + unsigned long m_objfun_counter = 0; + log_type m_log; + // This exception pointer will be null, unless + // an error is raised during the computation of the objfun + // or constraints. If not null, it will be re-thrown + // in the evolve() method. + std::exception_ptr m_eptr; +}; +} + +/// NLopt algorithms. +/** + * \image html nlopt.png "NLopt logo." width=3cm + * + * This user-defined algorithm wraps a selection of solvers from the NLopt library, focusing on + * local optimisation (both gradient-based and derivative-free). 
The complete list of supported + * NLopt algorithms is: + * - COBYLA, + * - BOBYQA, + * - NEWUOA + bound constraints, + * - PRAXIS, + * - Nelder-Mead simplex, + * - sbplx, + * - MMA (Method of Moving Asymptotes), + * - CCSA, + * - SLSQP, + * - low-storage BFGS, + * - preconditioned truncated Newton, + * - shifted limited-memory variable-metric. + * + * The desired NLopt solver is selected upon construction of a pagmo::nlopt algorithm. Various properties + * of the solver (e.g., the stopping criteria) can be configured after construction via methods provided + * by this class. Note that multiple stopping criteria can be active at the same time: the optimisation will + * stop as soon as at least one stopping criterion is satisfied. By default, only the ``xtol_rel`` stopping + * criterion is active (see get_xtol_rel()). + * + * All NLopt solvers support only single-objective optimisation, and, as usual in pagmo, minimisation + * is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient. + * Some solvers support equality and/or inequality constaints. + * + * In order to support pagmo's population-based optimisation model, nlopt::evolve() will select + * a single individual from the input pagmo::population to be optimised by the NLopt solver. + * The optimised individual will then be inserted back into the population at the end of the optimisation. + * The selection and replacement strategies can be configured via set_selection(const std::string &), + * set_selection(population::size_type), set_replacement(const std::string &) and + * set_replacement(population::size_type). + * + * \verbatim embed:rst:leading-asterisk + * .. note:: + * + * This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_NLOPT`` option + * enabled (see the :ref:`installation instructions `). + * + * .. seealso:: + * + * The `NLopt website `_ contains a detailed description + * of each supported solver. + * + * \endverbatim + * + * **NOTE**: a moved-from pagmo::nlopt is destructible and assignable. Any other operation will result + * in undefined behaviour. + */ +// TODO: +// - investigate the use of a fitness cache, after we have good perf testing in place. +class nlopt +{ + using nlopt_obj = detail::nlopt_obj; + using nlopt_data = detail::nlopt_data<>; + +public: + /// Single data line for the algorithm's log. + /** + * A log data line is a tuple consisting of: + * - the number of objective function evaluations made so far, + * - the objective function value for the current decision vector, + * - the number of constraints violated by the current decision vector, + * - the constraints violation norm for the current decision vector, + * - a boolean flag signalling the feasibility of the current decision vector. + */ + using log_line_type = std::tuple; + /// Log type. + /** + * The algorithm log is a collection of nlopt::log_line_type data lines, stored in chronological order + * during the optimisation if the verbosity of the algorithm is set to a nonzero value + * (see nlopt::set_verbosity()). + */ + using log_type = std::vector; + +private: + static_assert(std::is_same::value, "Invalid log line type."); + +public: + /// Default constructor. + /** + * The default constructor initialises the pagmo::nlopt algorithm with the ``"cobyla"`` solver, + * the ``"best"`` individual selection strategy and the ``"best"`` individual replacement strategy. + * + * @throws unspecified any exception thrown by pagmo::nlopt(const std::string &). 
+ */ + nlopt() : nlopt("cobyla") + { + } + /// Constructor from solver name. + /** + * This constructor will initialise a pagmo::nlopt object which will use the NLopt algorithm specified by + * the input string \p algo, the ``"best"`` individual selection strategy and the ``"best"`` individual + * replacement strategy. \p algo is translated to an NLopt algorithm type according to the following + * translation table: + * \verbatim embed:rst:leading-asterisk + * ================================ ==================================== + * ``algo`` string NLopt algorithm + * ================================ ==================================== + * ``"cobyla"`` ``NLOPT_LN_COBYLA`` + * ``"bobyqa"`` ``NLOPT_LN_BOBYQA`` + * ``"newuoa"`` ``NLOPT_LN_NEWUOA`` + * ``"newuoa_bound"`` ``NLOPT_LN_NEWUOA_BOUND`` + * ``"praxis"`` ``NLOPT_LN_PRAXIS`` + * ``"neldermead"`` ``NLOPT_LN_NELDERMEAD`` + * ``"sbplx"`` ``NLOPT_LN_SBPLX`` + * ``"mma"`` ``NLOPT_LD_MMA`` + * ``"ccsaq"`` ``NLOPT_LD_CCSAQ`` + * ``"slsqp"`` ``NLOPT_LD_SLSQP`` + * ``"lbfgs"`` ``NLOPT_LD_LBFGS`` + * ``"tnewton_precond_restart"`` ``NLOPT_LD_TNEWTON_PRECOND_RESTART`` + * ``"tnewton_precond"`` ``NLOPT_LD_TNEWTON_PRECOND`` + * ``"tnewton_restart"`` ``NLOPT_LD_TNEWTON_RESTART`` + * ``"tnewton"`` ``NLOPT_LD_TNEWTON`` + * ``"var2"`` ``NLOPT_LD_VAR2`` + * ``"var1"`` ``NLOPT_LD_VAR1`` + * ================================ ==================================== + * \endverbatim + * The parameters of the selected algorithm can be specified via the methods of this class. + * + * \verbatim embed:rst:leading-asterisk + * .. seealso:: + * + * The `NLopt website `_ contains a detailed + * description of each supported solver. + * + * \endverbatim + * + * @param algo the name of the NLopt algorithm that will be used by this pagmo::nlopt object. + * + * @throws std::runtime_error if the NLopt version is not at least 2. + * @throws std::invalid_argument if \p algo is not one of the allowed algorithm names. + */ + explicit nlopt(const std::string &algo) + : m_algo(algo), m_select(std::string("best")), m_replace(std::string("best")), + m_rselect_seed(random_device::next()), m_e(static_cast(m_rselect_seed)) + { + // Check version. + int major, minor, bugfix; + ::nlopt_version(&major, &minor, &bugfix); + if (major < 2) { + pagmo_throw(std::runtime_error, "Only NLopt version >= 2 is supported"); // LCOV_EXCL_LINE + } + + // Check the algorithm. + if (nlopt_data::names.left.find(m_algo) == nlopt_data::names.left.end()) { + // The selected algorithm is unknown or not among the supported ones. + std::ostringstream oss; + std::transform(nlopt_data::names.left.begin(), nlopt_data::names.left.end(), + std::ostream_iterator(oss, "\n"), + [](const uncvref_t &v) { return v.first; }); + pagmo_throw(std::invalid_argument, "unknown/unsupported NLopt algorithm '" + algo + + "'. The supported algorithms are:\n" + oss.str()); + } + } + /// Set the seed for the ``"random"`` selection/replacement policies. + /** + * @param seed the value that will be used to seed the random number generator used by the ``"random"`` + * selection/replacement policies. + */ + void set_random_sr_seed(unsigned seed) + { + m_rselect_seed = seed; + m_e.seed(static_cast(m_rselect_seed)); + } + /// Set the individual selection policy. + /** + * This method will set the policy that is used in evolve() to select the individual + * that will be optimised. 
+ * + * The input string must be one of ``"best"``, ``"worst"`` and ``"random"``: + * - ``"best"`` will select the best individual in the population, + * - ``"worst"`` will select the worst individual in the population, + * - ``"random"`` will randomly choose one individual in the population. + * + * set_random_sr_seed() can be used to seed the random number generator used by the ``"random"`` policy. + * + * Instead of a selection policy, a specific individual in the population can be selected via + * set_selection(population::size_type). + * + * @param select the selection policy. + * + * @throws std::invalid_argument if \p select is not one of ``"best"``, ``"worst"`` or ``"random"``. + */ + void set_selection(const std::string &select) + { + if (select != "best" && select != "worst" && select != "random") { + pagmo_throw(std::invalid_argument, + "the individual selection policy must be one of ['best', 'worst', 'random'], but '" + select + + "' was provided instead"); + } + m_select = select; + } + /// Set the individual selection index. + /** + * This method will set the index of the individual that is selected for optimisation + * in evolve(). + * + * @param n the index in the population of the individual to be selected for optimisation. + */ + void set_selection(population::size_type n) + { + m_select = n; + } + /// Get the individual selection policy or index. + /** + * This method will return a \p boost::any containing either the individual selection policy (as an \p std::string) + * or the individual selection index (as a population::size_type). The selection policy or index is set via + * set_selection(const std::string &) and set_selection(population::size_type). + * + * @return the individual selection policy or index. + */ + boost::any get_selection() const + { + return m_select; + } + /// Set the individual replacement policy. + /** + * This method will set the policy that is used in evolve() to select the individual + * that will be replaced by the optimised individual. + * + * The input string must be one of ``"best"``, ``"worst"`` and ``"random"``: + * - ``"best"`` will select the best individual in the population, + * - ``"worst"`` will select the worst individual in the population, + * - ``"random"`` will randomly choose one individual in the population. + * + * set_random_sr_seed() can be used to seed the random number generator used by the ``"random"`` policy. + * + * Instead of a replacement policy, a specific individual in the population can be selected via + * set_replacement(population::size_type). + * + * @param replace the replacement policy. + * + * @throws std::invalid_argument if \p replace is not one of ``"best"``, ``"worst"`` or ``"random"``. + */ + void set_replacement(const std::string &replace) + { + if (replace != "best" && replace != "worst" && replace != "random") { + pagmo_throw(std::invalid_argument, + "the individual replacement policy must be one of ['best', 'worst', 'random'], but '" + replace + + "' was provided instead"); + } + m_replace = replace; + } + /// Set the individual replacement index. + /** + * This method will set the index of the individual that is replaced after the optimisation + * in evolve(). + * + * @param n the index in the population of the individual to be replaced after the optimisation. + */ + void set_replacement(population::size_type n) + { + m_replace = n; + } + /// Get the individual replacement policy or index. 
+ /** + * This method will return a \p boost::any containing either the individual replacement policy (as an \p + * std::string) or the individual replacement index (as a population::size_type). The replacement policy or index is + * set via set_replacement(const std::string &) and set_replacement(population::size_type). + * + * @return the individual replacement policy or index. + */ + boost::any get_replacement() const + { + return m_replace; + } + /// Evolve population. + /** + * This method will select an individual from \p pop, optimise it with the NLopt algorithm specified upon + * construction, replace an individual in \p pop with the optimised individual, and finally return \p pop. + * The individual selection and replacement criteria can be set via set_selection(const std::string &), + * set_selection(population::size_type), set_replacement(const std::string &) and + * set_replacement(population::size_type). The NLopt solver will run until one of the stopping criteria + * is satisfied, and the return status of the NLopt solver will be recorded (it can be fetched with + * get_last_opt_result()). + * + * @param pop the population to be optimised. + * + * @return the optimised population. + * + * @throws std::invalid_argument in the following cases: + * - the population's problem is multi-objective, + * - the setup of the NLopt algorithm fails (e.g., if the problem is constrained but the selected + * NLopt solver does not support constrained optimisation), + * - the selected NLopt solver needs gradients but they are not provided by the population's + * problem, + * - the components of the individual selected for optimisation contain NaNs or they are outside + * the problem's bounds, + * - the individual selection/replacement index is not smaller than the population's size. + * @throws unspecified any exception thrown by the public interface of pagmo::problem. + */ + population evolve(population pop) const + { + if (!pop.size()) { + // In case of an empty pop, just return it. + return pop; + } + + auto &prob = pop.get_problem(); + const auto nc = prob.get_nc(); + + // Create the nlopt obj. + // NOTE: this will check also the problem's properties. + nlopt_obj no(nlopt_data::names.left.at(m_algo), prob, m_sc_stopval, m_sc_ftol_rel, m_sc_ftol_abs, m_sc_xtol_rel, + m_sc_xtol_abs, m_sc_maxeval, m_sc_maxtime, m_verbosity); + + // Setup of the initial guess. + vector_double initial_guess; + if (boost::any_cast(&m_select)) { + const auto &s_select = boost::any_cast(m_select); + if (s_select == "best") { + initial_guess = pop.get_x()[pop.best_idx()]; + } else if (s_select == "worst") { + initial_guess = pop.get_x()[pop.worst_idx()]; + } else { + assert(s_select == "random"); + std::uniform_int_distribution dist(0, pop.size() - 1u); + initial_guess = pop.get_x()[dist(m_e)]; + } + } else { + const auto idx = boost::any_cast(m_select); + if (idx >= pop.size()) { + pagmo_throw(std::invalid_argument, "cannot select the individual at index " + std::to_string(idx) + + " for evolution: the population has a size of only " + + std::to_string(pop.size())); + } + initial_guess = pop.get_x()[idx]; + } + // Check the initial guess. + // NOTE: this should be guaranteed by the population's invariants. 
+ assert(initial_guess.size() == prob.get_nx()); + const auto bounds = prob.get_bounds(); + for (decltype(bounds.first.size()) i = 0; i < bounds.first.size(); ++i) { + if (std::isnan(initial_guess[i])) { + pagmo_throw(std::invalid_argument, + "the value of the initial guess at index " + std::to_string(i) + " is NaN"); + } + if (initial_guess[i] < bounds.first[i] || initial_guess[i] > bounds.second[i]) { + pagmo_throw(std::invalid_argument, "the value of the initial guess at index " + std::to_string(i) + + " is outside the problem's bounds"); + } + } + + // Run the optimisation and store the status returned by NLopt. + double fitness; + m_last_opt_result = ::nlopt_optimize(no.m_value.get(), initial_guess.data(), &fitness); + if (m_verbosity) { + // Print to screen the result of the optimisation, if we are being verbose. + std::cout << "\nOptimisation return status: " << detail::nlopt_res2string(m_last_opt_result) << '\n'; + } + // Replace the log. + m_log = std::move(no.m_log); + + // Handle any exception that might've been thrown. + if (no.m_eptr) { + std::rethrow_exception(no.m_eptr); + } + + // Store the new individual into the population. + if (boost::any_cast(&m_replace)) { + const auto &s_replace = boost::any_cast(m_replace); + if (s_replace == "best") { + if (nc) { + pop.set_x(pop.best_idx(), initial_guess); + } else { + pop.set_xf(pop.best_idx(), initial_guess, {fitness}); + } + } else if (s_replace == "worst") { + if (nc) { + pop.set_x(pop.worst_idx(), initial_guess); + } else { + pop.set_xf(pop.worst_idx(), initial_guess, {fitness}); + } + } else { + assert(s_replace == "random"); + std::uniform_int_distribution dist(0, pop.size() - 1u); + if (nc) { + pop.set_x(dist(m_e), initial_guess); + } else { + pop.set_xf(dist(m_e), initial_guess, {fitness}); + } + } + } else { + const auto idx = boost::any_cast(m_replace); + if (idx >= pop.size()) { + pagmo_throw(std::invalid_argument, "cannot replace the individual at index " + std::to_string(idx) + + " after evolution: the population has a size of only " + + std::to_string(pop.size())); + } + if (nc) { + pop.set_x(idx, initial_guess); + } else { + pop.set_xf(idx, initial_guess, {fitness}); + } + } + + // Return the evolved pop. + return pop; + } + /// Algorithm's name. + /** + * @return a human-readable name for the algorithm. + */ + std::string get_name() const + { + return "NLopt - " + m_algo; + } + /// Set verbosity. + /** + * This method will set the algorithm's verbosity. If \p n is zero, no output is produced during the optimisation + * and no logging is performed. If \p n is nonzero, then every \p n objective function evaluations the status + * of the optimisation will be both printed to screen and recorded internally. See nlopt::log_line_type and + * nlopt::log_type for information on the logging format. The internal log can be fetched via get_log(). + * + * Example (verbosity 5): + * @code{.unparsed} + * fevals: fitness: violated: viol. norm: + * 1 47.9474 1 2.07944 i + * 6 17.1986 2 0.150557 i + * 11 17.014 0 0 + * 16 17.014 0 0 + * @endcode + * The ``i`` at the end of some rows indicates that the decision vector is infeasible. Feasibility + * is checked against the problem's tolerance. + * + * By default, the verbosity level is zero. + * + * @param n the desired verbosity level. + */ + void set_verbosity(unsigned n) + { + m_verbosity = n; + } + /// Get extra information about the algorithm. 
+ /** + * @return a human-readable string containing useful information about the algorithm's properties + * (e.g., the stopping criteria, the selection/replacement policies, etc.). + */ + std::string get_extra_info() const + { + int major, minor, bugfix; + ::nlopt_version(&major, &minor, &bugfix); + return "\tNLopt version: " + std::to_string(major) + "." + std::to_string(minor) + "." + std::to_string(bugfix) + + "\n\tLast optimisation return code: " + detail::nlopt_res2string(m_last_opt_result) + "\n\tVerbosity: " + + std::to_string(m_verbosity) + "\n\tIndividual selection " + + (boost::any_cast(&m_select) + ? "idx: " + std::to_string(boost::any_cast(m_select)) + : "policy: " + boost::any_cast(m_select)) + + "\n\tIndividual replacement " + + (boost::any_cast(&m_replace) + ? "idx: " + std::to_string(boost::any_cast(m_replace)) + : "policy: " + boost::any_cast(m_replace)) + + "\n\tStopping criteria:\n\t\tstopval: " + + (m_sc_stopval == -HUGE_VAL ? "disabled" : detail::to_string(m_sc_stopval)) + "\n\t\tftol_rel: " + + (m_sc_ftol_rel <= 0. ? "disabled" : detail::to_string(m_sc_ftol_rel)) + "\n\t\tftol_abs: " + + (m_sc_ftol_abs <= 0. ? "disabled" : detail::to_string(m_sc_ftol_abs)) + "\n\t\txtol_rel: " + + (m_sc_xtol_rel <= 0. ? "disabled" : detail::to_string(m_sc_xtol_rel)) + "\n\t\txtol_abs: " + + (m_sc_xtol_abs <= 0. ? "disabled" : detail::to_string(m_sc_xtol_abs)) + "\n\t\tmaxeval: " + + (m_sc_maxeval <= 0. ? "disabled" : detail::to_string(m_sc_maxeval)) + "\n\t\tmaxtime: " + + (m_sc_maxtime <= 0. ? "disabled" : detail::to_string(m_sc_maxtime)) + "\n"; + } + /// Get the optimisation log. + /** + * See nlopt::log_type for a description of the optimisation log. Logging is turned on/off via + * set_verbosity(). + * + * @return a const reference to the log. + */ + const log_type &get_log() const + { + return m_log; + } + /// Get the name of the solver that was used to construct this pagmo::nlopt algorithm. + /** + * @return the name of the NLopt solver used upon construction. + */ + std::string get_solver_name() const + { + return m_algo; + } + /// Get the result of the last optimisation. + /** + * @return the result of the last evolve() call, or ``NLOPT_SUCCESS`` if no optimisations have been + * run yet. + */ + ::nlopt_result get_last_opt_result() const + { + return m_last_opt_result; + } + /// Get the ``stopval`` stopping criterion. + /** + * The ``stopval`` stopping criterion instructs the solver to stop when an objective value less than + * or equal to ``stopval`` is found. Defaults to the C constant ``-HUGE_VAL`` (that is, this stopping criterion + * is disabled by default). + * + * @return the ``stopval`` stopping criterion for this pagmo::nlopt. + */ + double get_stopval() const + { + return m_sc_stopval; + } + /// Set the ``stopval`` stopping criterion. + /** + * @param stopval the desired value for the ``stopval`` stopping criterion (see get_stopval()). + * + * @throws std::invalid_argument if \p stopval is NaN. + */ + void set_stopval(double stopval) + { + if (std::isnan(stopval)) { + pagmo_throw(std::invalid_argument, "The 'stopval' stopping criterion cannot be NaN"); + } + m_sc_stopval = stopval; + } + /// Get the ``ftol_rel`` stopping criterion. + /** + * The ``ftol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the + * optimum) changes the objective function value by less than ``ftol_rel`` multiplied by the absolute value of the + * function value. Defaults to 0 (that is, this stopping criterion is disabled by default). 
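+     *
+     * For example, with ``ftol_rel`` set to 1E-6 the solver stops as soon as a step (or an estimate of the
+     * optimum) changes the objective function value by less than roughly one part in a million of its
+     * current magnitude.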
+ * + * @return the ``ftol_rel`` stopping criterion for this pagmo::nlopt. + */ + double get_ftol_rel() const + { + return m_sc_ftol_rel; + } + /// Set the ``ftol_rel`` stopping criterion. + /** + * @param ftol_rel the desired value for the ``ftol_rel`` stopping criterion (see get_ftol_rel()). + * + * @throws std::invalid_argument if \p ftol_rel is NaN. + */ + void set_ftol_rel(double ftol_rel) + { + if (std::isnan(ftol_rel)) { + pagmo_throw(std::invalid_argument, "The 'ftol_rel' stopping criterion cannot be NaN"); + } + m_sc_ftol_rel = ftol_rel; + } + /// Get the ``ftol_abs`` stopping criterion. + /** + * The ``ftol_abs`` stopping criterion instructs the solver to stop when an optimization step + * (or an estimate of the optimum) changes the function value by less than ``ftol_abs``. + * Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``ftol_abs`` stopping criterion for this pagmo::nlopt. + */ + double get_ftol_abs() const + { + return m_sc_ftol_abs; + } + /// Set the ``ftol_abs`` stopping criterion. + /** + * @param ftol_abs the desired value for the ``ftol_abs`` stopping criterion (see get_ftol_abs()). + * + * @throws std::invalid_argument if \p ftol_abs is NaN. + */ + void set_ftol_abs(double ftol_abs) + { + if (std::isnan(ftol_abs)) { + pagmo_throw(std::invalid_argument, "The 'ftol_abs' stopping criterion cannot be NaN"); + } + m_sc_ftol_abs = ftol_abs; + } + /// Get the ``xtol_rel`` stopping criterion. + /** + * The ``xtol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the + * optimum) changes every parameter by less than ``xtol_rel`` multiplied by the absolute value of the parameter. + * Defaults to 1E-8. + * + * @return the ``xtol_rel`` stopping criterion for this pagmo::nlopt. + */ + double get_xtol_rel() const + { + return m_sc_xtol_rel; + } + /// Set the ``xtol_rel`` stopping criterion. + /** + * @param xtol_rel the desired value for the ``xtol_rel`` stopping criterion (see get_xtol_rel()). + * + * @throws std::invalid_argument if \p xtol_rel is NaN. + */ + void set_xtol_rel(double xtol_rel) + { + if (std::isnan(xtol_rel)) { + pagmo_throw(std::invalid_argument, "The 'xtol_rel' stopping criterion cannot be NaN"); + } + m_sc_xtol_rel = xtol_rel; + } + /// Get the ``xtol_abs`` stopping criterion. + /** + * The ``xtol_abs`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the + * optimum) changes every parameter by less than ``xtol_abs``. + * Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``xtol_abs`` stopping criterion for this pagmo::nlopt. + */ + double get_xtol_abs() const + { + return m_sc_xtol_abs; + } + /// Set the ``xtol_abs`` stopping criterion. + /** + * @param xtol_abs the desired value for the ``xtol_abs`` stopping criterion (see get_xtol_abs()). + * + * @throws std::invalid_argument if \p xtol_abs is NaN. + */ + void set_xtol_abs(double xtol_abs) + { + if (std::isnan(xtol_abs)) { + pagmo_throw(std::invalid_argument, "The 'xtol_abs' stopping criterion cannot be NaN"); + } + m_sc_xtol_abs = xtol_abs; + } + /// Get the ``maxeval`` stopping criterion. + /** + * The ``maxeval`` stopping criterion instructs the solver to stop when the number of function evaluations exceeds + * ``maxeval``. Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``maxeval`` stopping criterion for this pagmo::nlopt. 
+ */ + int get_maxeval() const + { + return m_sc_maxeval; + } + /// Set the ``maxeval`` stopping criterion. + /** + * @param n the desired value for the ``maxeval`` stopping criterion (see get_maxeval()). + */ + void set_maxeval(int n) + { + m_sc_maxeval = n; + } + /// Get the ``maxtime`` stopping criterion. + /** + * The ``maxtime`` stopping criterion instructs the solver to stop when the optimization time (in seconds) exceeds + * ``maxtime``. Defaults to 0 (that is, this stopping criterion is disabled by default). + * + * @return the ``maxtime`` stopping criterion for this pagmo::nlopt. + */ + int get_maxtime() const + { + return m_sc_maxtime; + } + /// Set the ``maxtime`` stopping criterion. + /** + * @param n the desired value for the ``maxtime`` stopping criterion (see get_maxtime()). + */ + void set_maxtime(int n) + { + m_sc_maxtime = n; + } + +private: + std::string m_algo; + boost::any m_select; + boost::any m_replace; + unsigned m_rselect_seed; + mutable detail::random_engine_type m_e; + mutable ::nlopt_result m_last_opt_result = NLOPT_SUCCESS; + // Stopping criteria. + double m_sc_stopval = -HUGE_VAL; + double m_sc_ftol_rel = 0.; + double m_sc_ftol_abs = 0.; + double m_sc_xtol_rel = 1E-8; + double m_sc_xtol_abs = 0.; + int m_sc_maxeval = 0; + int m_sc_maxtime = 0; + // Verbosity/log. + unsigned m_verbosity = 0; + mutable log_type m_log; +}; +} + +#else // PAGMO_WITH_NLOPT + +#error The nlopt.hpp header was included, but pagmo was not compiled with NLopt support + +#endif // PAGMO_WITH_NLOPT + +#endif diff --git a/include/pagmo/pagmo.hpp b/include/pagmo/pagmo.hpp new file mode 100644 index 000000000..ec3c40370 --- /dev/null +++ b/include/pagmo/pagmo.hpp @@ -0,0 +1,93 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. + +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. 
*/ + +#ifndef PAGMO_PAGMO_HPP +#define PAGMO_PAGMO_HPP + +#include + +#include +#include +#if defined(PAGMO_WITH_EIGEN3) +#include +#endif +#include +#include +#include +#include +#include +#include +#if defined(PAGMO_WITH_NLOPT) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(_MSC_VER) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif diff --git a/include/pagmo/problem.hpp b/include/pagmo/problem.hpp index 9cdfb5bee..9b81b162a 100644 --- a/include/pagmo/problem.hpp +++ b/include/pagmo/problem.hpp @@ -1864,7 +1864,7 @@ class problem os << "\tEquality constraints dimension:\t\t" << p.get_nec() << '\n'; os << "\tInequality constraints dimension:\t" << p.get_nic() << '\n'; if (p.get_nec() + p.get_nic() > 0u) { - stream(os, "\tTolerances on constraints:\t", p.get_c_tol(), '\n'); + stream(os, "\tTolerances on constraints: ", p.get_c_tol(), '\n'); } os << "\tLower bounds: "; stream(os, p.get_bounds().first, '\n'); diff --git a/include/pagmo/problems/luksan_vlcek1.hpp b/include/pagmo/problems/luksan_vlcek1.hpp index 5291676e0..39d48faf5 100644 --- a/include/pagmo/problems/luksan_vlcek1.hpp +++ b/include/pagmo/problems/luksan_vlcek1.hpp @@ -118,20 +118,15 @@ struct luksan_vlcek1 { */ std::pair get_bounds() const { - vector_double lb(m_dim, -5.); - vector_double ub(m_dim, 5.); - return {lb, ub}; + return std::make_pair(vector_double(m_dim, -5.), vector_double(m_dim, 5.)); } /// Equality constraint dimension /** - * - * It returns the number of equality constraints. - * * @return the number of equality constraints. */ vector_double::size_type get_nec() const { - return (m_dim - 2); + return m_dim - 2u; } /// Gradients /** @@ -183,7 +178,7 @@ struct luksan_vlcek1 { for (decltype(m_dim) i = 0u; i < m_dim; ++i) { retval.emplace_back(0, i); } - // The part relative to the inequality constraints is sparse as each + // The part relative to the equality constraints is sparse as each // constraint c_k depends on x_k, x_{k+1} and x_{k+2} for (decltype(m_dim) i = 0u; i < m_dim - 2u; ++i) { retval.emplace_back(i + 1, i); diff --git a/include/pagmo/problems/rosenbrock.hpp b/include/pagmo/problems/rosenbrock.hpp index 23479d676..3e521eb8b 100644 --- a/include/pagmo/problems/rosenbrock.hpp +++ b/include/pagmo/problems/rosenbrock.hpp @@ -59,10 +59,11 @@ namespace pagmo struct rosenbrock { /// Constructor from dimension /** - * @param dim problem dimension - * @throw std::invalid_argument if \p dim is less than 2 + * @param dim problem dimension. + * + * @throw std::invalid_argument if \p dim is less than 2. */ - rosenbrock(unsigned int dim = 2u) : m_dim(dim) + rosenbrock(vector_double::size_type dim = 2u) : m_dim(dim) { if (dim < 2u) { pagmo_throw(std::invalid_argument, @@ -71,7 +72,7 @@ struct rosenbrock { }; /// Fitness computation /** - * Computes the fitness for this UDP + * Computes the fitness for this UDP. * * @param x the decision vector. * @@ -79,37 +80,47 @@ struct rosenbrock { */ vector_double fitness(const vector_double &x) const { - vector_double f(1, 0.); + double retval = 0.; for (decltype(m_dim) i = 0u; i < m_dim - 1u; ++i) { - f[0] += 100. 
* (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1]) + (x[i] - 1) * (x[i] - 1); + retval += 100. * (x[i] * x[i] - x[i + 1]) * (x[i] * x[i] - x[i + 1]) + (x[i] - 1) * (x[i] - 1); } - return f; + return {retval}; } /// Box-bounds /** - * - * It returns the box-bounds for this UDP. - * - * @return the lower and upper bounds for each of the decision vector components + * @return the lower (-5.) and upper (10.) bounds for each decision vector component. */ std::pair get_bounds() const { - vector_double lb(m_dim, -5.); - vector_double ub(m_dim, 10.); - return {lb, ub}; + return {vector_double(m_dim, -5.), vector_double(m_dim, 10.)}; } /// Problem name /** - * - * - * @return a string containing the problem name + * @return a string containing the problem name. */ std::string get_name() const { return "Multidimensional Rosenbrock Function"; } - /// Optimal solution + /// Gradient. + /** + * @param x the input decision vector. + * + * @return the gradient of the fitness function in \p x. + */ + vector_double gradient(const vector_double &x) const + { + vector_double retval(m_dim); + retval[0] = -400. * x[0] * (x[1] - x[0] * x[0]) - 2. * (1 - x[0]); + for (unsigned i = 1; i < m_dim - 1u; ++i) { + retval[i] + = -400. * x[i] * (x[i + 1u] - x[i] * x[i]) - 2. * (1 - x[i]) + 200. * (x[i] - x[i - 1u] * x[i - 1u]); + } + retval[m_dim - 1u] = 200. * (x[m_dim - 1u] - x[m_dim - 2u] * x[m_dim - 2u]); + return retval; + } + /// Optimal solution. /** * @return the decision vector corresponding to the best solution for this problem. */ @@ -131,7 +142,7 @@ struct rosenbrock { ar(m_dim); } /// Problem dimensions - unsigned int m_dim; + vector_double::size_type m_dim; }; } // namespace pagmo diff --git a/include/pagmo/serialization.hpp b/include/pagmo/serialization.hpp index 7d32981ad..4ed41e760 100644 --- a/include/pagmo/serialization.hpp +++ b/include/pagmo/serialization.hpp @@ -57,6 +57,8 @@ see https://www.gnu.org/licenses/. */ #pragma GCC diagnostic pop #endif +#include + #include #include #include diff --git a/pygmo/core.cpp b/pygmo/core.cpp index 442570fe7..5e3e62adc 100644 --- a/pygmo/core.cpp +++ b/pygmo/core.cpp @@ -460,7 +460,6 @@ BOOST_PYTHON_MODULE(core) .add_property("problem", bp::make_function(lcast([](population &pop) -> problem & { return pop.get_problem(); }), bp::return_internal_reference<>()), - lcast([](population &pop, const problem &p) { pop.get_problem() = p; }), pygmo::population_problem_docstring().c_str()) .def("get_f", lcast([](const population &pop) { return pygmo::vv_to_a(pop.get_f()); }), pygmo::population_get_f_docstring().c_str()) diff --git a/pygmo/docstrings.cpp b/pygmo/docstrings.cpp index d37b451c3..e2fdd4c69 100644 --- a/pygmo/docstrings.cpp +++ b/pygmo/docstrings.cpp @@ -228,15 +228,11 @@ std::string population_problem_docstring() { return R"(Population's problem. -This property gives direct access to the :class:`~pygmo.core.problem` stored within the population. +This read-only property gives direct access to the :class:`~pygmo.core.problem` stored within the population. Returns: :class:`~pygmo.core.problem`: a reference to the internal problem -Raises: - unspecified: any exception thrown by failures at the intersection between C++ and - Python (e.g., type conversion errors, mismatched function signatures, etc.) when setting the property - )"; } @@ -1368,7 +1364,7 @@ std::string generic_uda_inner_algorithm_docstring() return R"(Inner algorithm of the meta-algorithm -This property gives direct access to the :class:`~pygmo.core.algorithm` stored within the meta-algorithm. 
+This read-only property gives direct access to the :class:`~pygmo.core.algorithm` stored within the meta-algorithm. Returns: :class:`~pygmo.core.algorithm`: a reference to the inner algorithm @@ -1381,7 +1377,7 @@ std::string generic_udp_inner_problem_docstring() return R"(Inner problem of the meta-problem -This property gives direct access to the :class:`~pygmo.core.problem` stored within the meta-problem. +This read-only property gives direct access to the :class:`~pygmo.core.problem` stored within the meta-problem. Returns: :class:`~pygmo.core.problem`: a reference to the inner problem @@ -1567,7 +1563,7 @@ objective function. Using the above definitions the overall pseudo code can be s **NOTE** Self-adaptive constraints handling implements an internal cache to avoid the re-evaluation of the fitness for decision vectors already evaluated. This makes the final counter of function evaluations somehow unpredictable. -The number of function evaluation will be bounded to \p iters times the fevals made by one call to the inner UDA. The +The number of function evaluation will be bounded to *iters* times the fevals made by one call to the inner UDA. The internal cache is reset at each iteration, but its size will grow unlimited during each call to the inner UDA evolve method. @@ -1816,12 +1812,11 @@ Its formulation in pagmo can be written as: .. math:: \begin{array}{rl} - \mbox{find:} & -5 \le \mathbf x_i \le 5, \forall i=1..n\\ - \mbox{to minimize: } & \sum_{i=1}^{n-1}\left[100\left(x_i^2-x_{i+1}\right)^2 + \left(x_i-1\right)^2\right]\\ - \mbox{subject to:} & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2}})\sin(x_{k+1}+x_{k+2}}) - +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 \le UB, \forall k=1..n-2 \\ - & 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2}})\sin(x_{k+1}+x_{k+2}}) - +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 \ge LB, \forall k=1..n-2 \\ + \mbox{find:} & -5 \le x_i \le 5, \forall i=1..n \\ + \mbox{to minimize: } & \sum_{i=1}^{n-1}\left[100\left(x_i^2-x_{i+1}\right)^2 + \left(x_i-1\right)^2\right] \\ + \mbox{subject to:} & + 3x_{k+1}^3+2x_{k+2}-5+\sin(x_{k+1}-x_{k+2})\sin(x_{k+1}+x_{k+2}) + \\ + & +4x_{k+1}-x_k\exp(x_k-x_{k+1})-3 = 0, \forall k=1..n-2 \end{array} See: Luksan, L., and Jan Vlcek. "Sparse and partially separable test problems for unconstrained and equality @@ -2891,7 +2886,7 @@ The numerical approximation of each derivative is made by central difference, ac .. math:: \frac{df}{dx} \approx \frac{f(x+dx) - f(x-dx)}{2dx} + O(dx^2) -The overall cost, in terms of calls to \p f will thus be :math:`n` where :math:`n` is the size of \p x. +The overall cost, in terms of calls to *callable* will thus be :math:`n` where :math:`n` is the size of *x*. Args: callable (a callable object): The function we want to estimate sparsity (typically a fitness). @@ -2934,7 +2929,7 @@ The numerical approximation of each derivative is made by central difference, ac .. math:: m_i = \frac{f(x + i dx) - f(x-i dx)}{2i dx} -The overall cost, in terms of calls to \p f will thus be 6:math:`n` where :math:`n` is the size of \p x. +The overall cost, in terms of calls to *callable* will thus be 6:math:`n` where :math:`n` is the size of *x*. Args: callable (a callable object): The function we want to estimate sparsity (typically a fitness). @@ -3601,4 +3596,416 @@ inserted via :func:`~pygmo.core.archipelago.push_back()`). )"; } +std::string nlopt_docstring() +{ + return R"(__init__(solver = "cobyla") + +NLopt algorithms. 
+
+This user-defined algorithm wraps a selection of solvers from the
+`NLopt `_ library, focusing on
+local optimisation (both gradient-based and derivative-free). The complete list of supported
+NLopt algorithms is:
+
+* COBYLA,
+* BOBYQA,
+* NEWUOA + bound constraints,
+* PRAXIS,
+* Nelder-Mead simplex,
+* sbplx,
+* MMA (Method of Moving Asymptotes),
+* CCSA,
+* SLSQP,
+* low-storage BFGS,
+* preconditioned truncated Newton,
+* shifted limited-memory variable-metric.
+
+The desired NLopt solver is selected upon construction of an :class:`~pygmo.core.nlopt` algorithm. Various properties
+of the solver (e.g., the stopping criteria) can be configured via class attributes. Note that multiple
+stopping criteria can be active at the same time: the optimisation will stop as soon as at least one stopping criterion
+is satisfied. By default, only the ``xtol_rel`` stopping criterion is active (see :attr:`~pygmo.core.nlopt.xtol_rel`).
+
+All NLopt solvers support only single-objective optimisation, and, as usual in pagmo, minimisation
+is always assumed. The gradient-based algorithms require the optimisation problem to provide a gradient.
+Some solvers support equality and/or inequality constraints.
+
+In order to support pagmo's population-based optimisation model, the ``evolve()`` method will select
+a single individual from the input :class:`~pygmo.core.population` to be optimised by the NLopt solver.
+The optimised individual will then be inserted back into the population at the end of the optimisation.
+The selection and replacement strategies can be configured via the :attr:`~pygmo.core.nlopt.selection`
+and :attr:`~pygmo.core.nlopt.replacement` attributes.
+
+.. note::
+
+   This user-defined algorithm is available only if pagmo was compiled with the ``PAGMO_WITH_NLOPT`` option
+   enabled (see the :ref:`installation instructions `).
+
+.. seealso::
+
+   The `NLopt website `_ contains a detailed description
+   of each supported solver.
+
+This constructor will initialise an :class:`~pygmo.core.nlopt` object which will use the NLopt algorithm specified by
+the input string *solver*, the ``"best"`` individual selection strategy and the ``"best"`` individual
+replacement strategy. *solver* is translated to an NLopt algorithm type according to the following
+translation table:
+
+================================ ====================================
+*solver* string                  NLopt algorithm
+================================ ====================================
+``"cobyla"``                     ``NLOPT_LN_COBYLA``
+``"bobyqa"``                     ``NLOPT_LN_BOBYQA``
+``"newuoa"``                     ``NLOPT_LN_NEWUOA``
+``"newuoa_bound"``               ``NLOPT_LN_NEWUOA_BOUND``
+``"praxis"``                     ``NLOPT_LN_PRAXIS``
+``"neldermead"``                 ``NLOPT_LN_NELDERMEAD``
+``"sbplx"``                      ``NLOPT_LN_SBPLX``
+``"mma"``                        ``NLOPT_LD_MMA``
+``"ccsaq"``                      ``NLOPT_LD_CCSAQ``
+``"slsqp"``                      ``NLOPT_LD_SLSQP``
+``"lbfgs"``                      ``NLOPT_LD_LBFGS``
+``"tnewton_precond_restart"``    ``NLOPT_LD_TNEWTON_PRECOND_RESTART``
+``"tnewton_precond"``            ``NLOPT_LD_TNEWTON_PRECOND``
+``"tnewton_restart"``            ``NLOPT_LD_TNEWTON_RESTART``
+``"tnewton"``                    ``NLOPT_LD_TNEWTON``
+``"var2"``                       ``NLOPT_LD_VAR2``
+``"var1"``                       ``NLOPT_LD_VAR1``
+================================ ====================================
+
+The parameters of the selected solver can be configured via the attributes of this class.
+
+See also the docs of the C++ class :cpp:class:`pagmo::nlopt`.
+
+.. seealso::
+
+   The `NLopt website `_ contains a detailed
+   description of each supported solver.
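+
+The stopping criteria of the selected solver are exposed through the ``stopval``, ``ftol_rel``, ``ftol_abs``,
+``xtol_rel``, ``xtol_abs``, ``maxeval`` and ``maxtime`` attributes of this class.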
+ +Args: + solver (``str``): the name of the NLopt algorithm that will be used by this :class:`~pygmo.core.nlopt` object + +Raises: + RuntimeError: if the NLopt version is not at least 2 + ValueError: if *solver* is not one of the allowed algorithm names + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +Examples: + >>> from pygmo import * + >>> nl = nlopt('slsqp') + >>> nl.xtol_rel = 1E-6 # Change the default value of the xtol_rel stopping criterion + >>> nl.xtol_rel # doctest: +SKIP + 1E-6 + >>> algo = algorithm(nl) + >>> algo.set_verbosity(1) + >>> prob = problem(luksan_vlcek1(20)) + >>> prob.c_tol = [1E-6] * 18 # Set constraints tolerance to 1E-6 + >>> pop = population(prob, 20) + >>> pop = algo.evolve(pop) # doctest: +SKIP + fevals: fitness: violated: viol. norm: + 1 95959.4 18 538.227 i + 2 89282.7 18 5177.42 i + 3 75580 18 464.206 i + 4 75580 18 464.206 i + 5 77737.6 18 1095.94 i + 6 41162 18 350.446 i + 7 41162 18 350.446 i + 8 67881 18 362.454 i + 9 30502.2 18 249.762 i + 10 30502.2 18 249.762 i + 11 7266.73 18 95.5946 i + 12 4510.3 18 42.2385 i + 13 2400.66 18 35.2507 i + 14 34051.9 18 749.355 i + 15 1657.41 18 32.1575 i + 16 1657.41 18 32.1575 i + 17 1564.44 18 12.5042 i + 18 275.987 14 6.22676 i + 19 232.765 12 12.442 i + 20 161.892 15 4.00744 i + 21 161.892 15 4.00744 i + 22 17.6821 11 1.78909 i + 23 7.71103 5 0.130386 i + 24 6.24758 4 0.00736759 i + 25 6.23325 1 5.12547e-05 i + 26 6.2325 0 0 + 27 6.23246 0 0 + 28 6.23246 0 0 + 29 6.23246 0 0 + 30 6.23246 0 0 + + Optimisation return status: NLOPT_XTOL_REACHED (value = 4, Optimization stopped because xtol_rel or xtol_abs was reached) + + +)"; +} + +std::string nlopt_stopval_docstring() +{ + return R"(``stopval`` stopping criterion. + +The ``stopval`` stopping criterion instructs the solver to stop when an objective value less than +or equal to ``stopval`` is found. Defaults to the C constant ``-HUGE_VAL`` (that is, this stopping criterion +is disabled by default). + +Returns: + ``float``: the value of the ``stopval`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_ftol_rel_docstring() +{ + return R"(``ftol_rel`` stopping criterion. + +The ``ftol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the +optimum) changes the objective function value by less than ``ftol_rel`` multiplied by the absolute value of the +function value. Defaults to 0 (that is, this stopping criterion is disabled by default). + +Returns: + ``float``: the value of the ``ftol_rel`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_ftol_abs_docstring() +{ + return R"(``ftol_abs`` stopping criterion. + +The ``ftol_abs`` stopping criterion instructs the solver to stop when an optimization step +(or an estimate of the optimum) changes the function value by less than ``ftol_abs``. +Defaults to 0 (that is, this stopping criterion is disabled by default). 
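+
+For example, with ``ftol_abs`` set to 1E-9 the solver stops as soon as a step (or an estimate of the optimum)
+changes the objective function value by less than 1E-9 in absolute terms.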
+ +Returns: + ``float``: the value of the ``ftol_abs`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_xtol_rel_docstring() +{ + return R"(``xtol_rel`` stopping criterion. + +The ``xtol_rel`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the +optimum) changes every parameter by less than ``xtol_rel`` multiplied by the absolute value of the parameter. +Defaults to 1E-8. + +Returns: + ``float``: the value of the ``xtol_rel`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_xtol_abs_docstring() +{ + return R"(``xtol_abs`` stopping criterion. + +The ``xtol_abs`` stopping criterion instructs the solver to stop when an optimization step (or an estimate of the +optimum) changes every parameter by less than ``xtol_abs``. Defaults to 0 (that is, this stopping criterion is disabled +by default). + +Returns: + ``float``: the value of the ``xtol_abs`` stopping criterion + +Raises: + ValueError: if, when setting this property, a ``NaN`` is passed + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_maxeval_docstring() +{ + return R"(``maxeval`` stopping criterion. + +The ``maxeval`` stopping criterion instructs the solver to stop when the number of function evaluations exceeds +``maxeval``. Defaults to 0 (that is, this stopping criterion is disabled by default). + +Returns: + ``int``: the value of the ``maxeval`` stopping criterion + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_maxtime_docstring() +{ + return R"(``maxtime`` stopping criterion. + +The ``maxtime`` stopping criterion instructs the solver to stop when the optimization time (in seconds) exceeds +``maxtime``. Defaults to 0 (that is, this stopping criterion is disabled by default). + +Returns: + ``int``: the value of the ``maxtime`` stopping criterion + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_selection_docstring() +{ + return R"(Individual selection policy. + +This attribute represents the policy that is used in the ``evolve()`` method to select the individual +that will be optimised. The attribute can be either a string or an integral. + +If the attribute is a string, it must be one of ``"best"``, ``"worst"`` and ``"random"``: + +* ``"best"`` will select the best individual in the population, +* ``"worst"`` will select the worst individual in the population, +* ``"random"`` will randomly choose one individual in the population. + +:func:`~pygmo.core.nlopt.set_random_sr_seed()` can be used to seed the random number generator +used by the ``"random"`` policy. 
+
+If the attribute is an integer, it represents the index (in the population) of the individual that is selected
+for optimisation.
+
+Returns:
+    ``int`` or ``str``: the individual selection policy or index
+
+Raises:
+    OverflowError: if the attribute is set to an integer which is negative or too large
+    ValueError: if the attribute is set to an invalid string
+    TypeError: if the attribute is set to a value of an invalid type
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_replacement_docstring()
+{
+    return R"(Individual replacement policy.
+
+This attribute represents the policy that is used in the ``evolve()`` method to select the individual
+that will be replaced by the optimised individual. The attribute can be either a string or an integral.
+
+If the attribute is a string, it must be one of ``"best"``, ``"worst"`` and ``"random"``:
+
+* ``"best"`` will select the best individual in the population,
+* ``"worst"`` will select the worst individual in the population,
+* ``"random"`` will randomly choose one individual in the population.
+
+:func:`~pygmo.core.nlopt.set_random_sr_seed()` can be used to seed the random number generator
+used by the ``"random"`` policy.
+
+If the attribute is an integer, it represents the index (in the population) of the individual that will be
+replaced by the optimised individual.
+
+Returns:
+    ``int`` or ``str``: the individual replacement policy or index
+
+Raises:
+    OverflowError: if the attribute is set to an integer which is negative or too large
+    ValueError: if the attribute is set to an invalid string
+    TypeError: if the attribute is set to a value of an invalid type
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_set_random_sr_seed_docstring()
+{
+    return R"(set_random_sr_seed(seed)
+
+Set the seed for the ``"random"`` selection/replacement policies.
+
+Args:
+    seed (``int``): the value that will be used to seed the random number generator used by the ``"random"``
+      selection/replacement policies (see :attr:`~pygmo.core.nlopt.selection` and
+      :attr:`~pygmo.core.nlopt.replacement`)
+
+Raises:
+    OverflowError: if *seed* is negative or too large
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_get_log_docstring()
+{
+    return R"(get_log()
+
+Optimisation log.
+
+The optimisation log is a collection of log data lines. A log data line is a tuple consisting of:
+
+* the number of objective function evaluations made so far,
+* the objective function value for the current decision vector,
+* the number of constraints violated by the current decision vector,
+* the constraints violation norm for the current decision vector,
+* a boolean flag signalling the feasibility of the current decision vector.
+
+Returns:
+    ``list``: the optimisation log
+
+Raises:
+    unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g.,
+      type conversion errors, mismatched function signatures, etc.)
+
+)";
+}
+
+std::string nlopt_get_last_opt_result_docstring()
+{
+    return R"(get_last_opt_result()
+
+Get the result of the last optimisation.
+ +Returns: + ``int``: the NLopt return code for the last optimisation run, or ``NLOPT_SUCCESS`` if no optimisations have been run yet + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + +std::string nlopt_get_solver_name_docstring() +{ + return R"(get_solver_name() + +Get the name of the NLopt solver used during construction. + +Returns: + ``str``: the name of the NLopt solver used during construction + +Raises: + unspecified: any exception thrown by failures at the intersection between C++ and Python (e.g., + type conversion errors, mismatched function signatures, etc.) + +)"; +} + } // namespace diff --git a/pygmo/docstrings.hpp b/pygmo/docstrings.hpp index 173b10ba5..3dbc821dc 100644 --- a/pygmo/docstrings.hpp +++ b/pygmo/docstrings.hpp @@ -143,6 +143,20 @@ std::string mbh_get_log_docstring(); std::string mbh_get_perturb_docstring(); std::string generic_uda_get_seed_docstring(); std::string generic_uda_inner_algorithm_docstring(); +std::string nlopt_docstring(); +std::string nlopt_stopval_docstring(); +std::string nlopt_ftol_rel_docstring(); +std::string nlopt_ftol_abs_docstring(); +std::string nlopt_xtol_rel_docstring(); +std::string nlopt_xtol_abs_docstring(); +std::string nlopt_maxeval_docstring(); +std::string nlopt_maxtime_docstring(); +std::string nlopt_selection_docstring(); +std::string nlopt_replacement_docstring(); +std::string nlopt_set_random_sr_seed_docstring(); +std::string nlopt_get_log_docstring(); +std::string nlopt_get_last_opt_result_docstring(); +std::string nlopt_get_solver_name_docstring(); // utilities // hypervolume diff --git a/pygmo/expose_algorithms.cpp b/pygmo/expose_algorithms.cpp index 1822227ab..30052c903 100644 --- a/pygmo/expose_algorithms.cpp +++ b/pygmo/expose_algorithms.cpp @@ -44,6 +44,7 @@ see https://www.gnu.org/licenses/. */ #endif +#include #include #include #include @@ -52,11 +53,14 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#include #include #include #include -#ifdef PAGMO_WITH_EIGEN3 +#include + +#if defined(PAGMO_WITH_EIGEN3) #include #endif #include @@ -66,6 +70,9 @@ see https://www.gnu.org/licenses/. */ #include #include #include +#if defined(PAGMO_WITH_NLOPT) +#include +#endif #include #include #include @@ -273,7 +280,7 @@ void expose_algorithms() expose_algo_log(de1220_, de1220_get_log_docstring().c_str()); de1220_.def("get_seed", &de1220::get_seed, generic_uda_get_seed_docstring().c_str()); // CMA-ES -#ifdef PAGMO_WITH_EIGEN3 +#if defined(PAGMO_WITH_EIGEN3) auto cmaes_ = expose_algorithm("cmaes", cmaes_docstring().c_str()); cmaes_.def(bp::init( (bp::arg("gen") = 1u, bp::arg("cc") = -1., bp::arg("cs") = -1., bp::arg("c1") = -1., bp::arg("cmu") = -1., @@ -327,5 +334,76 @@ void expose_algorithms() nsga2_get_log_docstring().c_str()); nsga2_.def("get_seed", &nsga2::get_seed, generic_uda_get_seed_docstring().c_str()); + +#if defined(PAGMO_WITH_NLOPT) + // NLopt. + auto nlopt_ = expose_algorithm("nlopt", nlopt_docstring().c_str()); + nlopt_.def(bp::init((bp::arg("solver")))); + // Properties for the stopping criteria. 
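+    // NOTE: each add_property() call below pairs a C++ getter with the corresponding setter, so that
+    // assigning to the Python attribute (e.g., algo.ftol_rel = 1E-6) goes through the C++ validation
+    // logic (which, for instance, rejects NaN tolerances).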
+ nlopt_.add_property("stopval", &nlopt::get_stopval, &nlopt::set_stopval, nlopt_stopval_docstring().c_str()); + nlopt_.add_property("ftol_rel", &nlopt::get_ftol_rel, &nlopt::set_ftol_rel, nlopt_ftol_rel_docstring().c_str()); + nlopt_.add_property("ftol_abs", &nlopt::get_ftol_abs, &nlopt::set_ftol_abs, nlopt_ftol_abs_docstring().c_str()); + nlopt_.add_property("xtol_rel", &nlopt::get_xtol_rel, &nlopt::set_xtol_rel, nlopt_xtol_rel_docstring().c_str()); + nlopt_.add_property("xtol_abs", &nlopt::get_xtol_abs, &nlopt::set_xtol_abs, nlopt_xtol_abs_docstring().c_str()); + nlopt_.add_property("maxeval", &nlopt::get_maxeval, &nlopt::set_maxeval, nlopt_maxeval_docstring().c_str()); + nlopt_.add_property("maxtime", &nlopt::get_maxtime, &nlopt::set_maxtime, nlopt_maxtime_docstring().c_str()); + // Selection/replacement. + nlopt_.add_property( + "selection", lcast([](const nlopt &n) -> bp::object { + auto s = n.get_selection(); + if (boost::any_cast(&s)) { + return bp::str(boost::any_cast(s)); + } + return bp::object(boost::any_cast(s)); + }), + lcast([](nlopt &n, const bp::object &o) { + bp::extract e_str(o); + if (e_str.check()) { + n.set_selection(e_str()); + return; + } + bp::extract e_idx(o); + if (e_idx.check()) { + n.set_selection(e_idx()); + return; + } + pygmo_throw(::PyExc_TypeError, + ("cannot convert the input object '" + str(o) + "' of type '" + str(type(o)) + + "' to either a selection policy (one of ['best', 'worst', 'random']) or an individual index") + .c_str()); + }), + nlopt_selection_docstring().c_str()); + nlopt_.add_property( + "replacement", lcast([](const nlopt &n) -> bp::object { + auto s = n.get_replacement(); + if (boost::any_cast(&s)) { + return bp::str(boost::any_cast(s)); + } + return bp::object(boost::any_cast(s)); + }), + lcast([](nlopt &n, const bp::object &o) { + bp::extract e_str(o); + if (e_str.check()) { + n.set_replacement(e_str()); + return; + } + bp::extract e_idx(o); + if (e_idx.check()) { + n.set_replacement(e_idx()); + return; + } + pygmo_throw( + ::PyExc_TypeError, + ("cannot convert the input object '" + str(o) + "' of type '" + str(type(o)) + + "' to either a replacement policy (one of ['best', 'worst', 'random']) or an individual index") + .c_str()); + }), + nlopt_replacement_docstring().c_str()); + nlopt_.def("set_random_sr_seed", &nlopt::set_random_sr_seed, nlopt_set_random_sr_seed_docstring().c_str()); + expose_algo_log(nlopt_, nlopt_get_log_docstring().c_str()); + nlopt_.def("get_last_opt_result", lcast([](const nlopt &n) { return static_cast(n.get_last_opt_result()); }), + nlopt_get_last_opt_result_docstring().c_str()); + nlopt_.def("get_solver_name", &nlopt::get_solver_name, nlopt_get_solver_name_docstring().c_str()); +#endif } } diff --git a/pygmo/expose_problems.cpp b/pygmo/expose_problems.cpp index 7860fe4db..b2b2dd104 100644 --- a/pygmo/expose_problems.cpp +++ b/pygmo/expose_problems.cpp @@ -152,7 +152,7 @@ void expose_problems() (bp::arg("nobj") = 1, bp::arg("nec") = 0, bp::arg("nic") = 0))); // Rosenbrock. 
auto rb = expose_problem("rosenbrock", rosenbrock_docstring().c_str()); - rb.def(bp::init((bp::arg("dim")))); + rb.def(bp::init((bp::arg("dim")))); rb.def("best_known", &best_known_wrapper, problem_get_best_docstring("Rosenbrock").c_str()); // Hock-Schittkowsky 71 auto hs71 = expose_problem("hock_schittkowsky_71", @@ -211,7 +211,7 @@ void expose_problems() // CEC 2006 auto cec2006_ = expose_problem("cec2006", cec2006_docstring().c_str()); - cec2006_.def(bp::init((bp::arg("prob_id") = 1))); + cec2006_.def(bp::init((bp::arg("prob_id")))); cec2006_.def("best_known", &best_known_wrapper, problem_get_best_docstring("CEC 2006").c_str()); // CEC 2009 @@ -221,7 +221,7 @@ void expose_problems() // Luksan Vlcek 1 auto lv_ = expose_problem("luksan_vlcek1", luksan_vlcek1_docstring().c_str()); - lv_.def(bp::init(bp::arg("dim") = 3u)); + lv_.def(bp::init(bp::arg("dim"))); // Translate meta-problem auto translate_ = expose_problem("translate", translate_docstring().c_str()); @@ -234,9 +234,8 @@ void expose_problems() translate_.add_property("translation", lcast([](const translate &t) { return v_to_a(t.get_translation()); }), translate_translation_docstring().c_str()); translate_.add_property( - "inner_problem", - bp::make_function(lcast([](translate &udp) -> problem & { return udp.get_inner_problem(); }), - bp::return_internal_reference<>()), + "inner_problem", bp::make_function(lcast([](translate &udp) -> problem & { return udp.get_inner_problem(); }), + bp::return_internal_reference<>()), generic_udp_inner_problem_docstring().c_str()); // Unconstrain meta-problem. auto unconstrain_ = expose_problem("unconstrain", unconstrain_docstring().c_str()); @@ -248,9 +247,8 @@ void expose_problems() }), bp::default_call_policies())); unconstrain_.add_property( - "inner_problem", - bp::make_function(lcast([](unconstrain &udp) -> problem & { return udp.get_inner_problem(); }), - bp::return_internal_reference<>()), + "inner_problem", bp::make_function(lcast([](unconstrain &udp) -> problem & { return udp.get_inner_problem(); }), + bp::return_internal_reference<>()), generic_udp_inner_problem_docstring().c_str()); // Decompose meta-problem. auto decompose_ = expose_problem("decompose", decompose_docstring().c_str()); @@ -269,9 +267,8 @@ void expose_problems() decompose_.add_property("z", lcast([](const pagmo::decompose &p) { return v_to_a(p.get_z()); }), decompose_z_docstring().c_str()); decompose_.add_property( - "inner_problem", - bp::make_function(lcast([](decompose &udp) -> problem & { return udp.get_inner_problem(); }), - bp::return_internal_reference<>()), + "inner_problem", bp::make_function(lcast([](decompose &udp) -> problem & { return udp.get_inner_problem(); }), + bp::return_internal_reference<>()), generic_udp_inner_problem_docstring().c_str()); } } diff --git a/pygmo/py_islands.py b/pygmo/py_islands.py index af3581274..0fea8307a 100644 --- a/pygmo/py_islands.py +++ b/pygmo/py_islands.py @@ -45,10 +45,9 @@ class _temp_disable_sigint(object): def __enter__(self): import signal - # Store the previous sigint handler. - self._prev_signal = signal.getsignal(signal.SIGINT) - # Assign the new sig handler (i.e., ignore SIGINT). - signal.signal(signal.SIGINT, signal.SIG_IGN) + # Store the previous sigint handler and assign the new sig handler + # (i.e., ignore SIGINT). 
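+        # NOTE: signal.signal() returns the handler that was previously installed, so a single
+        # call both replaces the handler and records the old one for restoration in __exit__().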
+ self._prev_signal = signal.signal(signal.SIGINT, signal.SIG_IGN) def __exit__(self, type, value, traceback): import signal @@ -123,6 +122,9 @@ def run_evolve(self, algo, pop): # we need to make sure we are not trying to touch # the pool while we are sending tasks to it. res = mp_island._pool.apply_async(_evolve_func, (algo, pop)) + # NOTE: there might be a bug in need of a workaround lurking in here: + # http://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python + # Just keep it in mind. return res.get() def get_name(self): diff --git a/pygmo/test.py b/pygmo/test.py index bdc4df56d..ee0ca0f51 100644 --- a/pygmo/test.py +++ b/pygmo/test.py @@ -177,10 +177,10 @@ def run_problem_test(self): pop = population(rosenbrock(), size=10) self.assertTrue(pop.problem.extract(null_problem) is None) self.assertTrue(pop.problem.extract(rosenbrock) is not None) - pop.problem = problem(zdt(param=10)) - self.assertRaises(ValueError, lambda: pop.best_idx()) - self.assertTrue(pop.problem.extract(null_problem) is None) - self.assertTrue(pop.problem.extract(zdt) is not None) + + def prob_setter(): + pop.problem = problem(zdt(param=10)) + self.assertRaises(AttributeError, prob_setter) def run_push_back_test(self): from .core import population, rosenbrock @@ -371,6 +371,110 @@ def runTest(self): seed = uda.get_seed() +class nlopt_test_case(_ut.TestCase): + """Test case for the UDA nlopt + + """ + + def runTest(self): + from .core import nlopt, algorithm, luksan_vlcek1, problem, population + n = nlopt() + self.assertEqual(n.get_solver_name(), "cobyla") + n = nlopt(solver = "slsqp") + self.assertEqual(n.get_solver_name(), "slsqp") + self.assertRaises(ValueError, lambda: nlopt("dsadsa")) + + self.assertEqual(n.get_last_opt_result(), 1) + + self.assertEqual(n.ftol_abs, 0.) + n.ftol_abs = 1E-6 + self.assertEqual(n.ftol_abs, 1E-6) + + def _(): + n.ftol_abs = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.ftol_rel, 0.) + n.ftol_rel = 1E-6 + self.assertEqual(n.ftol_rel, 1E-6) + + def _(): + n.ftol_rel = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.maxeval, 0) + n.maxeval = 42 + self.assertEqual(n.maxeval, 42) + + self.assertEqual(n.maxtime, 0) + n.maxtime = 43 + self.assertEqual(n.maxtime, 43) + + self.assertEqual(n.replacement, "best") + n.replacement = "worst" + self.assertEqual(n.replacement, "worst") + + def _(): + n.replacement = "rr" + self.assertRaises(ValueError, _) + n.replacement = 12 + self.assertEqual(n.replacement, 12) + + def _(): + n.replacement = -1 + self.assertRaises(OverflowError, _) + + self.assertEqual(n.selection, "best") + n.selection = "worst" + self.assertEqual(n.selection, "worst") + + def _(): + n.selection = "rr" + self.assertRaises(ValueError, _) + n.selection = 12 + self.assertEqual(n.selection, 12) + + def _(): + n.selection = -1 + self.assertRaises(OverflowError, _) + + n.set_random_sr_seed(12) + self.assertRaises(OverflowError, lambda: n.set_random_sr_seed(-1)) + + self.assertEqual(n.stopval, -float('inf')) + n.stopval = 1E-6 + self.assertEqual(n.stopval, 1E-6) + + def _(): + n.stopval = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.xtol_abs, 0.) 
+ n.xtol_abs = 1E-6 + self.assertEqual(n.xtol_abs, 1E-6) + + def _(): + n.xtol_abs = float('nan') + self.assertRaises(ValueError, _) + + self.assertEqual(n.xtol_rel, 1E-8) + n.xtol_rel = 1E-6 + self.assertEqual(n.xtol_rel, 1E-6) + + def _(): + n.xtol_rel = float('nan') + self.assertRaises(ValueError, _) + + n = nlopt("slsqp") + algo = algorithm(n) + algo.set_verbosity(5) + prob = problem(luksan_vlcek1(20)) + prob.c_tol = [1E-6] * 18 + pop = population(prob, 20) + pop = algo.evolve(pop) + self.assertTrue(len(algo.extract(nlopt).get_log()) != 0) + + class null_problem_test_case(_ut.TestCase): """Test case for the null problem @@ -387,32 +491,42 @@ def runTest(self): self.assertTrue(problem(np()).get_nobj() == 1) self.assertTrue(problem(np(23)).get_nobj() == 23) + class estimate_sparsity_test_case(_ut.TestCase): """Test case for the hypervolume utilities """ + def runTest(self): import pygmo as pg import numpy as np + def my_fun(x): - return [x[0]+x[3], x[2], x[1]] - res = pg.estimate_sparsity(callable = my_fun, x = [0.1,0.1,0.1,0.1], dx = 1e-8) - self.assertTrue((res==np.array([[0, 0],[0, 3],[1, 2],[2, 1]])).all()) + return [x[0] + x[3], x[2], x[1]] + res = pg.estimate_sparsity( + callable=my_fun, x=[0.1, 0.1, 0.1, 0.1], dx=1e-8) + self.assertTrue( + (res == np.array([[0, 0], [0, 3], [1, 2], [2, 1]])).all()) + class estimate_gradient_test_case(_ut.TestCase): """Test case for the hypervolume utilities """ + def runTest(self): import pygmo as pg import numpy as np + def my_fun(x): - return [x[0]+x[3], x[2], x[1]] - out = pg.estimate_gradient(callable = my_fun, x = [0]*4, dx = 1e-8) - res = np.array([ 1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0.]) - self.assertTrue((abs(out-res)<1e-8).all()) - out = pg.estimate_gradient_h(callable = my_fun, x = [0]*4, dx = 1e-8) - self.assertTrue((abs(out-res)<1e-8).all()) + return [x[0] + x[3], x[2], x[1]] + out = pg.estimate_gradient(callable=my_fun, x=[0] * 4, dx=1e-8) + res = np.array([1., 0., 0., 1., 0., 0., + 1., 0., 0., 1., 0., 0.]) + self.assertTrue((abs(out - res) < 1e-8).all()) + out = pg.estimate_gradient_h(callable=my_fun, x=[0] * 4, dx=1e-8) + self.assertTrue((abs(out - res) < 1e-8).all()) + class hypervolume_test_case(_ut.TestCase): """Test case for the hypervolume utilities @@ -1049,6 +1163,11 @@ def run_test_suite(): suite.addTest(unconstrain_test_case()) suite.addTest(mbh_test_case()) suite.addTest(cstrs_self_adaptive_test_case()) + try: + from .core import nlopt + suite.addTest(nlopt_test_case()) + except ImportError: + pass test_result = _ut.TextTestRunner(verbosity=2).run(suite) if len(test_result.failures) > 0 or len(test_result.errors) > 0: retval = 1 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9b59ecef7..1ac5f46e0 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -64,6 +64,10 @@ if(PAGMO_WITH_EIGEN3) ADD_PAGMO_TESTCASE(eigen3_serialization) endif() +if(PAGMO_WITH_NLOPT) + ADD_PAGMO_TESTCASE(nlopt) +endif() + # Here are problematic tests for MSVC. if(NOT YACMA_COMPILER_IS_MSVC) # This test compiles but MSVC seems to have troubles in diff --git a/tests/nlopt.cpp b/tests/nlopt.cpp new file mode 100644 index 000000000..e093bdcce --- /dev/null +++ b/tests/nlopt.cpp @@ -0,0 +1,265 @@ +/* Copyright 2017 PaGMO development team + +This file is part of the PaGMO library. 
+ +The PaGMO library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 3 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The PaGMO library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the PaGMO library. If not, +see https://www.gnu.org/licenses/. */ + +#define BOOST_TEST_MODULE nlopt_test +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace pagmo; + +using hs71 = hock_schittkowsky_71; + +BOOST_AUTO_TEST_CASE(nlopt_construction) +{ + random_device::set_seed(42); + + algorithm a{nlopt{}}; + BOOST_CHECK_EQUAL(a.extract()->get_solver_name(), "cobyla"); + // Check params of default-constructed instance. + BOOST_CHECK_EQUAL(boost::any_cast(a.extract()->get_selection()), "best"); + BOOST_CHECK_EQUAL(boost::any_cast(a.extract()->get_replacement()), "best"); + BOOST_CHECK(a.extract()->get_name() != ""); + BOOST_CHECK(a.extract()->get_extra_info() != ""); + BOOST_CHECK(a.extract()->get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(a.extract()->get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(a.extract()->get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(a.extract()->get_ftol_abs(), 0.); + BOOST_CHECK_EQUAL(a.extract()->get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(a.extract()->get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(a.extract()->get_maxeval(), 0); + BOOST_CHECK_EQUAL(a.extract()->get_maxtime(), 0); + // Change a few params and copy. + a.extract()->set_selection(12u); + a.extract()->set_replacement("random"); + a.extract()->set_ftol_abs(1E-5); + a.extract()->set_maxeval(123); + // Copy. 
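+    // Copy-constructing the algorithm deep-copies the wrapped UDA, so the parameters modified
+    // above are expected to carry over to the copy.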
+ auto b(a); + BOOST_CHECK_EQUAL(boost::any_cast(b.extract()->get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(b.extract()->get_replacement()), "random"); + BOOST_CHECK(b.extract()->get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(b.extract()->get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(b.extract()->get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(b.extract()->get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(b.extract()->get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(b.extract()->get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(b.extract()->get_maxeval(), 123); + BOOST_CHECK_EQUAL(b.extract()->get_maxtime(), 0); + algorithm c; + c = b; + BOOST_CHECK_EQUAL(boost::any_cast(c.extract()->get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(c.extract()->get_replacement()), "random"); + BOOST_CHECK(c.extract()->get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(c.extract()->get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(c.extract()->get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(c.extract()->get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(c.extract()->get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(c.extract()->get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(c.extract()->get_maxeval(), 123); + BOOST_CHECK_EQUAL(c.extract()->get_maxtime(), 0); + // Move. + auto tmp(*a.extract()); + auto d(std::move(tmp)); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_replacement()), "random"); + BOOST_CHECK(d.get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(d.get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(d.get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(d.get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(d.get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(d.get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(d.get_maxeval(), 123); + BOOST_CHECK_EQUAL(d.get_maxtime(), 0); + nlopt e; + e = std::move(d); + BOOST_CHECK_EQUAL(boost::any_cast(e.get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(e.get_replacement()), "random"); + BOOST_CHECK(e.get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(e.get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(e.get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(e.get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(e.get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(e.get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(e.get_maxeval(), 123); + BOOST_CHECK_EQUAL(e.get_maxtime(), 0); + // Revive moved-from. + d = std::move(e); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_selection()), 12u); + BOOST_CHECK_EQUAL(boost::any_cast(d.get_replacement()), "random"); + BOOST_CHECK(d.get_last_opt_result() == NLOPT_SUCCESS); + BOOST_CHECK_EQUAL(d.get_stopval(), -HUGE_VAL); + BOOST_CHECK_EQUAL(d.get_ftol_rel(), 0.); + BOOST_CHECK_EQUAL(d.get_ftol_abs(), 1E-5); + BOOST_CHECK_EQUAL(d.get_xtol_rel(), 1E-8); + BOOST_CHECK_EQUAL(d.get_xtol_abs(), 0.); + BOOST_CHECK_EQUAL(d.get_maxeval(), 123); + BOOST_CHECK_EQUAL(d.get_maxtime(), 0); + // Check exception throwing on ctor. 
+    BOOST_CHECK_THROW(nlopt{""}, std::invalid_argument);
+}
+
+BOOST_AUTO_TEST_CASE(nlopt_selection_replacement)
+{
+    nlopt a;
+    a.set_selection("worst");
+    BOOST_CHECK_EQUAL(boost::any_cast<std::string>(a.get_selection()), "worst");
+    BOOST_CHECK_THROW(a.set_selection("worstee"), std::invalid_argument);
+    a.set_selection(0);
+    BOOST_CHECK_EQUAL(boost::any_cast<population::size_type>(a.get_selection()), 0u);
+    a.set_replacement("worst");
+    BOOST_CHECK_EQUAL(boost::any_cast<std::string>(a.get_replacement()), "worst");
+    BOOST_CHECK_THROW(a.set_replacement("worstee"), std::invalid_argument);
+    a.set_replacement(0);
+    BOOST_CHECK_EQUAL(boost::any_cast<population::size_type>(a.get_replacement()), 0u);
+    a.set_random_sr_seed(123);
+}
+
+// A version of hs71 which provides the sparsity pattern.
+struct hs71a : hs71 {
+    sparsity_pattern gradient_sparsity() const
+    {
+        return detail::dense_gradient(3, 4);
+    }
+};
+
+BOOST_AUTO_TEST_CASE(nlopt_evolve)
+{
+    algorithm a{nlopt{"lbfgs"}};
+    population pop(rosenbrock{10}, 20);
+    a.evolve(pop);
+    BOOST_CHECK(a.extract<nlopt>()->get_last_opt_result() >= 0);
+    pop = population{zdt{}, 20};
+    // MOO not supported by NLopt.
+    BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+    // Solver wants gradient, but problem does not provide it.
+    pop = population{null_problem{}, 20};
+    BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+    pop = population{hs71{}, 20};
+    // lbfgs does not support ineq constraints.
+    BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+    // mma supports ineq constraints but not eq constraints.
+    BOOST_CHECK_THROW(algorithm{nlopt{"mma"}}.evolve(pop), std::invalid_argument);
+    a = algorithm{nlopt{"slsqp"}};
+    a.extract<nlopt>()->set_verbosity(5);
+    for (auto s : {"best", "worst", "random"}) {
+        for (auto r : {"best", "worst", "random"}) {
+            a.extract<nlopt>()->set_selection(s);
+            a.extract<nlopt>()->set_replacement(r);
+            pop = population(rosenbrock{10}, 20);
+            a.evolve(pop);
+            pop = population{hs71{}, 20};
+            pop.get_problem().set_c_tol({1E-6, 1E-6});
+            a.evolve(pop);
+            pop = population{hs71a{}, 20};
+            pop.get_problem().set_c_tol({1E-6, 1E-6});
+            a.evolve(pop);
+        }
+    }
+    BOOST_CHECK(!a.extract<nlopt>()->get_log().empty());
+    for (auto s : {0u, 2u, 15u, 25u}) {
+        for (auto r : {1u, 3u, 16u, 25u}) {
+            a.extract<nlopt>()->set_selection(s);
+            a.extract<nlopt>()->set_replacement(r);
+            pop = population(rosenbrock{10}, 20);
+            if (s >= 20u || r >= 20u) {
+                BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+                continue;
+            }
+            a.evolve(pop);
+            pop = population{hs71{}, 20};
+            pop.get_problem().set_c_tol({1E-6, 1E-6});
+            a.evolve(pop);
+            pop = population{hs71a{}, 20};
+            pop.get_problem().set_c_tol({1E-6, 1E-6});
+            a.evolve(pop);
+        }
+    }
+    // Empty evolve.
+    a.evolve(population{});
+    // Invalid initial guesses.
+    a = algorithm{nlopt{"slsqp"}};
+    pop = population{hs71{}, 1};
+    pop.set_x(0, {-123., -123., -123., -123.});
+    BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+    pop.set_x(0, {123., 123., 123., 123.});
+    BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+    if (std::numeric_limits<double>::has_quiet_NaN) {
+        pop.set_x(0, {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN(),
+                      std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()});
+        BOOST_CHECK_THROW(a.evolve(pop), std::invalid_argument);
+    }
+}
+
+BOOST_AUTO_TEST_CASE(nlopt_set_sc)
+{
+    auto a = nlopt{"slsqp"};
+    a.set_stopval(-1.23);
+    BOOST_CHECK_EQUAL(a.get_stopval(), -1.23);
+    if (std::numeric_limits<double>::has_quiet_NaN) {
+        BOOST_CHECK_THROW(a.set_stopval(std::numeric_limits<double>::quiet_NaN()), std::invalid_argument);
+    }
+    a.set_ftol_rel(-1.23);
+    BOOST_CHECK_EQUAL(a.get_ftol_rel(), -1.23);
+    if (std::numeric_limits<double>::has_quiet_NaN) {
+        BOOST_CHECK_THROW(a.set_ftol_rel(std::numeric_limits<double>::quiet_NaN()), std::invalid_argument);
+    }
+    a.set_ftol_abs(-1.23);
+    BOOST_CHECK_EQUAL(a.get_ftol_abs(), -1.23);
+    if (std::numeric_limits<double>::has_quiet_NaN) {
+        BOOST_CHECK_THROW(a.set_ftol_abs(std::numeric_limits<double>::quiet_NaN()), std::invalid_argument);
+    }
+    a.set_xtol_rel(-1.23);
+    BOOST_CHECK_EQUAL(a.get_xtol_rel(), -1.23);
+    if (std::numeric_limits<double>::has_quiet_NaN) {
+        BOOST_CHECK_THROW(a.set_xtol_rel(std::numeric_limits<double>::quiet_NaN()), std::invalid_argument);
+    }
+    a.set_xtol_abs(-1.23);
+    BOOST_CHECK_EQUAL(a.get_xtol_abs(), -1.23);
+    if (std::numeric_limits<double>::has_quiet_NaN) {
+        BOOST_CHECK_THROW(a.set_xtol_abs(std::numeric_limits<double>::quiet_NaN()), std::invalid_argument);
+    }
+    a.set_maxtime(123);
+}
diff --git a/tests/rosenbrock.cpp b/tests/rosenbrock.cpp
index fa7c32eb1..178d2bfa7 100644
--- a/tests/rosenbrock.cpp
+++ b/tests/rosenbrock.cpp
@@ -30,6 +30,7 @@ see https://www.gnu.org/licenses/. */
 #include
 #include
+#include <cmath>
 #include
 #include
 #include
@@ -61,6 +62,16 @@ BOOST_AUTO_TEST_CASE(rosenbrock_test)
     // Best known test
     auto x_best = ros2.best_known();
     BOOST_CHECK((x_best == vector_double{1., 1.}));
+    // Gradient test.
+    auto g2 = ros2.gradient({.1, .2});
+    BOOST_CHECK(std::abs(g2[0] + 9.4) < 1E-8);
+    BOOST_CHECK(std::abs(g2[1] - 38.) < 1E-8);
+    auto g5 = ros5.gradient({.1, .2, .3, .4, .5});
+    BOOST_CHECK(std::abs(g5[0] + 9.4) < 1E-8);
+    BOOST_CHECK(std::abs(g5[1] - 15.6) < 1E-8);
+    BOOST_CHECK(std::abs(g5[2] - 13.4) < 1E-8);
+    BOOST_CHECK(std::abs(g5[3] - 6.4) < 1E-8);
+    BOOST_CHECK(std::abs(g5[4] - 68.) 
< 1E-8); } BOOST_AUTO_TEST_CASE(rosenbrock_serialization_test) diff --git a/tools/install_deps.sh b/tools/install_deps.sh index c2b604fa7..7ad9bbb0f 100644 --- a/tools/install_deps.sh +++ b/tools/install_deps.sh @@ -15,7 +15,7 @@ bash miniconda.sh -b -p $HOME/miniconda export PATH="$HOME/miniconda/bin:$PATH" conda config --add channels conda-forge --force -conda_pkgs="boost>=1.55 cmake>=3.2 eigen" +conda_pkgs="boost>=1.55 cmake>=3.2 eigen nlopt" if [[ "${PAGMO_BUILD}" == "Python36" || "${PAGMO_BUILD}" == "OSXPython36" ]]; then conda_pkgs="$conda_pkgs python=3.6 numpy dill ipyparallel numba" diff --git a/tools/install_travis.sh b/tools/install_travis.sh index f9ce4bb23..960880862 100644 --- a/tools/install_travis.sh +++ b/tools/install_travis.sh @@ -8,40 +8,40 @@ set -x export PATH="$deps_dir/bin:$PATH" if [[ "${PAGMO_BUILD}" == "ReleaseGCC48" ]]; then - CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; + CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "DebugGCC48" ]]; then - CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-fsanitize=address -fuse-ld=gold" ../; + CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-fsanitize=address -fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "CoverageGCC5" ]]; then - CXX=g++-5 CC=gcc-5 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="--coverage -fuse-ld=gold" ../; + CXX=g++-5 CC=gcc-5 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="--coverage -fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; bash <(curl -s https://codecov.io/bash) -x gcov-5; elif [[ "${PAGMO_BUILD}" == "DebugGCC6" ]]; then - CXX=g++-6 CC=gcc-6 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; + CXX=g++-6 CC=gcc-6 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-fuse-ld=gold" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "DebugClang38" ]]; then - CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes ../; + CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "ReleaseClang38" ]]; then - CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes 
-DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes ../; + CXX=clang++-3.8 CC=clang-3.8 cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "OSXDebug" ]]; then - CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; + CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "OSXRelease" ]]; then - CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes ../; + CXX=clang++ CC=clang cmake -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Release -DPAGMO_BUILD_TESTS=yes -DPAGMO_BUILD_TUTORIALS=yes -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes ../; make -j2 VERBOSE=1; ctest; elif [[ "${PAGMO_BUILD}" == "Python36" || "${PAGMO_BUILD}" == "Python27" ]]; then - CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes ../; + CXX=g++-4.8 CC=gcc-4.8 cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes ../; make install VERBOSE=1; ipcluster start --daemonize=True; # Give some time for the cluster to start up. @@ -110,7 +110,7 @@ elif [[ "${PAGMO_BUILD}" == "Python36" || "${PAGMO_BUILD}" == "Python27" ]]; the fi done elif [[ "${PAGMO_BUILD}" == "OSXPython36" || "${PAGMO_BUILD}" == "OSXPython27" ]]; then - CXX=clang++ CC=clang cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; + CXX=clang++ CC=clang cmake -DCMAKE_INSTALL_PREFIX=$deps_dir -DCMAKE_PREFIX_PATH=$deps_dir -DCMAKE_BUILD_TYPE=Debug -DPAGMO_WITH_EIGEN3=yes -DPAGMO_WITH_NLOPT=yes -DPAGMO_INSTALL_HEADERS=no -DPAGMO_BUILD_PYGMO=yes -DCMAKE_CXX_FLAGS="-g0 -O2" ../; make install VERBOSE=1; ipcluster start --daemonize=True; # Give some time for the cluster to start up.