added sparse Jacobian and sparse Hessian computation with ColPack again

adapted the README and the code to work with the official ColPack 1.0.0;
the dependency on the availability of the ADOL-C sources should also be gone now
commit 72ea6fc592475dfd0acb28fea803ec0669e6fbdb 1 parent 324e8e0
Sebastian F. Walter authored
8 README.rst
@@ -70,12 +70,13 @@ REQUIREMENTS:
* scons build tool
OPTIONAL REQUIREMENTS:
- * Fork of Colpack http://github.com/b45ch1/colpack . Colpack is needed for sparse Jacobians and sparse Hessians. The original version does not work for me.
+ * For sparse Jacobians and Hessians: ColPack (version 1.0.0 and above), http://www.cscapes.org/coloringpage/software.htm
INSTALLATION:
- * CHECK REQUIREMENTS: Make sure you have ADOL-C (version 2.1 and above), the boost libraries and numpy installed. All with header files.
- * BUILD ADOL-C: run ``./configure && make``. To use PYADOLC with sparse support, you do _not_ have to do ``./configure --with-sparse``. You should then have a folder ``~/workspace/ADOL-C-2.1.0/ADOL-C/src`` with ``adolc.h`` in it.
+ * CHECK REQUIREMENTS: Make sure you have ADOL-C (version 2.1 and above), ColPack (version 1.0.0 and above), the boost libraries and numpy installed. All with header files.
+ * BUILD ADOL-C: run e.g. ``./configure --enable-sparse --with-colpack=/home/b45ch1/workspace/ColPack/build && make``. You should then have a folder ``~/workspace/ADOL-C-2.1.0/ADOL-C/src`` with ``adolc.h`` in it. You don't have to run ``make install``.
* DOWNLOAD PYADOLC: ``cd ~`` and then ``git clone git://github.com/b45ch1/pyadolc.git``
* BUILD PYADOLC:
* change to the folder ``~/pyadolc`` and rename the file ``setup.py.EXAMPLE`` to ``setup.py``.
@@ -83,4 +84,5 @@ INSTALLATION:
* Run ``python setup.py build``. A new folder with a name similar to ``~/pyadolc/build/lib.linux-x86_64-2.6`` should be generated.
* TEST YOUR INSTALLATION: Change directory to ``~/pyadolc/build/lib.linux-x86_64-2.6`` and run ``python -c "import adolc; adolc.test()"``. All tests should pass.
* You can also use scons (if you have it) instead of using setup.py
+ * If anything goes wrong, please file a bug report.
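A minimal sketch of the verification step described above, run from an interactive Python session instead of the one-liner; the build directory name is just the example path used in this README and may differ on other machines:

    import os, sys
    # example build path from the README; adjust to the actual platform/Python version
    sys.path.insert(0, os.path.expanduser('~/pyadolc/build/lib.linux-x86_64-2.6'))

    import adolc
    adolc.test()   # all tests should pass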
6 adolc/__init__.py
@@ -7,6 +7,12 @@
import sparse
except:
print 'adolc Notice: sparse drivers not available'
+
+try:
+ import colpack
+
+except:
+ print 'adolc Notice: colpack drivers not available'
# testing
from numpy.testing import Tester
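Since both ``sparse`` and ``colpack`` are imported inside try/except blocks, a missing ColPack build only prints a notice and ``import adolc`` still succeeds. A hedged sketch of how calling code could detect this at runtime (the flag name ``have_colpack`` is invented for illustration):

    import adolc

    try:
        from adolc import colpack      # only present if the _colpack extension was built
        have_colpack = True            # illustrative flag name, not part of the package
    except ImportError:
        have_colpack = False

    if not have_colpack:
        print 'ColPack drivers unavailable; sparse Jacobians/Hessians cannot be computed'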
19 adolc/colpack/SConscript
@@ -0,0 +1,19 @@
+Import('env')
+Import('adolc_include_path')
+
+colpack = env.SharedLibrary(target='_colpack',
+ source=['src/py_colpack_adolc.cpp',
+ 'src/num_util.cpp',
+ # adolc_include_path +'/sparse/sparsedrivers.cpp',
+ # adolc_include_path +'/sparse/sparse_fo_rev.cpp',
+ # adolc_include_path +'/int_forward_s.c',
+ # adolc_include_path +'/int_forward_t.c',
+ # adolc_include_path +'/int_reverse_s.c',
+ # adolc_include_path +'/int_reverse_t.c',
+ # adolc_include_path +'/nonl_ind_forward_s.c',
+ # adolc_include_path +'/nonl_ind_forward_t.c',
+ # adolc_include_path +'/indopro_forward_s.c',
+ # adolc_include_path +'/indopro_forward_t.c',
+])
+
+
1  adolc/colpack/__init__.py
@@ -0,0 +1 @@
+from wrapped_functions import *
443 adolc/colpack/src/num_util.cpp
@@ -0,0 +1,443 @@
+// Copyright 2006 Phil Austin (http://www.eos.ubc.ca/personal/paustin)
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#define PY_ARRAY_UNIQUE_SYMBOL PyArrayHandle
+#define NO_IMPORT_ARRAY
+#include "num_util.h"
+
+namespace { const char* rcsid = "$Id: num_util.cpp 39 2007-02-01 02:54:54Z phil $"; }
+
+using namespace boost::python;
+
+namespace num_util{
+
+ //specializations for use by makeNum
+
+
+ template <>
+ PyArray_TYPES getEnum<unsigned char>(void)
+ {
+ return PyArray_UBYTE;
+ }
+
+
+ template <>
+ PyArray_TYPES getEnum<signed char>(void)
+ {
+ return PyArray_BYTE;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<short>(void)
+ {
+ return PyArray_SHORT;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<unsigned short>(void)
+ {
+ return PyArray_USHORT;
+ }
+
+
+ template <>
+ PyArray_TYPES getEnum<unsigned int>(void)
+ {
+ return PyArray_UINT;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<int>(void)
+ {
+ return PyArray_INT;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<long>(void)
+ {
+ return PyArray_LONG;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<unsigned long>(void)
+ {
+ return PyArray_ULONG;
+ }
+
+
+ template <>
+ PyArray_TYPES getEnum<long long>(void)
+ {
+ return PyArray_LONGLONG;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<unsigned long long>(void)
+ {
+ return PyArray_ULONGLONG;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<float>(void)
+ {
+ return PyArray_FLOAT;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<double>(void)
+ {
+ return PyArray_DOUBLE;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<long double>(void)
+ {
+ return PyArray_LONGDOUBLE;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<std::complex<float> >(void)
+ {
+ return PyArray_CFLOAT;
+ }
+
+
+ template <>
+ PyArray_TYPES getEnum<std::complex<double> >(void)
+ {
+ return PyArray_CDOUBLE;
+ }
+
+ template <>
+ PyArray_TYPES getEnum<std::complex<long double> >(void)
+ {
+ return PyArray_CLONGDOUBLE;
+ }
+
+
+typedef KindStringMap::value_type KindStringMapEntry;
+KindStringMapEntry kindStringMapEntries[] =
+ {
+ KindStringMapEntry(PyArray_UBYTE, "PyArray_UBYTE"),
+ KindStringMapEntry(PyArray_BYTE, "PyArray_BYTE"),
+ KindStringMapEntry(PyArray_SHORT, "PyArray_SHORT"),
+ KindStringMapEntry(PyArray_INT, "PyArray_INT"),
+ KindStringMapEntry(PyArray_LONG, "PyArray_LONG"),
+ KindStringMapEntry(PyArray_FLOAT, "PyArray_FLOAT"),
+ KindStringMapEntry(PyArray_DOUBLE, "PyArray_DOUBLE"),
+ KindStringMapEntry(PyArray_CFLOAT, "PyArray_CFLOAT"),
+ KindStringMapEntry(PyArray_CDOUBLE,"PyArray_CDOUBLE"),
+ KindStringMapEntry(PyArray_OBJECT, "PyArray_OBJECT"),
+ KindStringMapEntry(PyArray_NTYPES, "PyArray_NTYPES"),
+ KindStringMapEntry(PyArray_NOTYPE ,"PyArray_NOTYPE")
+ };
+
+typedef KindCharMap::value_type KindCharMapEntry;
+KindCharMapEntry kindCharMapEntries[] =
+ {
+ KindCharMapEntry(PyArray_UBYTE, 'B'),
+ KindCharMapEntry(PyArray_BYTE, 'b'),
+ KindCharMapEntry(PyArray_SHORT, 'h'),
+ KindCharMapEntry(PyArray_INT, 'i'),
+ KindCharMapEntry(PyArray_LONG, 'l'),
+ KindCharMapEntry(PyArray_FLOAT, 'f'),
+ KindCharMapEntry(PyArray_DOUBLE, 'd'),
+ KindCharMapEntry(PyArray_CFLOAT, 'F'),
+ KindCharMapEntry(PyArray_CDOUBLE,'D'),
+ KindCharMapEntry(PyArray_OBJECT, 'O')
+ };
+
+typedef KindTypeMap::value_type KindTypeMapEntry;
+KindTypeMapEntry kindTypeMapEntries[] =
+ {
+ KindTypeMapEntry('B',PyArray_UBYTE),
+ KindTypeMapEntry('b',PyArray_BYTE),
+ KindTypeMapEntry('h',PyArray_SHORT),
+ KindTypeMapEntry('i',PyArray_INT),
+ KindTypeMapEntry('l',PyArray_LONG),
+ KindTypeMapEntry('f',PyArray_FLOAT),
+ KindTypeMapEntry('d',PyArray_DOUBLE),
+ KindTypeMapEntry('F',PyArray_CFLOAT),
+ KindTypeMapEntry('D',PyArray_CDOUBLE),
+ KindTypeMapEntry('O',PyArray_OBJECT)
+ };
+
+
+int numStringEntries = sizeof(kindStringMapEntries)/sizeof(KindStringMapEntry);
+int numCharEntries = sizeof(kindCharMapEntries)/sizeof(KindCharMapEntry);
+int numTypeEntries = sizeof(kindTypeMapEntries)/sizeof(KindTypeMapEntry);
+
+
+using namespace boost::python;
+
+static KindStringMap kindstrings(kindStringMapEntries,
+ kindStringMapEntries + numStringEntries);
+
+static KindCharMap kindchars(kindCharMapEntries,
+ kindCharMapEntries + numCharEntries);
+
+static KindTypeMap kindtypes(kindTypeMapEntries,
+ kindTypeMapEntries + numTypeEntries);
+
+//Create a numarray referencing Python sequence object
+numeric::array makeNum(object x){
+ if (!PySequence_Check(x.ptr())){
+ PyErr_SetString(PyExc_ValueError, "expected a sequence");
+ throw_error_already_set();
+ }
+ object obj(handle<>
+ (PyArray_ContiguousFromObject(x.ptr(),PyArray_NOTYPE,0,0)));
+ check_PyArrayElementType(obj);
+ return extract<numeric::array>(obj);
+}
+
+//Create a one-dimensional Numeric array of length n and Numeric type t
+numeric::array makeNum(npy_intp n, PyArray_TYPES t=PyArray_DOUBLE){
+ object obj(handle<>(PyArray_SimpleNew(1, &n, t)));
+ return extract<numeric::array>(obj);
+}
+
+//Create a Numeric array with dimensions dimens and Numeric type t
+numeric::array makeNum(std::vector<npy_intp> dimens,
+ PyArray_TYPES t=PyArray_DOUBLE){
+ object obj(handle<>(PyArray_SimpleNew(dimens.size(), &dimens[0], t)));
+ return extract<numeric::array>(obj);
+}
+
+numeric::array makeNum(const numeric::array& arr){
+ //Returns a reference of arr by calling numeric::array copy constructor.
+ //The copy constructor increases arr's reference count.
+ return numeric::array(arr);
+}
+
+PyArray_TYPES type(numeric::array arr){
+ return PyArray_TYPES(PyArray_TYPE(arr.ptr()));
+}
+
+void check_type(boost::python::numeric::array arr,
+ PyArray_TYPES expected_type){
+ PyArray_TYPES actual_type = type(arr);
+ if (actual_type != expected_type) {
+ std::ostringstream stream;
+ stream << "expected Numeric type " << kindstrings[expected_type]
+ << ", found Numeric type " << kindstrings[actual_type] << std::ends;
+ PyErr_SetString(PyExc_TypeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+//Return the number of dimensions
+int rank(numeric::array arr){
+ //std::cout << "inside rank" << std::endl;
+ if(!PyArray_Check(arr.ptr())){
+ PyErr_SetString(PyExc_ValueError, "expected a PyArrayObject");
+ throw_error_already_set();
+ }
+ return PyArray_NDIM(arr.ptr());
+}
+
+void check_rank(boost::python::numeric::array arr, int expected_rank){
+ int actual_rank = rank(arr);
+ if (actual_rank != expected_rank) {
+ std::ostringstream stream;
+ stream << "expected rank " << expected_rank
+ << ", found rank " << actual_rank << std::ends;
+ PyErr_SetString(PyExc_RuntimeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+intp size(numeric::array arr)
+{
+ if(!PyArray_Check(arr.ptr())){
+ PyErr_SetString(PyExc_ValueError, "expected a PyArrayObject");
+ throw_error_already_set();
+ }
+ return PyArray_Size(arr.ptr());
+}
+
+void check_size(boost::python::numeric::array arr, intp expected_size){
+ intp actual_size = size(arr);
+ if (actual_size != expected_size) {
+ std::ostringstream stream;
+ stream << "expected size " << expected_size
+ << ", found size " << actual_size << std::ends;
+ PyErr_SetString(PyExc_RuntimeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+std::vector<intp> shape(numeric::array arr){
+ std::vector<intp> out_dims;
+ if(!PyArray_Check(arr.ptr())){
+ PyErr_SetString(PyExc_ValueError, "expected a PyArrayObject");
+ throw_error_already_set();
+ }
+ npy_intp* dims_ptr = PyArray_DIMS(arr.ptr());
+ int the_rank = rank(arr);
+ for (int i = 0; i < the_rank; i++){
+ out_dims.push_back(*(dims_ptr + i));
+ }
+ return out_dims;
+}
+
+intp get_dim(boost::python::numeric::array arr, int dimnum){
+ assert(dimnum >= 0);
+ int the_rank=rank(arr);
+ if(the_rank < dimnum){
+ std::ostringstream stream;
+ stream << "Error: asked for length of dimension ";
+ stream << dimnum << " but rank of array is " << the_rank << std::ends;
+ PyErr_SetString(PyExc_RuntimeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ std::vector<intp> actual_dims = shape(arr);
+ return actual_dims[dimnum];
+}
+
+void check_shape(boost::python::numeric::array arr, std::vector<intp> expected_dims){
+ std::vector<intp> actual_dims = shape(arr);
+ if (actual_dims != expected_dims) {
+ std::ostringstream stream;
+ stream << "expected dimensions " << vector_str(expected_dims)
+ << ", found dimensions " << vector_str(actual_dims) << std::ends;
+ PyErr_SetString(PyExc_RuntimeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+void check_dim(boost::python::numeric::array arr, int dimnum, intp dimsize){
+ std::vector<intp> actual_dims = shape(arr);
+ if(actual_dims[dimnum] != dimsize){
+ std::ostringstream stream;
+ stream << "Error: expected dimension number ";
+ stream << dimnum << " to be length " << dimsize;
+ stream << ", but found length " << actual_dims[dimnum] << std::ends;
+ PyErr_SetString(PyExc_RuntimeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+bool iscontiguous(numeric::array arr)
+{
+ // return arr.iscontiguous();
+ return PyArray_ISCONTIGUOUS(arr.ptr());
+}
+
+void check_contiguous(numeric::array arr)
+{
+ if (!iscontiguous(arr)) {
+ PyErr_SetString(PyExc_RuntimeError, "expected a contiguous array");
+ throw_error_already_set();
+ }
+ return;
+}
+
+void* data(numeric::array arr){
+ if(!PyArray_Check(arr.ptr())){
+ PyErr_SetString(PyExc_ValueError, "expected a PyArrayObject");
+ throw_error_already_set();
+ }
+ return PyArray_DATA(arr.ptr());
+}
+
+//Copy data into the array
+void copy_data(boost::python::numeric::array arr, char* new_data){
+ char* arr_data = (char*) data(arr);
+ intp nbytes = PyArray_NBYTES(arr.ptr());
+ for (intp i = 0; i < nbytes; i++) {
+ arr_data[i] = new_data[i];
+ }
+ return;
+}
+
+//Return a clone of this array
+numeric::array clone(numeric::array arr){
+ object obj(handle<>(PyArray_NewCopy((PyArrayObject*)arr.ptr(),PyArray_CORDER)));
+ return makeNum(obj);
+}
+
+
+//Return a clone of this array with a new type
+numeric::array astype(boost::python::numeric::array arr, PyArray_TYPES t){
+ return (numeric::array) arr.astype(type2char(t));
+}
+
+std::vector<intp> strides(numeric::array arr){
+ std::vector<intp> out_strides;
+ if(!PyArray_Check(arr.ptr())){
+ PyErr_SetString(PyExc_ValueError, "expected a PyArrayObject");
+ throw_error_already_set();
+ }
+ intp* strides_ptr = PyArray_STRIDES(arr.ptr());
+ intp the_rank = rank(arr);
+ for (intp i = 0; i < the_rank; i++){
+ out_strides.push_back(*(strides_ptr + i));
+ }
+ return out_strides;
+}
+
+int refcount(numeric::array arr){
+ return REFCOUNT(arr.ptr());
+}
+
+void check_PyArrayElementType(object newo){
+ PyArray_TYPES theType=PyArray_TYPES(PyArray_TYPE(newo.ptr()));
+ if(theType == PyArray_OBJECT){
+ std::ostringstream stream;
+ stream << "array elments have been cast to PyArray_OBJECT, "
+ << "numhandle can only accept arrays with numerical elements"
+ << std::ends;
+ PyErr_SetString(PyExc_TypeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+std::string type2string(PyArray_TYPES t_type){
+ return kindstrings[t_type];
+}
+
+char type2char(PyArray_TYPES t_type){
+ return kindchars[t_type];
+}
+
+PyArray_TYPES char2type(char e_type){
+ return kindtypes[e_type];
+}
+
+template <class T>
+inline std::string vector_str(const std::vector<T>& vec)
+{
+ std::ostringstream stream;
+ stream << "(" << vec[0];
+
+ for(std::size_t i = 1; i < vec.size(); i++){
+ stream << ", " << vec[i];
+ }
+ stream << ")";
+ return stream.str();
+}
+
+inline void check_size_match(std::vector<intp> dims, intp n)
+{
+ intp total = std::accumulate(dims.begin(),dims.end(),1,std::multiplies<intp>());
+ if (total != n) {
+ std::ostringstream stream;
+ stream << "expected array size " << n
+ << ", dimensions give array size " << total << std::ends;
+ PyErr_SetString(PyExc_TypeError, stream.str().c_str());
+ throw_error_already_set();
+ }
+ return;
+}
+
+} //namespace num_util
+
315 adolc/colpack/src/num_util.h
@@ -0,0 +1,315 @@
+#ifndef NUM_UTIL_H__
+#define NUM_UTIL_H__
+
+// Copyright 2006 Phil Austin (http://www.eos.ubc.ca/personal/paustin)
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+//
+// $Id: num_util.h 39 2007-02-01 02:54:54Z phil $
+//
+
+#include <boost/python.hpp>
+#include <numpy/noprefix.h>
+#include <iostream>
+#include <sstream>
+#include <vector>
+#include <numeric>
+#include <map>
+#include <complex>
+
+
+
+namespace num_util{
+ //!
+ /**
+ *A free function that extracts a PyArrayObject from any sequential PyObject.
+ *@param x a sequential PyObject wrapped in a Boost/Python 'object'.
+ *@return a PyArrayObject wrapped in Boost/Python numeric array.
+ */
+ boost::python::numeric::array makeNum(boost::python::object x);
+
+ /**
+ *Creates a one-dimensional numpy array of length n and numpy type t.
+ * The elements of the array are initialized to zero.
+ *@param n an integer representing the length of the array.
+ *@param t elements' numpy type. Default is double.
+ *@return a numeric array of size n with elements initialized to zero.
+ */
+ boost::python::numeric::array makeNum(intp n, PyArray_TYPES t);
+
+ /**
+ *Creates a n-dimensional numpy array with dimensions dimens and numpy
+ *type t. The elements of the array are initialized to zero.
+ *@param dimens a vector of integers that specifies the dimensions of the array.
+ *@param t elements' numpy type. Default is double.
+ *@return a numeric array of shape dimens with elements initialized to zero.
+ */
+ boost::python::numeric::array makeNum(std::vector<npy_intp> dimens,
+ PyArray_TYPES t);
+
+ /**
+ *Function template returns PyArray_Type for C++ type
+ *See num_util.cpp for specializations
+ *@param T C++ type
+ *@return numpy type enum
+ */
+
+ template<typename T> PyArray_TYPES getEnum(void)
+ {
+ PyErr_SetString(PyExc_ValueError, "no mapping available for this type");
+ boost::python::throw_error_already_set();
+ return PyArray_VOID;
+ }
+
+ /**
+ *Function template creates a one-dimensional numpy array of length n containing
+ *a copy of data at data*. See num_util.cpp::getEnum<T>() for list of specializations
+ *@param T C type of data
+ *@param T* data pointer to start of data
+ *@param n an integer indicates the size of the array.
+ *@return a numpy array of size n with elements initialized to data.
+ */
+
+ template <typename T> boost::python::numeric::array makeNum(T* data, npy_intp n = 0){
+ boost::python::object obj(boost::python::handle<>((PyArray_SimpleNew(1, &n, getEnum<T>()))));
+ void *arr_data = PyArray_DATA((PyArrayObject*) obj.ptr());
+ memcpy(arr_data, data, PyArray_ITEMSIZE((PyArrayObject*) obj.ptr()) * n); // copies the input data into the newly allocated array
+ return boost::python::extract<boost::python::numeric::array>(obj);
+ }
+
+ /**
+ *Function template creates an n-dimensional numpy array with dimensions dimens containing
+ *a copy of values starting at data. See num_util.cpp::getEnum<T>() for list of specializations
+ *@param T C type of data
+ *@param T* data pointer to start of data
+ *@param n an integer indicates the size of the array.
+ *@return a numpy array of size n with elements initialized to data.
+ */
+
+
+ template <typename T> boost::python::numeric::array makeNum(T * data, std::vector<npy_intp> dims){
+ npy_intp total = std::accumulate(dims.begin(),dims.end(),1,std::multiplies<npy_intp>());
+ boost::python::object obj(boost::python::handle<>(PyArray_SimpleNew(dims.size(),&dims[0], getEnum<T>())));
+ void *arr_data = PyArray_DATA((PyArrayObject*) obj.ptr());
+ memcpy(arr_data, data, PyArray_ITEMSIZE((PyArrayObject*) obj.ptr()) * total);
+ return boost::python::extract<boost::python::numeric::array>(obj);
+ }
+
+
+ /**
+ *Creates a numpy array from a numpy array, referencing the data.
+ *@param arr a Boost/Python numeric array.
+ *@return a numeric array referencing the input array.
+ */
+ boost::python::numeric::array makeNum(const
+ boost::python::numeric::array& arr);
+
+ /**
+ *A free function that retrieves the numpy type of a numpy array.
+ *@param arr a Boost/Python numeric array.
+ *@return the numpy type of the array's elements
+ */
+ PyArray_TYPES type(boost::python::numeric::array arr);
+
+ /**
+ *Throws an exception if the actual array type is not equal to the expected
+ *type.
+ *@param arr a Boost/Python numeric array.
+ *@param expected_type an expected numpy type.
+ *@return -----
+ */
+ void check_type(boost::python::numeric::array arr,
+ PyArray_TYPES expected_type);
+
+ /**
+ *A free function that retrieves the number of dimensions of a numpy array.
+ *@param arr a Boost/Python numeric array.
+ *@return an integer that indicates the rank of an array.
+ */
+ int rank(boost::python::numeric::array arr);
+
+ /**
+ *Throws an exception if the actual rank is not equal to the expected rank.
+ *@param arr a Boost/Python numeric array.
+ *@param expected_rank an expected rank of the numeric array.
+ *@return -----
+ */
+ void check_rank(boost::python::numeric::array arr, int expected_rank);
+
+ /**
+ *A free function that returns the total size of the array.
+ *@param arr a Boost/Python numeric array.
+ *@return an integer that indicates the total size of the array.
+ */
+ intp size(boost::python::numeric::array arr);
+
+ /**
+ *Throw an exception if the actual total size of the array is not equal to
+ *the expected size.
+ *@param arr a Boost/Python numeric array.
+ *@param expected_size the expected size of an array.
+ *@return -----
+ */
+ void check_size(boost::python::numeric::array arr, intp expected_size);
+
+ /**
+ *Returns the dimensions in a vector.
+ *@param arr a Boost/Python numeric array.
+ *@return a vector with integer values that indicates the shape of the array.
+ */
+ std::vector<intp> shape(boost::python::numeric::array arr);
+
+ /**
+ *Returns the size of a specific dimension.
+ *@param arr a Boost/Python numeric array.
+ *@param dimnum an integer that identifies the dimension to retrieve.
+ *@return the size of the requested dimension.
+ */
+ intp get_dim(boost::python::numeric::array arr, int dimnum);
+
+ /**
+ *Throws an exception if the actual dimensions of the array are not equal to
+ *the expected dimensions.
+ *@param arr a Boost/Python numeric array.
+ *@param expected_dims an integer vector of expected dimension.
+ *@return -----
+ */
+ void check_shape(boost::python::numeric::array arr,
+ std::vector<intp> expected_dims);
+
+ /**
+ *Throws an exception if a specific dimension from a numpy array does not
+ *match the expected size.
+ *@param arr a Boost/Python numeric array.
+ *@param dimnum an integer that specifies which dimension of 'arr' to check.
+ *@param dimsize an expected size of the specified dimension.
+ *@return -----
+ */
+ void check_dim(boost::python::numeric::array arr, int dimnum, intp dimsize);
+
+ /**
+ *Returns true if the array is contiguous.
+ *@param arr a Boost/Python numeric array.
+ *@return true if the array is contiguous, false otherwise.
+ */
+ bool iscontiguous(boost::python::numeric::array arr);
+
+ /**
+ *Throws an exception if the array is not contiguous.
+ *@param arr a Boost/Python numeric array.
+ *@return -----
+ */
+ void check_contiguous(boost::python::numeric::array arr);
+
+ /**
+ *Returns a pointer to the data in the array.
+ *@param arr a Boost/Python numeric array.
+ *@return a char pointer pointing at the first element of the array.
+ */
+ void* data(boost::python::numeric::array arr);
+
+ /**
+ *Copies data into the array.
+ *@param arr a Boost/Python numeric array.
+ *@param new_data a char pointer referencing the new data.
+ *@return -----
+ */
+ void copy_data(boost::python::numeric::array arr, char* new_data);
+
+ /**
+ *Returns a clone of this array.
+ *@param arr a Boost/Python numeric array.
+ *@return a replicate of the Boost/Python numeric array.
+ */
+ boost::python::numeric::array clone(boost::python::numeric::array arr);
+
+ /**
+ *Returns a clone of this array with a new type.
+ *@param arr a Boost/Python numeric array.
+ *@param t PyArray_TYPES of the output array.
+ *@return a replicate of 'arr' with type set to 't'.
+ */
+ boost::python::numeric::array astype(boost::python::numeric::array arr,
+ PyArray_TYPES t);
+
+
+/* *Returns the reference count of the array. */
+/* *@param arr a Boost/Python numeric array. */
+/* *@return the reference count of the array. */
+
+ int refcount(boost::python::numeric::array arr);
+
+ /**
+ *Returns the strides array in a vector of integer.
+ *@param arr a Boost/Python numeric array.
+ *@return the strides of an array.
+ */
+ std::vector<intp> strides(boost::python::numeric::array arr);
+
+ /**
+ *Throws an exception if the element of a numpy array is type cast to
+ *PyArray_OBJECT.
+ *@param newo a Boost/Python object.
+ *@return -----
+ */
+ void check_PyArrayElementType(boost::python::object newo);
+
+ /**
+ *Mapping from a PyArray_TYPE to its corresponding name in string.
+ */
+ typedef std::map<PyArray_TYPES, std::string> KindStringMap;
+
+ /**
+ *Mapping from a PyArray_TYPE to its corresponding typeID in char.
+ */
+ typedef std::map<PyArray_TYPES, char> KindCharMap;
+
+ /**
+ *Mapping from a typeID to its corresponding PyArray_TYPE.
+ */
+ typedef std::map<char, PyArray_TYPES> KindTypeMap;
+
+ /**
+ *Converts a PyArray_TYPE to its name in string.
+ *@param t_type a PyArray_TYPES.
+ *@return the corresponding name in string.
+ */
+ std::string type2string(PyArray_TYPES t_type);
+
+ /**
+ *Converts a PyArray_TYPE to its single character typecode.
+ *@param t_type a PyArray_TYPES.
+ *@return the corresponding typecode in char.
+ */
+ char type2char(PyArray_TYPES t_type);
+
+ /**
+ *Converts a single character typecode to its PyArray_TYPES.
+ *@param e_type a PyArray_TYPES typecode in char.
+ *@return its corresponding PyArray_TYPES.
+ */
+ PyArray_TYPES char2type(char e_type);
+
+ /**
+ *Constructs a string which contains a list of elements extracted from the
+ *input vector.
+ *@param vec a vector of any type.
+ *@return a string that lists the elements from the input vector.
+ */
+ template <class T>
+ inline std::string vector_str(const std::vector<T>& vec);
+
+ /**
+ *Throws an exception if the total size computed from a vector of integer
+ *does not match with the expected size.
+ *@param dims an integer vector of dimensions.
+ *@param n an expected size.
+ *@return -----
+ */
+ inline void check_size_match(std::vector<intp> dims, intp n);
+
+} // namespace num_util
+
+#endif
138 adolc/colpack/src/py_colpack_adolc.cpp
@@ -0,0 +1,138 @@
+#include "py_colpack_adolc.hpp"
+
+bp::list wrapped_sparse_jac_no_repeat(short tape_tag, bpn::array &bpn_x, bpn::array &bpn_options){
+ int tape_stats[STAT_SIZE];
+ tapestats(tape_tag, tape_stats);
+ int N = tape_stats[NUM_INDEPENDENTS];
+ int M = tape_stats[NUM_DEPENDENTS];
+
+ double* x = (double*) nu::data(bpn_x);
+ int* options = (int*) nu::data(bpn_options);
+// int options[4] = {1,1,0,0};
+
+
+ int nnz=-1;
+ unsigned int *rind = NULL;
+ unsigned int *cind = NULL;
+ double *values = NULL;
+ sparse_jac(tape_tag, M, N, 0, x, &nnz, &rind, &cind, &values, options);
+
+ npy_intp ret_nnz = static_cast<npy_intp>(nnz);
+ bp::object bp_rind ( bp::handle<>(PyArray_SimpleNewFromData(1, &ret_nnz, PyArray_INT, (char*) rind )));
+ bp::object bp_cind ( bp::handle<>(PyArray_SimpleNewFromData(1, &ret_nnz, PyArray_INT, (char*) cind )));
+ bp::object bp_values ( bp::handle<>(PyArray_SimpleNewFromData(1, &ret_nnz, PyArray_DOUBLE, (char*) values )));
+
+ bpn::array ret_rind = boost::python::extract<boost::python::numeric::array>(bp_rind);
+ bpn::array ret_cind = boost::python::extract<boost::python::numeric::array>(bp_cind);
+ bpn::array ret_values = boost::python::extract<boost::python::numeric::array>(bp_values);
+
+ bp::list retvals;
+ retvals.append(ret_nnz);
+ retvals.append(ret_rind);
+ retvals.append(ret_cind);
+ retvals.append(ret_values);
+
+ return retvals;
+
+}
+
+bp::list wrapped_sparse_jac_repeat(short tape_tag, bpn::array &bpn_x, npy_intp nnz, bpn::array &bpn_rind, bpn::array &bpn_cind, bpn::array &bpn_values){
+ int tape_stats[STAT_SIZE];
+ tapestats(tape_tag, tape_stats);
+ int N = tape_stats[NUM_INDEPENDENTS];
+ int M = tape_stats[NUM_DEPENDENTS];
+
+ double* x = (double*) nu::data(bpn_x);
+ unsigned int* rind = (unsigned int*) nu::data(bpn_rind);
+ unsigned int* cind = (unsigned int*) nu::data(bpn_cind);
+ double *values = (double*) nu::data(bpn_values);
+ int options[4]={0,0,0,0};
+ int tmp_nnz = static_cast<int>(nnz);
+
+ sparse_jac(tape_tag, M, N, 1, x, &tmp_nnz, &rind, &cind, &values, options);
+
+ bp::object bp_rind ( bp::handle<>(PyArray_SimpleNewFromData(1, &nnz, PyArray_INT, (char*) rind )));
+ bp::object bp_cind ( bp::handle<>(PyArray_SimpleNewFromData(1, &nnz, PyArray_INT, (char*) cind )));
+ bp::object bp_values ( bp::handle<>(PyArray_SimpleNewFromData(1, &nnz, PyArray_DOUBLE, (char*) values )));
+
+ bpn::array ret_rind = boost::python::extract<boost::python::numeric::array>(bp_rind);
+ bpn::array ret_cind = boost::python::extract<boost::python::numeric::array>(bp_cind);
+ bpn::array ret_values = boost::python::extract<boost::python::numeric::array>(bp_values);
+
+ bp::list retvals;
+ retvals.append(nnz);
+ retvals.append(ret_rind);
+ retvals.append(ret_cind);
+ retvals.append(ret_values);
+
+ return retvals;
+
+}
+
+
+bp::list wrapped_sparse_hess_no_repeat(short tape_tag, bpn::array &bpn_x, bpn::array &bpn_options){
+ int tape_stats[STAT_SIZE];
+ tapestats(tape_tag, tape_stats);
+ int N = tape_stats[NUM_INDEPENDENTS];
+
+ double* x = (double*) nu::data(bpn_x);
+ int* options = (int*) nu::data(bpn_options);
+// int options[2] = {0,0};
+
+ int nnz=-1;
+ unsigned int *rind = NULL;
+ unsigned int *cind = NULL;
+ double *values = NULL;
+ sparse_hess(tape_tag, N, 0, x, &nnz, &rind, &cind, &values, options);
+
+ npy_intp ret_nnz = static_cast<npy_intp>(nnz);
+ bp::object bp_rind ( bp::handle<>(PyArray_SimpleNewFromData(1, &ret_nnz, PyArray_INT, (char*) rind )));
+ bp::object bp_cind ( bp::handle<>(PyArray_SimpleNewFromData(1, &ret_nnz, PyArray_INT, (char*) cind )));
+ bp::object bp_values ( bp::handle<>(PyArray_SimpleNewFromData(1, &ret_nnz, PyArray_DOUBLE, (char*) values )));
+
+ bpn::array ret_rind = boost::python::extract<boost::python::numeric::array>(bp_rind);
+ bpn::array ret_cind = boost::python::extract<boost::python::numeric::array>(bp_cind);
+ bpn::array ret_values = boost::python::extract<boost::python::numeric::array>(bp_values);
+
+ bp::list retvals;
+ retvals.append(ret_nnz);
+ retvals.append(ret_rind);
+ retvals.append(ret_cind);
+ retvals.append(ret_values);
+
+ return retvals;
+
+}
+
+bp::list wrapped_sparse_hess_repeat(short tape_tag, bpn::array &bpn_x, npy_intp nnz, bpn::array &bpn_rind, bpn::array &bpn_cind, bpn::array &bpn_values){
+ int tape_stats[STAT_SIZE];
+ tapestats(tape_tag, tape_stats);
+ npy_intp N = tape_stats[NUM_INDEPENDENTS];
+
+ double* x = (double*) nu::data(bpn_x);
+ unsigned int* rind = (unsigned int*) nu::data(bpn_rind);
+ unsigned int* cind = (unsigned int*) nu::data(bpn_cind);
+ double *values = (double*) nu::data(bpn_values);
+ int options[2]={0,1};
+ int tmp_nnz = static_cast<int>(nnz);
+
+ sparse_hess(tape_tag, N, 1, x, &tmp_nnz, &rind, &cind, &values, options);
+
+ bp::object bp_rind ( bp::handle<>(PyArray_SimpleNewFromData(1, &nnz, PyArray_INT, (char*) rind )));
+ bp::object bp_cind ( bp::handle<>(PyArray_SimpleNewFromData(1, &nnz, PyArray_INT, (char*) cind )));
+ bp::object bp_values ( bp::handle<>(PyArray_SimpleNewFromData(1, &nnz, PyArray_DOUBLE, (char*) values )));
+
+ bpn::array ret_rind = boost::python::extract<boost::python::numeric::array>(bp_rind);
+ bpn::array ret_cind = boost::python::extract<boost::python::numeric::array>(bp_cind);
+ bpn::array ret_values = boost::python::extract<boost::python::numeric::array>(bp_values);
+
+ bp::list retvals;
+ retvals.append(nnz);
+ retvals.append(ret_rind);
+ retvals.append(ret_cind);
+ retvals.append(ret_values);
+
+ return retvals;
+
+}
+
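Each of these wrappers returns a Python list ``[nnz, rind, cind, values]``, i.e. the Jacobian or Hessian in coordinate format. Below is a sketch, not part of this commit and assuming SciPy is installed, of assembling that triple into a ``scipy.sparse`` matrix, analogous to the commented-out test further down:

    import numpy
    import scipy.sparse
    from adolc import adouble, independent, dependent, trace_on, trace_off, colpack

    def vector_f(x):
        return numpy.array([x[0]*x[1], x[1]*x[2]])

    x = numpy.array([1., 2., 3.])
    ax = adouble(x)

    trace_on(1)
    independent(ax)
    ay = vector_f(ax)
    dependent(ay)
    trace_off()

    options = numpy.array([1, 1, 0, 0], dtype=numpy.int32)
    nnz, rind, cind, values = colpack.sparse_jac_no_repeat(1, x, options)

    # rind/cind/values are COO triplets; nnz may overestimate the true nonzero count
    J = scipy.sparse.coo_matrix((values, (rind, cind)), shape=(2, 3))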
43 adolc/colpack/src/py_colpack_adolc.hpp
@@ -0,0 +1,43 @@
+#ifndef PY_COLPACK_HPP
+#define PY_COLPACK_HPP
+
+#define PY_ARRAY_UNIQUE_SYMBOL PyArrayHandle
+
+#include "num_util.h"
+#include "adolc/adolc.h"
+#include "adolc/sparse/sparsedrivers.h"
+
+using namespace std;
+namespace b = boost;
+namespace bp = boost::python;
+namespace bpn = boost::python::numeric;
+namespace nu = num_util;
+
+// extern int jac_pat(short,int,int,double*,unsigned int**,int*);
+// extern void generate_seed_jac(int, int, unsigned int**, double***, int*, int);
+// extern int sparse_jac(short, int , int, int, double*, int *, unsigned int **, unsigned int **, double **,int*);
+// extern int hess_pat(short,int,double*,unsigned int**, int);
+// extern void generate_seed_hess(int, unsigned int**, double***, int*, int);
+// extern int sparse_hess(short, int , int, double*, int *, unsigned int **, unsigned int **, double **,int*);
+// extern int bit_vector_propagation(short, int, int, double*, unsigned int**, int*);
+
+bp::list wrapped_sparse_jac_no_repeat(short tape_tag, bpn::array &bpn_x, bpn::array &bpn_options);
+bp::list wrapped_sparse_jac_repeat(short tape_tag, bpn::array &bpn_x, npy_intp nnz, bpn::array &bpn_rind, bpn::array &bpn_cind, bpn::array &bpn_values);
+
+bp::list wrapped_sparse_hess_no_repeat(short tape_tag, bpn::array &bpn_x, bpn::array &bpn_options);
+bp::list wrapped_sparse_hess_repeat(short tape_tag, bpn::array &bpn_x, npy_intp nnz, bpn::array &bpn_rind, bpn::array &bpn_cind, bpn::array &bpn_values);
+
+BOOST_PYTHON_MODULE(_colpack)
+{
+ using namespace boost::python;
+ import_array(); /* initialize numpy's C API */
+ bpn::array::set_module_and_type("numpy", "ndarray"); /* map boost::python::numeric::array to numpy's ndarray */
+ def("sparse_jac_no_repeat", &wrapped_sparse_jac_no_repeat);
+ def("sparse_jac_repeat", &wrapped_sparse_jac_repeat);
+
+ def("sparse_hess_no_repeat", &wrapped_sparse_hess_no_repeat);
+ def("sparse_hess_repeat", &wrapped_sparse_hess_repeat);
+
+}
+
+#endif
1  adolc/colpack/tests/__init__.py
@@ -0,0 +1 @@
+
380 adolc/colpack/tests/test_wrapped_functions.py
@@ -0,0 +1,380 @@
+import numpy
+import numpy.random
+from numpy.testing import *
+
+from adolc import *
+
+class SparseFunctionalityTests(TestCase):
+
+ def test_jac_pat(self):
+ N = 3 # dimension
+ M = 2 # codimension
+ def vector_f(x):
+ return numpy.array([x[0]*x[1],x[1]*x[2]])
+
+ x = numpy.array([1.*n +1. for n in range(N)])
+ ax = adouble(x)
+
+ trace_on(1)
+ independent(ax)
+ ay = vector_f(ax)
+ dependent(ay)
+ trace_off()
+
+ options = numpy.array([1,1,0,0],dtype=int)
+ pat = sparse.jac_pat(1,x,options)
+
+ pat = numpy.asarray(pat,dtype=int)
+ correct_pat = numpy.array([[0,1],[1,2]], dtype=int)
+ assert_array_equal(pat, correct_pat)
+
+ # def test_sparse_jac_no_repeat(self):
+ # N = 3 # dimension
+ # M = 2 # codimension
+ # def vector_f(x):
+ # return numpy.array([x[0]*x[1],x[1]*x[2]])
+
+ # x = numpy.array([1.*n +1. for n in range(N)])
+ # ax = adouble(x)
+
+ # trace_on(1)
+ # independent(ax)
+ # ay = vector_f(ax)
+ # dependent(ay)
+ # trace_off()
+
+ # options = numpy.array([1,1,0,0],dtype=int)
+ # result = sparse.sparse_jac_no_repeat(1,x,options)
+ # correct_nnz = 4
+ # correct_rind = numpy.array([0,0,1,1])
+ # corrent_cind = numpy.array([0,1,1,2])
+ # correct_values = numpy.array([2.,1.,3.,2.])
+
+ # assert_equal(result[0], correct_nnz)
+ # assert_array_equal(result[1], correct_rind)
+ # assert_array_equal(result[2], corrent_cind)
+ # assert_array_almost_equal(result[3], correct_values)
+
+ # def test_sparse_jac_with_repeat(self):
+ # N = 3 # dimension
+ # M = 2 # codimension
+ # def vector_f(x):
+ # return numpy.array([x[0]*x[1],x[1]*x[2]])
+
+ # x = numpy.array([1.*n +1. for n in range(N)])
+ # ax = adouble(x)
+
+ # trace_on(1)
+ # independent(ax)
+ # ay = vector_f(ax)
+ # dependent(ay)
+ # trace_off()
+
+ # options = numpy.array([1,1,0,0],dtype=int)
+
+ # # first call
+ # result = sparse.sparse_jac_no_repeat(1,x,options)
+
+ # # second call
+ # x = numpy.array([1.*n +2. for n in range(N)])
+ # result = sparse.sparse_jac_repeat(1,x, result[0], result[1], result[2], result[3])
+
+ # correct_nnz = 4
+ # correct_rind = numpy.array([0,0,1,1])
+ # corrent_cind = numpy.array([0,1,1,2])
+ # correct_values = numpy.array([3.,2.,4.,3.])
+
+ # assert_equal(result[0], correct_nnz)
+ # assert_array_equal(result[1], correct_rind)
+ # assert_array_equal(result[2], corrent_cind)
+ # assert_array_almost_equal(result[3], correct_values)
+
+ def test_hess_pat(self):
+ N = 3 # dimension
+
+ def scalar_f(x):
+ return x[0]*x[1] + x[1]*x[2] + x[2]*x[0]
+
+ x = numpy.array([1.*n +1. for n in range(N)])
+ ax = adouble(x)
+
+ trace_on(1)
+ independent(ax)
+ ay = scalar_f(ax)
+ dependent(ay)
+ trace_off()
+
+ option = 0
+ pat = sparse.hess_pat(1,x,option)
+ pat = numpy.asarray(pat,dtype=int)
+
+ correct_pat = numpy.array([[1,2],[0,2], [0,1]], dtype=int)
+ assert_array_equal(pat, correct_pat)
+
+
+ # def test_sparse_hess_no_repeat(self):
+ # N1 = 3 # dimension
+ # def scalar_f(x):
+ # return x[0]*x[1] + x[1]*x[2] + x[2]*x[0]
+
+ # def scalar_f2(x):
+ # return x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]
+
+ # x1 = numpy.array([1.*n +1. for n in range(N1)])
+ # ax1 = adouble(x1)
+
+ # trace_on(1)
+ # independent(ax1)
+ # ay = scalar_f(ax1)
+ # dependent(ay)
+ # trace_off()
+
+ # options = numpy.array([0,0],dtype=int)
+ # result = sparse.sparse_hess_no_repeat(1, x1, options)
+ # correct_nnz = 3
+
+ # correct_rind = numpy.array([0,0,1])
+ # corrent_cind = numpy.array([1,2,2])
+ # correct_values = numpy.array([1.,1.,1.])
+
+ # assert_equal(result[0], correct_nnz)
+ # assert_array_equal(result[1], correct_rind)
+ # assert_array_equal(result[2], corrent_cind)
+ # assert_array_almost_equal(result[3], correct_values)
+
+
+ # N2 = 4
+ # x2 = numpy.array([1.*n +1. for n in range(N2)])
+
+ # trace_on(2)
+ # ax2 = adouble(x2)
+ # independent(ax2)
+ # ay = scalar_f2(ax2)
+ # dependent(ay)
+ # trace_off()
+
+ # options = numpy.array([0,0],dtype=int)
+ # for i in range(10):
+ # result = sparse.sparse_hess_no_repeat(2, x2, options)
+
+ # def test_sparse_hess_repeat(self):
+ # N = 3 # dimension
+
+ # def scalar_f(x):
+ # return x[0]**3 + x[0]*x[1] + x[1]*x[2] + x[2]*x[0]
+
+ # x = numpy.array([1.*n +1. for n in range(N)])
+ # ax = adouble(x)
+
+ # trace_on(1)
+ # independent(ax)
+ # ay = scalar_f(ax)
+ # dependent(ay)
+ # trace_off()
+
+ # options = numpy.array([1,1],dtype=int)
+
+ # # first call
+ # result = sparse.sparse_hess_no_repeat(1,x,options)
+
+ # # second call
+ # x = numpy.array([1.*n +2. for n in range(N)])
+ # result = sparse.sparse_hess_repeat(1,x, result[1], result[2], result[3])
+
+ # correct_nnz = 4
+
+ # correct_rind = numpy.array([0,0,0,1])
+ # corrent_cind = numpy.array([0,1,2,2])
+ # correct_values = numpy.array([6*x[0],1.,1.,1.])
+
+ # assert_equal(result[0], correct_nnz)
+ # assert_array_equal(result[1], correct_rind)
+ # assert_array_equal(result[2], corrent_cind)
+ # assert_array_almost_equal(result[3], correct_values)
+
+ # def test_sparse_problem(self):
+ # return 0
+ # import scipy.sparse
+
+ # nvar = 4
+ # ncon = 2
+
+ # def eval_f(x, user_data = None):
+ # assert len(x) == 4
+ # return x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]
+
+ # def eval_grad_f(x, user_data = None):
+ # assert len(x) == 4
+ # grad_f = numpy.array([
+ # x[0] * x[3] + x[3] * (x[0] + x[1] + x[2]) ,
+ # x[0] * x[3],
+ # x[0] * x[3] + 1.0,
+ # x[0] * (x[0] + x[1] + x[2])
+ # ])
+ # return grad_f;
+
+ # def eval_g(x, user_data= None):
+ # assert len(x) == 4
+ # return numpy.array([
+ # x[0] * x[1] * x[2] * x[3],
+ # x[0]*x[0] + x[1]*x[1] + x[2]*x[2] + x[3]*x[3]
+ # ])
+
+ # nnzj = 8
+ # def eval_jac_g(x, flag, user_data = None):
+ # if flag:
+ # return (numpy.array([0, 0, 0, 0, 1, 1, 1, 1]),
+ # numpy.array([0, 1, 2, 3, 0, 1, 2, 3]))
+ # else:
+ # assert len(x) == 4
+ # return numpy.array([ x[1]*x[2]*x[3],
+ # x[0]*x[2]*x[3],
+ # x[0]*x[1]*x[3],
+ # x[0]*x[1]*x[2],
+ # 2.0*x[0],
+ # 2.0*x[1],
+ # 2.0*x[2],
+ # 2.0*x[3] ])
+
+ # nnzh = 10
+ # def eval_h(x, lagrange, obj_factor, flag, user_data = None):
+ # if flag:
+ # hrow = [0, 1, 1, 2, 2, 2, 3, 3, 3, 3]
+ # hcol = [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
+ # return (numpy.array(hcol,dtype=int), numpy.array(hrow,dtype=int))
+ # else:
+ # values = numpy.zeros((10), numpy.float_)
+ # values[0] = obj_factor * (2*x[3])
+ # values[1] = obj_factor * (x[3])
+ # values[2] = 0
+ # values[3] = obj_factor * (x[3])
+ # values[4] = 0
+ # values[5] = 0
+ # values[6] = obj_factor * (2*x[0] + x[1] + x[2])
+ # values[7] = obj_factor * (x[0])
+ # values[8] = obj_factor * (x[0])
+ # values[9] = 0
+ # values[1] += lagrange[0] * (x[2] * x[3])
+
+ # values[3] += lagrange[0] * (x[1] * x[3])
+ # values[4] += lagrange[0] * (x[0] * x[3])
+
+ # values[6] += lagrange[0] * (x[1] * x[2])
+ # values[7] += lagrange[0] * (x[0] * x[2])
+ # values[8] += lagrange[0] * (x[0] * x[1])
+ # values[0] += lagrange[1] * 2
+ # values[2] += lagrange[1] * 2
+ # values[5] += lagrange[1] * 2
+ # values[9] += lagrange[1] * 2
+ # return values
+
+
+ # x0 = numpy.array([1.0, 5.0, 5.0, 1.0])
+
+ # # check that adolc gives the same answers as derivatives calculated by hand
+ # trace_on(1)
+ # ax = adouble(x0)
+ # independent(ax)
+ # ay = eval_f(ax)
+ # dependent(ay)
+ # trace_off()
+
+ # trace_on(2)
+ # ax = adouble(x0)
+ # independent(ax)
+ # ay = eval_g(ax)
+ # dependent(ay)
+ # trace_off()
+
+ # trace_on(3)
+ # ax = adouble(x0)
+ # independent(ax)
+ # ay = eval_g(ax)
+ # dependent(ay[0])
+ # trace_off()
+
+ # trace_on(4)
+ # ax = adouble(x0)
+ # independent(ax)
+ # ay = eval_g(ax)
+ # dependent(ay[1])
+ # trace_off()
+
+
+ # def eval_f_adolc(x, user_data = None):
+ # return function(1,x)[0]
+
+ # def eval_grad_f_adolc(x, user_data = None):
+ # return gradient(1,x)
+
+ # def eval_g_adolc(x, user_data= None):
+ # return function(2,x)
+
+ # def eval_jac_g_adolc(x, flag, user_data = None):
+ # options = numpy.array([1,1,0,0],dtype=int)
+ # result = sparse.sparse_jac_no_repeat(2,x,options)
+ # if flag:
+ # return (numpy.asarray(result[1],dtype=int), numpy.asarray(result[2],dtype=int))
+ # else:
+ # return result[3]
+
+ # def eval_h_adolc(x, lagrange, obj_factor, flag, user_data = None):
+ # options = numpy.array([0,0],dtype=int)
+ # assert numpy.ndim(x) == 1
+ # assert numpy.size(x) == 4
+ # result_f = sparse.sparse_hess_no_repeat(1, x, options)
+ # result_g0 = sparse.sparse_hess_no_repeat(3, x,options)
+ # result_g1 = sparse.sparse_hess_no_repeat(4, x,options)
+ # Hf = scipy.sparse.coo_matrix( (result_f[3], (result_f[1], result_f[2])), shape=(4, 4))
+ # Hg0 = scipy.sparse.coo_matrix( (result_g0[3], (result_g0[1], result_g0[2])), shape=(4, 4))
+ # Hg1 = scipy.sparse.coo_matrix( (result_g1[3], (result_g1[1], result_g1[2])), shape=(4, 4))
+
+ # H = Hf + Hg0 + Hg1
+ # H = H.tocoo()
+
+ # if flag:
+ # hrow = H.row
+ # hcol = H.col
+ # return (numpy.array(hcol,dtype=int), numpy.array(hrow,dtype=int))
+
+ # else:
+ # values = numpy.zeros((10), float)
+ # values[:] = H.data
+ # return values
+
+ # # function of f
+ # assert_almost_equal(eval_f(x0), eval_f_adolc(x0))
+
+ # # gradient of f
+ # assert_array_almost_equal(eval_grad_f(x0), eval_grad_f_adolc(x0))
+
+ # # function of g
+ # assert_array_almost_equal(eval_g(x0), function(2,x0))
+
+ # # sparse jacobian of g
+ # assert_array_equal(eval_jac_g_adolc(x0,True)[0], eval_jac_g(x0,True)[0])
+ # assert_array_equal(eval_jac_g_adolc(x0,True)[1], eval_jac_g(x0,True)[1])
+ # assert_array_equal(eval_jac_g_adolc(x0,False), eval_jac_g(x0,False))
+
+ # # sparse hessian of the lagrangian
+ # lagrange = numpy.ones(2,dtype=float)
+ # obj_factor = 1.
+ # x0 = numpy.random.rand(4)
+ # result = (eval_h(x0, lagrange, obj_factor, False), eval_h(x0, lagrange, obj_factor, True))
+ # result_adolc = (eval_h_adolc(x0, lagrange, obj_factor, False), eval_h_adolc(x0, lagrange, obj_factor, True))
+ # H = scipy.sparse.coo_matrix( result, shape=(4, 4))
+ # H_adolc = scipy.sparse.coo_matrix( result_adolc, shape=(4, 4))
+ # H = H.todense()
+ # H_adolc = H_adolc.todense()
+ # assert_array_almost_equal( H, H_adolc.T)
+
+
+if __name__ == '__main__':
+ try:
+ import nose
+ except:
+ print 'Please install nose for unit testing'
+ nose.runmodule()
+
171 adolc/colpack/wrapped_functions.py
@@ -0,0 +1,171 @@
+import _colpack
+from _colpack import *
+
+import numpy
+
+def sparse_jac_no_repeat(tape_tag, x, options):
+ """
+ computes the sparse Jacobian of a function F: R^N -> R^M without any prior information,
+ i.e. this function internally finds the sparsity pattern of the Jacobian J,
+ then uses graph coloring to find the smallest number of necessary directions
+ (P in forward mode, Q in reverse mode)
+ and computes dot(J,S) with an (N,P) seed matrix S in forward mode,
+ or dot(S^T,J) with an (M,Q) seed matrix S in reverse mode
+
+ [nnz, rind, cind, values] =sparse_jac_no_repeat(tape_tag, x, options)
+
+ INPUT:
+ The base point x at which the Jacobian should be computed, i.e. J = dF(x)/dx
+ options is a list or array of length 4
+ options[0] : way of sparsity pattern computation
+ 0 - propagation of index domains (default)
+ 1 - propagation of bit pattern
+ options[1] : test the computational graph control flow
+ 0 - safe mode (default)
+ 1 - tight mode
+ options[2] : way of bit pattern propagation
+ 0 - automatic detection (default)
+ 1 - forward mode
+ 2 - reverse mode
+ options[3] : way of compression
+ 0 - column compression (default)
+ 1 - row compression
+
+ OUTPUT:
+ nnz is the guessed number of nonzeros in the Jacobian. This can be larger than the true number of nonzeros.
+
+ sparse matrix representation in standard format:
+ rind is an nnz-array of row indices
+ cind is an nnz-array of column indices
+ values are the corresponding Jacobian entries
+ """
+ assert type(tape_tag) == int
+
+ if options is None:
+ options = numpy.array([1,1,0,0], dtype=numpy.int32)
+
+ options = numpy.asarray(options,dtype=numpy.int32)
+ assert numpy.ndim(options) == 1
+ assert numpy.size(options) == 4
+
+ assert numpy.ndim(x) == 1
+ x = numpy.asarray(x, dtype=float)
+
+ return _colpack.sparse_jac_no_repeat(tape_tag, x, options)
+
+def sparse_jac_repeat(tape_tag, x, nnz, rind, cind, values):
+ """
+ computes sparse Jacobian J for a function F:R^N -> R^M with
+ the sparsity pattern that has been computed previously (e.g. by calling sparse_jac_no_repeat)
+
+ Presumably it also reuses the options that were set in the previous call, so it should not be necessary to set them again.
+
+
+ [nnz, rind, cind, values] = sparse_jac_repeat(tape_tag, x, nnz, rind, cind, values)
+
+ INPUT:
+ The base point x at which the Jacobian should be computed, i.e. J = dF(x)/dx,
+ together with the nnz, rind, cind and values returned by a previous call.
+
+ OUTPUT:
+ nnz is the guessed number of nonzeros in the Jacobian. This can be larger than the true number of nonzeros.
+
+ sparse matrix representation in standard format:
+ rind is an nnz-array of row indices
+ cind is an nnz-array of column indices
+ values are the corresponding Jacobian entries
+ """
+ assert type(tape_tag) == int
+ assert type(nnz) == int
+
+ assert numpy.ndim(x) == 1
+ assert numpy.ndim(rind) == 1
+ assert numpy.ndim(cind) == 1
+ assert numpy.ndim(values) == 1
+
+ x = numpy.asarray(x, dtype=float)
+ rind= numpy.asarray(rind, dtype=numpy.uint32)
+ cind= numpy.asarray(cind, dtype=numpy.uint32)
+ values = numpy.asarray(values, dtype=float)
+
+ return _colpack.sparse_jac_repeat(tape_tag, x, nnz, rind, cind, values)
+
+
+def sparse_hess_no_repeat(tape_tag, x, options = None):
+ """
+ computes sparse Hessian for a function F:R^N -> R without any prior information,
+
+ [nnz, rind, cind, values] =sparse_hess_no_repeat(tape_tag, x, options)
+
+ INPUT:
+ The base point x at which the Hessian should be computed, i.e. H = d^2F(x)/dx^2
+ options is a list or array of length 2
+ options[0] :test the computational graph control flow
+ 0 - safe mode (default)
+ 1 - tight mode
+ options[1] : way of recovery
+ 0 - indirect recovery
+ 1 - direct recovery
+
+ OUTPUT:
+ nnz is the guessed number of nonzeros in the Hessian. This can be larger than the true number of nonzeros.
+
+ sparse matrix representation in standard format:
+ rind is an nnz-array of row indices
+ cind is an nnz-array of column indices
+ values are the corresponding Hessian entries
+ """
+ assert type(tape_tag) == int
+
+ if options is None:
+ options = numpy.array([0,0], dtype=numpy.int32)
+
+ options = numpy.asarray(options,dtype=numpy.int32)
+ assert numpy.ndim(options) == 1
+ assert numpy.size(options) == 2
+
+ assert numpy.ndim(x) == 1
+ x = numpy.asarray(x, dtype=float)
+ return _colpack.sparse_hess_no_repeat(tape_tag, x, options)
+
+def sparse_hess_repeat(tape_tag, x, rind, cind, values):
+ """
+ computes the sparse Hessian of a function F:R^N -> R, reusing the sparsity pattern
+ that was computed by a previous call (e.g. by sparse_hess_no_repeat)
+
+ [nnz, rind, cind, values] = sparse_hess_repeat(tape_tag, x, rind, cind, values)
+
+ INPUT:
+ The base point x at which the Hessian should be computed, i.e. H = d^2F(x)/dx^2,
+ together with the rind, cind and values arrays returned by a previous call.
+
+ OUTPUT:
+ nnz is the guessed number of nonzeros in the Hessian. This can be larger than the true number of nonzeros.
+
+ sparse matrix representation in standard format:
+ rind is an nnz-array of row indices
+ cind is an nnz-array of column indices
+ values are the corresponding Hessian entries
+ """
+ assert type(tape_tag) == int
+
+ assert numpy.ndim(x) == 1
+ assert numpy.ndim(rind) == 1
+ assert numpy.ndim(cind) == 1
+ assert numpy.ndim(values) == 1
+
+ nnz = int(numpy.size(rind))
+
+ assert nnz == numpy.size(cind)
+ assert nnz == numpy.size(values)
+
+ x = numpy.asarray(x, dtype=float)
+ rind = numpy.asarray(rind, dtype=numpy.uint32)
+ cind = numpy.asarray(cind, dtype=numpy.uint32)
+ values = numpy.asarray(values, dtype=float)
+
+ return _colpack.sparse_hess_repeat(tape_tag, x, nnz, rind, cind, values)
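A hedged end-to-end sketch of the drivers documented above (not part of the commit): the ``*_no_repeat`` call detects the sparsity pattern and coloring, and later ``*_repeat`` calls reuse it at new base points. The taped functions and option values mirror the commented-out tests elsewhere in this commit:

    import numpy
    from adolc import adouble, independent, dependent, trace_on, trace_off, colpack

    # vector-valued function for the Jacobian drivers
    def vector_f(x):
        return numpy.array([x[0]*x[1], x[1]*x[2]])

    # scalar-valued function for the Hessian drivers
    def scalar_f(x):
        return x[0]*x[1] + x[1]*x[2] + x[2]*x[0]

    N = 3
    x = numpy.array([1.*n + 1. for n in range(N)])

    ax = adouble(x)
    trace_on(1)
    independent(ax)
    ay = vector_f(ax)
    dependent(ay)
    trace_off()

    ax = adouble(x)
    trace_on(2)
    independent(ax)
    ay = scalar_f(ax)
    dependent(ay)
    trace_off()

    # Jacobian: the first call computes pattern and seed matrices, the second reuses them
    jac_options = numpy.array([1, 1, 0, 0], dtype=numpy.int32)
    nnz, rind, cind, values = colpack.sparse_jac_no_repeat(1, x, jac_options)
    x2 = numpy.array([1.*n + 2. for n in range(N)])
    nnz, rind, cind, values = colpack.sparse_jac_repeat(1, x2, nnz, rind, cind, values)

    # Hessian: options of length 2 (safe mode, indirect recovery)
    hess_options = numpy.array([0, 0], dtype=numpy.int32)
    nnz_h, rind_h, cind_h, values_h = colpack.sparse_hess_no_repeat(2, x, hess_options)
    nnz_h, rind_h, cind_h, values_h = colpack.sparse_hess_repeat(2, x2, rind_h, cind_h, values_h)

As the docstrings note, the returned nnz is only a guess and may be larger than the true number of nonzeros.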
20 adolc/sparse/SConscript
@@ -4,16 +4,16 @@ Import('adolc_include_path')
colpack = env.SharedLibrary(target='_sparse',
source=['src/py_sparse_adolc.cpp',
'src/num_util.cpp',
- adolc_include_path +'/sparse/sparsedrivers.cpp',
- adolc_include_path +'/sparse/sparse_fo_rev.cpp',
- adolc_include_path +'/int_forward_s.c',
- adolc_include_path +'/int_forward_t.c',
- adolc_include_path +'/int_reverse_s.c',
- adolc_include_path +'/int_reverse_t.c',
- adolc_include_path +'/nonl_ind_forward_s.c',
- adolc_include_path +'/nonl_ind_forward_t.c',
- adolc_include_path +'/indopro_forward_s.c',
- adolc_include_path +'/indopro_forward_t.c',
+ # adolc_include_path +'/sparse/sparsedrivers.cpp',
+ # adolc_include_path +'/sparse/sparse_fo_rev.cpp',
+ # adolc_include_path +'/int_forward_s.c',
+ # adolc_include_path +'/int_forward_t.c',
+ # adolc_include_path +'/int_reverse_s.c',
+ # adolc_include_path +'/int_reverse_t.c',
+ # adolc_include_path +'/nonl_ind_forward_s.c',
+ # adolc_include_path +'/nonl_ind_forward_t.c',
+ # adolc_include_path +'/indopro_forward_s.c',
+ # adolc_include_path +'/indopro_forward_t.c',
])
4 adolc/sparse/src/py_sparse_adolc.hpp
@@ -4,8 +4,8 @@
#define PY_ARRAY_UNIQUE_SYMBOL PyArrayHandle
#include "num_util.h"
-#include "adolc.h"
-#include "sparse/sparsedrivers.h"
+#include "adolc/adolc.h"
+#include "adolc/sparse/sparsedrivers.h"
using namespace std;
namespace b = boost;
2  adolc/src/py_adolc.hpp
@@ -3,7 +3,7 @@
#define PY_ARRAY_UNIQUE_SYMBOL PyArrayHandle
#include "num_util.h"
-#include "adolc.h"
+#include "adolc/adolc.h"
using namespace std;
namespace b = boost;
20 tests/experimental_tests.py
@@ -6,6 +6,8 @@
or complicated_tests.py
"""
import numpy
+import scipy
+from numpy.testing import *
from adolc import *
def test_big_tape():
@@ -200,7 +202,7 @@ def eval_g_adolc(x, user_data= None):
def eval_jac_g_adolc(x, flag, user_data = None):
options = numpy.array([1,1,0,0],dtype=int)
- result = sparse.sparse_jac_no_repeat(2,x,options)
+ result = colpack.sparse_jac_no_repeat(2,x,options)
if flag:
return (numpy.asarray(result[1],dtype=int), numpy.asarray(result[2],dtype=int))
else:
@@ -210,12 +212,12 @@ def eval_h_adolc(x, lagrange, obj_factor, flag, user_data = None):
options = numpy.array([0,0],dtype=int)
assert numpy.ndim(x) == 1
assert numpy.size(x) == 4
- result_f = sparse.sparse_hess_no_repeat(1, x, options)
- result_g0 = sparse.sparse_hess_no_repeat(3, x,options)
- result_g1 = sparse.sparse_hess_no_repeat(4, x,options)
- Hf = scipy.sparse.coo_matrix( (result_f[3], (result_f[1], result_f[2])), shape=(4, 4))
- Hg0 = scipy.sparse.coo_matrix( (result_g0[3], (result_g0[1], result_g0[2])), shape=(4, 4))
- Hg1 = scipy.sparse.coo_matrix( (result_g1[3], (result_g1[1], result_g1[2])), shape=(4, 4))
+ result_f = colpack.sparse_hess_no_repeat(1, x, options)
+ result_g0 = colpack.sparse_hess_no_repeat(3, x,options)
+ result_g1 = colpack.sparse_hess_no_repeat(4, x,options)
+ Hf = scipy.linalg.sparse.coo_matrix( (result_f[3], (result_f[1], result_f[2])), shape=(4, 4))
+ Hg0 = scipy.linalg.sparse.coo_matrix( (result_g0[3], (result_g0[1], result_g0[2])), shape=(4, 4))
+ Hg1 = scipy.linalg.sparse.coo_matrix( (result_g1[3], (result_g1[1], result_g1[2])), shape=(4, 4))
H = Hf + Hg0 + Hg1
H = H.tocoo()
@@ -250,8 +252,8 @@ def eval_h_adolc(x, lagrange, obj_factor, flag, user_data = None):
x0 = numpy.random.rand(4)
result = (eval_h(x0, lagrange, obj_factor, False), eval_h(x0, lagrange, obj_factor, True))
result_adolc = (eval_h_adolc(x0, lagrange, obj_factor, False), eval_h_adolc(x0, lagrange, obj_factor, True))
- H = scipy.sparse.coo_matrix( result, shape=(4, 4))
- H_adolc = scipy.sparse.coo_matrix( result_adolc, shape=(4, 4))
+ H = scipy.linalg.sparse.coo_matrix( result, shape=(4, 4))
+ H_adolc = scipy.linalg.sparse.coo_matrix( result_adolc, shape=(4, 4))
H = H.todense()
H_adolc = H_adolc.todense()
assert_array_almost_equal( H, H_adolc.T)