Skip to content

Commit

Permalink
Implemented a CUDA autodetection feature in the build system. It is completely backwards compatible, but if no options concerning CUDA are given during the configure stage, it tries to find CUDA in /usr/local/cuda.
Browse files Browse the repository at this point in the history
  • Loading branch information
rempferg committed Jan 10, 2013
1 parent 46b7290 commit 56b2be3
Showing 1 changed file with 89 additions and 45 deletions.
134 changes: 89 additions & 45 deletions configure.ac
Expand Up @@ -405,61 +405,105 @@ AS_IF([test x$fftw_found = xyes],[
##################################
# check for CUDA
# NOTE(review): this region is a rendered diff capture — removed (old) and
# added (new) hunks are interleaved without +/- markers, so some lines below
# occur twice (old version first, replacement second). Confirm against the
# actual configure.ac before treating this as buildable source.
AC_MSG_CHECKING([whether to use CUDA])

# --with-cuda[=DIR] option. The duplicated AS_HELP_STRING lines and the
# duplicated default-setting lines are the old/new diff pair: the old default
# when --with-cuda is absent was "no" (CUDA disabled); the new default is
# "guess", which triggers autodetection in /usr/local/cuda further below.
AC_ARG_WITH([cuda],
AS_HELP_STRING([--with-cuda=CUDAINSTALLDIR],[specify where CUDA is installed.
AS_HELP_STRING([--with-cuda[=CUDAINSTALLDIR]],[specify where CUDA is installed.
Defaults to /usr/local/cuda.
The cuda compiler can also be specified by setting the NVCC
environment variable. The CUDA library and header can be
manually specified by using CPPFLAGS, LDFLAGS and LIBS.]),
, with_cuda=no)
, with_cuda=guess)
AC_MSG_RESULT($with_cuda)

# Old (pre-change) hunk: paths were derived directly from $with_cuda and a
# missing nvcc or cudart was always fatal.
if test x$with_cuda != xno; then
# if installation dir is given, set the paths
if test x$with_cuda != xyes; then
if test x$NVCC = x; then
NVCC=$with_cuda/bin/nvcc
fi
if test -d $with_cuda/lib64; then
LDFLAGS="$LDFLAGS -L$with_cuda/lib64"
else
LDFLAGS="$LDFLAGS -L$with_cuda/lib"
fi
NVCCFLAGS="$NVCCFLAGS -I$with_cuda/include"
fi
# New hunk: normalize $with_cuda into $cuda_path. "guess" and plain "yes"
# both fall back to /usr/local/cuda; an explicit directory is taken verbatim
# and $with_cuda is upgraded to "yes" so that later check failures stay
# fatal for explicitly requested CUDA.
if test x$with_cuda = xguess -o x$with_cuda = xyes; then
cuda_path=/usr/local/cuda
elif test x$with_cuda != xno; then
cuda_path=$with_cuda
with_cuda=yes
fi

# NVCC
# Old hunk: locate nvcc and fail hard when it is missing.
AC_ARG_VAR(NVCC,[NVIDIA CUDA compiler command])
AC_ARG_VAR(NVCCFLAGS,[special compiler flags for the NVIDIA CUDA compiler])
AC_PATH_PROG(NVCC, nvcc, no)
if test x$NVCC = xno; then
AC_MSG_FAILURE([CUDA compiler nvcc was not found, specify location using the NVCC variable])
fi
# New: cuda_ok tracks whether every CUDA check succeeded; the summary output
# at the end of the script gates on it.
cuda_ok=no

# runtime library
# Old hunk: cudart check whose failure branch was unconditionally fatal.
AC_CHECK_LIB(cudart, cudaGetDevice, [LIBS="$LIBS -lcudart"], [AC_MSG_FAILURE([could not find cuda runtime library (cudart), specify location using LDFLAGS])])
# New (post-change) main CUDA block: runs for both explicit (--with-cuda=...)
# and guessed configurations. Failures are fatal only when the user asked for
# CUDA explicitly (with_cuda=yes); in guess mode they clear cuda_ok and warn.
if test x$with_cuda != xno; then
cuda_ok=yes

# default NVCC to the nvcc inside the chosen CUDA installation
if test x$NVCC = x; then
NVCC=$cuda_path/bin/nvcc
fi

# prefer the 64-bit library directory when it exists
if test -d $cuda_path/lib64; then
LDFLAGS="$LDFLAGS -L$cuda_path/lib64"
else
LDFLAGS="$LDFLAGS -L$cuda_path/lib"
fi

NVCCFLAGS="$NVCCFLAGS -I$cuda_path/include"

# NVCC
AC_ARG_VAR(NVCC,[NVIDIA CUDA compiler command])
AC_ARG_VAR(NVCCFLAGS,[special compiler flags for the NVIDIA CUDA compiler])
AC_PATH_PROG(NVCC, nvcc, no)

# NOTE(review): a missing nvcc is fatal even in guess mode, unlike the
# library checks below which only warn — confirm this asymmetry is intended.
if test x$NVCC = xno; then
AC_MSG_FAILURE([CUDA compiler nvcc was not found, specify location using the NVCC variable])
fi

# libraries
# cudart: fatal if CUDA was explicitly requested, warning otherwise
AC_CHECK_LIB(cudart, cudaGetDevice, [LIBS="$LIBS -lcudart"], [
cuda_ok=no
AS_IF([test x$with_cuda = xyes],[
AC_MSG_FAILURE([could not find cuda runtime library (cudart), specify location using LDFLAGS])
],[
AC_MSG_WARN([could not find cuda runtime library (cudart). Some features will not be available!])
])
])

# cufft: same fatal/warn split as cudart
AC_CHECK_LIB(cufft, cufftPlan3d, [LIBS="$LIBS -lcufft"], [
cuda_ok=no
AS_IF([test x$with_cuda = xyes],[
AC_MSG_FAILURE([could not find cuda FFT library (cufft), specify location using LDFLAGS])
],[
AC_MSG_WARN([could not find cuda FFT library (cufft). Some features will not be available!])
])
])

# NOTE(review): the diff interleaves the old and new compile-check hunks
# from here on — the first case/compile sequence is the removed version,
# the second is its replacement. Do not treat both as live code.
# NVCC compile check
AC_MSG_CHECKING([whether CUDA works])
# if no other compute capability is defined by the user, we need at least 1.1
case "$NVCCFLAGS" in
# NVCC compile check
AC_MSG_CHECKING([whether CUDA works])

# if no other compute capability is defined by the user, we require at least 1.1
case "$NVCCFLAGS" in
*-arch=*) ;;
*) NVCCFLAGS="$NVCCFLAGS --ptxas-options=-v -gencode arch=compute_11,code=compute_11 -gencode arch=compute_20,code=compute_20"
esac
# use nvcc
save_CC=$CC
save_CFLAGS=$CFLAGS
# autoconf currently doesn't allow to define new languages like cuda, so try to work around it
CC=$NVCC
CFLAGS="$NVCCFLAGS"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([
#include <cuda.h>
],[
cudaGetDevice(0);
])], [AC_MSG_RESULT(yes)], [AC_MSG_FAILURE([cannot compile CUDA code. Look at config.log for more details.])])
CC=$save_CC
CFLAGS=$save_CFLAGS
AC_DEFINE(CUDA,[],[Whether CUDA is available])
*) NVCCFLAGS="$NVCCFLAGS --ptxas-options=-v -gencode arch=compute_11,code=compute_11 -gencode arch=compute_20,code=compute_20"
esac

# use nvcc
save_CC=$CC
save_CFLAGS=$CFLAGS

# autoconf currently doesn't allow to define new languages like cuda, this is a workaround
CC=$NVCC
CFLAGS="$NVCCFLAGS"

# new version: a compile failure is non-fatal in guess mode (warn + cuda_ok=no)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include <cuda.h>],[cudaGetDevice(0);])], [
AC_MSG_RESULT(yes)
],[
cuda_ok=no
AS_IF([test x$with_cuda = xyes],[
AC_MSG_FAILURE([cannot compile CUDA code. Look at config.log for more details.])
],[
AC_MSG_WARN([cannot compile CUDA code. Some features will not be available!])
])
])

CC=$save_CC
CFLAGS=$save_CFLAGS

# NOTE(review): CUDA is still #defined when a guess-mode check only warned
# (cuda_ok=no) — TODO confirm this is intended; the summary below gates on
# cuda_ok instead.
AC_DEFINE(CUDA,[],[Whether CUDA is available])
fi

# NOTE(review): gates on $with_cuda rather than $cuda_ok — in guess mode the
# CUDA automake conditional stays enabled even if the checks failed. Verify.
AM_CONDITIONAL(CUDA, [test x$with_cuda != xno])

# Configuration summary, emitted verbatim via heredocs (no comments may be
# inserted between "cat <<EOF" and "EOF" — those lines are runtime output).
# NOTE(review): the "Expand Down Expand Up @@ ..." line below is GitHub diff
# chrome captured inside the heredoc; the real file continues the summary
# table there.
cat <<EOF
Expand Down Expand Up @@ -593,7 +637,7 @@ dot = $DOT
python = $PYTHON
EOF

# Old/new diff pair: the old condition printed the CUDA summary whenever CUDA
# was not disabled; the new one prints it only when every CUDA check passed.
if test x$with_cuda != xno; then
if test x$cuda_ok = xyes; then
cat <<EOF
CUDA compiler = $NVCC
CUDA flags = $NVCCFLAGS
Expand Down

0 comments on commit 56b2be3

Please sign in to comment.