Skip to content

Commit

Permalink
Merge pull request #111 from eyalroz/development
Browse files Browse the repository at this point in the history
Merge work on the development branch
  • Loading branch information
Eyal Rozenberg committed May 26, 2019
2 parents 5ec0d55 + 5f89e74 commit d991925
Show file tree
Hide file tree
Showing 9 changed files with 92 additions and 384 deletions.
42 changes: 42 additions & 0 deletions .travis.yml
@@ -0,0 +1,42 @@
# Travis CI configuration: build the library's examples against two CUDA
# toolkit releases (9.2 on Ubuntu 16.04, 10.1 on Ubuntu 18.04).
language: cpp

# NOTE(review): Travis documentation uses `sudo: required` / `sudo: false`;
# confirm that the value "enabled" is actually honored by Travis.
sudo: enabled

compiler:
  - gcc

matrix:
  include:
    # CUDA       - full package version, as used in the repo .deb file name
    # CUDA_SHORT - toolkit install-prefix suffix (/usr/local/cuda-<short>)
    # CUDA_APT   - hyphenated version used in the apt package names
    - name: CUDA 9
      env:
        - CUDA=9.2.148-1
        - CUDA_SHORT=9.2
        - CUDA_APT=9-2
        - UBUNTU_VERSION=ubuntu1604
      dist: xenial
    - name: CUDA 10
      env:
        - CUDA=10.1.105-1
        - CUDA_APT=10-1
        - CUDA_SHORT=10.1
        - UBUNTU_VERSION=ubuntu1804
      dist: bionic

before_install:
  # Fetch and register NVIDIA's CUDA apt repository, then install only the
  # toolchain pieces the build needs (compiler, runtime headers, NVTX).
  # Use https for the repo package, consistent with the key download below.
  - INSTALLER=cuda-repo-${UBUNTU_VERSION}_${CUDA}_amd64.deb
  - wget https://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/${INSTALLER}
  - sudo dpkg -i ${INSTALLER}
  - wget https://developer.download.nvidia.com/compute/cuda/repos/${UBUNTU_VERSION}/x86_64/7fa2af80.pub
  - sudo apt-key add 7fa2af80.pub
  - sudo apt update -qq
  - sudo apt install -y cuda-core-${CUDA_APT} cuda-cudart-dev-${CUDA_APT} cuda-nvtx-${CUDA_APT}
  - sudo apt clean
  # Make the freshly installed toolkit visible to CMake and the linker.
  - CUDA_HOME=/usr/local/cuda-${CUDA_SHORT}
  - LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
  - PATH=${CUDA_HOME}/bin:${PATH}

before_script:
  - cmake .

script:
  # VERBOSE=1 so the full compiler command lines appear in the build log.
  - VERBOSE=1 make examples
39 changes: 29 additions & 10 deletions CMakeLists.txt
Expand Up @@ -19,7 +19,7 @@

# We need version 3.8 for native CUDA support in CMake
# We need version 3.8 for native CUDA support in CMake
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
# Make the project's bundled find-modules (cmake/Modules/) visible to
# find_package()/include(); APPEND preserves any user-supplied module path.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")

# -----------------------------------
# Project name, version & build type
Expand Down Expand Up @@ -68,10 +68,22 @@ option(EXPORT_BUILD_DIR "Enables external use without install" OFF)
# CUDA
# -------------

#find_package(CUDA 7.0 REQUIRED) # Why do I to do this damn it ?!
#include_directories( "${CUDA_TOOLKIT_INCLUDE}" )
# While this should not be necessary with CMake 3.8 and later,
# it apparently _is_ necessary to achieve the following:
#
# 1. Allow non-CUDA C++ code access to the CUDA libraries
# 2. Determine the gencode/arch/code flags for the GPUs on the host (= target) machine
#
# so...
# NOTE(review): include(FindCUDA) loads the legacy FindCUDA module directly,
# which defines CUDA_LIBRARIES and cuda_select_nvcc_arch_flags() used below.
include(FindCUDA)

# NOTE(review): include_directories() is directory-scoped; a
# target_include_directories() call on the library target would be preferable.
include_directories( ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} )
# NOTE(review): this commit also deletes cmake/Modules/HandleCUDAComputeCapability.cmake;
# if this include() line is still present in the final file (the diff view here has
# lost its +/- markers), it will fail — confirm it was removed in favor of
# cuda_select_nvcc_arch_flags() below.
include(HandleCUDAComputeCapability)

# Auto-detect the compute capabilities of the GPUs present on this machine
# ("Auto") and bake the corresponding -gencode flags into CMAKE_CUDA_FLAGS.
# The detected flags are cached so the user can override them.
cuda_select_nvcc_arch_flags(CUDA_ARCH_FLAGS_TMP Auto)
set(CUDA_ARCH_FLAGS ${CUDA_ARCH_FLAGS_TMP} CACHE STRING "CUDA -gencode parameters")
# The arch flags come back as a ;-list; flatten to spaces for the flags string.
string(REPLACE ";" " " CUDA_ARCH_FLAGS_STR "${CUDA_ARCH_FLAGS}")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${CUDA_ARCH_FLAGS_STR}")


# Pass -Wall through nvcc to the host compiler; require (at least) C++11.
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler -Wall" )
set(CMAKE_CUDA_STANDARD 11)
Expand All @@ -81,6 +93,7 @@ set(CMAKE_CUDA_EXTENSIONS ON)
# NOTE(review): these CUDA_* variables are consumed by the legacy FindCUDA
# macros (cuda_add_*); with native CUDA language support the per-target
# property CUDA_SEPARABLE_COMPILATION is what applies — the author's own
# doubt below is warranted; verify they have any effect here.
set(CUDA_SEPARABLE_COMPILATION ON) # Does this work with native CUDA support?
set(CUDA_PROPAGATE_HOST_FLAGS OFF) # Does this work with native CUDA support?


# -----------------------
# Main target(s)
# -----------------------
Expand All @@ -102,17 +115,23 @@ target_include_directories(
"${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}"
)

# On Windows, override the CUDA_LIBRARIES value from FindCUDA with the two
# libraries that are actually needed, since users report CMake has trouble
# locating them there.
if(WIN32)
# Windows users report that CMake has trouble
set(CUDA_LIBRARIES "cudadevrt.lib;cudart.lib")
target_link_libraries(${PROJECT_NAME} PUBLIC ${CUDA_LIBRARIES})
endif()
# NOTE(review): this scraped diff view has lost its +/- markers, so one of the
# two target_link_libraries() calls here is presumably the removed (old) line.
# If both survive in the final file, CMake will error: the plain signature
# below cannot be mixed with the keyworded (PUBLIC) signature above on the
# same target — confirm against the actual post-merge CMakeLists.txt.
target_link_libraries(${PROJECT_NAME} ${CUDA_LIBRARIES})

# -----------------------
# Examples / Tests
# -----------------------

# NOTE(review): +/- diff markers were lost in this view; this first
# link_libraries() line is presumably the *removed* (pre-change) form that the
# comment below says was dropped — confirm it is absent from the merged file,
# otherwise the examples would link the implicit CUDA libraries twice.
link_libraries(${CMAKE_CUDA_IMPLICIT_LINK_LIBRARIES} cuda-api-wrappers)
# This next line should have been enough to make the example programs
# link against the CUDA runtime library. However, users have reported
# that doesn't actually happen in some cases / on some platforms; see
# the project page for details and specifically issue #106. It has
# been removed in favor of relying on the find_package(CUDA) line
# above, despite the deprecation of that method of locating CUDA
# libraries.
#
#link_libraries(${CMAKE_CUDA_IMPLICIT_LINK_LIBRARIES})

# Every example target defined below links against the wrapper library.
# (link_libraries() is directory-scoped; per-target target_link_libraries()
# would be the modern equivalent.)
link_libraries(cuda-api-wrappers)

# Example binaries go under examples/bin; they are EXCLUDE_FROM_ALL and are
# built explicitly via the `examples` target (see .travis.yml: `make examples`).
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "examples/bin")
add_executable(vectorAdd EXCLUDE_FROM_ALL examples/modified_cuda_samples/vectorAdd/vectorAdd.cu)
Expand Down
2 changes: 2 additions & 0 deletions README.md
@@ -1,5 +1,7 @@
# cuda-api-wrappers:<br> Thin C++-flavored wrappers for the CUDA runtime API

Branch Build Status: Master [![Master Build Status](https://travis-ci.org/eyalroz/cuda-api-wrappers.svg?branch=master)](https://travis-ci.org/eyalroz/cuda-api-wrappers) | Development: [![Development Build Status](https://travis-ci.org/eyalroz/cuda-api-wrappers.svg?branch=development)](https://travis-ci.org/eyalroz/cuda-api-wrappers)

nVIDIA's [Runtime API](http://docs.nvidia.com/cuda/cuda-runtime-api/index.html) for [CUDA](http://www.nvidia.com/object/cuda_home_new.html) is intended for use both in C and C++ code. As such, it uses a C-style API, the lowest common denominator (with a few [notable exceptions](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__HIGHLEVEL.html) of templated function overloads).

This library of wrappers around the Runtime API is intended to allow us to embrace many of the features of C++ (including some C++11) for using the runtime API - but without reducing expressivity or increasing the level of abstraction (as in, e.g., the [Thrust](https://thrust.github.io/) library). Using cuda-api-wrappers, you still have your devices, streams, events and so on - but they will be more convenient to work with in more C++-idiomatic ways.
Expand Down
2 changes: 1 addition & 1 deletion cmake/Modules/FindCUDAAPIWrappers.cmake
Expand Up @@ -17,7 +17,7 @@ find_package(PkgConfig) # will this even help us at all?

find_path(
CUDA_API_WRAPPERS_INCLUDE_DIR
cuda/api_wrappers.h
cuda/api_wrappers.hpp
HINTS
${CUDA_INCLUDE_DIRS}
${CMAKE_CURRENT_SOURCE_DIR}/cuda-api-wrappers
Expand Down
88 changes: 0 additions & 88 deletions cmake/Modules/HandleCUDAComputeCapability.cmake

This file was deleted.

0 comments on commit d991925

Please sign in to comment.