Update to CUDA 11.0 RC
Update to CUDA 11.0 RC:
  * CUDA version 11.0.182
  * NVIDIA drivers version 450.36.06

Use the same package structure on Intel/AMD (x86_64), Power (ppc64le) and ARMv8/SBSA (aarch64).

Include support for C++17, GCC 9 and Clang 9.

See https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#title-new-features.
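
As a quick smoke test of the newly supported C++17 mode, a kernel using a C++17-only construct should now build with the packaged nvcc. This is an illustrative sketch, not part of the commit; the file name and the sm_70 -gencode target are assumptions:

cat > test_cpp17.cu << 'EOF'
#include <cstdio>

// 'if constexpr' is a C++17 feature, usable in device code with -std=c++17
template <typename T>
__global__ void kernel(T value) {
  if constexpr (sizeof(T) == sizeof(int)) {
    printf("int-sized payload: %d\n", static_cast<int>(value));
  }
}

int main() {
  kernel<<<1, 1>>>(42);
  cudaDeviceSynchronize();
  return 0;
}
EOF
# -std=c++17 matches the new cuda_flags_0 below; the target architecture is an assumption
nvcc -std=c++17 -O3 -gencode arch=compute_70,code=sm_70 -o test_cpp17 test_cpp17.cu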
fwyzard committed Jun 12, 2020
1 parent a2427b6 commit 2051095
Showing 3 changed files with 53 additions and 108 deletions.
cuda-flags.file (2 additions, 2 deletions)
@@ -16,8 +16,8 @@
 %define llvm_cuda_arch %(echo $(for ARCH in %cuda_arch; do echo "$ARCH"; done) | sed -e"s/ /,/g")


-# enable C++14, and generate optimised code
-%define cuda_flags_0 -std=c++14 -O3
+# enable C++17, and generate optimised code
+%define cuda_flags_0 -std=c++17 -O3

 # generate debugging information for device code
 %define cuda_flags_1 --generate-line-info --source-in-ptx
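For reference, the cuda_flags_N macros defined here are assembled into %{cuda_flags}, which the cuda tool file below exports as CUDA_FLAGS, so a device-code compilation under this change ends up looking roughly like the following. This is a sketch: the -gencode entry stands in for the %cuda_arch list and the file names are hypothetical.

# cuda_flags_0 and cuda_flags_1 from above, plus an assumed -gencode entry
nvcc -std=c++17 -O3 \
     --generate-line-info --source-in-ptx \
     -gencode arch=compute_70,code=sm_70 \
     -c kernels.cu -o kernels.o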
cuda-toolfile.spec (1 addition, 6 deletions)
@@ -39,7 +39,7 @@ cat << \EOF_TOOLFILE >%{i}/etc/scram.d/cuda.xml
 <flags CUDA_FLAGS="%{cuda_flags}"/>
 <flags CUDA_HOST_REM_CXXFLAGS="-std=%"/>
 <flags CUDA_HOST_REM_CXXFLAGS="%potentially-evaluated-expression"/>
-<flags CUDA_HOST_CXXFLAGS="-std=c++14"/>
+<flags CUDA_HOST_CXXFLAGS="-std=c++17"/>
 <lib name="cudadevrt" type="cuda"/>
 <runtime name="PATH" value="$CUDA_BASE/bin" type="path"/>
 </tool>
@@ -76,9 +76,7 @@ cat << \EOF_TOOLFILE >%{i}/etc/scram.d/cuda-cusolver.xml
 <info url="https://docs.nvidia.com/cuda/cusolver/index.html"/>
 <use name="cuda"/>
 <lib name="cusolver"/>
-%ifarch x86_64 ppc64le
 <lib name="cusolverMg"/>
-%endif
 </tool>
 EOF_TOOLFILE

@@ -96,7 +94,6 @@ cat << \EOF_TOOLFILE >%{i}/etc/scram.d/cuda-npp.xml
 <use name="cuda"/>
 <lib name="nppial"/>
 <lib name="nppicc"/>
-<lib name="nppicom"/>
 <lib name="nppidei"/>
 <lib name="nppif"/>
 <lib name="nppig"/>
@@ -125,15 +122,13 @@ cat << \EOF_TOOLFILE >%{i}/etc/scram.d/cuda-nvml.xml
 </tool>
 EOF_TOOLFILE

-%ifarch x86_64 ppc64le
 cat << \EOF_TOOLFILE >%{i}/etc/scram.d/cuda-nvjpeg.xml
 <tool name="cuda-nvjpeg" version="@TOOL_VERSION@">
 <info url="https://docs.nvidia.com/cuda/nvjpeg/index.html"/>
 <use name="cuda"/>
 <lib name="nvjpeg"/>
 </tool>
 EOF_TOOLFILE
-%endif

 cat << \EOF_TOOLFILE >%{i}/etc/scram.d/cuda-nvrtc.xml
 <tool name="cuda-nvrtc" version="@TOOL_VERSION@">
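With the %ifarch guards gone, the cuda-cusolver and cuda-nvjpeg tool files are now generated identically on x86_64, ppc64le and aarch64, matching the uniform package structure announced in the commit message. A dependent build can then link these libraries the same way on every architecture, along these lines (a sketch; the object file is hypothetical and $CUDA_BASE is assumed to point at the installed package, as in the runtime settings above):

# link against the uniformly packaged libraries on any of the three architectures
g++ decode.o -L"$CUDA_BASE/lib64" -lnvjpeg -lcusolverMg -o decode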
cuda.spec (50 additions, 100 deletions)
@@ -1,24 +1,15 @@
-### RPM external cuda %{fullversion}
+### RPM external cuda 11.0.1

-%ifarch x86_64 ppc64le
-%define fullversion 10.2.89
-%define cudaversion %(echo %realversion | cut -d. -f 1,2)
-%define driversversion 440.33.01
-%endif
-%ifarch aarch64
-%define fullversion 10.2.107
-%define cudaversion %(echo %realversion | cut -d. -f 1,2)
-%define driversversion 435.17.01
-%endif
+%define driversversion 450.36.06

 %ifarch x86_64
-Source0: https://developer.download.nvidia.com/compute/cuda/%{cudaversion}/Prod/local_installers/%{n}_%{realversion}_%{driversversion}_linux.run
+Source0: https://developer.download.nvidia.com/compute/cuda/%{realversion}/local_installers/%{n}_%{realversion}_%{driversversion}_linux.run
 %endif
 %ifarch ppc64le
-Source0: https://developer.download.nvidia.com/compute/cuda/%{cudaversion}/Prod/local_installers/%{n}_%{realversion}_%{driversversion}_linux_ppc64le.run
+Source0: https://developer.download.nvidia.com/compute/cuda/%{realversion}/local_installers/%{n}_%{realversion}_%{driversversion}_linux_ppc64le.run
 %endif
 %ifarch aarch64
-Source0: https://patatrack.web.cern.ch/patatrack/files/cuda-repo-rhel8-10-2-local-%{realversion}-%{driversversion}-1.0-1.aarch64.rpm
+Source0: https://developer.download.nvidia.com/compute/cuda/%{realversion}/local_installers/%{n}_%{realversion}_%{driversversion}_linux_sbsa.run
 %endif
 Requires: python
 AutoReq: no
@@ -31,111 +22,70 @@ AutoReq: no
 rm -rf %_builddir/build %_builddir/tmp
 mkdir %_builddir/build %_builddir/tmp

-# extract and repackage the CUDA runtime, tools and stubs
-%ifarch x86_64 ppc64le
-/bin/sh %{SOURCE0} --silent --override --override-driver-check --tmpdir %_builddir/tmp --extract=%_builddir/build
-# extracts:
-#   %_builddir/build/EULA.txt
-#   %_builddir/build/NVIDIA-Linux-%{_arch}-440.33.01.run  # linux drivers
-#   %_builddir/build/cublas/        # standalone cuBLAS library, also included in cuda-toolkit
-#   %_builddir/build/cuda-samples/  # CUDA samples
-#   %_builddir/build/cuda-toolkit/  # CUDA runtime, tools and stubs
-#   %_builddir/build/integration/   # scripts for running Nsight Systems and Compute

-# extract NVIDIA libraries needed by the CUDA runtime to %_builddir/build/drivers
-/bin/sh %_builddir/build/NVIDIA-Linux-%{_arch}-%{driversversion}.run --silent --extract-only --tmpdir %_builddir/tmp --target %_builddir/build/drivers
-%endif
-%ifarch aarch64
-# extract the individual .rpm archives from the repository into
-# %_builddir/tmp/var/cuda-repo-10-2-local-10.2.107-435.17.01/
-rpm2cpio %{SOURCE0} | { cd %_builddir/tmp; cpio -i -d; }

-# extract the contents from the individual .rpm archives into
-# %_builddir/tmp/usr/local/cuda-10.2/...
-for FILE in %_builddir/tmp/var/cuda-repo-10-2-local-%{realversion}-%{driversversion}/*.rpm; do
-  rpm2cpio $FILE | { cd %_builddir/tmp; cpio -i -d; }
-done
-# move the CUDA libraries to %_builddir/build/cuda-toolkit/
-mv %_builddir/tmp/usr/local/cuda-%{cudaversion} %_builddir/build/cuda-toolkit
-mv %_builddir/tmp/usr/lib64/libcublas* %_builddir/build/cuda-toolkit/lib64/
-mv %_builddir/tmp/usr/lib64/libnvblas* %_builddir/build/cuda-toolkit/lib64/
-mv %_builddir/tmp/usr/lib64/stubs/* %_builddir/build/cuda-toolkit/lib64/stubs/
-# move the NVIDIA libraries to %_builddir/build/drivers
-mv %_builddir/tmp/usr/lib64 %_builddir/build/drivers
-%endif
+# extract and repackage the CUDA runtime
+cd %_builddir/
+/bin/sh %{SOURCE0} --silent --override --tmpdir=%_builddir/tmp --installpath=%_builddir/build --toolkit --keep

 # create target directory structure
 mkdir -p %{i}/bin
 mkdir -p %{i}/include
 mkdir -p %{i}/lib64
 mkdir -p %{i}/share

-# package only the runtime static libraries
-mv %_builddir/build/cuda-toolkit/lib64/libcudart_static.a %{i}/lib64/
-mv %_builddir/build/cuda-toolkit/lib64/libcudadevrt.a %{i}/lib64/
-rm -f %_builddir/build/cuda-toolkit/lib64/lib*.a

-# do not package dynamic libraries for which there are stubs
-rm -f %_builddir/build/cuda-toolkit/lib64/libcublas.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcublasLt.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcufft.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcufftw.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcurand.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcusolver.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcusolverMg.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libcusparse.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libnpp*.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libnvgraph.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libnvidia-ml.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libnvjpeg.so*
-rm -f %_builddir/build/cuda-toolkit/lib64/libnvrtc.so*

-# package the other dynamic libraries and the stubs
-chmod a+x %_builddir/build/cuda-toolkit/lib64/*.so
-chmod a+x %_builddir/build/cuda-toolkit/lib64/stubs/*.so
-mv %_builddir/build/cuda-toolkit/lib64/* %{i}/lib64/
+mkdir -p %{i}/lib64/stubs

-# package the includes
-mv %_builddir/build/cuda-toolkit/include/* %{i}/include/
+# package only the runtime static library
+mv %_builddir/build/lib64/libcudadevrt.a %{i}/lib64/
+rm -f %_builddir/build/lib64/lib*.a

-# package the CUDA Profiling Tools Interface includea and libraries
-%ifarch x86_64 ppc64le
-chmod a+x %_builddir/build/cuda-toolkit/extras/CUPTI/lib64/*.so*
-mv %_builddir/build/cuda-toolkit/extras/CUPTI/lib64/*.so* %{i}/lib64/
-mv %_builddir/build/cuda-toolkit/extras/CUPTI/include/*.h %{i}/include/
-%endif
-%ifarch aarch64
-# the RPMs already have the CUPTI files under lib64/ and include/
-%endif
+# package only the CUDA driver library stub
+mv %_builddir/build/lib64/stubs/libcuda.so %{i}/lib64/stubs/
+rm -rf %_builddir/build/lib64/stubs/

-# leave out the Nsight and NVVP graphical tools
-rm -f %_builddir/build/cuda-toolkit/bin/computeprof
-rm -f %_builddir/build/cuda-toolkit/bin/nsight
-rm -f %_builddir/build/cuda-toolkit/bin/nsight_ee_plugins_manage.sh
-rm -f %_builddir/build/cuda-toolkit/bin/nv-nsight-cu-cli
-rm -f %_builddir/build/cuda-toolkit/bin/nvvp
+# do not package the OpenCL libraries
+rm -f %_builddir/build/lib64/libOpenCL.*

-# leave out the CUDA samples
-rm -f %_builddir/build/cuda-toolkit/bin/cuda-install-samples-%{cudaversion}.sh
+# package the dynamic libraries
+chmod a+x %_builddir/build/lib64/*.so
+mv %_builddir/build/lib64/* %{i}/lib64/

+# package the includes
+chmod a-x %_builddir/build/include/*.h*
+mv %_builddir/build/include/* %{i}/include/

+# package the CUDA Profiling Tools Interface includes and libraries
+chmod a+x %_builddir/build/extras/CUPTI/lib64/*.so*
+mv %_builddir/build/extras/CUPTI/lib64/*.so* %{i}/lib64/
+mv %_builddir/build/extras/CUPTI/include/*.h %{i}/include/

+# leave out the Nsight and NVVP graphical tools, and package the other binaries
+rm -f %_builddir/build/bin/computeprof
+rm -f %_builddir/build/bin/cuda-uninstaller
+rm -f %_builddir/build/bin/ncu*
+rm -f %_builddir/build/bin/nsight*
+rm -f %_builddir/build/bin/nsys*
+rm -f %_builddir/build/bin/nv-nsight*
+rm -f %_builddir/build/bin/nvvp
+mv %_builddir/build/bin %{i}/

 # package the cuda-gdb support files, and rename the binary to use it via a wrapper
-mv %_builddir/build/cuda-toolkit/share/gdb/ %{i}/share/
-mv %_builddir/build/cuda-toolkit/bin/cuda-gdb %{i}/bin/cuda-gdb.real
+mv %_builddir/build/share/ %{i}/
+mv %{i}/bin/cuda-gdb %{i}/bin/cuda-gdb.real
 cat > %{i}/bin/cuda-gdb << @EOF
 #! /bin/bash
 export PYTHONHOME=$PYTHON_ROOT
 exec %{i}/bin/cuda-gdb.real "\$@"
 @EOF
 chmod a+x %{i}/bin/cuda-gdb

-# package the binaries and tools
-mv %_builddir/build/cuda-toolkit/bin/* %{i}/bin/
-mv %_builddir/build/cuda-toolkit/nvvm %{i}/
+# package the other binaries and tools
+mv %_builddir/build/nvvm %{i}/
+mv %_builddir/build/Sanitizer %{i}/

+# package the EULA and version file
+mv %_builddir/build/EULA.txt %{i}/
+mv %_builddir/build/version.txt %{i}/

-# package the version file
-mv %_builddir/build/cuda-toolkit/version.txt %{i}/
+# extract and repackage the NVIDIA libraries needed by the CUDA runtime
+/bin/sh %_builddir/pkg/builds/NVIDIA-Linux-%{_arch}-%{driversversion}.run --silent --extract-only --tmpdir %_builddir/tmp --target %_builddir/build/drivers

 # repackage the NVIDIA libraries needed by the CUDA runtime
 mkdir -p %{i}/drivers
 mv %_builddir/build/drivers/libcuda.so.%{driversversion} %{i}/drivers/
 ln -sf libcuda.so.%{driversversion} %{i}/drivers/libcuda.so.1
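The drivers/ section at the end packages only the user-space driver library, libcuda.so, with a libcuda.so.1 symlink, so CUDA applications can still be loaded on hosts without an installed NVIDIA driver (build machines, for example). Wiring that up at run time would look roughly like this (a sketch; the install prefix is hypothetical and the system library path may differ):

# prefer the system libcuda.so.1, fall back to the bundled copy otherwise
CUDA_BASE=/opt/external/cuda/11.0.1    # hypothetical install prefix
if [ ! -e /usr/lib64/libcuda.so.1 ]; then
  export LD_LIBRARY_PATH="$CUDA_BASE/drivers${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
fi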
