CI: Set up self-hosted Azure agent with GPU (#491)
Use the self-hosted Docker agent to set up a CI job capable of running GPU tests.
carterbox committed Jun 23, 2020
1 parent 79bf70d commit a5e60c9
Showing 4 changed files with 36 additions and 4 deletions.
azure-pipelines.yml (32 additions, 0 deletions)
@@ -21,6 +21,38 @@ trigger:

 jobs:

+- job: CUDA_Linux
+  pool:
+    name: Default
+    demands:
+    - CUDA_VERSION
+    - Agent.OS -equals Linux
+  strategy:
+    matrix:
+      Python37:
+        python.version: '37'
+    maxParallel: 4
+  steps:
+  - script: echo "CUDA version is $(CUDA_VERSION)"
+    displayName: Print CUDA version
+  - script: |
+      conda update -n base conda --yes --quiet
+    displayName: Update conda
+  - script: >
+      conda env create --quiet --force
+      -n tomopy
+      -f envs/linux-$(python.version).yml
+    displayName: Create build environment
+  - script: |
+      source activate tomopy
+      python setup.py install --enable-cuda
+    displayName: Setup and install
+  - script: |
+      source activate tomopy
+      export CUDA_VERSION="$(CUDA_VERSION)"
+      nosetests
+    displayName: nosetests
+
 - job: Linux
   pool:
     vmImage: 'ubuntu-latest'
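The final script step exports CUDA_VERSION into the environment that runs nosetests, and the test suite (changed further down in this commit) keys its GPU tests off that same variable. A minimal, standard-library-only sketch of that gating pattern follows; the class and test names here are illustrative, not taken from the repository.

import os
import unittest


class GpuGateSketch(unittest.TestCase):
    """Illustrative only: mirrors the skip condition the tomopy tests use."""

    @unittest.skipUnless("CUDA_VERSION" in os.environ, "CUDA_VERSION not set.")
    def test_gpu_path(self):
        # On the self-hosted agent the pipeline step above exports
        # CUDA_VERSION, so this body runs; elsewhere the test is skipped.
        self.assertTrue(os.environ["CUDA_VERSION"])


if __name__ == "__main__":
    unittest.main()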
test/test_tomopy/test_data/mlem_accel_gpu.npy (binary file added)
test/test_tomopy/test_data/sirt_accel_gpu.npy (binary file added)
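These two .npy files are the stored reference reconstructions that the new GPU tests read back and compare against. A rough sketch of that comparison, assuming it is run from the repository root and using only the file name and tolerance visible in the diff below; the copy is a placeholder for the actual recon call.

import numpy as np
from numpy.testing import assert_allclose

# Load the stored reference result for the accelerated GPU MLEM test.
reference = np.load("test/test_tomopy/test_data/mlem_accel_gpu.npy")

# Placeholder for recon(..., algorithm='mlem', accelerated=True, device='gpu').
result = reference.copy()

# Same relative tolerance the tests in this commit use.
assert_allclose(result, reference, rtol=1e-2)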
test/test_tomopy/test_recon/test_algorithm.py (4 additions, 4 deletions)
@@ -125,11 +125,11 @@ def test_mlem_accel(self):
                        accelerated=True, device='cpu')
         assert_allclose(result, read_file('mlem_accel.npy'), rtol=1e-2)

-    @unittest.skipUnless("CUDA_PATH" in os.environ, "CUDA_PATH not set.")
+    @unittest.skipUnless("CUDA_VERSION" in os.environ, "CUDA_VERSION not set.")
     def test_mlem_gpu(self):
         result = recon(self.prj, self.ang, algorithm='mlem', num_iter=4,
                        accelerated=True, device='gpu')
-        assert_allclose(result, read_file('mlem_gpu.npy'), rtol=1e-2)
+        assert_allclose(result, read_file('mlem_accel_gpu.npy'), rtol=1e-2)

     def test_osem(self):
         assert_allclose(
@@ -165,11 +165,11 @@ def test_sirt_accel(self):
                        num_iter=4, accelerated=True, device='cpu')
         assert_allclose(result, read_file('sirt_accel.npy'), rtol=1e-2)

-    @unittest.skipUnless("CUDA_PATH" in os.environ, "CUDA_PATH not set.")
+    @unittest.skipUnless("CUDA_VERSION" in os.environ, "CUDA_VERSION not set.")
     def test_sirt_gpu(self):
         result = recon(self.prj, self.ang, algorithm='sirt',
                        num_iter=4, accelerated=True, device='gpu')
-        assert_allclose(result, read_file('sirt_gpu.npy'), rtol=1e-2)
+        assert_allclose(result, read_file('sirt_accel_gpu.npy'), rtol=1e-2)

     def test_tv(self):
         assert_allclose(
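For reference, one hypothetical way to reproduce the gated run locally, assuming nose and a CUDA-enabled build of tomopy are installed; the CUDA version string below is illustrative.

import os
import subprocess

# Export the gate variable and run the reconstruction tests the same way the
# CI step on the self-hosted agent does.
env = dict(os.environ, CUDA_VERSION="10.1")
subprocess.run(
    ["nosetests", "test/test_tomopy/test_recon/test_algorithm.py"],
    check=True,
    env=env,
)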
