
Commit

Merge pull request #6298 from gmarkall/grm-cudadrv-tests-flake8
Fix flake8 violations in numba.cuda.tests.cudadrv
sklam committed Oct 6, 2020
2 parents fd2e6b0 + 8e44dd6, commit 159510a
Showing 13 changed files with 45 additions and 53 deletions.
14 changes: 0 additions & 14 deletions .flake8
@@ -91,20 +91,6 @@ exclude =
     numba/misc/mergesort.py
     numba/core/base.py
     numba/np/npdatetime.py
-    numba/cuda/tests/cudadrv/test_reset_device.py
-    numba/cuda/tests/cudadrv/test_linker.py
-    numba/cuda/tests/cudadrv/test_cuda_driver.py
-    numba/cuda/tests/cudadrv/test_cuda_ndarray.py
-    numba/cuda/tests/cudadrv/test_inline_ptx.py
-    numba/cuda/tests/cudadrv/test_profiler.py
-    numba/cuda/tests/cudadrv/test_deallocations.py
-    numba/cuda/tests/cudadrv/test_cuda_devicerecord.py
-    numba/cuda/tests/cudadrv/test_nvvm_driver.py
-    numba/cuda/tests/cudadrv/test_pinned.py
-    numba/cuda/tests/cudadrv/test_cuda_auto_context.py
-    numba/cuda/tests/cudadrv/test_select_device.py
-    numba/cuda/tests/cudadrv/test_events.py
-    numba/cuda/tests/cudadrv/test_cuda_memory.py
     numba/pycc/cc.py
     numba/pycc/compiler.py
     numba/pycc/llvm_types.py
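Removing these fourteen paths from the `exclude` list is the heart of the commit: flake8 now checks them on every run. A minimal sketch (not part of the commit) of how the result can be verified from Python with flake8's documented legacy API, assuming flake8 is installed and a numba checkout as the working directory:

```python
# Verify the formerly-excluded test package is now flake8-clean.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide()  # reads the project's .flake8 config
report = style_guide.check_files(['numba/cuda/tests/cudadrv'])
print('violations:', report.total_errors)  # expected to be 0 after this commit
```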
1 change: 1 addition & 0 deletions numba/cuda/tests/cudadrv/test_cuda_auto_context.py
@@ -16,5 +16,6 @@ def test_auto_context(self):
         dA.copy_to_host(newA)
         self.assertTrue(np.allclose(A, newA))
 
+
 if __name__ == '__main__':
     unittest.main()
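The whole change here is one added blank line: pycodestyle's E305 wants two blank lines between the last definition and any module-level code such as the `if __name__` block. A minimal illustration with a hypothetical test case:

```python
import unittest


class TestExample(unittest.TestCase):  # hypothetical test, not from the diff
    def test_trivial(self):
        self.assertTrue(True)


# The two blank lines above this module-level block are what E305 wants.
if __name__ == '__main__':
    unittest.main()
```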
3 changes: 1 addition & 2 deletions numba/cuda/tests/cudadrv/test_cuda_devicerecord.py
@@ -2,12 +2,11 @@
import ctypes
from numba.cuda.cudadrv.devicearray import (DeviceRecord, from_record_like,
auto_device)
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim
import numpy as np
from numba.np import numpy_support


@skip_on_cudasim('Device Record API unsupported in the simulator')
class TestCudaDeviceRecord(CUDATestCase):
"""
13 changes: 9 additions & 4 deletions numba/cuda/tests/cudadrv/test_cuda_driver.py
@@ -168,13 +168,18 @@ def test_cuda_driver_occupancy(self):
         module = self.context.create_module_ptx(self.ptx)
         function = module.get_function('_Z10helloworldPi')
 
-        value = self.context.get_active_blocks_per_multiprocessor(function, 128, 128)
+        value = self.context.get_active_blocks_per_multiprocessor(function,
+                                                                  128, 128)
         self.assertTrue(value > 0)
-        def b2d(bs): return bs
-        grid, block = self.context.get_max_potential_block_size(function, b2d, 128, 128)
+
+        def b2d(bs):
+            return bs
+
+        grid, block = self.context.get_max_potential_block_size(function, b2d,
+                                                                128, 128)
         self.assertTrue(grid > 0)
         self.assertTrue(block > 0)
 
+
 if __name__ == '__main__':
     unittest.main()
-
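This hunk reads as fixes for two rules: E704 (a `def` and its body may not share a line, hence the expanded `b2d`) and E501 (lines over 79 columns, fixed by wrapping arguments against the opening parenthesis). A standalone sketch of both patterns:

```python
# E704: the def and its body get separate lines (was `def b2d(bs): return bs`).
def b2d(bs):
    return bs


# E501: wrap long argument lists, aligning the continuation with the
# opening parenthesis, as the diff above does.
block = min(b2d(128), b2d(256),
            b2d(512))
print(block)
```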
7 changes: 4 additions & 3 deletions numba/cuda/tests/cudadrv/test_cuda_ndarray.py
@@ -88,8 +88,8 @@ def test_devicearray_partition(self):
 
         self.assertTrue(np.all(array == 0))
 
-        right.copy_to_host(array[N//2:])
-        left.copy_to_host(array[:N//2])
+        right.copy_to_host(array[N // 2:])
+        left.copy_to_host(array[:N // 2])
 
         self.assertTrue(np.all(array == original))
 
@@ -116,7 +116,8 @@ def test_devicearray_transpose_wrongdim(self):
     def test_devicearray_transpose_identity(self):
         # any-shape identities should work
         original = np.array(np.arange(24)).reshape(3, 4, 2)
-        array = np.transpose(cuda.to_device(original), axes=(0, 1, 2)).copy_to_host()
+        array = np.transpose(cuda.to_device(original),
+                             axes=(0, 1, 2)).copy_to_host()
         self.assertTrue(np.all(array == original))
 
     def test_devicearray_transpose_duplicatedaxis(self):
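The `N//2` to `N // 2` edits match E226 (missing whitespace around an arithmetic operator), and the long `np.transpose` call is wrapped for E501. A tiny runnable sketch of the slicing pattern:

```python
N = 16
array = list(range(N))
# E226-style fix: whitespace around the arithmetic operator.
left, right = array[:N // 2], array[N // 2:]
print(len(left), len(right))  # 8 8
```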
29 changes: 17 additions & 12 deletions numba/cuda/tests/cudadrv/test_deallocations.py
@@ -39,9 +39,10 @@ def test_max_pending_bytes(self):
         try:
             # change to a smaller ratio
             config.CUDA_DEALLOCS_RATIO = max_pending / mi.total
-            # due to round off error (floor is used in calculating _max_pending_bytes)
-            # it can be off by 1.
-            self.assertAlmostEqual(deallocs._max_pending_bytes, max_pending, delta=1)
+            # due to round off error (floor is used in calculating
+            # _max_pending_bytes) it can be off by 1.
+            self.assertAlmostEqual(deallocs._max_pending_bytes, max_pending,
+                                   delta=1)
 
             # allocate half the max size
             # this will not trigger deallocation
@@ -50,7 +51,8 @@
 
             # allocate another remaining
             # this will not trigger deallocation
-            cuda.to_device(np.ones(deallocs._max_pending_bytes - deallocs._size, dtype=np.int8))
+            cuda.to_device(np.ones(deallocs._max_pending_bytes -
+                                   deallocs._size, dtype=np.int8))
             self.assertEqual(len(deallocs), 2)
 
             # another byte to trigger .clear()
@@ -198,7 +200,8 @@ class PinnedException(Exception):
             pass
         with cuda.pinned(arr):
             pass
-        # Should also work when breaking out of the block due to an exception
+        # Should also work when breaking out of the block due to an
+        # exception
         try:
             with cuda.pinned(arr):
                 raise PinnedException
@@ -216,23 +219,25 @@ class MappedException(Exception):
         ctx = cuda.current_context()
         ctx.deallocations.clear()
         with self.check_ignored_exception(ctx):
-            with cuda.mapped(arr) as marr:
+            with cuda.mapped(arr):
                 pass
-            with cuda.mapped(arr) as marr:
+            with cuda.mapped(arr):
                 pass
             # Should also work inside a `defer_cleanup` block
             with cuda.defer_cleanup():
-                with cuda.mapped(arr) as marr:
+                with cuda.mapped(arr):
                     pass
-                with cuda.mapped(arr) as marr:
+                with cuda.mapped(arr):
                     pass
-            # Should also work when breaking out of the block due to an exception
+            # Should also work when breaking out of the block due to an
+            # exception
             try:
-                with cuda.mapped(arr) as marr:
+                with cuda.mapped(arr):
                     raise MappedException
             except MappedException:
-                with cuda.mapped(arr) as marr:
+                with cuda.mapped(arr):
                     pass
 
+
 if __name__ == '__main__':
     unittest.main()
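Dropping the `as marr` bindings silences F841 (local variable assigned but never used); the tests only need the context managers' side effects. A sketch with a hypothetical stand-in for `cuda.mapped`:

```python
from contextlib import contextmanager


@contextmanager
def mapped(arr):  # hypothetical stand-in for cuda.mapped
    yield arr


arr = [0] * 4
with mapped(arr):  # no unused `as marr` binding, so no F841
    pass
```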
1 change: 1 addition & 0 deletions numba/cuda/tests/cudadrv/test_events.py
@@ -33,5 +33,6 @@ def test_event_elapsed_stream(self):
         # Exercise the code path
         evtstart.elapsed_time(evtend)
 
+
 if __name__ == '__main__':
     unittest.main()
1 change: 0 additions & 1 deletion numba/cuda/tests/cudadrv/test_inline_ptx.py
@@ -1,5 +1,4 @@
 from llvmlite.llvmpy.core import Module, Type, Builder, InlineAsm
-from llvmlite import binding as ll
 
 from numba.cuda.cudadrv import nvvm
 from numba.cuda.testing import unittest, ContextResettingTestCase
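The deleted `from llvmlite import binding as ll` is a textbook F401 fix: an imported name that is never referenced in the module. A sketch of the rule with hypothetical imports:

```python
# F401: every imported name must be used somewhere in the module.
# was (flagged):  from llvmlite import binding as ll   # `ll` never referenced
import sys  # kept, because it is used below

print(sys.version_info[:2])
```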
10 changes: 5 additions & 5 deletions numba/cuda/tests/cudadrv/test_linker.py
@@ -8,7 +8,7 @@
 from numba import cuda, void, float64, int64
 
 
-def function_with_lots_of_registers(x, a, b, c, d, e, f):
+def func_with_lots_of_registers(x, a, b, c, d, e, f):
     a1 = 1.0
     a2 = 1.0
     a3 = 1.0
@@ -91,26 +91,26 @@ def test_set_registers_no_max(self):
         uses more than 57 registers - this ensures that test_set_registers_*
         are really checking that they reduced the number of registers used from
         something greater than the maximum."""
-        compiled = cuda.jit(function_with_lots_of_registers)
+        compiled = cuda.jit(func_with_lots_of_registers)
         compiled = compiled.specialize(np.empty(32), *range(6))
         self.assertGreater(compiled._func.get().attrs.regs, 57)
 
     @require_context
     def test_set_registers_57(self):
-        compiled = cuda.jit(max_registers=57)(function_with_lots_of_registers)
+        compiled = cuda.jit(max_registers=57)(func_with_lots_of_registers)
         compiled = compiled.specialize(np.empty(32), *range(6))
         self.assertLessEqual(compiled._func.get().attrs.regs, 57)
 
     @require_context
     def test_set_registers_38(self):
-        compiled = cuda.jit(max_registers=38)(function_with_lots_of_registers)
+        compiled = cuda.jit(max_registers=38)(func_with_lots_of_registers)
         compiled = compiled.specialize(np.empty(32), *range(6))
         self.assertLessEqual(compiled._func.get().attrs.regs, 38)
 
     @require_context
     def test_set_registers_eager(self):
         sig = void(float64[::1], int64, int64, int64, int64, int64, int64)
-        compiled = cuda.jit(sig, max_registers=38)(function_with_lots_of_registers)
+        compiled = cuda.jit(sig, max_registers=38)(func_with_lots_of_registers)
         self.assertLessEqual(compiled._func.get().attrs.regs, 38)
 
 
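The rename from `function_with_lots_of_registers` to `func_with_lots_of_registers` is itself a line-length fix: the longest call site, `cuda.jit(sig, max_registers=38)(...)`, only fits within 79 columns with the four-characters-shorter name. A toy version of the renamed helper:

```python
# The shortened helper name; four characters saved is exactly enough to
# bring the longest call site in the diff above back under 79 columns.
def func_with_lots_of_registers(x, a, b, c, d, e, f):
    return x + a + b + c + d + e + f


print(func_with_lots_of_registers(1, 2, 3, 4, 5, 6, 7))
```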
12 changes: 5 additions & 7 deletions numba/cuda/tests/cudadrv/test_nvvm_driver.py
@@ -1,7 +1,7 @@
 from llvmlite.llvmpy.core import Module, Type, Builder
-from numba.cuda.cudadrv.nvvm import (NVVM, CompilationUnit, llvm_to_ptx,
-                                     set_cuda_kernel, fix_data_layout,
-                                     get_arch_option, get_supported_ccs)
+from numba.cuda.cudadrv.nvvm import (llvm_to_ptx, set_cuda_kernel,
+                                     fix_data_layout, get_arch_option,
+                                     get_supported_ccs)
 from ctypes import c_size_t, c_uint64, sizeof
 from numba.cuda.testing import unittest
 from numba.cuda.cudadrv.nvvm import LibDevice, NvvmError
@@ -13,8 +13,6 @@
 @skip_on_cudasim('NVVM Driver unsupported in the simulator')
 class TestNvvmDriver(unittest.TestCase):
     def get_ptx(self):
-        nvvm = NVVM()
-
         if is64bit:
             return gpu64
         else:
@@ -134,7 +132,7 @@ def test_libdevice_arch_fix(self):
 !nvvm.annotations = !{!1}
 !1 = metadata !{void (i32*)* @simple, metadata !"kernel", i32 1}
-'''
+'''  # noqa: E501
 
 gpu32 = '''
 target triple="nvptx-"
@@ -170,7 +168,7 @@ def test_libdevice_arch_fix(self):
 !nvvm.annotations = !{!1}
 !1 = metadata !{void (i32*)* @simple, metadata !"kernel", i32 1}
-'''
+'''  # noqa: E501
 
 if __name__ == '__main__':
     unittest.main()
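Two fixes recur in this file: the unused `nvvm = NVVM()` assignment goes away (F841, taking the now-unneeded `NVVM` and `CompilationUnit` imports with it), and the embedded LLVM IR strings, which likely contain over-long lines such as a `target datalayout` in the collapsed regions, get a targeted `# noqa: E501` on their closing quotes. A sketch of that noqa pattern:

```python
# The string below holds a line longer than 79 columns that cannot be
# wrapped without changing its meaning; flake8 extends a noqa on the
# closing-quote line to every line of the multi-line string token.
ir = '''
!1 = metadata !{void (i32*)* @simple, metadata !"kernel", i32 1, metadata !"this line is deliberately padded well past the limit"}
'''  # noqa: E501
print(len(ir))
```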
5 changes: 2 additions & 3 deletions numba/cuda/tests/cudadrv/test_pinned.py
@@ -21,15 +21,14 @@ def _run_copies(self, A):
         self.assertTrue(np.allclose(A, A0))
 
     def test_pinned(self):
-        A = np.arange(2*1024*1024)  # 16 MB
+        A = np.arange(2 * 1024 * 1024)  # 16 MB
         with cuda.pinned(A):
             self._run_copies(A)
 
     def test_unpinned(self):
-        A = np.arange(2*1024*1024)  # 16 MB
+        A = np.arange(2 * 1024 * 1024)  # 16 MB
         self._run_copies(A)
 
 
 if __name__ == '__main__':
     unittest.main()
-
1 change: 0 additions & 1 deletion numba/cuda/tests/cudadrv/test_profiler.py
@@ -18,4 +18,3 @@ def test_profiling(self):
 
 if __name__ == '__main__':
     unittest.main()
-
1 change: 0 additions & 1 deletion numba/cuda/tests/cudadrv/test_select_device.py
@@ -39,4 +39,3 @@ def test_select_device(self):
 
 if __name__ == '__main__':
     unittest.main()
-