Commit 3aa0599

test: fix broken simulator interface
1 parent d123353 commit 3aa0599

4 files changed: +34, -14 lines changed

numba_cuda/numba/cuda/simulator/cudadrv/devicearray.py

Lines changed: 21 additions & 2 deletions
@@ -8,6 +8,8 @@
 
 from contextlib import contextmanager
 from numba.cuda.np.numpy_support import numpy_version
+from numba.cuda.np import numpy_support
+from numba.cuda import types
 
 import numpy as np
 
@@ -57,8 +59,8 @@ def __wrap_if_fake(self, item):
         return item
 
     def __getattr__(self, attrname):
-        if attrname in dir(self._item._ary):  # For e.g. array size.
-            return self.__wrap_if_fake(getattr(self._item._ary, attrname))
+        if (value := getattr(self._item, attrname, None)) is not None:
+            return self.__wrap_if_fake(value)
         else:
             return self.__wrap_if_fake(self._item.__getitem__(attrname))
 
@@ -109,6 +111,23 @@ def __init__(self, ary, stream=0):
         self._ary = ary
         self.stream = stream
 
+    @property
+    def _numba_type_(self):
+        """
+        Magic attribute expected by Numba to get the numba type that
+        represents this object.
+        """
+        broadcast = 0 in self.strides
+        if self.is_c_contiguous() and not broadcast:
+            layout = "C"
+        elif self.is_f_contiguous() and not broadcast:
+            layout = "F"
+        else:
+            layout = "A"
+
+        dtype = numpy_support.from_dtype(self._ary.dtype)
+        return types.Array(dtype, self._ary.ndim, layout)
+
     @property
     def alloc_size(self):
         return self._ary.nbytes

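The new _numba_type_ property lets the simulator's fake device array advertise a Numba array type directly, instead of relying on attribute forwarding through __getattr__. Below is a minimal sketch of the same layout-selection logic, applied to plain NumPy arrays as a stand-in for the fake array; the helper name fake_numba_type is hypothetical and for illustration only.

import numpy as np
from numba.cuda.np import numpy_support
from numba.cuda import types


def fake_numba_type(ary):
    # Mirror the property above: broadcast arrays (a zero stride) and
    # non-contiguous views fall back to the "any" layout.
    broadcast = 0 in ary.strides
    if ary.flags["C_CONTIGUOUS"] and not broadcast:
        layout = "C"
    elif ary.flags["F_CONTIGUOUS"] and not broadcast:
        layout = "F"
    else:
        layout = "A"
    dtype = numpy_support.from_dtype(ary.dtype)
    return types.Array(dtype, ary.ndim, layout)


print(fake_numba_type(np.zeros((3, 4), np.float32)))             # C layout
print(fake_numba_type(np.zeros((3, 4), np.float32, order="F")))  # F layout
print(fake_numba_type(np.zeros((3, 4), np.float32)[:, ::2]))     # strided view -> "A" layout
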
numba_cuda/numba/cuda/testing.py

Lines changed: 3 additions & 3 deletions
@@ -191,9 +191,9 @@ def skip_on_cudasim(reason):
     return unittest.skipIf(config.ENABLE_CUDASIM, reason)
 
 
-def skip_on_standalone_numba_cuda(reason):
-    """Skip this test if running on standalone numba_cuda"""
-    return unittest.skipIf(not HAS_NUMBA, reason)
+skip_on_standalone_numba_cuda = unittest.skipUnless(
+    HAS_NUMBA, "requires base numba install"
+)
 
 
 def skip_unless_cudasim(reason):

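With this change, skip_on_standalone_numba_cuda is a ready-made decorator rather than a factory that takes a reason string, so call sites apply it bare. A usage sketch follows; the test class and method names are made up for illustration.

import unittest
from numba.cuda.testing import CUDATestCase, skip_on_standalone_numba_cuda


class TestNeedsBaseNumba(CUDATestCase):
    # Applied directly; the "requires base numba install" reason is baked in.
    @skip_on_standalone_numba_cuda
    def test_uses_base_numba(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
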
numba_cuda/numba/cuda/tests/cudapy/test_atomics.py

Lines changed: 9 additions & 9 deletions
@@ -5,7 +5,7 @@
 from textwrap import dedent
 
 from numba import cuda
-from numba.cuda import uint32, uint64, float32, float64
+from numba.cuda import uint32, uint64, float32, float64, int32
 from numba.cuda.testing import unittest, CUDATestCase, cc_X_or_above
 from numba.cuda.core import config
 
@@ -239,19 +239,19 @@ def atomic_add_double_3(ary):
 
 def atomic_sub(ary):
     atomic_binary_1dim_shared(
-        ary, ary, 1, uint32, 32, cuda.atomic.sub, atomic_cast_none, 0, False
+        ary, ary, 1, int32, 32, cuda.atomic.sub, atomic_cast_none, 0, False
     )
 
 
 def atomic_sub2(ary):
     atomic_binary_2dim_shared(
-        ary, 1, uint32, (4, 8), cuda.atomic.sub, atomic_cast_none, False
+        ary, 1, int32, (4, 8), cuda.atomic.sub, atomic_cast_none, False
     )
 
 
 def atomic_sub3(ary):
     atomic_binary_2dim_shared(
-        ary, 1, uint32, (4, 8), cuda.atomic.sub, atomic_cast_to_uint64, False
+        ary, 1, int32, (4, 8), cuda.atomic.sub, atomic_cast_to_uint64, False
     )
 
 
@@ -789,7 +789,7 @@ def test_atomic_add_double_global_3(self):
         self.assertCorrectFloat64Atomics(cuda_func, shared=False)
 
     def test_atomic_sub(self):
-        ary = np.random.randint(0, 32, size=32).astype(np.int32)
+        ary = np.random.randint(0, 32, size=32, dtype=np.int32)
         orig = ary.copy()
         cuda_atomic_sub = cuda.jit("void(int32[:])")(atomic_sub)
         cuda_atomic_sub[1, 32](ary)
@@ -801,16 +801,16 @@ def test_atomic_sub(self):
         self.assertTrue(np.all(ary == gold))
 
     def test_atomic_sub2(self):
-        ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
+        ary = np.random.randint(0, 32, size=(4, 8), dtype=np.int32)
         orig = ary.copy()
-        cuda_atomic_sub2 = cuda.jit("void(uint32[:,:])")(atomic_sub2)
+        cuda_atomic_sub2 = cuda.jit("void(int32[:,:])")(atomic_sub2)
         cuda_atomic_sub2[1, (4, 8)](ary)
         self.assertTrue(np.all(ary == orig - 1))
 
     def test_atomic_sub3(self):
-        ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
+        ary = np.random.randint(0, 32, size=(4, 8), dtype=np.uint32)
         orig = ary.copy()
-        cuda_atomic_sub3 = cuda.jit("void(uint32[:,:])")(atomic_sub3)
+        cuda_atomic_sub3 = cuda.jit("void(int32[:,:])")(atomic_sub3)
         cuda_atomic_sub3[1, (4, 8)](ary)
         self.assertTrue(np.all(ary == orig - 1))

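As an aside on the test-array construction: passing dtype and the final shape straight to np.random.randint builds the array in one step, instead of drawing int64 values and then converting and reshaping. A small illustration of the equivalence (values are random; only dtype and shape are compared):

import numpy as np

# One-step form used by the updated tests.
new = np.random.randint(0, 32, size=(4, 8), dtype=np.int32)

# Older two-step form it replaces.
old = np.random.randint(0, 32, size=32).astype(np.int32).reshape(4, 8)

assert new.dtype == old.dtype == np.int32
assert new.shape == old.shape == (4, 8)
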
testing/pytest.ini

Lines changed: 1 addition & 0 deletions
@@ -23,3 +23,4 @@ filterwarnings =
     ignore:.*Compilation is falling back to object mode WITHOUT looplifting enabled.*:numba.cuda.core.errors.NumbaWarning
     ignore:\nCompilation is falling back to object mode WITHOUT looplifting enabled.*:numba.core.errors.NumbaWarning
     ignore:overflow encountered in scalar .+:RuntimeWarning
+    ignore:.*Host array used in CUDA kernel will incur copy overhead.*:numba.cuda.core.errors.NumbaPerformanceWarning

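The newly ignored message is the performance warning numba.cuda emits when a host (NumPy) array is passed directly to a kernel launch. A minimal sketch of code that would trigger it on real hardware follows; the kernel is illustrative only.

import numpy as np
from numba import cuda


@cuda.jit
def add_one(ary):
    i = cuda.grid(1)
    if i < ary.size:
        ary[i] += 1


host_ary = np.zeros(32, dtype=np.int32)
# Launching with a host array emits "Host array used in CUDA kernel will
# incur copy overhead" as a NumbaPerformanceWarning, now filtered in pytest.ini.
add_one[1, 32](host_ary)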