
Commit d83c669

perf: use local variables in device array constructor where applicable
1 parent 063378d commit d83c669

File tree

1 file changed: +12 −11 lines changed

numba_cuda/numba/cuda/cudadrv/devicearray.py

Lines changed: 12 additions & 11 deletions
@@ -90,26 +90,27 @@ def __init__(self, shape, strides, dtype, stream=0, gpu_data=None):
         if isinstance(strides, int):
             strides = (strides,)
         dtype = np.dtype(dtype)
-        self.ndim = len(shape)
-        if len(strides) != self.ndim:
+        itemsize = dtype.itemsize
+        self.ndim = ndim = len(shape)
+        if len(strides) != ndim:
             raise ValueError("strides not match ndim")
-        self._dummy = dummyarray.Array.from_desc(
-            0, shape, strides, dtype.itemsize
+        self._dummy = dummy = dummyarray.Array.from_desc(
+            0, shape, strides, itemsize
         )
         # confirm that all elements of shape are ints
         if not all(isinstance(dim, (int, np.integer)) for dim in shape):
             raise TypeError("all elements of shape must be ints")
-        self.shape = self._dummy.shape
-        self.strides = self._dummy.strides
+        self.shape = shape = dummy.shape
+        self.strides = strides = dummy.strides
         self.dtype = dtype
-        self.size = self._dummy.size
+        self.size = size = dummy.size
         # prepare gpu memory
-        if self.size > 0:
-            self.alloc_size = _driver.memory_size_from_info(
-                self.shape, self.strides, self.dtype.itemsize
+        if size:
+            self.alloc_size = alloc_size = _driver.memory_size_from_info(
+                shape, strides, itemsize
             )
             if gpu_data is None:
-                gpu_data = devices.get_context().memalloc(self.alloc_size)
+                gpu_data = devices.get_context().memalloc(alloc_size)
         else:
             # Make NULL pointer for empty allocation
             null = _driver.binding.CUdeviceptr(0)
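
For context on the optimization: in CPython, every self.<attr> read is a dictionary lookup on the instance, while a local variable read is a cheap index into the frame's fast-locals array, so hoisting values such as dummy, shape, strides, itemsize, size, and alloc_size into locals trims constant overhead from the constructor. The micro-benchmark below is a minimal sketch, not part of this commit; the Probe class, its method names, and the iteration count are illustrative assumptions.

import timeit


class Probe:
    """Hypothetical class used only to compare attribute vs. local reads."""

    def __init__(self):
        self.value = 42

    def attribute_reads(self):
        # Repeated self.<attr> lookups, analogous to the old constructor body.
        return self.value + self.value + self.value + self.value

    def local_reads(self):
        # Bind the attribute to a local once, then reuse the local.
        value = self.value
        return value + value + value + value


p = Probe()
print("attribute lookups:", timeit.timeit(p.attribute_reads, number=1_000_000))
print("local variable:   ", timeit.timeit(p.local_reads, number=1_000_000))

On a typical CPython build the local-variable version should come out measurably faster, which is the same effect the chained assignments in the constructor (e.g. self.ndim = ndim = len(shape)) exploit: assign to the attribute once, then reuse the local.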

0 commit comments
