Commit

move memsys tests to test_nrt_refct
isVoid committed Dec 4, 2024
1 parent 8f454ac commit 8843765
Showing 2 changed files with 116 additions and 109 deletions.
110 changes: 1 addition & 109 deletions numba_cuda/numba/cuda/tests/nrt/test_nrt.py
@@ -1,122 +1,14 @@
import re
import gc
import numpy as np
import unittest
from unittest.mock import patch
from numba.cuda.runtime import rtsys
from numba.tests.support import EnableNRTStatsMixin
from numba.cuda.testing import CUDATestCase

from numba.cuda.tests.nrt.mock_numpy import cuda_empty, cuda_empty_like
from numba.cuda.tests.nrt.mock_numpy import cuda_empty

from numba import cuda


class TestNrtRefCt(EnableNRTStatsMixin, CUDATestCase):

def setUp(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
super(TestNrtRefCt, self).setUp()

def test_no_return(self):
"""
Test issue #1291
"""
n = 10

@cuda.jit(debug=True)
def kernel():
for i in range(n):
temp = cuda_empty(2, np.float64) # noqa: F841
return None

init_stats = rtsys.get_allocation_stats()
print("init_stats", init_stats)

with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
kernel[1,1]()
print("After kernel launch...")
cur_stats = rtsys.get_allocation_stats()
print("cur_stats", cur_stats)
self.assertEqual(cur_stats.alloc - init_stats.alloc, n)
self.assertEqual(cur_stats.free - init_stats.free, n)

def test_escaping_var_init_in_loop(self):
"""
Test issue #1297
"""

@cuda.jit
def g(n):

x = cuda_empty((n, 2), np.float64)

for i in range(n):
y = x[i]

for i in range(n):
y = x[i] # noqa: F841

return None

init_stats = rtsys.get_allocation_stats()
print("init_stats", init_stats)
with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
g[1, 1](10)
print("After kernel launch...")
cur_stats = rtsys.get_allocation_stats()
print("cur_stats", cur_stats)
self.assertEqual(cur_stats.alloc - init_stats.alloc, 1)
self.assertEqual(cur_stats.free - init_stats.free, 1)

def test_invalid_computation_of_lifetime(self):
"""
Test issue #1573
"""
@cuda.jit
def if_with_allocation_and_initialization(arr1, test1):
tmp_arr = cuda_empty_like(arr1)

for i in range(tmp_arr.shape[0]):
pass

if test1:
cuda_empty_like(arr1)

arr = np.random.random((5, 5)) # the values are not consumed

init_stats = rtsys.get_allocation_stats()
with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
if_with_allocation_and_initialization[1, 1](arr, False)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc,
cur_stats.free - init_stats.free)

def test_del_at_beginning_of_loop(self):
"""
Test issue #1734
"""
@cuda.jit
def f(arr):
res = 0

for i in (0, 1):
# `del t` is issued here before defining t. It must be
# correctly handled by the lowering phase.
t = arr[i]
if t[i] > 1:
res += t[i]

arr = np.ones((2, 2))
init_stats = rtsys.get_allocation_stats()
with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
f[1, 1](arr)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc,
cur_stats.free - init_stats.free)


class TestNrtBasic(CUDATestCase):
def test_nrt_launches(self):
@cuda.jit
115 changes: 115 additions & 0 deletions numba_cuda/numba/cuda/tests/nrt/test_nrt_refct.py
@@ -0,0 +1,115 @@

import gc
import numpy as np
import unittest
from unittest.mock import patch
from numba.cuda.runtime import rtsys
from numba.tests.support import EnableNRTStatsMixin
from numba.cuda.testing import CUDATestCase

from numba.cuda.tests.nrt.mock_numpy import cuda_empty, cuda_empty_like

from numba import cuda


class TestNrtRefCt(EnableNRTStatsMixin, CUDATestCase):

def setUp(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
super(TestNrtRefCt, self).setUp()

def test_no_return(self):
"""
Test issue #1291
"""
n = 10

@cuda.jit(debug=True)
def kernel():
for i in range(n):
temp = cuda_empty(2, np.float64) # noqa: F841
return None

init_stats = rtsys.get_allocation_stats()

with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
kernel[1,1]()
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc, n)
self.assertEqual(cur_stats.free - init_stats.free, n)

def test_escaping_var_init_in_loop(self):
"""
Test issue #1297
"""

@cuda.jit
def g(n):

x = cuda_empty((n, 2), np.float64)

for i in range(n):
y = x[i]

for i in range(n):
y = x[i] # noqa: F841

return None

init_stats = rtsys.get_allocation_stats()
with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
g[1, 1](10)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc, 1)
self.assertEqual(cur_stats.free - init_stats.free, 1)

def test_invalid_computation_of_lifetime(self):
"""
Test issue #1573
"""
@cuda.jit
def if_with_allocation_and_initialization(arr1, test1):
tmp_arr = cuda_empty_like(arr1)

for i in range(tmp_arr.shape[0]):
pass

if test1:
cuda_empty_like(arr1)

arr = np.random.random((5, 5)) # the values are not consumed

init_stats = rtsys.get_allocation_stats()
with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
if_with_allocation_and_initialization[1, 1](arr, False)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc,
cur_stats.free - init_stats.free)

def test_del_at_beginning_of_loop(self):
"""
Test issue #1734
"""
@cuda.jit
def f(arr):
res = 0

for i in (0, 1):
# `del t` is issued here before defining t. It must be
# correctly handled by the lowering phase.
t = arr[i]
if t[i] > 1:
res += t[i]

arr = np.ones((2, 2))
init_stats = rtsys.get_allocation_stats()
with patch('numba.config.CUDA_ENABLE_NRT', True, create=True):
f[1, 1](arr)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc,
cur_stats.free - init_stats.free)


if __name__ == '__main__':
unittest.main()
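
For reference, the relocated module keeps its unittest.main() entry point, so the moved ref-count tests can still be run on their own. Below is a minimal sketch of invoking them programmatically; the dotted module name is an assumption inferred from this commit's own import paths (numba.cuda.tests.nrt.mock_numpy), and it presumes an environment where that package tree is importable:

import unittest

# Load the relocated ref-count tests by dotted module name and run them.
# NOTE: the module name below is an assumption based on the file path
# and import statements shown in this commit, not a documented entry point.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "numba.cuda.tests.nrt.test_nrt_refct")
unittest.TextTestRunner(verbosity=2).run(suite)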
