diff --git a/.github/workflows/slow-tests.yml.disabled b/.github/workflows/slow-tests.yml
similarity index 100%
rename from .github/workflows/slow-tests.yml.disabled
rename to .github/workflows/slow-tests.yml
diff --git a/cubed/tests/test_mem_utilization.py b/cubed/tests/test_mem_utilization.py
index 312c9eac..bb7013ea 100644
--- a/cubed/tests/test_mem_utilization.py
+++ b/cubed/tests/test_mem_utilization.py
@@ -1,4 +1,5 @@
 import math
+import shutil
 
 import pytest
 
@@ -33,46 +34,46 @@ def round_up_to_multiple(x, multiple=10):
 
 
 @pytest.mark.slow
-def test_index(spec):
+def test_index(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = a[1:, :]
-    run_operation("index", b)
+    run_operation(tmp_path, "index", b)
 
 
 @pytest.mark.slow
-def test_index_step(spec):
+def test_index_step(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = a[::2, :]
-    run_operation("index_step", b)
+    run_operation(tmp_path, "index_step", b)
 
 
 # Creation Functions
 
 
 @pytest.mark.slow
-def test_eye(spec):
+def test_eye(tmp_path, spec):
     a = xp.eye(10000, 10000, chunks=(5000, 5000), spec=spec)
-    run_operation("eye", a)
+    run_operation(tmp_path, "eye", a)
 
 
 @pytest.mark.slow
-def test_tril(spec):
+def test_tril(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = xp.tril(a)
-    run_operation("tril", b)
+    run_operation(tmp_path, "tril", b)
 
 
# Elementwise Functions
 
 
 @pytest.mark.slow
-def test_add(spec):
+def test_add(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
@@ -80,23 +81,23 @@ def test_add(spec):
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     c = xp.add(a, b)
-    run_operation("add", c)
+    run_operation(tmp_path, "add", c)
 
 
 @pytest.mark.slow
-def test_negative(spec):
+def test_negative(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = xp.negative(a)
-    run_operation("negative", b)
+    run_operation(tmp_path, "negative", b)
 
 
 # Linear Algebra Functions
 
 
 @pytest.mark.slow
-def test_matmul(spec):
+def test_matmul(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
@@ -106,20 +107,20 @@ def test_matmul(spec):
     c = xp.astype(a, xp.float32)
     d = xp.astype(b, xp.float32)
     e = xp.matmul(c, d)
-    run_operation("matmul", e)
+    run_operation(tmp_path, "matmul", e)
 
 
 @pytest.mark.slow
-def test_matrix_transpose(spec):
+def test_matrix_transpose(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = xp.matrix_transpose(a)
-    run_operation("matrix_transpose", b)
+    run_operation(tmp_path, "matrix_transpose", b)
 
 
 @pytest.mark.slow
-def test_tensordot(spec):
+def test_tensordot(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
@@ -129,14 +130,14 @@ def test_tensordot(spec):
     c = xp.astype(a, xp.float32)
     d = xp.astype(b, xp.float32)
     e = xp.tensordot(c, d, axes=1)
-    run_operation("tensordot", e)
+    run_operation(tmp_path, "tensordot", e)
 
 
 # Manipulation Functions
 
 
 @pytest.mark.slow
-def test_concat(spec):
+def test_concat(tmp_path, spec):
     # Note 'a' has one fewer element in axis=0 to force chunking to cross array boundaries
     a = cubed.random.random(
         (9999, 10000), chunks=(5000, 5000), spec=spec
@@ -145,22 +146,22 @@ def test_concat(spec):
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     c = xp.concat((a, b), axis=0)
-    run_operation("concat", c)
+    run_operation(tmp_path, "concat", c)
 
 
 @pytest.mark.slow
-def test_reshape(spec):
+def test_reshape(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     # need intermediate reshape due to limitations in Dask's reshape_rechunk
     b = xp.reshape(a, (5000, 2, 10000))
     c = xp.reshape(b, (5000, 20000))
-    run_operation("reshape", c)
+    run_operation(tmp_path, "reshape", c)
 
 
 @pytest.mark.slow
-def test_stack(spec):
+def test_stack(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
@@ -168,46 +169,46 @@ def test_stack(spec):
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     c = xp.stack((a, b), axis=0)
-    run_operation("stack", c)
+    run_operation(tmp_path, "stack", c)
 
 
 # Searching Functions
 
 
 @pytest.mark.slow
-def test_argmax(spec):
+def test_argmax(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = xp.argmax(a, axis=0)
-    run_operation("argmax", b)
+    run_operation(tmp_path, "argmax", b)
 
 
 # Statistical Functions
 
 
 @pytest.mark.slow
-def test_max(spec):
+def test_max(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = xp.max(a, axis=0)
-    run_operation("max", b)
+    run_operation(tmp_path, "max", b)
 
 
 @pytest.mark.slow
-def test_mean(spec):
+def test_mean(tmp_path, spec):
     a = cubed.random.random(
         (10000, 10000), chunks=(5000, 5000), spec=spec
     )  # 200MB chunks
     b = xp.mean(a, axis=0)
-    run_operation("mean", b)
+    run_operation(tmp_path, "mean", b)
 
 
 # Internal functions
 
 
-def run_operation(name, result_array):
+def run_operation(tmp_path, name, result_array):
     # result_array.visualize(f"cubed-{name}-unoptimized", optimize_graph=False)
     # result_array.visualize(f"cubed-{name}")
     executor = LithopsDagExecutor(config=LITHOPS_LOCAL_CONFIG)
@@ -220,3 +221,6 @@ def run_operation(name, result_array):
 
     # check projected_mem_utilization does not exceed 1
     assert (df["projected_mem_utilization"] <= 1.0).all()
+
+    # delete temp files for this test immediately since they are so large
+    shutil.rmtree(tmp_path)
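
Note on the cleanup step: pytest's `tmp_path` fixture gives each test its own directory but by default only prunes old base directories after several test runs, so the multi-hundred-megabyte intermediates these tests produce can pile up quickly; calling `shutil.rmtree(tmp_path)` at the end of `run_operation` reclaims the space as soon as each test finishes. This change relies on the `spec` fixture pointing Cubed's working directory at the same per-test `tmp_path`, so that all intermediate Zarr arrays land beneath it. A minimal sketch of that assumed wiring follows; the fixture body and the `allowed_mem` value are illustrative, not the repository's exact code, and older Cubed versions name the memory parameter `max_mem` rather than `allowed_mem`:

import cubed
import pytest


@pytest.fixture()
def spec(tmp_path):
    # Assumed fixture: use the per-test tmp_path as Cubed's work_dir so every
    # intermediate array is written beneath it, making shutil.rmtree(tmp_path)
    # in run_operation sufficient to delete all of a test's temp data.
    # allowed_mem below is a placeholder value, not the repository's setting.
    return cubed.Spec(str(tmp_path), allowed_mem=2_000_000_000)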