diff --git a/tests/everest/conftest.py b/tests/everest/conftest.py index 62dbfbb281e..b17be861b51 100644 --- a/tests/everest/conftest.py +++ b/tests/everest/conftest.py @@ -7,6 +7,8 @@ import pytest +import shutil from everest.config.control_config import ControlConfig +from tests.everest.utils import relpath @pytest.fixture(scope="session") @@ -82,3 +83,15 @@ def control_config( config = deepcopy(control_data_no_variables) config["variables"] = request.param return ControlConfig.model_validate(config) + + +@pytest.fixture +def copy_math_func_test_data_to_tmp(tmp_path, monkeypatch): + path = relpath("..", "..", "test-data", "everest", "math_func") + shutil.copytree(path, tmp_path, dirs_exist_ok=True) + monkeypatch.chdir(tmp_path) + + +@pytest.fixture +def change_to_tmpdir(tmp_path, monkeypatch): + monkeypatch.chdir(tmp_path) diff --git a/tests/everest/entry_points/test_config_branch_entry.py b/tests/everest/entry_points/test_config_branch_entry.py index 1fca14d086c..c7af83df306 100644 --- a/tests/everest/entry_points/test_config_branch_entry.py +++ b/tests/everest/entry_points/test_config_branch_entry.py @@ -8,9 +8,8 @@ from everest.config import EverestConfig from everest.config_file_loader import load_yaml from everest.config_keys import ConfigKeys as CK -from tests.everest.utils import relpath, tmpdir +from tests.everest.utils import relpath -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE = "config_advanced.yml" CACHED_SEBA_FOLDER = relpath("test_data", "cached_results_config_advanced") @@ -22,8 +21,7 @@ new_callable=PropertyMock, return_value=CACHED_SEBA_FOLDER, ) -@tmpdir(CONFIG_PATH) -def test_config_branch_entry(get_opt_output_dir_mock): +def test_config_branch_entry(get_opt_output_dir_mock, copy_math_func_test_data_to_tmp): new_config_file_name = "new_restart_config.yml" batch_id = 1 @@ -65,8 +63,7 @@ def test_config_branch_entry(get_opt_output_dir_mock): new_callable=PropertyMock, return_value=CACHED_SEBA_FOLDER, ) 
-@tmpdir(CONFIG_PATH) -def test_config_branch_preserves_config_section_order(get_opt_output_dir_mock): +def test_config_branch_preserves_config_section_order( + get_opt_output_dir_mock, copy_math_func_test_data_to_tmp +): new_config_file_name = "new_restart_config.yml" batch_id = 1 diff --git a/tests/everest/entry_points/test_everest_entry.py b/tests/everest/entry_points/test_everest_entry.py index 1156f1d36aa..472b8ea8066 100644 --- a/tests/everest/entry_points/test_everest_entry.py +++ b/tests/everest/entry_points/test_everest_entry.py @@ -18,9 +18,8 @@ from everest.jobs import shell_commands from everest.simulator import JOB_SUCCESS from ieverest.bin.ieverest_script import ieverest_entry -from tests.everest.utils import capture_streams, relpath, tmpdir +from tests.everest.utils import capture_streams -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_MINIMAL = "config_minimal.yml" @@ -80,13 +79,13 @@ def run_detached_monitor_mock( "everest.bin.everest_script.everserver_status", return_value={"status": ServerStatus.never_run, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_debug( everserver_status_mock, start_server_mock, wait_for_server_mock, start_monitor_mock, caplog, + copy_math_func_test_data_to_tmp, ): """Test running everest with --debug""" with caplog.at_level(logging.DEBUG): @@ -111,12 +110,12 @@ def test_everest_entry_debug( "everest.bin.everest_script.everserver_status", return_value={"status": ServerStatus.never_run, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry( everserver_status_mock, start_server_mock, wait_for_server_mock, start_monitor_mock, + copy_math_func_test_data_to_tmp, ): """Test running everest in detached mode""" everest_entry([CONFIG_FILE_MINIMAL]) @@ -134,13 +133,13 @@ def test_everest_entry( "everest.bin.everest_script.everserver_status", return_value={"status": ServerStatus.completed, "message": None}, ) -@tmpdir(CONFIG_PATH) def 
test_everest_entry_detached_already_run( everserver_status_mock, start_server_mock, wait_for_server_mock, start_monitor_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test everest detached, when an optimization has already run""" # optimization already run, notify the user @@ -171,11 +170,11 @@ def test_everest_entry_detached_already_run( "everest.bin.monitor_script.everserver_status", return_value={"status": ServerStatus.completed, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_detached_already_run_monitor( everserver_status_mock, start_monitor_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test everest detached, when an optimization has already run""" # optimization already run, notify the user @@ -199,7 +198,6 @@ def test_everest_entry_detached_already_run_monitor( "everest.bin.everest_script.everserver_status", return_value={"status": ServerStatus.completed, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_detached_running( everserver_status_mock, wait_for_server_to_stop_mock, @@ -209,6 +207,7 @@ def test_everest_entry_detached_running( start_monitor_mock, server_is_running_mock_kill_script, server_is_running_mock_everest_script, + copy_math_func_test_data_to_tmp, ): """Test everest detached, optimization is running""" # can't start a new run if one is already running @@ -248,11 +247,11 @@ def test_everest_entry_detached_running( "everest.bin.monitor_script.everserver_status", return_value={"status": ServerStatus.completed, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_detached_running_monitor( everserver_status_mock, start_monitor_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test everest detached, optimization is running, monitoring""" # Attach to a running optimization. 
@@ -269,11 +268,11 @@ def test_everest_entry_detached_running_monitor( "everest.bin.monitor_script.everserver_status", return_value={"status": ServerStatus.completed, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_monitor_no_run( everserver_status_mock, start_monitor_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test everest detached, optimization is running, monitoring""" # Attach to a running optimization. @@ -300,7 +299,6 @@ def test_everest_entry_monitor_no_run( "everest.bin.everest_script.everserver_status", return_value={"status": ServerStatus.never_run, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_show_all_jobs( everserver_status_mock, get_opt_status_mock, @@ -309,6 +307,7 @@ def test_everest_entry_show_all_jobs( start_server_mock, wait_for_server_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test running everest with --show-all-jobs""" @@ -335,7 +334,6 @@ def test_everest_entry_show_all_jobs( "everest.bin.everest_script.everserver_status", return_value={"status": ServerStatus.never_run, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_everest_entry_no_show_all_jobs( everserver_status_mock, get_opt_status_mock, @@ -344,6 +342,7 @@ def test_everest_entry_no_show_all_jobs( start_server_mock, wait_for_server_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test running everest without --show-all-jobs""" @@ -372,13 +371,13 @@ def test_everest_entry_no_show_all_jobs( "everest.bin.monitor_script.everserver_status", return_value={"status": ServerStatus.never_run, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_monitor_entry_show_all_jobs( everserver_status_mock, get_opt_status_mock, get_server_context_mock, query_server_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test running everest with and without --show-all-jobs""" @@ -404,13 +403,13 @@ def test_monitor_entry_show_all_jobs( "everest.bin.monitor_script.everserver_status", 
return_value={"status": ServerStatus.never_run, "message": None}, ) -@tmpdir(CONFIG_PATH) def test_monitor_entry_no_show_all_jobs( everserver_status_mock, get_opt_status_mock, get_server_context_mock, query_server_mock, server_is_running_mock, + copy_math_func_test_data_to_tmp, ): """Test running everest without --show-all-jobs""" @@ -436,9 +435,11 @@ def test_monitor_entry_no_show_all_jobs( ) @patch("everest.bin.everest_script.wait_for_server") @patch("everest.bin.everest_script.start_server") -@tmpdir(CONFIG_PATH) def test_exception_raised_when_server_run_fails( - start_server_mock, wait_for_server_mock, start_monitor_mock + start_server_mock, + wait_for_server_mock, + start_monitor_mock, + copy_math_func_test_data_to_tmp, ): with pytest.raises(SystemExit, match="Reality was ripped to shreds!"): everest_entry([CONFIG_FILE_MINIMAL]) @@ -453,9 +454,8 @@ def test_exception_raised_when_server_run_fails( error="Reality was ripped to shreds!", ), ) -@tmpdir(CONFIG_PATH) def test_exception_raised_when_server_run_fails_monitor( - start_monitor_mock, server_is_running_mock + start_monitor_mock, server_is_running_mock, copy_math_func_test_data_to_tmp ): with pytest.raises(SystemExit, match="Reality was ripped to shreds!"): monitor_entry([CONFIG_FILE_MINIMAL]) @@ -467,9 +467,11 @@ def test_exception_raised_when_server_run_fails_monitor( ) @patch("everest.bin.everest_script.wait_for_server") @patch("everest.bin.everest_script.start_server") -@tmpdir(CONFIG_PATH) def test_complete_status_for_normal_run( - start_server_mock, wait_for_server_mock, start_monitor_mock + start_server_mock, + wait_for_server_mock, + start_monitor_mock, + copy_math_func_test_data_to_tmp, ): everest_entry([CONFIG_FILE_MINIMAL]) config = EverestConfig.load_file(CONFIG_FILE_MINIMAL) @@ -486,9 +488,8 @@ def test_complete_status_for_normal_run( "everest.bin.monitor_script.run_detached_monitor", side_effect=run_detached_monitor_mock, ) -@tmpdir(CONFIG_PATH) def test_complete_status_for_normal_run_monitor( 
- start_monitor_mock, server_is_running_mock + start_monitor_mock, server_is_running_mock, copy_math_func_test_data_to_tmp ): monitor_entry([CONFIG_FILE_MINIMAL]) config = EverestConfig.load_file(CONFIG_FILE_MINIMAL) diff --git a/tests/everest/entry_points/test_everexport.py b/tests/everest/entry_points/test_everexport.py index 1d9fb5b60da..1fad072424b 100644 --- a/tests/everest/entry_points/test_everexport.py +++ b/tests/everest/entry_points/test_everexport.py @@ -19,7 +19,6 @@ tmpdir, ) -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_MINIMAL = "config_minimal.yml" CONFIG_FILE_MOCKED_TEST_CASE = "mocked_multi_batch.yml" @@ -71,8 +70,7 @@ def cache_dir(request, monkeypatch): @patch("everest.bin.utils.export_with_progress", side_effect=export_mock) -@tmpdir(CONFIG_PATH) -def test_everexport_entry_run(mocked_func): +def test_everexport_entry_run(mocked_func, copy_math_func_test_data_to_tmp): """Test running everexport with not flags""" # NOTE: there is probably a bug concerning output folders. Everexport # seems to assume that the folder where the file will be saved exists. @@ -88,8 +86,7 @@ def test_everexport_entry_run(mocked_func): @patch("everest.bin.utils.export_with_progress", side_effect=empty_mock) -@tmpdir(CONFIG_PATH) -def test_everexport_entry_empty(mocked_func): +def test_everexport_entry_empty(mocked_func, copy_math_func_test_data_to_tmp): """Test running everexport with no data""" # NOTE: When there is no data (ie, the optimization has not yet run) # the current behavior is to create an empty .csv file. 
It is arguable @@ -112,9 +109,10 @@ def test_everexport_entry_empty(mocked_func): side_effect=validate_export_mock, ) @patch("everest.bin.utils.export") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everexport_entry_batches(mocked_func, validate_export_mock): +def test_everexport_entry_batches( + mocked_func, validate_export_mock, copy_math_func_test_data_to_tmp +): """Test running everexport with the --batches flag""" everexport_entry([CONFIG_FILE_MINIMAL, "--batches", "0", "2"]) @@ -135,8 +133,7 @@ def check_export_batches(config: EverestConfig): @patch("everest.bin.everexport_script.export_to_csv") -@tmpdir(CONFIG_PATH) -def test_everexport_entry_no_export(mocked_func): +def test_everexport_entry_no_export(mocked_func, copy_math_func_test_data_to_tmp): """Test running everexport on config file with skip_export flag set to true""" @@ -157,8 +154,7 @@ def test_everexport_entry_no_export(mocked_func): @patch("everest.bin.everexport_script.export_to_csv") -@tmpdir(CONFIG_PATH) -def test_everexport_entry_empty_export(mocked_func): +def test_everexport_entry_empty_export(mocked_func, copy_math_func_test_data_to_tmp): """Test running everexport on config file with empty export section""" # Add empty export section to config file diff --git a/tests/everest/functional/test_main_everest_entry.py b/tests/everest/functional/test_main_everest_entry.py index 42af2f6423c..a993aad8924 100644 --- a/tests/everest/functional/test_main_everest_entry.py +++ b/tests/everest/functional/test_main_everest_entry.py @@ -21,7 +21,6 @@ wait_for_context, ) -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_MINIMAL = "config_minimal.yml" EGG_CONFIG_PATH = relpath("..", "..", "test-data", "everest", "egg") @@ -84,9 +83,8 @@ def test_everest_main_entry_bad_command(): @pytest.mark.flaky(reruns=5) -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everest_entry_run(): +def 
test_everest_entry_run(copy_math_func_test_data_to_tmp): wait_for_context() # Setup command line arguments with capture_streams(): @@ -119,8 +117,7 @@ def test_everest_entry_run(): context_stop_and_wait() -@tmpdir(CONFIG_PATH) -def test_everest_entry_monitor_no_run(): +def test_everest_entry_monitor_no_run(copy_math_func_test_data_to_tmp): with capture_streams(): start_everest(["everest", "monitor", CONFIG_FILE_MINIMAL]) @@ -132,16 +129,14 @@ def test_everest_entry_monitor_no_run(): context_stop_and_wait() -@tmpdir(CONFIG_PATH) -def test_everest_main_export_entry(): +def test_everest_main_export_entry(copy_math_func_test_data_to_tmp): # Setup command line arguments with capture_streams(): start_everest(["everest", "export", CONFIG_FILE_MINIMAL]) assert os.path.exists(os.path.join("everest_output", "config_minimal.csv")) -@tmpdir(CONFIG_PATH) -def test_everest_main_lint_entry(): +def test_everest_main_lint_entry(copy_math_func_test_data_to_tmp): # Setup command line arguments with capture_streams() as (out, err): start_everest(["everest", "lint", CONFIG_FILE_MINIMAL]) diff --git a/tests/everest/functional/test_ui.py b/tests/everest/functional/test_ui.py index 94c3e7dbb77..88acc52d6b8 100644 --- a/tests/everest/functional/test_ui.py +++ b/tests/everest/functional/test_ui.py @@ -3,7 +3,6 @@ from qtpy.QtCore import Qt from seba_sqlite.snapshot import SebaSnapshot from tests.everest.dialogs_mocker import mock_dialogs_all -from tests.everest.utils import relpath, tmpdir from everest.config import EverestConfig from everest.detached import ( @@ -14,15 +13,13 @@ ) from ieverest import IEverest -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_MINIMAL = "config_minimal.yml" @pytest.mark.flaky(reruns=5) -@tmpdir(CONFIG_PATH) @pytest.mark.ui_test @pytest.mark.xdist_group(name="starts_everest") -def test_ui_optimization(qapp, qtbot, mocker): +def test_ui_optimization(qapp, qtbot, mocker, copy_math_func_test_data_to_tmp): """Load a 
configuration and run it from the UI""" wait_for_context() diff --git a/tests/everest/test_api.py b/tests/everest/test_api.py index 88522666fd5..4195ca67de3 100644 --- a/tests/everest/test_api.py +++ b/tests/everest/test_api.py @@ -10,7 +10,7 @@ from everest.api import EverestDataAPI from everest.config import EverestConfig from everest.detached import ServerStatus -from tests.everest.utils import relpath, tmpdir +from tests.everest.utils import relpath # Global values used to create the mock snapshot. _functions = ["f0", "f1"] @@ -531,8 +531,7 @@ def test_get_summary_keys_single_key(_, api_no_gradient): @patch.object(EverestConfig, "optimization_output_dir", new_callable=PropertyMock) @patch("everest.api.everest_data_api.SebaSnapshot") @patch("everest.api.everest_data_api.SebaSnapshot.get_snapshot") -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_output_folder(_1, _2, _3): +def test_output_folder(_1, _2, _3, copy_math_func_test_data_to_tmp): config_file = "config_multiobj.yml" config = EverestConfig.load_file(config_file) assert config.environment is not None @@ -548,8 +547,9 @@ def test_output_folder(_1, _2, _3): "everest.api.everest_data_api.everserver_status", return_value={"status": ServerStatus.completed}, ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everest_csv(everserver_status_mock, _1, _2, _3): +def test_everest_csv( + everserver_status_mock, _1, _2, _3, copy_math_func_test_data_to_tmp +): config_file = "config_multiobj.yml" config = EverestConfig.load_file(config_file) expected = config.export_path diff --git a/tests/everest/test_config_branch.py b/tests/everest/test_config_branch.py index 4860d459c7e..94ef60f84a3 100644 --- a/tests/everest/test_config_branch.py +++ b/tests/everest/test_config_branch.py @@ -6,15 +6,13 @@ ) from everest.config_file_loader import load_yaml from everest.config_keys import ConfigKeys as CK -from tests.everest.utils import relpath, tmpdir +from tests.everest.utils 
import relpath -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE = "config_advanced.yml" CACHED_SEBA_FOLDER = relpath("test_data", "cached_results_config_advanced") -@tmpdir(CONFIG_PATH) -def test_get_controls_for_batch(): +def test_get_controls_for_batch(copy_math_func_test_data_to_tmp): assert opt_controls_by_batch(CACHED_SEBA_FOLDER, 1) is not None assert opt_controls_by_batch(CACHED_SEBA_FOLDER, 42) is None @@ -38,8 +36,7 @@ def test_get_controls_for_batch(): ) -@tmpdir(CONFIG_PATH) -def test_update_controls_initial_guess(): +def test_update_controls_initial_guess(copy_math_func_test_data_to_tmp): old_controls = load_yaml(CONFIG_FILE)[CK.CONTROLS] assert len(old_controls) == 1 diff --git a/tests/everest/test_cvar.py b/tests/everest/test_cvar.py index 9bddeab100a..f439fb68521 100644 --- a/tests/everest/test_cvar.py +++ b/tests/everest/test_cvar.py @@ -2,14 +2,11 @@ from everest.config import EverestConfig from everest.suite import _EverestWorkflow -from tests.everest.utils import relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_CVAR = "config_cvar.yml" -@tmpdir(CONFIG_PATH) -def test_mathfunc_cvar(): +def test_mathfunc_cvar(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_CVAR) workflow = _EverestWorkflow(config) diff --git a/tests/everest/test_detached.py b/tests/everest/test_detached.py index c8c6605b282..7d8d973b980 100644 --- a/tests/everest/test_detached.py +++ b/tests/everest/test_detached.py @@ -61,8 +61,7 @@ def job_progress(*args): @pytest.mark.integration_test @pytest.mark.fails_on_macos_github_workflow @pytest.mark.xdist_group(name="starts_everest") -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_https_requests(): +def test_https_requests(copy_math_func_test_data_to_tmp): everest_config = EverestConfig.load_file("config_minimal_slow.yml") expected_server_status = ServerStatus.never_run @@ -121,8 +120,7 @@ def 
test_https_requests(): assert ServerStatus.stopped == server_status["status"] -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_server_status(): +def test_server_status(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file("config_minimal.yml") # Check status file does not exist before initial status update @@ -219,8 +217,7 @@ def _get_reference_config(): return everest_config, reference_config -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_detached_mode_config_base(): +def test_detached_mode_config_base(copy_math_func_test_data_to_tmp): everest_config, reference = _get_reference_config() ert_config = generate_everserver_ert_config(everest_config) @@ -237,8 +234,9 @@ def test_detached_mode_config_base(): ("slurm", 5, "test_slurm"), ], ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everserver_queue_config_equal_to_run_config(queue_system, cores, name): +def test_everserver_queue_config_equal_to_run_config( + copy_math_func_test_data_to_tmp, queue_system, cores, name +): everest_config, _ = _get_reference_config() simulator_config = {CK.QUEUE_SYSTEM: queue_system, CK.CORES: cores} @@ -267,8 +265,7 @@ def test_everserver_queue_config_equal_to_run_config(queue_system, cores, name): ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_detached_mode_config_debug(): +def test_detached_mode_config_debug(copy_math_func_test_data_to_tmp): everest_config, reference = _get_reference_config() ert_config = generate_everserver_ert_config(everest_config, debug_mode=True) @@ -279,8 +276,7 @@ def test_detached_mode_config_debug(): @pytest.mark.parametrize("queue_system", ["lsf", "slurm"]) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_detached_mode_config_only_sim(queue_system): +def test_detached_mode_config_only_sim(copy_math_func_test_data_to_tmp, queue_system): everest_config, reference = _get_reference_config() 
reference["QUEUE_SYSTEM"] = queue_system.upper() @@ -292,8 +288,7 @@ def test_detached_mode_config_only_sim(queue_system): assert ert_config == reference -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_detached_mode_config_error(): +def test_detached_mode_config_error(copy_math_func_test_data_to_tmp): """ We are not allowing the simulator queue to be local and at the same time the everserver queue to be something other than local @@ -305,8 +300,7 @@ def test_detached_mode_config_error(): generate_everserver_ert_config(everest_config) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_detached_mode_config_queue_name(): +def test_detached_mode_config_queue_name(copy_math_func_test_data_to_tmp): everest_config, reference = _get_reference_config() queue_name = "put_me_in_the_queue" diff --git a/tests/everest/test_discrete.py b/tests/everest/test_discrete.py index 1ecdd251450..ca92aa7d10e 100644 --- a/tests/everest/test_discrete.py +++ b/tests/everest/test_discrete.py @@ -1,13 +1,10 @@ from everest.config import EverestConfig from everest.suite import _EverestWorkflow -from tests.everest.utils import relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_DISCRETE = "config_discrete.yml" -@tmpdir(CONFIG_PATH) -def test_discrete_optimizer(): +def test_discrete_optimizer(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_DISCRETE) workflow = _EverestWorkflow(config) diff --git a/tests/everest/test_environment.py b/tests/everest/test_environment.py index 48a250124ef..a47c7f1bb1c 100644 --- a/tests/everest/test_environment.py +++ b/tests/everest/test_environment.py @@ -1,21 +1,16 @@ -import os - import pytest import everest from everest.config import EverestConfig from everest.simulator.everest_to_ert import everest_to_ert_config -from tests.everest.utils import relpath, tmpdir -root = os.path.join("..", "..", "test-data", "everest", "math_func") -config_file 
= "config_minimal.yml" +CONFIG_FILE = "config_minimal.yml" @pytest.mark.integration_test -@tmpdir(relpath(root)) -def test_seed(): +def test_seed(copy_math_func_test_data_to_tmp): random_seed = 42 - config = EverestConfig.load_file(config_file) + config = EverestConfig.load_file(CONFIG_FILE) config.environment.random_seed = random_seed ever_workflow = everest.suite._EverestWorkflow(config) @@ -28,9 +23,8 @@ def test_seed(): @pytest.mark.integration_test -@tmpdir(relpath(root)) -def test_loglevel(): - config = EverestConfig.load_file(config_file) +def test_loglevel(copy_math_func_test_data_to_tmp): + config = EverestConfig.load_file(CONFIG_FILE) config.environment.log_level = "info" ever_workflow = everest.suite._EverestWorkflow(config) config = ever_workflow.config diff --git a/tests/everest/test_everest_output.py b/tests/everest/test_everest_output.py index 9d0b76c7cc9..cd83305f517 100644 --- a/tests/everest/test_everest_output.py +++ b/tests/everest/test_everest_output.py @@ -20,8 +20,9 @@ from tests.everest.utils import relpath, tmpdir -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_that_one_experiment_creates_one_ensemble_per_batch(): +def test_that_one_experiment_creates_one_ensemble_per_batch( + copy_math_func_test_data_to_tmp, +): config = EverestConfig.load_file("config_minimal.yml") workflow = _EverestWorkflow(config) assert workflow is not None @@ -107,8 +108,7 @@ def useless_cb(*args, **kwargs): @patch("ert.simulator.BatchSimulator.start", return_value=None) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_save_running_config(start_mock): +def test_save_running_config(start_mock, copy_math_func_test_data_to_tmp): file_name = "config_minimal.yml" config = EverestConfig.load_file(file_name) ert_config = ErtConfig.with_plugins().from_dict( diff --git a/tests/everest/test_everserver.py b/tests/everest/test_everserver.py index 46ee6c192ba..c0a27f17ea9 100644 --- a/tests/everest/test_everserver.py +++ 
b/tests/everest/test_everserver.py @@ -11,7 +11,6 @@ from everest.detached.jobs import everserver from everest.simulator import JOB_FAILURE, JOB_SUCCESS from everest.strings import OPT_FAILURE_REALIZATIONS, SIM_PROGRESS_ENDPOINT -from tests.everest.utils import relpath, tmpdir def configure_everserver_logger(*args, **kwargs): @@ -51,8 +50,7 @@ def set_shared_status(context_status, event, shared_data, progress): } -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_certificate_generation(): +def test_certificate_generation(copy_math_func_test_data_to_tmp): everest_config = EverestConfig.load_file("config_minimal.yml") cert, key, pw = everserver._generate_certificate(everest_config) @@ -65,8 +63,7 @@ def test_certificate_generation(): ctx.load_cert_chain(cert, key, pw) # raise on error -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_hostfile_storage(): +def test_hostfile_storage(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file("config_minimal.yml") expected_result = { @@ -85,8 +82,7 @@ def test_hostfile_storage(): "everest.detached.jobs.everserver.configure_logger", side_effect=configure_everserver_logger, ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everserver_status_failure(mf_1): +def test_everserver_status_failure(mf_1, copy_math_func_test_data_to_tmp): config_file = "config_minimal.yml" config = EverestConfig.load_file(config_file) everserver.main() @@ -121,8 +117,7 @@ def test_everserver_status_failure(mf_1): "everest.detached.jobs.everserver.export_to_csv", side_effect=partial(check_status, status=ServerStatus.exporting_to_csv), ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everserver_status_running_complete(*args): +def test_everserver_status_running_complete(*args, copy_math_func_test_data_to_tmp): config_file = "config_minimal.yml" config = EverestConfig.load_file(config_file) everserver.main() @@ -165,8 +160,7 @@ def 
test_everserver_status_running_complete(*args): ], ), ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everserver_status_failed_job(*args): +def test_everserver_status_failed_job(*args, copy_math_func_test_data_to_tmp): config_file = "config_minimal.yml" config = EverestConfig.load_file(config_file) everserver.main() @@ -199,8 +193,7 @@ def test_everserver_status_failed_job(*args): "everest.detached.jobs.everserver._sim_monitor", side_effect=partial(set_shared_status, progress=[]), ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everserver_status_exception(*args): +def test_everserver_status_exception(*args, copy_math_func_test_data_to_tmp): config_file = "config_minimal.yml" config = EverestConfig.load_file(config_file) everserver.main() @@ -229,8 +222,7 @@ def test_everserver_status_exception(*args): "everest.detached.jobs.everserver._sim_monitor", side_effect=partial(set_shared_status, progress=[]), ) -@tmpdir(relpath("..", "..", "test-data", "everest", "math_func")) -def test_everserver_status_max_batch_num(*args): +def test_everserver_status_max_batch_num(*args, copy_math_func_test_data_to_tmp): config_file = "config_one_batch.yml" config = EverestConfig.load_file(config_file) everserver.main() diff --git a/tests/everest/test_export.py b/tests/everest/test_export.py index 3b82e1b1589..4d4083dc973 100644 --- a/tests/everest/test_export.py +++ b/tests/everest/test_export.py @@ -11,8 +11,6 @@ from everest.export import export, validate_export from tests.everest.utils import create_cached_mocked_test_case, relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") - CONFIG_FILE_MOCKED_TEST_CASE = "mocked_multi_batch.yml" CONFIG_PATH_MOCKED_TEST_CASE = relpath("test_data", "mocked_test_case") CASHED_RESULTS_FOLDER = relpath("test_data", "cached_results_config_multiobj") @@ -68,8 +66,7 @@ def test_filter_double_wildcard(): ) -@tmpdir(CONFIG_PATH) -def 
test_export_only_non_gradient_with_increased_merit(): +def test_export_only_non_gradient_with_increased_merit(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -91,8 +88,7 @@ def test_export_only_non_gradient_with_increased_merit(): assert merit_flag == 1 -@tmpdir(CONFIG_PATH) -def test_export_only_non_gradient(): +def test_export_only_non_gradient(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -115,8 +111,7 @@ def test_export_only_non_gradient(): assert 1 in df["increased_merit"].values -@tmpdir(CONFIG_PATH) -def test_export_only_increased_merit(): +def test_export_only_increased_merit(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -139,8 +134,7 @@ def test_export_only_increased_merit(): assert 0 not in df["increased_merit"].values -@tmpdir(CONFIG_PATH) -def test_export_all_batches(): +def test_export_all_batches(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -162,8 +156,7 @@ def test_export_all_batches(): assert 0 in df["increased_merit"].values -@tmpdir(CONFIG_PATH) -def test_export_only_give_batches(): +def test_export_only_give_batches(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -200,8 +193,7 @@ def test_export_batches_progress(cache_dir): assert id == 2 -@tmpdir(CONFIG_PATH) -def test_export_nothing_for_empty_batch_list(): +def test_export_nothing_for_empty_batch_list(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -219,8 +211,7 @@ def test_export_nothing_for_empty_batch_list(): assert df.empty 
-@tmpdir(CONFIG_PATH) -def test_export_nothing(): +def test_export_nothing(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( @@ -238,8 +229,7 @@ def test_export_nothing(): assert df.empty -@tmpdir(CONFIG_PATH) -def test_get_export_path(): +def test_get_export_path(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) # Test default export path when no csv_output_filepath is defined @@ -353,8 +343,7 @@ def check_error(expected_error, reported_errors): assert config.export.batches == [0] -@tmpdir(CONFIG_PATH) -def test_export_gradients(): +def test_export_gradients(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) os.makedirs(config.optimization_output_dir) shutil.copy( diff --git a/tests/everest/test_fix_control.py b/tests/everest/test_fix_control.py index ca3ec5dcfd0..3c04a2698c7 100644 --- a/tests/everest/test_fix_control.py +++ b/tests/everest/test_fix_control.py @@ -1,13 +1,10 @@ from everest.config import EverestConfig from everest.suite import _EverestWorkflow -from tests.everest.utils import relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_ADVANCED = "config_advanced_scipy.yml" -@tmpdir(CONFIG_PATH) -def test_fix_control(): +def test_fix_control(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) config.controls[0].variables[0].enabled = False diff --git a/tests/everest/test_logging.py b/tests/everest/test_logging.py index 7cd4263b0cf..6675bcb6323 100644 --- a/tests/everest/test_logging.py +++ b/tests/everest/test_logging.py @@ -13,9 +13,7 @@ wait_for_server, ) from everest.util import makedirs_if_needed -from tests.everest.utils import relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE = "config_fm_failure.yml" @@ -29,8 +27,7 @@ def string_exists_in_file(file_path, string): 
@pytest.mark.integration_test @pytest.mark.xdist_group(name="starts_everest") @pytest.mark.fails_on_macos_github_workflow -@tmpdir(CONFIG_PATH) -def test_logging_setup(): +def test_logging_setup(copy_math_func_test_data_to_tmp): everest_config = EverestConfig.load_file(CONFIG_FILE) wait_for_context() diff --git a/tests/everest/test_math_func.py b/tests/everest/test_math_func.py index 3ee7847aea9..aed2bfcf7b0 100644 --- a/tests/everest/test_math_func.py +++ b/tests/everest/test_math_func.py @@ -11,9 +11,6 @@ from everest.export import export from everest.suite import _EverestWorkflow from everest.util import makedirs_if_needed -from tests.everest.utils import relpath, tmpdir - -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_MINIMAL = "config_minimal.yml" CONFIG_FILE_IN_CONSTR = "config_in_constr.yml" @@ -25,8 +22,7 @@ @pytest.mark.redundant_test # superseded by test_math_func_advanced and _multiobj -@tmpdir(CONFIG_PATH) -def test_math_func_minimal(): +def test_math_func_minimal(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_MINIMAL) workflow = _EverestWorkflow(config) assert workflow is not None @@ -62,8 +58,7 @@ def test_math_func_minimal(): @pytest.mark.redundant_test # superseded by test_math_func_advanced and _multiobj -@tmpdir(CONFIG_PATH) -def test_math_func_in_constr(): +def test_math_func_in_constr(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_IN_CONSTR) workflow = _EverestWorkflow(config) assert workflow is not None @@ -94,8 +89,7 @@ def test_math_func_in_constr(): @pytest.mark.redundant_test # superseded by test_math_func_advanced and _multiobj -@tmpdir(CONFIG_PATH) -def test_math_func_out_constr(): +def test_math_func_out_constr(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_OUT_CONSTR) workflow = _EverestWorkflow(config) assert workflow is not None @@ -130,8 +124,7 @@ def test_math_func_out_constr(): 
@pytest.mark.integration_test -@tmpdir(CONFIG_PATH) -def test_math_func_multiobj(): +def test_math_func_multiobj(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_MULTIOBJ) workflow = _EverestWorkflow(config) @@ -216,8 +209,7 @@ def test_math_func_multiobj(): @pytest.mark.integration_test -@tmpdir(CONFIG_PATH) -def test_math_func_advanced(): +def test_math_func_advanced(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) workflow = _EverestWorkflow(config) @@ -287,8 +279,7 @@ def test_math_func_advanced(): @pytest.mark.integration_test -@tmpdir(CONFIG_PATH) -def test_remove_run_path(): +def test_remove_run_path(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_REMOVE_RUN_PATH) simulation_should_fail = "simulation_2" @@ -335,8 +326,7 @@ def test_remove_run_path(): ), "Simulation folder should be there, something went wrong and was removed" -@tmpdir(CONFIG_PATH) -def test_math_func_auto_scaled_controls(): +def test_math_func_auto_scaled_controls(copy_math_func_test_data_to_tmp): mn = -1 mx = 1 ex = (0.25 - mn) / (mx - mn) * 0.4 + 0.3 diff --git a/tests/everest/test_objective_type.py b/tests/everest/test_objective_type.py index 397252cae3d..26f9a3b750a 100644 --- a/tests/everest/test_objective_type.py +++ b/tests/everest/test_objective_type.py @@ -2,14 +2,11 @@ from everest.config import EverestConfig from everest.suite import _EverestWorkflow -from tests.everest.utils import relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_STDDEV = "config_stddev.yml" -@tmpdir(CONFIG_PATH) -def test_mathfunc_stddev(): +def test_mathfunc_stddev(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_STDDEV) workflow = _EverestWorkflow(config) diff --git a/tests/everest/test_samplers.py b/tests/everest/test_samplers.py index 62ade9f7a02..70488ad2395 100644 --- a/tests/everest/test_samplers.py +++ 
b/tests/everest/test_samplers.py @@ -3,14 +3,11 @@ from everest.config import EverestConfig from everest.config.sampler_config import SamplerConfig from everest.suite import _EverestWorkflow -from tests.everest.utils import relpath, tmpdir -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE_ADVANCED = "config_advanced_scipy.yml" -@tmpdir(CONFIG_PATH) -def test_sampler_uniform(): +def test_sampler_uniform(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) config.controls[0].sampler = SamplerConfig(**{"method": "uniform"}) @@ -39,8 +36,7 @@ def test_sampler_uniform(): assert expected_opt == pytest.approx(workflow.result.total_objective, abs=0.001) -@tmpdir(CONFIG_PATH) -def test_sampler_mixed(): +def test_sampler_mixed(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) config.controls[0].variables[0].sampler = SamplerConfig(**{"method": "uniform"}) config.controls[0].variables[1].sampler = SamplerConfig(**{"method": "norm"}) diff --git a/tests/everest/test_simulator_cache.py b/tests/everest/test_simulator_cache.py index abd724b9c0c..9cb769195c2 100644 --- a/tests/everest/test_simulator_cache.py +++ b/tests/everest/test_simulator_cache.py @@ -4,13 +4,11 @@ from everest.config import EverestConfig, SimulatorConfig from everest.optimizer.everest2ropt import everest2ropt from everest.simulator import Simulator -from tests.everest.utils import relpath, tmp -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") CONFIG_FILE = "config_advanced_scipy.yml" -def test_simulator_cache(monkeypatch): +def test_simulator_cache(monkeypatch, copy_math_func_test_data_to_tmp): n_evals = 0 original_call = Simulator.__call__ @@ -22,39 +20,38 @@ def new_call(*args): monkeypatch.setattr(Simulator, "__call__", new_call) - with tmp(CONFIG_PATH): - config = EverestConfig.load_file(CONFIG_FILE) - config.simulator = SimulatorConfig(enable_cache=True) - - ropt_config = 
everest2ropt(config) - simulator = Simulator(config) - - # Run once, populating the cache of the simulator: - variables1 = ( - OptimizationPlanRunner( - enopt_config=ropt_config, - evaluator=simulator, - seed=config.environment.random_seed, - ) - .run() - .variables + config = EverestConfig.load_file(CONFIG_FILE) + config.simulator = SimulatorConfig(enable_cache=True) + + ropt_config = everest2ropt(config) + simulator = Simulator(config) + + # Run once, populating the cache of the simulator: + variables1 = ( + OptimizationPlanRunner( + enopt_config=ropt_config, + evaluator=simulator, + seed=config.environment.random_seed, ) - assert variables1 is not None - assert np.allclose(variables1, [0.1, 0, 0.4], atol=0.02) - assert n_evals > 0 - - # Run again with the same simulator: - n_evals = 0 - variables2 = ( - OptimizationPlanRunner( - enopt_config=ropt_config, - evaluator=simulator, - seed=config.environment.random_seed, - ) - .run() - .variables + .run() + .variables + ) + assert variables1 is not None + assert np.allclose(variables1, [0.1, 0, 0.4], atol=0.02) + assert n_evals > 0 + + # Run again with the same simulator: + n_evals = 0 + variables2 = ( + OptimizationPlanRunner( + enopt_config=ropt_config, + evaluator=simulator, + seed=config.environment.random_seed, ) - assert variables2 is not None - assert n_evals == 0 + .run() + .variables + ) + assert variables2 is not None + assert n_evals == 0 - assert np.array_equal(variables1, variables2) + assert np.array_equal(variables1, variables2) diff --git a/tests/everest/test_templating.py b/tests/everest/test_templating.py index c3ae3bd760c..ade1e5915a1 100644 --- a/tests/everest/test_templating.py +++ b/tests/everest/test_templating.py @@ -14,7 +14,7 @@ TMPL_WELL_DRILL_FILE = os.path.join("templates", "well_drill_info.tmpl") TMPL_DUAL_INPUT_FILE = os.path.join("templates", "dual_input.tmpl") -MATH_TEST_PATH = relpath("..", "..", "test-data", "everest", "math_func") + MATH_CONFIG_FILE = "config_minimal.yml" @@ -173,8 
+173,7 @@ def test_well_order_template(): @pytest.mark.integration_test -@tmpdir(relpath(MATH_TEST_PATH)) -def test_user_specified_data_n_template(): +def test_user_specified_data_n_template(copy_math_func_test_data_to_tmp): """ Ensure that a user specifying a data resource and an installed_template with "extra_data", the results of that template will be passed to the diff --git a/tests/everest/test_util.py b/tests/everest/test_util.py index b397c38588e..147c2185c07 100644 --- a/tests/everest/test_util.py +++ b/tests/everest/test_util.py @@ -25,8 +25,6 @@ ) SPE1_DATA = relpath("test_data/eclipse/SPE1.DATA") -CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func") - @skipif_no_opm def test_loadwells(): @@ -129,8 +127,7 @@ def test_makedirs_roll_existing(): assert len(os.listdir(cwd)) == 3 -@tmpdir(CONFIG_PATH) -def test_get_everserver_status_path(): +def test_get_everserver_status_path(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file("config_minimal.yml") cwd = os.getcwd() session_path = os.path.join(