From 94a6dfaf9db5ee9e06770a1c90d6329c2a8387ec Mon Sep 17 00:00:00 2001
From: Lucas Vittor
Date: Fri, 21 Feb 2025 17:05:53 -0300
Subject: [PATCH 1/2] chore(docs): set a consistent file name

Signed-off-by: Lucas Vittor
---
 docs/source/tutorial/test_a_project.md | 178 ++++++++++++-------------
 1 file changed, 89 insertions(+), 89 deletions(-)

diff --git a/docs/source/tutorial/test_a_project.md b/docs/source/tutorial/test_a_project.md
index ffa75d5f55..435c5595ff 100644
--- a/docs/source/tutorial/test_a_project.md
+++ b/docs/source/tutorial/test_a_project.md
@@ -66,33 +66,33 @@ When we put these steps together, we have the following test:
 import pandas as pd
 from spaceflights.pipelines.data_science.nodes import split_data
 
-    def test_split_data():
-        # Arrange
-        dummy_data = pd.DataFrame(
-            {
-                "engines": [1, 2, 3],
-                "crew": [4, 5, 6],
-                "passenger_capacity": [5, 6, 7],
-                "price": [120, 290, 30],
-            }
-        )
-
-        dummy_parameters = {
-            "model_options": {
-                "test_size": 0.2,
-                "random_state": 3,
-                "features": ["engines", "passenger_capacity", "crew"],
-            }
-        }
-
-        # Act
-        X_train, X_test, y_train, y_test = split_data(dummy_data, dummy_parameters["model_options"])
-
-        # Assert
-        assert len(X_train) == 2
-        assert len(y_train) == 2
-        assert len(X_test) == 1
-        assert len(y_test) == 1
+def test_split_data():
+    # Arrange
+    dummy_data = pd.DataFrame(
+        {
+            "engines": [1, 2, 3],
+            "crew": [4, 5, 6],
+            "passenger_capacity": [5, 6, 7],
+            "price": [120, 290, 30],
+        }
+    )
+
+    dummy_parameters = {
+        "model_options": {
+            "test_size": 0.2,
+            "random_state": 3,
+            "features": ["engines", "passenger_capacity", "crew"],
+        }
+    }
+
+    # Act
+    X_train, X_test, y_train, y_test = split_data(dummy_data, dummy_parameters["model_options"])
+
+    # Assert
+    assert len(X_train) == 2
+    assert len(y_train) == 2
+    assert len(X_test) == 1
+    assert len(y_test) == 1
 ```
@@ -112,31 +112,31 @@ Using the same steps as above, we can write the following test to validate an er
 import pandas as pd
 from spaceflights.pipelines.data_science.nodes import split_data
 
-    def test_split_data_missing_price():
-        # Arrange
-        dummy_data = pd.DataFrame(
-            {
-                "engines": [1, 2, 3],
-                "crew": [4, 5, 6],
-                "passenger_capacity": [5, 6, 7],
-                # Note the missing price data
-            }
-        )
-
-        dummy_parameters = {
-            "model_options": {
-                "test_size": 0.2,
-                "random_state": 3,
-                "features": ["engines", "passenger_capacity", "crew"],
-            }
-        }
-
-        with pytest.raises(KeyError) as e_info:
-            # Act
-            X_train, X_test, y_train, y_test = split_data(dummy_data, dummy_parameters["model_options"])
-
-        # Assert
-        assert "price" in str(e_info.value)  # checks that the error is about the missing price data
+def test_split_data_missing_price():
+    # Arrange
+    dummy_data = pd.DataFrame(
+        {
+            "engines": [1, 2, 3],
+            "crew": [4, 5, 6],
+            "passenger_capacity": [5, 6, 7],
+            # Note the missing price data
+        }
+    )
+
+    dummy_parameters = {
+        "model_options": {
+            "test_size": 0.2,
+            "random_state": 3,
+            "features": ["engines", "passenger_capacity", "crew"],
+        }
+    }
+
+    with pytest.raises(KeyError) as e_info:
+        # Act
+        X_train, X_test, y_train, y_test = split_data(dummy_data, dummy_parameters["model_options"])
+
+    # Assert
+    assert "price" in str(e_info.value)  # checks that the error is about the missing price data
 ```
@@ -205,46 +205,46 @@
 from kedro.io import DataCatalog
 from kedro.runner import SequentialRunner
 from spaceflights.pipelines.data_science import create_pipeline as create_ds_pipeline
 
-    def test_data_science_pipeline(caplog):  # Note: caplog is passed as an argument
-        # Arrange pipeline
-        pipeline = create_ds_pipeline()
-
-        # Arrange data catalog
-        catalog = DataCatalog()
-
-        dummy_data = pd.DataFrame(
-            {
-                "engines": [1, 2, 3],
-                "crew": [4, 5, 6],
-                "passenger_capacity": [5, 6, 7],
-                "price": [120, 290, 30],
-            }
-        )
-
-        dummy_parameters = {
-            "model_options": {
-                "test_size": 0.2,
-                "random_state": 3,
-                "features": ["engines", "passenger_capacity", "crew"],
-            }
-        }
-
-        catalog.add_feed_dict(
-            {
-                "model_input_table" : dummy_data,
-                "params:model_options": dummy_parameters["model_options"],
-            }
-        )
-
-        # Arrange the log testing setup
-        caplog.set_level(logging.DEBUG, logger="kedro")  # Ensure all logs produced by Kedro are captured
-        successful_run_msg = "Pipeline execution completed successfully."
-
-        # Act
-        SequentialRunner().run(pipeline, catalog)
-
-        # Assert
-        assert successful_run_msg in caplog.text
+def test_data_science_pipeline(caplog):  # Note: caplog is passed as an argument
+    # Arrange pipeline
+    pipeline = create_ds_pipeline()
+
+    # Arrange data catalog
+    catalog = DataCatalog()
+
+    dummy_data = pd.DataFrame(
+        {
+            "engines": [1, 2, 3],
+            "crew": [4, 5, 6],
+            "passenger_capacity": [5, 6, 7],
+            "price": [120, 290, 30],
+        }
+    )
+
+    dummy_parameters = {
+        "model_options": {
+            "test_size": 0.2,
+            "random_state": 3,
+            "features": ["engines", "passenger_capacity", "crew"],
+        }
+    }
+
+    catalog.add_feed_dict(
+        {
+            "model_input_table" : dummy_data,
+            "params:model_options": dummy_parameters["model_options"],
+        }
+    )
+
+    # Arrange the log testing setup
+    caplog.set_level(logging.DEBUG, logger="kedro")  # Ensure all logs produced by Kedro are captured
+    successful_run_msg = "Pipeline execution completed successfully."
+
+    # Act
+    SequentialRunner().run(pipeline, catalog)
+
+    # Assert
+    assert successful_run_msg in caplog.text
 ```
@@ -321,16 +321,16 @@ def test_split_data(dummy_data, dummy_parameters):
 In the test `test_data_science_pipeline` we test that the data science pipeline, as currently defined, can be run successfully. However, as pipelines are not static, this test is not robust. Instead we should be specific with how we define the pipeline to be tested; we do this by using [pipeline slicing](../nodes_and_pipelines/slice_a_pipeline.md#slice-a-pipeline-by-running-specified-nodes) to specify the pipeline's start and end:
 
 ```python
-    def test_data_science_pipeline(self):
-        # Arrange pipeline
-        pipeline = create_pipeline().from_nodes("split_data_node").to_nodes("evaluate_model_node")
-        ...
+def test_data_science_pipeline(self):
+    # Arrange pipeline
+    pipeline = create_pipeline().from_nodes("split_data_node").to_nodes("evaluate_model_node")
+    ...
 ```
 
 This ensures that the test will still perform as designed, even with the addition of more nodes to the pipeline.
 
 After incorporating these testing practices, our test file `test_data_science_pipeline.py` becomes:
@@ -428,7 +428,7 @@ To run your tests, run `pytest` from within your project's root directory.
 
 ```bash
 cd 
-pytest tests/pipelines/test_data_science.py
+pytest tests/pipelines/test_data_science_pipeline.py
 ```
 
 You should see the following output in your shell.
@@ -438,9 +438,9 @@ You should see the following output in your shell.
 ...
 collected 2 items
 
-tests/pipelines/test_data_science.py ..                                  [100%]
+tests/pipelines/test_data_science_pipeline.py ..                         [100%]
 
 ============================== 2 passed in 4.38s ===============================
 ```
 
-This output indicates that all tests ran successfully in the file `tests/pipelines/test_data_science.py`.
+This output indicates that all tests ran successfully in the file `tests/pipelines/test_data_science_pipeline.py`.

From a5d99410f42667518794dbbe15849418770f6e88 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Luis=20Cano=20Rodr=C3=ADguez?=
Date: Tue, 25 Feb 2025 13:44:51 +0100
Subject: [PATCH 2/2] Improve prose

---
 docs/source/tutorial/test_a_project.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/tutorial/test_a_project.md b/docs/source/tutorial/test_a_project.md
index 435c5595ff..e01a13b4a9 100644
--- a/docs/source/tutorial/test_a_project.md
+++ b/docs/source/tutorial/test_a_project.md
@@ -443,4 +443,4 @@ tests/pipelines/test_data_science_pipeline.py ..
 ============================== 2 passed in 4.38s ===============================
 ```
 
-This output indicates that all tests ran successfully in the file `tests/pipelines/test_data_science_pipeline.py`.
+This output indicates that all tests ran without errors in the file `tests/pipelines/test_data_science_pipeline.py`.
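For reference only, and not part of either patch above: a minimal sketch of what the consolidated `tests/pipelines/test_data_science_pipeline.py` described in the hunks might look like. It reuses only the fixtures, node names, and Kedro calls that appear in the diff; the import layout and fixture placement are assumptions rather than content taken from the patches.

```python
# Hypothetical consolidated test file; assembled from the snippets shown in the
# hunks above, not copied from the repository.
import logging

import pandas as pd
import pytest
from kedro.io import DataCatalog
from kedro.runner import SequentialRunner

from spaceflights.pipelines.data_science import create_pipeline as create_ds_pipeline
from spaceflights.pipelines.data_science.nodes import split_data


@pytest.fixture
def dummy_data():
    # Small in-memory stand-in for the model input table
    return pd.DataFrame(
        {
            "engines": [1, 2, 3],
            "crew": [4, 5, 6],
            "passenger_capacity": [5, 6, 7],
            "price": [120, 290, 30],
        }
    )


@pytest.fixture
def dummy_parameters():
    return {
        "model_options": {
            "test_size": 0.2,
            "random_state": 3,
            "features": ["engines", "passenger_capacity", "crew"],
        }
    }


def test_split_data(dummy_data, dummy_parameters):
    X_train, X_test, y_train, y_test = split_data(
        dummy_data, dummy_parameters["model_options"]
    )
    assert len(X_train) == 2
    assert len(X_test) == 1


def test_data_science_pipeline(caplog, dummy_data, dummy_parameters):
    # Slice the pipeline so the test keeps working if more nodes are added later
    pipeline = (
        create_ds_pipeline()
        .from_nodes("split_data_node")
        .to_nodes("evaluate_model_node")
    )

    catalog = DataCatalog()
    catalog.add_feed_dict(
        {
            "model_input_table": dummy_data,
            "params:model_options": dummy_parameters["model_options"],
        }
    )

    # Capture Kedro's own log records so the success message can be asserted on
    caplog.set_level(logging.DEBUG, logger="kedro")

    SequentialRunner().run(pipeline, catalog)

    assert "Pipeline execution completed successfully." in caplog.text
```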