diff --git a/.gitignore b/.gitignore
index 306aa97..66e02d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,17 +20,13 @@ dmypy.json
# pytest --basetemp
src/hydamo/tests/temp/
src/ribasim_nl/tests/temp/
-src/peilbeheerst_model/tests/temp/
src/peilbeheerst_model/*.html
src/peilbeheerst_model/*.code-workspace
src/peilbeheerst_model/.vscode
-src/peilbeheerst_model/*.jpeg
-src/peilbeheerst_model/*.gpkg
-src/peilbeheerst_model/tests_results
-/src/peilbeheerst_model/Output_zdrive
-/src/peilbeheerst_model/Rekenend_Model_Test
-/src/peilbeheerst_model/vervallen
+/src/peilbeheerst_model/Parametrize/ribasim
+/src/peilbeheerst_model/Parametrize/fix_FF_HHSK.ipynb
+/src/peilbeheerst_model/01_test_parse_crossings.ipynb
notebooks/rijkswaterstaat/plots/
diff --git a/src/peilbeheerst_model/01_parse_crossings.ipynb b/src/peilbeheerst_model/01_parse_crossings.ipynb
index 026f976..d6c7eb9 100644
--- a/src/peilbeheerst_model/01_parse_crossings.ipynb
+++ b/src/peilbeheerst_model/01_parse_crossings.ipynb
@@ -36,8 +36,8 @@
" print_df[funcname].append(pd.Series(func_args, name=waterschap))\n",
"\n",
"for funcname, df in print_df.items():\n",
- " display(HTML(f\"
Function {funcname}:
\"))\n",
- " display(pd.DataFrame(df))"
+ " print(HTML(f\"Function {funcname}:
\"))\n",
+ " print(pd.DataFrame(df))"
]
},
{
@@ -164,8 +164,8 @@
"fig1.savefig(\"network_results.jpeg\", bbox_inches=\"tight\")\n",
"fig2.savefig(\"reduction_results.jpeg\", bbox_inches=\"tight\")\n",
"\n",
- "display(pd.DataFrame(reduction_results, index=waterschappen))\n",
- "display(pd.DataFrame(network_results, index=waterschappen))"
+ "print(pd.DataFrame(reduction_results, index=waterschappen))\n",
+ "print(pd.DataFrame(network_results, index=waterschappen))"
]
},
{
@@ -201,7 +201,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.13"
+ "version": "3.11.6"
}
},
"nbformat": 4,
diff --git a/src/peilbeheerst_model/01b_ad_krw_to_peilgebieden.ipynb b/src/peilbeheerst_model/01b_ad_krw_to_peilgebieden.ipynb
index edb465e..5f1fe7c 100644
--- a/src/peilbeheerst_model/01b_ad_krw_to_peilgebieden.ipynb
+++ b/src/peilbeheerst_model/01b_ad_krw_to_peilgebieden.ipynb
@@ -34,8 +34,8 @@
" print_df[funcname].append(pd.Series(func_args, name=waterschap))\n",
"\n",
"for funcname, df in print_df.items():\n",
- " display(HTML(f\"Function {funcname}:
\"))\n",
- " display(pd.DataFrame(df))"
+ " print(HTML(f\"Function {funcname}:
\"))\n",
+ " print(pd.DataFrame(df))"
]
},
{
@@ -96,7 +96,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.13"
+ "version": "3.11.6"
}
},
"nbformat": 4,
diff --git a/src/peilbeheerst_model/Parametrize/sturing_AmstelGooienVecht.json b/src/peilbeheerst_model/Parametrize/sturing_AmstelGooienVecht.json
index 58e2e06..1e34cf2 100644
--- a/src/peilbeheerst_model/Parametrize/sturing_AmstelGooienVecht.json
+++ b/src/peilbeheerst_model/Parametrize/sturing_AmstelGooienVecht.json
@@ -58,7 +58,7 @@
"truth_state": ["FF", "FT", "TF", "TT"],
"control_state": ["block", "block", "pass", "block"],
"flow_rate_block": 0,
- "flow_rate_pass": 0.2,
+ "flow_rate_pass": 0.20,
"node_type": "pump"
},
@@ -67,7 +67,7 @@
"truth_state": ["FF", "FT", "TF", "TT"],
"control_state": ["block", "block", "pass", "pass"],
"flow_rate_block": 0,
- "flow_rate_pass": 0.2,
+ "flow_rate_pass": 0.20,
"node_type": "pump"
},
@@ -76,7 +76,7 @@
"truth_state": ["FF", "FT", "TF", "TT"],
"control_state": ["block", "block", "pass", "pass"],
"flow_rate_block": 0,
- "flow_rate_pass": 0.2,
+ "flow_rate_pass": 0.20,
"node_type": "pump"
},
@@ -85,7 +85,7 @@
"truth_state": ["FF", "FT", "TF", "TT"],
"control_state": ["pass", "block", "pass", "block"],
"flow_rate_block": 0,
- "flow_rate_pass": 0.1,
+ "flow_rate_pass": 0.10,
"node_type": "pump"
},
diff --git a/src/peilbeheerst_model/Shortest_path/02_shortest_path_HHSK.ipynb b/src/peilbeheerst_model/Shortest_path/02_shortest_path_HHSK.ipynb
index a1ae0f8..664b77e 100644
--- a/src/peilbeheerst_model/Shortest_path/02_shortest_path_HHSK.ipynb
+++ b/src/peilbeheerst_model/Shortest_path/02_shortest_path_HHSK.ipynb
@@ -632,7 +632,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.13"
+ "version": "3.11.6"
}
},
"nbformat": 4,
diff --git a/src/peilbeheerst_model/Workflow_peilbeheerst_NL_LHM.txt b/src/peilbeheerst_model/Workflow_peilbeheerst_NL_LHM.txt
new file mode 100644
index 0000000..3557561
--- /dev/null
+++ b/src/peilbeheerst_model/Workflow_peilbeheerst_NL_LHM.txt
@@ -0,0 +1,15 @@
+Stappenplan ontwikkelen code peilbeheerst NL:
+1) Doorloop de preprocessing notebooks. Notebook per waterschap, locatie: peilbeheerst_model/peilbeheerst_model/preprocess_data
+2) Doorloop de post processing notebook. Notebook per waterschap, locatie: peilbeheerst_model/peilbeheerst_model/postprocess_data
+3) Doorloop de crossings notebook. Eén notebook, locatie: peilbeheerst_model/01_test_parse_crossings.ipynb
+4) Doorloop shortest paths notebookS. Notebook per waterschap, locatie: peilbeheerst_model/Shortest_path
+5) Doorloop crossings to Ribasim notebook. Eén notebook, alle waterschappen staan onder elkaar, locatie: peilbeheerst_model/02_crossings_to_ribasim_notebook.ipynb
+6) Doorloop parametrize notebookS. Notebook per waterschap, voor nu alleen nog AGV. Locatie: peilbeheerst_model/Parametrize/AmstelGooienVecht_parametrize.ipynb
+
+We hadden oorspronkelijk meer parametrize notebooks, maar omdat zo veel is veranderd heb ik deze nu opgeslagen in onze back-up.
+Deze gaan we alleen gebruiken om te kijken of er nog extra handmatige aanpassingen waren.
+Voor de rest volgt het dezelfde workflow als AGV_parametrize.
+
+Tot slot: stap 1 begon met een overzichtelijk notebook per waterschap. Gedurende het proces van 1.5 jaar zijn deze notebooks steeds groter en onoverzichtelijker geworden,
waarbij niet elke regel meer nodig is. Voor nu ligt er geen prioriteit om dit op te schonen, mede omdat dit een groot risico is dat de data (onbedoeld) wijzigt,
+waardoor de netwerken zullen veranderen en de feedback formulieren niet meer gebruikt kunnen worden.
diff --git a/src/peilbeheerst_model/peilbeheerst_model/assign_authorities.py b/src/peilbeheerst_model/peilbeheerst_model/assign_authorities.py
new file mode 100644
index 0000000..44ac5dc
--- /dev/null
+++ b/src/peilbeheerst_model/peilbeheerst_model/assign_authorities.py
@@ -0,0 +1,140 @@
+import geopandas as gpd
+import numpy as np
+import pandas as pd
+
+
+class AssignAuthorities:
+ def __init__(self, ribasim_model, waterschap, ws_grenzen_path, RWS_grenzen_path, ws_buffer=1025, RWS_buffer=1000):
+ self.ws_grenzen_path = ws_grenzen_path
+ self.RWS_grenzen_path = RWS_grenzen_path
+
+ self.ws_buffer = ws_buffer
+ self.RWS_buffer = RWS_buffer
+
+ self.ribasim_model = ribasim_model
+ self.waterschap = waterschap
+
+ def assign_authorities(self):
+ authority_borders = self.retrieve_geodataframe()
+ ribasim_model = self.embed_authorities_in_model(
+ ribasim_model=self.ribasim_model, waterschap=self.waterschap, authority_borders=authority_borders
+ )
+ return ribasim_model
+
+ def retrieve_geodataframe(self):
+ """Main function which calls the other functions."""
+ ws_grenzen, RWS_grenzen = self.load_data()
+ authority_borders = self.clip_and_buffer(ws_grenzen, RWS_grenzen)
+ authority_borders = self.extent_authority_borders(authority_borders)
+
+ return authority_borders
+
+ def load_data(self):
+ """Loads and processes the authority areas of the waterschappen and RWS."""
+ ws_grenzen = gpd.read_file(self.ws_grenzen_path)
+ RWS_grenzen = gpd.read_file(self.RWS_grenzen_path)
+
+ # Removing "\n", "waterschap", "Hoogheemraadschap", "van" and spaces and commas
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace(r"\n", "", regex=True)
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace("Waterschap", "", regex=False)
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace("Hoogheemraadschap", "", regex=False)
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace("De S", "S", regex=False) # HDSR
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace("â", "a", regex=False) # WF
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace("van", "", regex=False)
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace(",", "", regex=False)
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace("'", "", regex=False)
+ ws_grenzen["naam"] = ws_grenzen["naam"].str.replace(" ", "", regex=False)
+
+ ws_grenzen = ws_grenzen.sort_values(by="naam").reset_index(drop=True)
+ self.ws_grenzen_OG = ws_grenzen.copy()
+
+ # get rid of irrelevant polygons
+ ws_grenzen = ws_grenzen.explode()
+ ws_grenzen["area"] = ws_grenzen.area
+ ws_grenzen = ws_grenzen.loc[ws_grenzen.area > 10000000] # remove some small polygons
+ ws_grenzen.reset_index(drop=True, inplace=True)
+
+ # add RWS_grenzen. Buffer and dissolve it
+ RWS_grenzen["geometry"] = RWS_grenzen.buffer(self.RWS_buffer)
+ RWS_grenzen = RWS_grenzen.dissolve()[["geometry"]]
+
+ return ws_grenzen, RWS_grenzen
+
+ def clip_and_buffer(self, ws_grenzen, RWS_grenzen):
+ """Clips the waterboard boundaries by removing the RWS areas and applies a buffer to the remaining polygons."""
+ # Remove the RWS area in each WS
+ ws_grenzen_cut_out = gpd.overlay(ws_grenzen, RWS_grenzen, how="symmetric_difference")
+ ws_grenzen_cut_out.dropna(subset="area", inplace=True)
+
+ # add a name to the RWS area
+ RWS_grenzen["naam"] = "Rijkswaterstaat"
+
+ # add a buffer to each waterschap. Within this strip an authority will be found.
+ ws_grenzen_cut_out["geometry"] = ws_grenzen_cut_out.buffer(self.ws_buffer)
+
+ # add the two layers together
+ authority_borders = pd.concat([ws_grenzen_cut_out, RWS_grenzen])
+ authority_borders = authority_borders.reset_index(drop=True)
+ authority_borders = gpd.GeoDataFrame(authority_borders, geometry="geometry").set_crs(crs="EPSG:28992")
+
+ return authority_borders
+
+ def extent_authority_borders(self, authority_borders):
+ """Extends the authority borders by combining them with the original waterboard boundaries and dissolving the geometries based on the name."""
+ # Add a bit more area by dissolving it with the original gdf
+ authority_borders = pd.concat([authority_borders, self.ws_grenzen_OG])
+ authority_borders = gpd.GeoDataFrame(authority_borders, geometry="geometry").set_crs(crs="EPSG:28992")
+ authority_borders = authority_borders.dissolve(by="naam", as_index=False)
+ authority_borders = authority_borders[["naam", "geometry"]]
+
+ return authority_borders
+
+ def embed_authorities_in_model(self, ribasim_model, waterschap, authority_borders):
+ # create a temp copy of the level boundary df
+ temp_LB_node = ribasim_model.level_boundary.node.df.copy()
+ temp_LB_node = temp_LB_node[["node_id", "node_type", "geometry"]]
+ ribasim_model.level_boundary.static.df = ribasim_model.level_boundary.static.df[["node_id", "level"]]
+
+ # perform a spatial join
+ joined = gpd.sjoin(temp_LB_node, authority_borders, how="left", op="intersects")
+
+ # #find whether the LevelBoundary flows inward and outward the waterschap
+ FB_inward = ribasim_model.edge.df.loc[ribasim_model.edge.df.from_node_id.isin(joined.node_id.values)].copy()
+ FB_outward = ribasim_model.edge.df.loc[ribasim_model.edge.df.to_node_id.isin(joined.node_id.values)].copy()
+
+ # add the current waterschap name in the correct column
+ FB_inward["meta_to_authority"], FB_outward["meta_from_authority"] = waterschap, waterschap
+
+ temp_LB_node = temp_LB_node.merge(
+ right=FB_inward[["from_node_id", "meta_to_authority"]],
+ left_on="node_id",
+ right_on="from_node_id",
+ how="left",
+ )
+
+ temp_LB_node = temp_LB_node.merge(
+ right=FB_outward[["to_node_id", "meta_from_authority"]],
+ left_on="node_id",
+ right_on="to_node_id",
+ how="left",
+ )
+
+ # #replace the current waterschaps name in the joined layer to NaN, and drop those
+ joined["naam"].replace(to_replace=waterschap, value=np.nan, inplace=True)
+ joined = joined.dropna(subset="naam").reset_index(drop=True)
+
+ # now fill the meta_from_authority and meta_to_authority columns. As they already contain the correct position of the current waterschap, the remaining 'naam' will be placed correctly as well
+ temp_LB_node = temp_LB_node.merge(right=joined[["node_id", "naam"]], on="node_id", how="left")
+ temp_LB_node.meta_from_authority.fillna(temp_LB_node["naam"], inplace=True)
+ temp_LB_node.meta_to_authority.fillna(temp_LB_node["naam"], inplace=True)
+
+ # only select the relevant columns
+ temp_LB_node = temp_LB_node[["node_id", "node_type", "geometry", "meta_from_authority", "meta_to_authority"]]
+ temp_LB_node = temp_LB_node.drop_duplicates(subset="node_id").reset_index(drop=True)
+
+ # place the meta categories to the static table
+ ribasim_model.level_boundary.static.df = ribasim_model.level_boundary.static.df.merge(
+ right=temp_LB_node[["node_id", "meta_from_authority", "meta_to_authority"]], on="node_id", how="left"
+ ).reset_index(drop=True)
+
+ return ribasim_model
diff --git a/src/peilbeheerst_model/peilbeheerst_model/controle_output.py b/src/peilbeheerst_model/peilbeheerst_model/controle_output.py
index 2dbf817..e2e0b00 100644
--- a/src/peilbeheerst_model/peilbeheerst_model/controle_output.py
+++ b/src/peilbeheerst_model/peilbeheerst_model/controle_output.py
@@ -2,14 +2,9 @@
import shutil
import geopandas as gpd
-
-# import matplotlib.pyplot as plt
-# import numpy as np
import pandas as pd
import ribasim
-# from ribasim import Model
-
class Control:
def __init__(self, work_dir):
@@ -176,7 +171,7 @@ def is_stationary(group):
average_last_values = last_24_hours["level"].mean()
actual_last_value = group["level"].iloc[-1]
- # Calculate the deviation in cm
+ # Calculate the deviation
deviation = abs(actual_last_value - average_last_values)
# Determine if it's stationary (deviation <= .11 cm)
@@ -208,7 +203,7 @@ def store_data(self, data, output_path):
data[str(key)].to_file(output_path + ".gpkg", layer=str(key), driver="GPKG")
# copy checks_symbology file from old dir to new dir
- output_controle_qlr_path = r"../../../../Data_overig/QGIS_qlr/output_controle.qlr"
+ output_controle_qlr_path = r"../../../../../Data_overig/QGIS_qlr/output_controle.qlr"
shutil.copy(src=output_controle_qlr_path, dst=os.path.join(self.work_dir, "results", "output_controle.qlr"))
return