Commit

reading of cross-section and culverts including filtering based on draintype added #11
rhutten committed Oct 29, 2020
1 parent fecfd61 commit 7ce817e
Showing 4 changed files with 29 additions and 37 deletions.
5 changes: 3 additions & 2 deletions data/osm/osm_settings.ini
@@ -5,9 +5,10 @@ DataFile = dar_drains.json

[datacolumns]
IDcolumn = id
DrainTypeColumn = drain_type
branches = id, geometry # write down
crosssections = id, draintype, covered, material, width, depth, top_width, diameter, profile_op, profile_cl, bottom_width, geometry
structure = id, draintype, geometry
crosssections = id, drain_type, covered, material, width, depth, top_width, diameter, profile_op, profile_cl, bottom_wid, elliptical, geometry
structures = id, drain_type, geometry

[parameter]

9 changes: 7 additions & 2 deletions delft3dfmpy/datamodels/common.py
@@ -86,7 +86,7 @@ def delete_all(self):
        self.dropna(inplace=True)

    def read_shp(self, path, index_col=None, column_mapping=None, check_columns=True, clip=None, check_geotype=True,
                 id_col='code',filter_cols = False):
                 id_col='code',filter_cols = False, draintype_col=None, filter_culverts=False):
"""
Import function, extended with type checks. Does not destroy reference to object.
"""
@@ -95,10 +95,15 @@ def read_shp(self, path, index_col=None, column_mapping=None, check_columns=True

        #FIXME: add filter to read_file

        # Remove unnecessary columns
        # Only keep required columns
        if filter_cols:
            gdf.drop(columns=gdf.columns[~gdf.columns.isin(self.required_columns)], inplace=True)

        # In case of culvert select indices that are culverts
        #FIXME: draintype culvert could be different for other OSM data
        if filter_culverts:
            gdf.drop(index= gdf.index[gdf[draintype_col]!='culvert'], inplace = True)


        #FIXME: add method to handle features with geometry is None.
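For reference, a minimal sketch of what the two new flags do, run on a toy GeoDataFrame. The two drop calls are copied from the snippet above; the sample rows and the required_columns list are made up for illustration.

import geopandas as gpd
from shapely.geometry import LineString

# Toy data: one open drain and one culvert (illustrative values only)
gdf = gpd.GeoDataFrame({
    'id': [1, 2],
    'drain_type': ['drain', 'culvert'],
    'material': ['earth', 'concrete'],
    'geometry': [LineString([(0, 0), (1, 0)]), LineString([(1, 0), (2, 0)])],
})
required_columns = ['id', 'drain_type', 'geometry']  # stand-in for self.required_columns
draintype_col = 'drain_type'

# filter_cols=True: drop every column that is not in required_columns
gdf.drop(columns=gdf.columns[~gdf.columns.isin(required_columns)], inplace=True)

# filter_culverts=True: keep only the rows whose drain type is 'culvert'
gdf.drop(index=gdf.index[gdf[draintype_col] != 'culvert'], inplace=True)

print(gdf)  # one row left: the culvert, with id, drain_type and geometry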
28 changes: 2 additions & 26 deletions delft3dfmpy/datamodels/osm.py
@@ -38,35 +38,11 @@ def __init__(self, extent_file=None, data_columns=None):

# FIXME: ensure that all required parameterised properties are provided. I can imagine this is a matter of making
# several parameterised profiles for different profile types (e.g. trapezoidal, rectangular, circular, etc.)
self.parametrised_profiles = ExtendedGeoDataFrame(geotype=LineString, required_columns=[
'code',
'bodemhoogtebenedenstrooms',
'bodemhoogtebovenstrooms',
'bodembreedte',
'taludhellinglinkerzijde',
'taludhellingrechterzijde',
'hoogteinsteeklinkerzijde',
'hoogteinsteekrechterzijde',
'ruwheidswaarde',
'ruwheidstypecode'
])
self.parametrised_profiles = ExtendedGeoDataFrame(geotype=LineString, required_columns=self.get_columns('crosssections'))


# FIXME: ensure that all culvert types and properties can be handled. We probably have circular and box-shaped culverts, sometimes with multiple openings
self.culverts = ExtendedGeoDataFrame(geotype=LineString, required_columns=[
'code',
'geometry',
'lengte',
'hoogteopening',
'breedteopening',
'hoogtebinnenonderkantbenedenstrooms',
'hoogtebinnenonderkantbovenstrooms',
'vormcode',
'intreeverlies',
'uittreeverlies',
'ruwheidstypecode',
'ruwheidswaarde'
])
self.culverts = ExtendedGeoDataFrame(geotype=LineString, required_columns=self.get_columns('structures'))

# # FIXME: not sure what laterals in this context mean, but I don't think we need it at this stage.
# self.laterals = ExtendedGeoDataFrame(geotype=Point, required_columns=[
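The hard-coded Dutch column lists are replaced by lookups from the settings file. The commit does not show get_columns itself; below is a minimal sketch of how such a helper might parse the comma-separated entries from the [datacolumns] section of osm_settings.ini. The standalone function, its signature and the settings path are assumptions for illustration only.

import configparser

def get_columns(settings_path, key):
    # Look up e.g. 'crosssections' or 'structures' in the [datacolumns] section
    config = configparser.ConfigParser(inline_comment_prefixes=('#',))
    config.read(settings_path)
    raw = config.get('datacolumns', key)
    # 'id, drain_type, geometry' -> ['id', 'drain_type', 'geometry']
    return [col.strip() for col in raw.split(',')]

print(get_columns('data/osm/osm_settings.ini', 'structures'))  # ['id', 'drain_type', 'geometry']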
24 changes: 17 additions & 7 deletions examples/test_osm.py
@@ -23,16 +23,20 @@

print(type(osm))

# TODO: read branches, culvert, cross sections and properties from file and plot network to check
# TODO: BRANCHES - read id column from json. Do not deviate between drain type
osm.branches.read_shp(os.path.join(path,config.get('input','datafile')),index_col='id',clip = osm.clipgeo
, id_col=config.get('datacolumns', 'idcolumn'), filter_cols=True)
# Id column
id = config.get('datacolumns','idcolumn')

print('hello world')
# TODO: BRANCHES - connect branches on the right locations
# TODO: plot branches
# TODO: BRANCHES - read id column from json. Do not deviate between drain type
# Read branches and store in OSM data model
osm.branches.read_shp(os.path.join(path,config.get('input','datafile')),index_col=id, clip = osm.clipgeo
, id_col=id, filter_cols=True)

# TODO: CROSS SECTIONS DEFINITION - read id, drain_type, material, width, depth, top_width, diameter, profile_op, profile_cl, bottom_width columns from json
# read cross-sections
osm.parametrised_profiles.read_shp(os.path.join(path,config.get('input','datafile')),index_col=id, clip = osm.clipgeo
, id_col=id, filter_cols=True)

print('Hello world')

# TODO: CROSS SECTIONS DEFINITION - specify roughness dependent on material; add this
# TODO: CROSS SECTION DEFINITION - assign elevation value to cross sections. this needs to be retrieved from a DEM (which we have!)
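One way to fill in the roughness TODO above is a plain lookup from material to a Manning coefficient. This sketch is not part of the commit; the material names and coefficients are placeholder values, not project data.

# Illustrative Manning roughness per material (placeholder values)
MANNING_BY_MATERIAL = {
    'concrete': 0.013,
    'masonry': 0.025,
    'earth': 0.030,
}
DEFAULT_MANNING = 0.030

def roughness_for(material):
    # Fall back to a default for missing or unknown materials
    if material is None:
        return DEFAULT_MANNING
    return MANNING_BY_MATERIAL.get(str(material).lower(), DEFAULT_MANNING)

# e.g. once the profiles are read:
# osm.parametrised_profiles['roughness'] = osm.parametrised_profiles['material'].apply(roughness_for)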
@@ -44,6 +48,11 @@
# TODO: plot branches + cross sections locations

# TODO: STRUCTURE - read id, draintype
# Read culverts
osm.culverts.read_shp(os.path.join(path,config.get('input','datafile')),index_col=id, clip = osm.clipgeo,
id_col=id, filter_cols=True, draintype_col=config.get('datacolumns','draintypecolumn')
, filter_culverts=True)

# TODO: STRUCTURE - select rows with draintype culvert
# TODO: STRUCTURE - determine length and midpoint location culvert
# TODO: STRUCTURE - snapping of open drains over culverts. May not be needed as we'll only use parameterized profiles
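The length/midpoint TODO above comes down to two shapely operations. A small sketch, not part of this commit; the 'length' and 'midpoint' column names are chosen here for illustration.

from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
length = line.length                               # length in CRS units
midpoint = line.interpolate(0.5, normalized=True)  # point halfway along the line
print(length, midpoint)                            # 10.0 POINT (5 0)

# Applied to every culvert after osm.culverts.read_shp(...):
# osm.culverts['length'] = osm.culverts.geometry.length
# osm.culverts['midpoint'] = osm.culverts.geometry.interpolate(0.5, normalized=True)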
@@ -56,3 +65,4 @@

# TODO: create 1D2D links

print("Hello world")
