diff --git a/src/io.jl b/src/io.jl index 8d5d20182..9ddaba080 100644 --- a/src/io.jl +++ b/src/io.jl @@ -14,7 +14,7 @@ macro symbols_str(s) end "Get a nested field using a tuple of Symbols" -param(obj, fields) = foldl(getproperty, fields; init = obj) +param(obj, fields) = foldl(getproperty, fields; init=obj) param(obj, fields::AbstractString) = param(obj, symbols(fields)) function param(obj, fields, default) try @@ -112,9 +112,9 @@ end "Extract NetCDF variable name `ncname` from `var` (type `String` or `Config`). If `var` has type `Config`, either `scale`, `offset` and an optional `index` are expected (with `ncname`) or a `value` (uniform value), these are stored as part of `NamedTuple` `modifier`." -function ncvar_name_modifier(var; config = nothing) +function ncvar_name_modifier(var; config=nothing) ncname = nothing - modifier = (scale = 1.0, offset = 0.0, value = nothing, index = nothing) + modifier = (scale=1.0, offset=0.0, value=nothing, index=nothing) if isa(var, Config) if haskey(var, "netcdf") && haskey(var.netcdf, "variable") && @@ -133,21 +133,21 @@ function ncvar_name_modifier(var; config = nothing) push!(indices, index) end modifier = - (scale = scale, offset = offset, value = nothing, index = indices) + (scale=scale, offset=offset, value=nothing, index=indices) else index = get_index_dimension(var, config, var[dim_name]) modifier = - (scale = scale, offset = offset, value = nothing, index = index) + (scale=scale, offset=offset, value=nothing, index=index) @info "NetCDF parameter `$ncname` is modified with scale `$scale` and offset `$offset` at index `$index`." end else modifier = - (scale = scale, offset = offset, value = nothing, index = nothing) + (scale=scale, offset=offset, value=nothing, index=nothing) @info "NetCDF parameter `$ncname` is modified with scale `$scale` and offset `$offset`." 
end elseif haskey(var, "value") modifier = - (scale = 1.0, offset = 0.0, value = param(var, "value"), index = nothing) + (scale=1.0, offset=0.0, value=param(var, "value"), index=nothing) else error("Unrecognized modifier $(Dict(var))") end @@ -174,7 +174,7 @@ function get_at( end function get_at(ds::CFDataset, varname::AbstractString, i) - return read_standardized(ds, varname, (x = :, y = :, time = i)) + return read_standardized(ds, varname, (x=:, y=:, time=i)) end function get_param_res(model) @@ -393,7 +393,7 @@ function setup_scalar_netcdf( time_units, extra_dim, config, - float_type = Float32, + float_type=Float32, ) ds = create_tracked_netcdf(path) defDim(ds, "time", Inf) # unlimited @@ -402,7 +402,7 @@ function setup_scalar_netcdf( "time", Float64, ("time",), - attrib = ["units" => time_units, "calendar" => calendar], + attrib=["units" => time_units, "calendar" => calendar], ) set_extradim_netcdf(ds, extra_dim) for (nc, netcdfvars) in zip(ncvars, config.netcdf.variable) @@ -413,7 +413,7 @@ function setup_scalar_netcdf( nc.location_dim, nc.locations, (nc.location_dim,), - attrib = ["cf_role" => "timeseries_id"], + attrib=["cf_role" => "timeseries_id"], ) v = param(modelmap, nc.par) if eltype(v) <: AbstractFloat @@ -422,7 +422,7 @@ function setup_scalar_netcdf( nc.var, float_type, (nc.location_dim, "time"), - attrib = ["_FillValue" => float_type(NaN)], + attrib=["_FillValue" => float_type(NaN)], ) elseif eltype(v) <: SVector if haskey(netcdfvars, extra_dim.name) @@ -432,7 +432,7 @@ function setup_scalar_netcdf( nc.var, float_type, (nc.location_dim, "time"), - attrib = ["_FillValue" => float_type(NaN)], + attrib=["_FillValue" => float_type(NaN)], ) else defVar( @@ -440,7 +440,7 @@ function setup_scalar_netcdf( nc.var, float_type, (nc.location_dim, extra_dim.name, "time"), - attrib = ["_FillValue" => float_type(NaN)], + attrib=["_FillValue" => float_type(NaN)], ) end else @@ -470,7 +470,7 @@ function set_extradim_netcdf( "axis" => "Z", ] end - defVar(ds, 
extra_dim.name, extra_dim.value, (extra_dim.name,), attrib = attributes) + defVar(ds, extra_dim.name, extra_dim.value, (extra_dim.name,), attrib=attributes) return nothing end @@ -486,8 +486,8 @@ function setup_grid_netcdf( time_units, extra_dim, sizeinmetres; - float_type = Float32, - deflatelevel = 0, + float_type=Float32, + deflatelevel=0, ) ds = create_tracked_netcdf(path) @@ -498,26 +498,26 @@ function setup_grid_netcdf( "x", ncx, ("x",), - attrib = [ + attrib=[ "long_name" => "x coordinate of projection", "standard_name" => "projection_x_coordinate", "axis" => "X", "units" => "m", ], - deflatelevel = deflatelevel, + deflatelevel=deflatelevel, ) defVar( ds, "y", ncy, ("y",), - attrib = [ + attrib=[ "long_name" => "y coordinate of projection", "standard_name" => "projection_y_coordinate", "axis" => "Y", "units" => "m", ], - deflatelevel = deflatelevel, + deflatelevel=deflatelevel, ) else @@ -526,7 +526,7 @@ function setup_grid_netcdf( "lon", ncx, ("lon",), - attrib = [ + attrib=[ "long_name" => "longitude", "standard_name" => "longitude", "axis" => "X", @@ -538,13 +538,13 @@ function setup_grid_netcdf( "lat", ncy, ("lat",), - attrib = [ + attrib=[ "long_name" => "latitude", "standard_name" => "latitude", "axis" => "Y", "units" => "degrees_north", ], - deflatelevel = deflatelevel, + deflatelevel=deflatelevel, ) end set_extradim_netcdf(ds, extra_dim) @@ -553,8 +553,8 @@ function setup_grid_netcdf( "time", Float64, ("time",), - attrib = ["units" => time_units, "calendar" => calendar], - deflatelevel = deflatelevel, + attrib=["units" => time_units, "calendar" => calendar], + deflatelevel=deflatelevel, ) if sizeinmetres for (key, val) in pairs(parameters) @@ -565,8 +565,8 @@ function setup_grid_netcdf( key, float_type, ("x", "y", "time"), - attrib = ["_FillValue" => float_type(NaN)], - deflatelevel = deflatelevel, + attrib=["_FillValue" => float_type(NaN)], + deflatelevel=deflatelevel, ) elseif eltype(val.vector) <: SVector # for SVectors an additional dimension
(`extra_dim`) is required @@ -575,8 +575,8 @@ function setup_grid_netcdf( key, float_type, ("x", "y", extra_dim.name, "time"), - attrib = ["_FillValue" => float_type(NaN)], - deflatelevel = deflatelevel, + attrib=["_FillValue" => float_type(NaN)], + deflatelevel=deflatelevel, ) else error("Unsupported output type: ", typeof(val.vector)) @@ -591,8 +591,8 @@ function setup_grid_netcdf( key, float_type, ("lon", "lat", "time"), - attrib = ["_FillValue" => float_type(NaN)], - deflatelevel = deflatelevel, + attrib=["_FillValue" => float_type(NaN)], + deflatelevel=deflatelevel, ) elseif eltype(val.vector) <: SVector # for SVectors an additional dimension (`extra_dim`) is required @@ -601,8 +601,8 @@ function setup_grid_netcdf( key, float_type, ("lon", "lat", extra_dim.name, "time"), - attrib = ["_FillValue" => float_type(NaN)], - deflatelevel = deflatelevel, + attrib=["_FillValue" => float_type(NaN)], + deflatelevel=deflatelevel, ) else error("Unsupported output type: ", typeof(val.vector)) @@ -671,7 +671,7 @@ function prepare_reader(config) if isempty(dynamic_paths) error("No files found with name '$glob_path' in '$glob_dir'") end - dataset = NCDataset(dynamic_paths, aggdim = "time", deferopen = false) + dataset = NCDataset(dynamic_paths, aggdim="time", deferopen=false) if haskey(dataset["time"].attrib, "_FillValue") @warn "Time dimension contains `_FillValue` attribute, this is not in line with CF conventions." @@ -710,7 +710,7 @@ function prepare_reader(config) fields = symbols(par) ncname, mod = ncvar_name_modifier(param(config.input, fields)) forcing_parameters[fields] = - (name = ncname, scale = mod.scale, offset = mod.offset, value = mod.value) + (name=ncname, scale=mod.scale, offset=mod.offset, value=mod.value) @info "Set `$par` using NetCDF variable `$ncname` as forcing parameter." 
end @@ -722,7 +722,7 @@ function prepare_reader(config) fields = symbols(par) ncname, mod = ncvar_name_modifier(param(config.input, fields)) cyclic_parameters[fields] = - (name = ncname, scale = mod.scale, offset = mod.offset) + (name=ncname, scale=mod.scale, offset=mod.offset) @info "Set `$par` using NetCDF variable `$ncname` as cyclic parameter." end @@ -757,9 +757,9 @@ function locations_map(ds, mapname, config) ds, config, mapname; - optional = false, - type = Union{Int,Missing}, - allow_missing = true, + optional=false, + type=Union{Int,Missing}, + allow_missing=true, ) ids = unique(skipmissing(map_2d)) return ids @@ -777,16 +777,16 @@ function nc_variables_dims(nc_variables, dataset, config) location_dim = string(var, '_', nc_var["map"]) push!( ncvars_dims, - (par = par, var = var, location_dim = location_dim, locations = ids), + (par=par, var=var, location_dim=location_dim, locations=ids), ) else push!( ncvars_dims, ( - par = par, - var = var, - location_dim = nc_var["location"], - locations = [nc_var["location"]], + par=par, + var=var, + location_dim=nc_var["location"], + locations=[nc_var["location"]], ), ) end @@ -889,7 +889,7 @@ function out_map(ncnames_dict, modelmap) output_map = Dict{String,Any}() for (par, ncname) in ncnames_dict A = param(modelmap, par) - output_map[ncname] = (par = par, vector = A) + output_map[ncname] = (par=par, vector=A) end return output_map end @@ -917,7 +917,7 @@ function prepare_writer( x_nc, y_nc, nc_static; - extra_dim = nothing, + extra_dim=nothing, ) sizeinmetres = get(config.model, "sizeinmetres", false)::Bool @@ -943,7 +943,7 @@ function prepare_writer( time_units, extra_dim, sizeinmetres, - deflatelevel = deflatelevel, + deflatelevel=deflatelevel, ) else nc_path = nothing @@ -967,7 +967,7 @@ function prepare_writer( time_units, extra_dim, sizeinmetres; - float_type = Float, + float_type=Float, ) else ds_outstate = nothing @@ -979,7 +979,7 @@ function prepare_writer( # data, but only if config.netcdf.variable has been 
set. if haskey(config, "netcdf") && haskey(config.netcdf, "variable") nc_scalar_path = output_path(config, config.netcdf.path) - @info "Create an output NetCDF file `$nc_state_path` for scalar data." + @info "Create an output NetCDF file `$nc_scalar_path` for scalar data." # get NetCDF info for scalar data (variable name, locationset (dim) and # location ids) ncvars_dims = nc_variables_dims(config.netcdf.variable, nc_static, config) @@ -999,7 +999,7 @@ function prepare_writer( parameter = var["parameter"] reducer_func = get_reducer_func(var, rev_inds, x_nc, y_nc, config, nc_static, "NetCDF") - push!(nc_scalar, (parameter = parameter, reducer = reducer_func)) + push!(nc_scalar, (parameter=parameter, reducer=reducer_func)) end else ds_scalar = nothing @@ -1027,7 +1027,7 @@ function prepare_writer( parameter = col["parameter"] reducer_func = get_reducer_func(col, rev_inds, x_nc, y_nc, config, nc_static, "CSV") - push!(csv_cols, (parameter = parameter, reducer = reducer_func)) + push!(csv_cols, (parameter=parameter, reducer=reducer_func)) end else # no CSV file is checked by isnothing(csv_path) @@ -1175,7 +1175,7 @@ function timecycles(times) end "Close input and output datasets that are opened on model initialization" -function close_files(model; delete_output::Bool = false) +function close_files(model; delete_output::Bool=false) @unpack reader, writer, config = model close(reader.dataset) @@ -1236,9 +1236,9 @@ function reducer(col, rev_inds, x_nc, y_nc, config, dataset, fileformat) dataset, config, mapname; - optional = false, - type = Union{Int,Missing}, - allow_missing = true, + optional=false, + type=Union{Int,Missing}, + allow_missing=true, ) @info "Adding scalar output for a map with a reducer function." 
fileformat param mapname reducer_name ids = unique(skipmissing(map_2d)) @@ -1356,7 +1356,7 @@ end "Read a rating curve from CSV into a NamedTuple of vectors" function read_sh_csv(path) - data, header = readdlm(path, ',', Float, header = true) + data, header = readdlm(path, ',', Float, header=true) names = vec(uppercase.(header)) idx_h = findfirst(==("H"), names) idx_s = findfirst(==("S"), names) @@ -1365,14 +1365,14 @@ function read_sh_csv(path) error("$path needs to provide H and S columns, got $names") end - return (H = data[:, idx_h], S = data[:, idx_s]) + return (H=data[:, idx_h], S=data[:, idx_s]) end "Read a specific storage curve from CSV into a NamedTuple of vectors" function read_hq_csv(path) - data = readdlm(path, ',', Float, skipstart = 1) + data = readdlm(path, ',', Float, skipstart=1) # Q is a matrix with 365 columns, one for each day in the year - return (H = data[:, 1], Q = data[:, 2:end]) + return (H=data[:, 1], Q=data[:, 2:end]) end # these represent the type of the rating curve and specific storage data @@ -1548,21 +1548,21 @@ function reverse_data!(data, dims_increasing) # for the reverse call it is important that the dims_increasing tuple is ordered in the # desired internal ordering, just like the data is after permutation if length(dims_increasing) == 2 - dims_increasing_ordered = (x = dims_increasing.x, y = dims_increasing.y) + dims_increasing_ordered = (x=dims_increasing.x, y=dims_increasing.y) elseif length(dims_increasing) == 3 && haskey(dims_increasing, :layer) dims_increasing_ordered = - (x = dims_increasing.x, y = dims_increasing.y, layer = dims_increasing.layer) + (x=dims_increasing.x, y=dims_increasing.y, layer=dims_increasing.layer) elseif length(dims_increasing) == 3 && haskey(dims_increasing, :flood_depth) dims_increasing_ordered = ( - x = dims_increasing.x, - y = dims_increasing.y, - flood_depth = dims_increasing.flood_depth, + x=dims_increasing.x, + y=dims_increasing.y, + flood_depth=dims_increasing.flood_depth, ) elseif 
length(dims_increasing) == 3 && haskey(dims_increasing, :classes) dims_increasing_ordered = ( - x = dims_increasing.x, - y = dims_increasing.y, - classes = dims_increasing.classes, + x=dims_increasing.x, + y=dims_increasing.y, + classes=dims_increasing.classes, ) else error("Unsupported number of dimensions")