From 319b75621c78ba8d211941b48f94e8f9a51479ff Mon Sep 17 00:00:00 2001 From: Ya-Lan Yang <63822845+ylyangtw@users.noreply.github.com> Date: Wed, 3 Jul 2024 14:02:22 -0500 Subject: [PATCH] Apply black formatter (#590) * Apply black formatter * Add pre-commit install commands * Revert "Add pre-commit install commands" This reverts commit b260beb7df0e2dfa7abb1c1cfa1852ff37d7b21e. * Revert part of "Apply black formatter" This reverts commit 724932f0a05b55042c201667a7a52971ec5b32c3. * Update README.rst * ignore cge folders * exclude cge folders * update according to formatter * Revert "update according to formatter" This reverts commit 75ccc13a48091a7a3eeaa3b560d1582f24a1b1fa. * update formatter after team discussion * reformatted according to the formatter * I cannot removee e203, black keeps adding it back (#599) * removed the error codes (#600) Co-authored-by: Chen Wang * exclude docs; manual changes (#601) Co-authored-by: Chen Wang * fix e731, e741, f401 and f403 (#598) --------- Co-authored-by: Chen Wang Co-authored-by: Rashmil Panchani <32737711+Rashmil-1999@users.noreply.github.com> --- .flake8 | 4 + .pre-commit-config.yaml | 10 + CHANGELOG.md | 6 + README.rst | 3 +- docs/source/conf.py | 98 +- pyincore/__init__.py | 10 +- .../analyses/bridgedamage/bridgedamage.py | 324 ++-- pyincore/analyses/bridgedamage/bridgeutil.py | 31 +- .../buildingclusterrecovery/__init__.py | 4 +- .../buildingclusterrecovery.py | 732 ++++++--- .../buildingclusterrecovery/buildingdamage.py | 12 +- .../buildingclusterrecovery/buildingdata.py | 21 +- .../analyses/buildingdamage/buildingdamage.py | 14 +- .../analyses/buildingdamage/buildingutil.py | 1 + .../buildingeconloss/buildingeconloss.py | 126 +- .../buildingfunctionality/__init__.py | 4 +- .../buildingfunctionality.py | 229 +-- .../buildingnonstructuraldamage/__init__.py | 8 +- .../buildingnonstructuraldamage.py | 343 +++-- .../buildingnonstructuralutil.py | 25 +- .../buildingstructuraldamage/__init__.py | 4 +- .../buildingstructuraldamage.py | 393 +++-- .../buildingstructuraldamage/buildingutil.py | 1 + .../analyses/buyoutdecision/buyoutdecision.py | 271 +++- .../analyses/capitalshocks/capitalshocks.py | 104 +- .../__init__.py | 5 +- .../combinedwindwavesurgebuildingdamage.py | 291 ++-- .../__init__.py | 6 +- .../combinedwindwavesurgebuildingloss.py | 311 ++-- .../commercialbuildingrecovery/__init__.py | 4 +- .../commercialbuildingrecovery.py | 355 +++-- pyincore/analyses/core_cge_ml/__init__.py | 2 +- pyincore/analyses/core_cge_ml/corecgeml.py | 9 +- .../cumulativebuildingdamage/__init__.py | 4 +- .../cumulativebuildingdamage.py | 183 ++- pyincore/analyses/epfdamage/epfdamage.py | 333 ++-- pyincore/analyses/epfdamage/epfutil.py | 37 +- .../analyses/epfrepaircost/epfrepaircost.py | 64 +- .../analyses/epfrestoration/epfrestoration.py | 312 ++-- .../epfrestoration/epfrestorationutil.py | 44 +- .../epnfunctionality/epnfunctionality.py | 184 ++- .../epnfunctionality/epnfunctionalityutil.py | 17 +- pyincore/analyses/example/exampleanalysis.py | 48 +- .../gasfacilitydamage/gasfacilitydamage.py | 14 +- .../housingrecovery/housingrecovery.py | 7 +- .../housingrecovery/housingrecoveryutil.py | 19 +- .../housingrecoverysequential/__init__.py | 4 +- .../housingrecoverysequential.py | 362 +++-- .../housingunitallocation/__init__.py | 4 +- .../housingunitallocation.py | 229 ++- .../housingvaluationrecovery/__init__.py | 4 +- .../housingvaluationrecovery.py | 348 +++-- .../housingvaluationrecoveryutil.py | 19 +- pyincore/analyses/indp/dislocationutils.py | 183 ++- 
pyincore/analyses/indp/indp.py | 1243 +++++++++------ pyincore/analyses/indp/indpcomponents.py | 6 +- pyincore/analyses/indp/indpresults.py | 312 +++- pyincore/analyses/indp/indputil.py | 532 ++++--- pyincore/analyses/indp/infrastructurearc.py | 2 +- .../indp/infrastructureinterdeparc.py | 4 +- .../analyses/indp/infrastructurenetwork.py | 68 +- pyincore/analyses/indp/infrastructurenode.py | 8 +- pyincore/analyses/indp/infrastructureutil.py | 128 +- .../__init__.py | 8 +- .../joplinempiricalbuildingrestoration.py | 139 +- pyincore/analyses/meandamage/meandamage.py | 174 ++- pyincore/analyses/mlenabledcgeslc/__init__.py | 2 +- pyincore/analyses/mlenabledcgeslc/mlcgeslc.py | 19 +- .../montecarlofailureprobability/__init__.py | 4 +- .../montecarlofailureprobability.py | 11 +- .../__init__.py | 5 +- .../montecarlolimitstateprobability.py | 255 ++-- .../__init__.py | 5 +- .../multiobjectiveretrofitoptimization.py | 1336 +++++++++++------ .../analyses/ncifunctionality/__init__.py | 2 +- .../ncifunctionality/ncifunctionality.py | 516 ++++--- .../nonstructbuildingdamage/__init__.py | 8 +- .../nonstructbuildingdamage.py | 14 +- .../nonstructbuildingutil.py | 25 +- .../analyses/pipelinedamage/pipelinedamage.py | 266 ++-- .../pipelinedamagerepairrate/__init__.py | 4 +- .../pipelinedamagerepairrate.py | 437 +++--- .../pipelinedamagerepairrate/pipelineutil.py | 23 +- .../pipelinefunctionality/__init__.py | 4 +- .../pipelinefunctionality.py | 104 +- .../pipelinerepaircost/pipelinerepaircost.py | 92 +- .../analyses/pipelinerestoration/__init__.py | 4 +- .../pipelinerestoration.py | 142 +- .../populationdislocation/__init__.py | 8 +- .../populationdislocation.py | 227 +-- .../populationdislocationutil.py | 97 +- .../residentialbuildingrecovery/__init__.py | 4 +- .../residentialbuildingrecovery.py | 400 +++-- pyincore/analyses/roaddamage/roaddamage.py | 329 ++-- .../analyses/socialvulnerability/__init__.py | 4 +- .../socialvulnerability.py | 7 +- .../socialvulnerabilityscore/__init__.py | 4 +- .../socialvulnerabilityscore.py | 138 +- .../tornadoepndamage/tornadoepndamage.py | 424 ++++-- pyincore/analyses/trafficflowrecovery/WIPW.py | 73 +- .../analyses/trafficflowrecovery/__init__.py | 12 +- .../analyses/trafficflowrecovery/nsga2.py | 12 +- .../post_disaster_long_term_solution.py | 137 +- .../trafficflowrecovery.py | 248 +-- .../trafficflowrecoveryutil.py | 53 +- .../analyses/transportationrecovery/WIPW.py | 73 +- .../transportationrecovery/__init__.py | 12 +- .../analyses/transportationrecovery/nsga2.py | 11 +- .../post_disaster_long_term_solution.py | 137 +- .../transportationrecovery.py | 7 +- .../transportationrecoveryutil.py | 53 +- .../analyses/waterfacilitydamage/__init__.py | 4 +- .../waterfacilitydamage.py | 411 ++--- .../waterfacilityrepaircost/__init__.py | 4 +- .../waterfacilityrepaircost.py | 59 +- .../waterfacilityrestoration/__init__.py | 8 +- .../waterfacilityrestoration.py | 315 ++-- .../waterfacilityrestorationutil.py | 45 +- .../wfnfunctionality/wfnfunctionality.py | 235 +-- .../wfnfunctionality/wfnfunctionalityutil.py | 17 +- pyincore/baseanalysis.py | 212 ++- pyincore/client.py | 91 +- pyincore/dataservice.py | 165 +- pyincore/dataset.py | 60 +- pyincore/decorators.py | 1 + pyincore/dfr3service.py | 276 ++-- pyincore/fragilityservice.py | 42 +- pyincore/globals.py | 14 +- pyincore/hazardservice.py | 764 +++++----- pyincore/models/dfr3curve.py | 81 +- pyincore/models/fragilitycurveset.py | 165 +- pyincore/models/hazard/earthquake.py | 7 +- pyincore/models/hazard/flood.py | 5 +- 
pyincore/models/hazard/hazard.py | 45 +- pyincore/models/hazard/hazarddataset.py | 87 +- pyincore/models/hazard/hurricane.py | 7 +- pyincore/models/hazard/tornado.py | 88 +- pyincore/models/hazard/tsunami.py | 4 +- pyincore/models/mapping.py | 2 +- pyincore/models/mappingset.py | 15 +- pyincore/models/networkdataset.py | 74 +- pyincore/models/repaircurveset.py | 42 +- pyincore/models/restorationcurveset.py | 42 +- pyincore/models/units.py | 13 +- pyincore/networkdata.py | 7 +- pyincore/repairservice.py | 34 +- pyincore/restorationservice.py | 31 +- pyincore/semanticservice.py | 9 +- pyincore/spaceservice.py | 24 +- pyincore/utils/__init__.py | 2 +- pyincore/utils/analysisutil.py | 351 +++-- pyincore/utils/cge_ml_file_util.py | 12 +- pyincore/utils/cgeoutputprocess.py | 110 +- pyincore/utils/dataprocessutil.py | 23 +- pyincore/utils/datasetutil.py | 128 +- pyincore/utils/evaluateexpression.py | 61 +- pyincore/utils/expressioneval/__init__.py | 378 ++--- pyincore/utils/geoutil.py | 103 +- pyincore/utils/hhrsoutputprocess.py | 10 +- pyincore/utils/http_util.py | 27 +- pyincore/utils/networkutil.py | 200 ++- pyincore/utils/popdisloutputprocess.py | 269 ++-- scripts/build-release.py | 24 +- setup.py | 69 +- tests/conftest.py | 21 +- .../bridgedamage/test_bridgedamage.py | 36 +- .../bridgedamage/test_bridgedamage_legacy.py | 19 +- .../test_bridgedamage_w_local_hazard.py | 34 +- .../bridgedamage/test_mmsa_bridgedamage.py | 51 +- .../test_buildingclusterrecovery.py | 30 +- .../buildingdamage/test_buildingdamage.py | 26 +- .../test_buildingdamage_legacy.py | 14 +- .../test_buildingdamage_multihazard.py | 4 +- .../test_buildingdamage_offline.py | 64 +- .../test_buildingdamage_retrofit.py | 28 +- .../test_buildingdamage_w_local_hazard.py | 42 +- .../buildingdamage/test_slc_buildingdamage.py | 25 +- .../buildingeconloss/test_buildingeconloss.py | 6 +- .../test_buildingfunctionality.py | 22 +- .../test_buildingnonstructuraldamage.py | 43 +- ...uildingnonstructuraldamage_w_hazard_obj.py | 55 +- .../test_flood_buildingnonstructuraldamage.py | 16 +- .../test_buildingstructuraldamage.py | 30 +- .../test_buildingstructuraldamage_legacy.py | 18 +- ...st_buildingstructuraldamage_multihazard.py | 8 +- .../test_buildingstructuraldamage_offline.py | 68 +- .../test_buildingstructuraldamage_retrofit.py | 32 +- ...buildingstructuraldamage_w_local_hazard.py | 46 +- .../test_slc_buildingstructuraldamage.py | 29 +- .../buyoutdecision/test_buyoutdecision.py | 15 +- .../capitalshocks/test_capitalshocks.py | 6 +- ...est_combinedwindwavesurgebuildingdamage.py | 12 +- .../test_combinedwindwavesurgebuildingloss.py | 23 +- .../test_commercialbuildingrecovery.py | 18 +- .../test_cumulativebuildingdamage.py | 11 +- .../analyses/epfdamage/test_epfdamage.py | 32 +- .../epfdamage/test_epfdamage_w_hazard_obj.py | 64 +- .../epfrepaircost/test_epfrepaircost.py | 12 +- .../epfrestoration/test_epfrestoration.py | 40 +- .../epnfunctionality/test_epnfunctionality.py | 8 +- .../pyincore/analyses/example/test_example.py | 8 +- .../test_gasfacilitydamage.py | 8 +- .../housingrecovery/test_housingrecovery.py | 6 +- .../test_housingrecoverysequential.py | 26 +- .../test_housingunitallocation.py | 6 +- .../test_housingvaluationrecovery.py | 10 +- tests/pyincore/analyses/indp/test_indp.py | 127 +- ...test_joplinempiricalbuildingrestoration.py | 10 +- .../meandamage/test_meandamage_bridge.py | 5 +- .../meandamage/test_meandamage_building.py | 5 +- .../analyses/mlenabledcgeslc/test_mlcgeslc.py | 2 +- .../test_montecarlofailureprobability.py | 8 
+- .../test_montecarlolimitstateprobability.py | 10 +- ...test_multiobjectiveretrofitoptimization.py | 14 +- .../ncifunctionality/test_ncifunctionality.py | 30 +- .../test_flood_nonstructbuildingdamage.py | 16 +- .../test_nonstructbuildingdamage.py | 43 +- ...st_nonstructbuildingdamage_w_hazard_obj.py | 55 +- .../pipelinedamage/test_pipelinedamage.py | 19 +- .../test_pipelinedamage_w_hazard_obj.py | 23 +- .../test_pipelinedamagerepairrate.py | 11 +- ...t_pipelinedamagerepairrate_w_hazard_obj.py | 25 +- .../test_pipelinefunctionality.py | 4 +- .../test_pipelinerepaircost.py | 14 +- .../test_pipelinerestoration.py | 14 +- .../test_populationdislocation.py | 4 +- .../test_residentialbuildingrecovery.py | 20 +- .../analyses/roaddamage/test_roaddamage.py | 22 +- .../test_roaddamage_w_hazard_obj.py | 39 +- .../test_socialvulnerability.py | 20 +- .../test_socialvulnerabilityscore.py | 20 +- .../tornadoepndamage/test_tornadoepndamage.py | 11 +- .../test_tornadoepndamage_w_hazard_obj.py | 9 +- .../test_trafficflowrecovery.py | 14 +- .../test_transportationrecovery.py | 14 +- .../test_waterfacilitydamage.py | 4 +- .../test_waterfacilitydamage_w_hazard_obj.py | 16 +- .../test_waterfacilityrepaircost.py | 16 +- .../test_waterfacilityrestoration.py | 33 +- .../wfnfunctionality/test_wfnfunctionality.py | 8 +- tests/pyincore/models/test_dataset.py | 4 +- tests/pyincore/models/test_dfr3curve.py | 196 ++- tests/pyincore/models/test_hazard.py | 331 ++-- tests/pyincore/models/test_networkdataset.py | 41 +- tests/pyincore/models/test_units.py | 18 +- tests/pyincore/test_client.py | 3 +- tests/pyincore/test_dataservice.py | 114 +- tests/pyincore/test_dfr3service.py | 126 +- tests/pyincore/test_hazardservice.py | 383 +++-- tests/pyincore/test_semanticservice.py | 41 +- tests/pyincore/test_spaceservice.py | 39 +- tests/pyincore/utils/test_analysisutil.py | 28 +- tests/pyincore/utils/test_cgecsvoutputjson.py | 194 ++- tests/pyincore/utils/test_dataprocessutil.py | 103 +- tests/pyincore/utils/test_datasetutil.py | 14 +- tests/pyincore/utils/test_networkutil.py | 50 +- tests/pyincore/utils/test_parser.py | 17 +- .../utils/test_popdisloutputprocess.py | 24 +- tests/test_format.py | 8 +- 259 files changed, 14588 insertions(+), 8727 deletions(-) create mode 100644 .flake8 create mode 100644 .pre-commit-config.yaml diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..9d9019381 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 180 +extend-ignore = E203 +exclude = __init__.py \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..923c98a6f --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,10 @@ +repos: + - repo: https://github.com/psf/black + rev: 23.11.0 + hooks: + - id: black + - repo: https://github.com/PyCQA/flake8 + rev: 6.1.0 + hooks: + - id: flake8 +exclude: 'pyincore/analyses/(joplincge|saltlakecge|seasidecge|galvestoncge)/.*|docs/*' \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index e48d19cd0..da004bec8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). 
+## [Unreleased] + +### Added +- Apply Black formatter [#589](https://github.com/IN-CORE/pyincore/issues/589) + + ## [1.19.0] - 2024-06-12 ### Changed diff --git a/README.rst b/README.rst index 5e6e176f8..078f3d44a 100644 --- a/README.rst +++ b/README.rst @@ -47,7 +47,8 @@ Please use pip for installing pyincore at your discretion. **Prerequisite** * GDAL C library must be installed to install pyincore. (for Ubuntu, **gdal-bin** and **libgdal-dev**) -* ipopt executable must be installed to run some analyses such as seaside CGE, joplin CGE, etc. +* ipopt executable must be installed to run some analyses such as seaside CGE, joplin CGE, etc. +* For developers, pre-commit must be installed. If it is not, run `brew install pre-commit` or `pip install pre-commit`. To install **pyincore** package, run diff --git a/docs/source/conf.py b/docs/source/conf.py index 579ad834a..5169088f5 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -20,22 +20,22 @@ import os import sys -sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../..')) -sys.path.insert(0, os.path.abspath('../../pyincore')) -sys.path.insert(0, os.path.abspath('../../pyincore/analyses')) -sys.path.insert(0, os.path.abspath('../../tests')) -sys.path.insert(0, os.path.abspath('../../docs')) +sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath("../..")) +sys.path.insert(0, os.path.abspath("../../pyincore")) +sys.path.insert(0, os.path.abspath("../../pyincore/analyses")) +sys.path.insert(0, os.path.abspath("../../tests")) +sys.path.insert(0, os.path.abspath("../../docs")) # -- Project information ----------------------------------------------------- -project = 'pyIncore' -author = '' +project = "pyIncore" +author = "" # The short X.Y version -version = '1.19' +version = "1.19" # The full version, including alpha/beta/rc tags -release = '1.19.0' +release = "1.19.0" # -- General configuration --------------------------------------------------- @@ -46,25 +46,26 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - # 'sphinx.ext.viewcode', - 'sphinx_rtd_theme', - 'sphinx.ext.ifconfig', - 'sphinx.ext.napoleon', - 'sphinx.ext.todo' - ] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + # 'sphinx.ext.viewcode', + "sphinx_rtd_theme", + "sphinx.ext.ifconfig", + "sphinx.ext.napoleon", + "sphinx.ext.todo", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = '.rst' -source_suffix = ['.rst', '.md'] +source_suffix = [".rst", ".md"] # The master toctree document. -master_doc = 'index' +master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation.rst # for a list of supported languages. @@ -79,7 +80,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx' +pygments_style = "sphinx" # -- Custom configuration --------------------------------------------------- @@ -89,7 +90,7 @@ # See also: # http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_mock_importshttps://github.com/sphinx-doc/sphinx/issues/4182 -autodoc_mock_imports = ['pytest', 'rasterstats'] +autodoc_mock_imports = ["pytest", "rasterstats"] # This value selects what content will be inserted into the main body of an autoclass directive. # The possible values are: @@ -99,7 +100,7 @@ # “both”: Both the class ’ and the init method’s docstring are concatenated and inserted. # “init”: Only the init method’s docstring is inserted. -autoclass_content = 'both' +autoclass_content = "both" # -- Options for HTML output ------------------------------------------------- @@ -116,19 +117,19 @@ # # html_theme_options = {} html_theme_options = { - 'canonical_url': '', - 'analytics_id': '', - 'logo_only': False, - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, - 'vcs_pageview_mode': '', + "canonical_url": "", + "analytics_id": "", + "logo_only": False, + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": False, + "vcs_pageview_mode": "", # Toc options - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 4, - 'includehidden': True, - 'titles_only': False + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, } # Add any paths that contain custom static files (such as style sheets) here, @@ -150,7 +151,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'pyincoredoc' +htmlhelp_basename = "pyincoredoc" # -- Options for LaTeX output ------------------------------------------------ @@ -159,15 +160,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -177,8 +175,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'pyincore.tex', 'pyincore Documentation', - 'ISDA NCSA', 'manual'), + (master_doc, "pyincore.tex", "pyincore Documentation", "ISDA NCSA", "manual"), ] @@ -186,10 +183,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pyincore', 'pyincore Documentation', - [author], 1) -] +man_pages = [(master_doc, "pyincore", "pyincore Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -201,9 +195,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'pyincore', 'pyincore Documentation', - author, 'pyincore', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "pyincore", + "pyincore Documentation", + author, + "pyincore", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. 
diff --git a/pyincore/__init__.py b/pyincore/__init__.py index 6e7b7e558..0ec3bf6a6 100644 --- a/pyincore/__init__.py +++ b/pyincore/__init__.py @@ -33,8 +33,14 @@ from pyincore.models.mappingset import MappingSet from pyincore.models.mapping import Mapping from pyincore.models.networkdataset import NetworkDataset -from pyincore.models.hazard.hazarddataset import HazardDataset, HurricaneDataset, EarthquakeDataset, TsunamiDataset, \ - TornadoDataset, FloodDataset +from pyincore.models.hazard.hazarddataset import ( + HazardDataset, + HurricaneDataset, + EarthquakeDataset, + TsunamiDataset, + TornadoDataset, + FloodDataset, +) from pyincore.models.hazard.hazard import Hazard from pyincore.models.hazard.hurricane import Hurricane from pyincore.models.hazard.flood import Flood diff --git a/pyincore/analyses/bridgedamage/bridgedamage.py b/pyincore/analyses/bridgedamage/bridgedamage.py index bd2609b91..df4bf882f 100644 --- a/pyincore/analyses/bridgedamage/bridgedamage.py +++ b/pyincore/analyses/bridgedamage/bridgedamage.py @@ -34,39 +34,53 @@ def run(self): bridge_set = self.get_input_dataset("bridges").get_inventory_reader() # get input hazard - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len( - bridge_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(bridge_set), user_defined_cpu + ) avg_bulk_input_size = int(len(bridge_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(bridge_set) while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size (ds_results, damage_results) = self.bridge_damage_concurrent_future( - self.bridge_damage_analysis_bulk_input, num_workers, inventory_args, repeat(hazard), repeat(hazard_type), - repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + self.bridge_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True - def bridge_damage_concurrent_future(self, function_name, num_workers, - *args): + def bridge_damage_concurrent_future(self, function_name, num_workers, *args): """Utilizes concurrent.future module. 
Args: @@ -80,14 +94,18 @@ def bridge_damage_concurrent_future(self, function_name, num_workers, """ output_ds = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard_dataset_id): + def bridge_damage_analysis_bulk_input( + self, bridges, hazard, hazard_type, hazard_dataset_id + ): """Run analysis for multiple bridges. Args: @@ -104,28 +122,27 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard # Get Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: - fragility_key = BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY if hazard_type == 'tsunami' else \ - BridgeUtil.DEFAULT_FRAGILITY_KEY + fragility_key = ( + BridgeUtil.DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY + if hazard_type == "tsunami" + else BridgeUtil.DEFAULT_FRAGILITY_KEY + ) self.set_parameter("fragility_key", fragility_key) - # Hazard Uncertainty - use_hazard_uncertainty = False - if hazard_type == "earthquake" and self.get_parameter( - "use_hazard_uncertainty") is not None: - use_hazard_uncertainty = self.get_parameter( - "use_hazard_uncertainty") - # Liquefaction use_liquefaction = False - if hazard_type == "earthquake" and self.get_parameter( - "use_liquefaction") is not None: + if ( + hazard_type == "earthquake" + and self.get_parameter("use_liquefaction") is not None + ): use_liquefaction = self.get_parameter("use_liquefaction") # Get geology dataset id containing liquefaction susceptibility geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id") - fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), bridges, - fragility_key) + fragility_set = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), bridges, fragility_key + ) values_payload = [] values_payload_liq = [] # for liquefaction, if used @@ -139,20 +156,12 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard demands = fragility_set[bridge_id].demand_types units = fragility_set[bridge_id].demand_units - value = { - "demands": demands, - "units": units, - "loc": loc - } + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) mapped_bridges.append(b) if use_liquefaction and geology_dataset_id is not None: - value_liq = { - "demands": [""], - "units": [""], - "loc": loc - } + value_liq = {"demands": [""], "units": [""], "loc": loc} values_payload_liq.append(value_liq) else: @@ -165,8 +174,9 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard # Check if liquefaction is applicable if use_liquefaction and geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id, - values_payload_liq) + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload_liq + ) ds_results = [] damage_results = [] @@ -182,7 +192,9 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve): # Supports multiple demand types in same fragility - hazard_val = 
AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) + hazard_val = AnalysisUtil.update_precision_of_lists( + hazard_vals[i]["hazardValues"] + ) input_demand_types = hazard_vals[i]["demands"] input_demand_units = hazard_vals[i]["units"] @@ -192,15 +204,26 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard hval_dict[d] = hazard_val[j] j += 1 - if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]): - bridge_args = selected_fragility_set.construct_expression_args_from_inventory(bridge) - dmg_probability = \ - selected_fragility_set.calculate_limit_state(hval_dict, - inventory_type="bridge", - **bridge_args) - - if use_liquefaction and geology_dataset_id is not None and liquefaction_resp is not None: - ground_failure_prob = liquefaction_resp[i][BridgeUtil.GROUND_FAILURE_PROB] + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_vals[i]["hazardValues"] + ): + bridge_args = ( + selected_fragility_set.construct_expression_args_from_inventory( + bridge + ) + ) + dmg_probability = selected_fragility_set.calculate_limit_state( + hval_dict, inventory_type="bridge", **bridge_args + ) + + if ( + use_liquefaction + and geology_dataset_id is not None + and liquefaction_resp is not None + ): + ground_failure_prob = liquefaction_resp[i][ + BridgeUtil.GROUND_FAILURE_PROB + ] # TODO: put it in a util method # Adjust for 4 LS of bridges - the service returns ground failure @@ -209,25 +232,34 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard ground_failure_prob[2] = ground_failure_prob[1] dmg_probability = AnalysisUtil.update_precision_of_dicts( - AnalysisUtil.adjust_damage_for_liquefaction(dmg_probability, ground_failure_prob)) - - dmg_intervals = selected_fragility_set.calculate_damage_interval(dmg_probability, - hazard_type=hazard_type, - inventory_type="bridge") + AnalysisUtil.adjust_damage_for_liquefaction( + dmg_probability, ground_failure_prob + ) + ) + + dmg_intervals = selected_fragility_set.calculate_damage_interval( + dmg_probability, + hazard_type=hazard_type, + inventory_type="bridge", + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." 
+ ) retrofit_cost = BridgeUtil.get_retrofit_cost(fragility_key) retrofit_type = BridgeUtil.get_retrofit_type(fragility_key) - ds_result['guid'] = bridge['properties']['guid'] + ds_result["guid"] = bridge["properties"]["guid"] ds_result.update(dmg_probability) ds_result.update(dmg_intervals) - ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_val, hazard_type) + ds_result["haz_expose"] = AnalysisUtil.get_exposure_from_hazard_values( + hazard_val, hazard_type + ) - damage_result['guid'] = bridge['properties']['guid'] - damage_result['fragility_id'] = selected_fragility_set.id + damage_result["guid"] = bridge["properties"]["guid"] + damage_result["fragility_id"] = selected_fragility_set.id damage_result["retrofit"] = retrofit_type damage_result["retrocost"] = retrofit_cost damage_result["demandtypes"] = input_demand_types @@ -238,18 +270,30 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard damage_result[BridgeUtil.GROUND_FAILURE_PROB] = ground_failure_prob # add spans to bridge output so mean damage calculation can use that info - if "spans" in bridge["properties"] and bridge["properties"]["spans"] is not None: - if isinstance(bridge["properties"]["spans"], str) and bridge["properties"]["spans"].isdigit(): - damage_result['spans'] = int(bridge["properties"]["spans"]) + if ( + "spans" in bridge["properties"] + and bridge["properties"]["spans"] is not None + ): + if ( + isinstance(bridge["properties"]["spans"], str) + and bridge["properties"]["spans"].isdigit() + ): + damage_result["spans"] = int(bridge["properties"]["spans"]) elif isinstance(bridge["properties"]["spans"], int): - damage_result['spans'] = bridge["properties"]["spans"] - elif "SPANS" in bridge["properties"] and bridge["properties"]["SPANS"] is not None: - if isinstance(bridge["properties"]["SPANS"], str) and bridge["properties"]["SPANS"].isdigit(): - damage_result['SPANS'] = int(bridge["properties"]["SPANS"]) + damage_result["spans"] = bridge["properties"]["spans"] + elif ( + "SPANS" in bridge["properties"] + and bridge["properties"]["SPANS"] is not None + ): + if ( + isinstance(bridge["properties"]["SPANS"], str) + and bridge["properties"]["SPANS"].isdigit() + ): + damage_result["SPANS"] = int(bridge["properties"]["SPANS"]) elif isinstance(bridge["properties"]["SPANS"], int): - damage_result['SPANS'] = bridge["properties"]["SPANS"] + damage_result["SPANS"] = bridge["properties"]["SPANS"] else: - damage_result['spans'] = 1 + damage_result["spans"] = 1 ds_results.append(ds_result) damage_results.append(damage_result) @@ -259,16 +303,16 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard, hazard_type, hazard ds_result = dict() damage_result = dict() - ds_result['guid'] = bridge['properties']['guid'] + ds_result["guid"] = bridge["properties"]["guid"] - damage_result['guid'] = bridge['properties']['guid'] + damage_result["guid"] = bridge["properties"]["guid"] damage_result["retrofit"] = None damage_result["retrocost"] = None damage_result["demandtypes"] = None - damage_result['demandunits'] = None + damage_result["demandunits"] = None damage_result["hazardtype"] = None - damage_result['hazardval'] = None - damage_result['spans'] = None + damage_result["hazardval"] = None + damage_result["spans"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -283,93 +327,93 @@ def get_spec(self): """ return { - 'name': 'bridge-damage', - 'description': 'bridge damage analysis', - 'input_parameters': [ + "name": "bridge-damage", + "description": 
"bridge damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, }, { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Geology dataset id', - 'type': str, + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Geology dataset id", + "type": str, }, { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard object id', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard object id", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazards type', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazards type", + "type": str, }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tornado", "hurricane", "flood", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "tornado", "hurricane", "flood", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'bridges', - 'required': True, - 'description': 'Bridge Inventory', - 'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'], + "id": "bridges", + "required": True, + "description": "Bridge Inventory", + "type": ["ergo:bridges", "ergo:bridgesVer2", "ergo:bridgesVer3"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'bridges', - 'description': 'CSV file of bridge structural damage', - 'type': 'ergo:bridgeDamageVer3' + "id": "result", + "parent_type": "bridges", + "description": "CSV file of bridge structural damage", + "type": "ergo:bridgeDamageVer3", }, { - 'id': 'metadata', - 'parent_type': 'bridges', - 'description': 'additional metadata in json file about applied hazard value and ' - 'fragility', - 'type': 'incore:bridgeDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "bridges", + "description": "additional metadata in json file about applied hazard value and " + "fragility", + "type": "incore:bridgeDamageSupplement", + }, + ], } diff --git 
a/pyincore/analyses/bridgedamage/bridgeutil.py b/pyincore/analyses/bridgedamage/bridgeutil.py index d7bcb2b28..5a467a1de 100644 --- a/pyincore/analyses/bridgedamage/bridgeutil.py +++ b/pyincore/analyses/bridgedamage/bridgeutil.py @@ -7,20 +7,21 @@ class BridgeUtil: """Utility methods for the bridge damage analysis.""" + BRIDGE_FRAGILITY_KEYS = { - "elastomeric bearing retrofit fragility id code": [ - "Elastomeric Bearing", "eb"], + "elastomeric bearing retrofit fragility id code": ["Elastomeric Bearing", "eb"], "steel jacket retrofit fragility id code": ["Steel Jacket", "sj"], - "restrainer cables retrofit fragility id code": ["Restrainer Cables", - "rc"], + "restrainer cables retrofit fragility id code": ["Restrainer Cables", "rc"], "seat extender retrofit fragility id code": ["Seat Extender", "se"], "shear key retrofit fragility id code": ["Shear Key", "sk"], "non-retrofit fragility id code": ["as built", "none"], - "non-retrofit inundationdepth fragility id code": ["as built", "none"] + "non-retrofit inundationdepth fragility id code": ["as built", "none"], } DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code" - DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code" + DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY = ( + "Non-Retrofit inundationDepth Fragility ID Code" + ) GROUND_FAILURE_PROB = "groundFailureProb" @staticmethod @@ -42,7 +43,7 @@ def get_retrofit_cost(target_fragility_key): if target_fragility_key.lower() == BridgeUtil.DEFAULT_FRAGILITY_KEY.lower(): return retrofit_cost else: - retrofit_code = BridgeUtil.get_retrofit_code(target_fragility_key) + _ = BridgeUtil.get_retrofit_code(target_fragility_key) return retrofit_cost @staticmethod @@ -56,9 +57,11 @@ def get_retrofit_type(target_fragility_key): str: A retrofit type. """ - return BridgeUtil.BRIDGE_FRAGILITY_KEYS[target_fragility_key.lower()][ - 0] \ - if target_fragility_key.lower() in BridgeUtil.BRIDGE_FRAGILITY_KEYS else "none" + return ( + BridgeUtil.BRIDGE_FRAGILITY_KEYS[target_fragility_key.lower()][0] + if target_fragility_key.lower() in BridgeUtil.BRIDGE_FRAGILITY_KEYS + else "none" + ) @staticmethod def get_retrofit_code(target_fragility_key): @@ -71,6 +74,8 @@ def get_retrofit_code(target_fragility_key): str: A retrofit code. 
""" - return BridgeUtil.BRIDGE_FRAGILITY_KEYS[target_fragility_key.lower()][ - 1] \ - if target_fragility_key.lower() in BridgeUtil.BRIDGE_FRAGILITY_KEYS else "none" + return ( + BridgeUtil.BRIDGE_FRAGILITY_KEYS[target_fragility_key.lower()][1] + if target_fragility_key.lower() in BridgeUtil.BRIDGE_FRAGILITY_KEYS + else "none" + ) diff --git a/pyincore/analyses/buildingclusterrecovery/__init__.py b/pyincore/analyses/buildingclusterrecovery/__init__.py index 863b9caf1..adcf1eff0 100644 --- a/pyincore/analyses/buildingclusterrecovery/__init__.py +++ b/pyincore/analyses/buildingclusterrecovery/__init__.py @@ -5,6 +5,8 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.buildingclusterrecovery.buildingclusterrecovery import BuildingClusterRecovery +from pyincore.analyses.buildingclusterrecovery.buildingclusterrecovery import ( + BuildingClusterRecovery, +) from pyincore.analyses.buildingclusterrecovery.buildingdamage import BuildingDamage from pyincore.analyses.buildingclusterrecovery.buildingdata import BuildingData diff --git a/pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py b/pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py index 82e9ca9cc..53e03cbeb 100644 --- a/pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py +++ b/pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py @@ -25,149 +25,178 @@ class BuildingClusterRecovery(BaseAnalysis): incore_client (IncoreClient): Service authentication. """ + def __init__(self, incore_client): super(BuildingClusterRecovery, self).__init__(incore_client) def get_spec(self): return { - 'name': 'building-cluster-recovery-analysis', - 'description': 'Building Cluster Recovery Analysis (with uncertainty)', - 'input_parameters': [ + "name": "building-cluster-recovery-analysis", + "description": "Building Cluster Recovery Analysis (with uncertainty)", + "input_parameters": [ { - 'id': 'result_name', - 'required': False, - 'description': 'Result dataset name', - 'type': str + "id": "result_name", + "required": False, + "description": "Result dataset name", + "type": str, }, { - 'id': 'uncertainty', - 'required': True, - 'description': 'Use uncertainty', - 'type': bool + "id": "uncertainty", + "required": True, + "description": "Use uncertainty", + "type": bool, }, { - 'id': 'sample_size', - 'required': False, - 'description': 'No. of buildings to be considered from input buildings', - 'type': int + "id": "sample_size", + "required": False, + "description": "No. 
of buildings to be considered from input buildings", + "type": int, }, { - 'id': 'random_sample_size', - 'required': True, - 'description': 'Number of iterations for Monte Carlo Simulation', - 'type': int + "id": "random_sample_size", + "required": True, + "description": "Number of iterations for Monte Carlo Simulation", + "type": int, }, { - 'id': 'no_of_weeks', - 'required': True, - 'description': 'Number of weeks to run the recovery model', - 'type': int + "id": "no_of_weeks", + "required": True, + "description": "Number of weeks to run the recovery model", + "type": int, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - } + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'building_data', - 'required': True, - 'description': 'Building Data', - 'type': ['incore:portfolioBuildingInventory'], + "id": "building_data", + "required": True, + "description": "Building Data", + "type": ["incore:portfolioBuildingInventory"], }, { - 'id': 'occupancy_mapping', - 'required': True, - 'description': 'Occupancy code mapping', - 'type': ['incore:portfolioOccupancyMapping'], + "id": "occupancy_mapping", + "required": True, + "description": "Occupancy code mapping", + "type": ["incore:portfolioOccupancyMapping"], }, { - 'id': 'building_damage', - 'required': True, - 'description': 'Building Damage Results', - 'type': ['incore:portfolioBuildingDamage'], + "id": "building_damage", + "required": True, + "description": "Building Damage Results", + "type": ["incore:portfolioBuildingDamage"], }, { - 'id': 'dmg_ratios', - 'required': True, - 'description': 'Percentage of mean repair by occupancy / building type', - 'type': ['incore:portfolioDamageRatios'], + "id": "dmg_ratios", + "required": True, + "description": "Percentage of mean repair by occupancy / building type", + "type": ["incore:portfolioDamageRatios"], }, { - 'id': 'utility', - 'required': True, - 'description': 'Full utility availability at each utility service area - joint area of power' - 'and water (row), at each week (column)"', - 'type': ['incore:portfolioUtilityAvailability'], + "id": "utility", + "required": True, + "description": "Full utility availability at each utility service area - joint area of power" + 'and water (row), at each week (column)"', + "type": ["incore:portfolioUtilityAvailability"], }, { - 'id': 'utility_partial', - 'required': True, - 'description': 'Partial utility availability at each utility service area', - 'type': ['incore:portfolioUtilityAvailability'], + "id": "utility_partial", + "required": True, + "description": "Partial utility availability at each utility service area", + "type": ["incore:portfolioUtilityAvailability"], }, { - 'id': 'coefFL', - 'required': True, - 'description': 'Correlation coefficient of initial functionality states', - 'type': ['incore:portfolioCoefficients'], + "id": "coefFL", + "required": True, + "description": "Correlation coefficient of initial functionality states", + "type": ["incore:portfolioCoefficients"], }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'buildingClusterRecovery', - 'description': 'Building cluster recovery result.', - 'type': 'incore:clusterRecovery' + "id": "result", + "parent_type": "buildingClusterRecovery", + "description": "Building cluster recovery result.", + "type": 
"incore:clusterRecovery", } - ] + ], } def run(self): - uncertainty = self.get_parameter("uncertainty") utility_initial = self.get_input_dataset("utility").get_dataframe_from_csv() - building_damage_results = self.get_input_dataset("building_damage").get_dataframe_from_csv() + building_damage_results = self.get_input_dataset( + "building_damage" + ).get_dataframe_from_csv() building_data = self.get_input_dataset("building_data").get_dataframe_from_csv() mean_repair = self.get_input_dataset("dmg_ratios").get_dataframe_from_csv() - occupancy_mapping = self.get_input_dataset("occupancy_mapping").get_dataframe_from_csv() + occupancy_mapping = self.get_input_dataset( + "occupancy_mapping" + ).get_dataframe_from_csv() coeFL = self.get_input_dataset("coefFL").get_dataframe_from_csv() - print('INFO: Data for Building Portfolio Recovery Analysis loaded successfully.') + print( + "INFO: Data for Building Portfolio Recovery Analysis loaded successfully." + ) - sample_size = self.get_parameter("sample_size") # len(building _damage_results) + sample_size = self.get_parameter( + "sample_size" + ) # len(building _damage_results) if sample_size is None: sample_size = len(building_damage_results) else: building_damage_results = building_damage_results.head(sample_size) coeFL = coeFL.iloc[0:sample_size, 0:sample_size] - user_defined_cpu = self.get_parameter("num_cpu") permutation = np.random.permutation(len(building_data)) permutation_subset = permutation[0:sample_size] - sample_buildings = [BuildingData(building_data['Tract_ID'][i], building_data['X_Lon'][i], - building_data['Y_Lat'][i], building_data['Structural'][i], - building_data['Code_Level'][i], building_data['EPSANodeID'][i], - building_data['PWSANodeID'][i], building_data['TEP_ID'][i], - building_data['Build_ID_X'][i], building_data['EPSAID'][i], - building_data['PWSAID'][i], building_data['Finance'][i], - building_data['EP_PW_ID'][i], building_data['Occu_Code'][i] - ) - for i in permutation_subset] - occupancy_map = {occupancy_mapping["Occu_ID"][i]: occupancy_mapping['Occupancy'][i] for i in - range(len(occupancy_mapping))} - repair_mean = {mean_repair['Occupancy'][i]: [mean_repair['RC1'][i], mean_repair['RC2'][i], - mean_repair['RC3'][i], mean_repair['RC4'][i]] - for i in range(len(mean_repair))} - - building_damage = [[building_damage_results['Restricted Entry'][i], - building_damage_results['Restricted Use'][i], - building_damage_results['Reoccupancy'][i], - building_damage_results['Best Line Functionality'][i], - building_damage_results['Full Functionality'][i]] - for i in range(len(building_damage_results))] + sample_buildings = [ + BuildingData( + building_data["Tract_ID"][i], + building_data["X_Lon"][i], + building_data["Y_Lat"][i], + building_data["Structural"][i], + building_data["Code_Level"][i], + building_data["EPSANodeID"][i], + building_data["PWSANodeID"][i], + building_data["TEP_ID"][i], + building_data["Build_ID_X"][i], + building_data["EPSAID"][i], + building_data["PWSAID"][i], + building_data["Finance"][i], + building_data["EP_PW_ID"][i], + building_data["Occu_Code"][i], + ) + for i in permutation_subset + ] + occupancy_map = { + occupancy_mapping["Occu_ID"][i]: occupancy_mapping["Occupancy"][i] + for i in range(len(occupancy_mapping)) + } + repair_mean = { + mean_repair["Occupancy"][i]: [ + mean_repair["RC1"][i], + mean_repair["RC2"][i], + mean_repair["RC3"][i], + mean_repair["RC4"][i], + ] + for i in range(len(mean_repair)) + } + + building_damage = [ + [ + building_damage_results["Restricted Entry"][i], + 
building_damage_results["Restricted Use"][i], + building_damage_results["Reoccupancy"][i], + building_damage_results["Best Line Functionality"][i], + building_damage_results["Full Functionality"][i], + ] + for i in range(len(building_damage_results)) + ] # START: Calculate waiting time statistics using Monte Carlo Simulations nsd = 5000 # TODO: Input? @@ -213,9 +242,18 @@ def run(self): # repair_mean, occupancy_map, uncertainty, # impeding_mean, impeding_std, # building_damage, utility, utility2)): - response = self.calculate_transition_probability_matrix(time_steps, sample_buildings, repair_mean, - occupancy_map, uncertainty, impeding_mean, impeding_std, - building_damage, utility, utility2) + response = self.calculate_transition_probability_matrix( + time_steps, + sample_buildings, + repair_mean, + occupancy_map, + uncertainty, + impeding_mean, + impeding_std, + building_damage, + utility, + utility2, + ) temporary_correlation1 = response["temporary_correlation1"] temporary_correlation2 = response["temporary_correlation2"] mean_over_time = response["mean_over_time"] @@ -233,13 +271,25 @@ def run(self): # Trajectory for Best Line Functionality and Full Functionality mean_recovery_output = sum(recovery_fp) / sample_size - with open(output_base_name + '_building-recovery.csv', 'w+', newline='') as output_file: - spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) - spam_writer.writerow(['Building_ID', 'Building_Lon', 'Building_Lat'] - + list(range(1, time_steps + 1))) + with open( + output_base_name + "_building-recovery.csv", "w+", newline="" + ) as output_file: + spam_writer = csv.writer( + output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + spam_writer.writerow( + ["Building_ID", "Building_Lon", "Building_Lat"] + + list(range(1, time_steps + 1)) + ) for i in range(sample_size): - spam_writer.writerow([building_data['Build_ID_X'][i], building_data['X_Lon'][i], - building_data['Y_Lat'][i]] + list(recovery_fp[i])) + spam_writer.writerow( + [ + building_data["Build_ID_X"][i], + building_data["X_Lon"][i], + building_data["Y_Lat"][i], + ] + + list(recovery_fp[i]) + ) if uncertainty: # START: Additional Code for uncertainty analysis @@ -248,8 +298,9 @@ def run(self): random_distribution = np.random.multivariate_normal(mean_u, covar, 10000) random_samples = sp.stats.norm.cdf(random_distribution) - sample_total = self.calculate_sample_total(number_of_simulations, sample_size, - building_damage, random_samples) + sample_total = self.calculate_sample_total( + number_of_simulations, sample_size, building_damage, random_samples + ) for k in range(sample_size): for t in range(time_steps): @@ -257,19 +308,29 @@ def run(self): variance_over_time[t][k] = 0 # Start calculating standard deviation of the mean recovery trajectory - total_standard_deviation = self.calculate_std_of_mean_bulk_input(range(time_steps), sample_size, - number_of_simulations, variance_over_time, - mean_over_time, temporary_correlation1, - temporary_correlation2, sample_total) + total_standard_deviation = self.calculate_std_of_mean_bulk_input( + range(time_steps), + sample_size, + number_of_simulations, + variance_over_time, + mean_over_time, + temporary_correlation1, + temporary_correlation2, + sample_total, + ) # Calculate distribution of Portfolio Recovery Time (PRT) assume normal distribution x_range = np.arange(0.0, 1.001, 0.001) pdf_full = np.zeros((time_steps, len(x_range))) irt = np.zeros(time_steps) for t in range(time_steps): - total_standard_deviation[t] = 
math.sqrt(total_standard_deviation[t]) / sample_size + total_standard_deviation[t] = ( + math.sqrt(total_standard_deviation[t]) / sample_size + ) target_mean_recovery[t] = mean_recovery[t][3] + mean_recovery[t][4] - pdf_full[t] = sp.stats.norm.pdf(x_range, target_mean_recovery[t], total_standard_deviation[t]) + pdf_full[t] = sp.stats.norm.pdf( + x_range, target_mean_recovery[t], total_standard_deviation[t] + ) coeR = np.trapz(pdf_full, x_range) @@ -284,52 +345,100 @@ def run(self): # Calculate truncated normal distribution and 75% & 95% percentile band # 75% percentile upper bound - upper_bound75 = target_mean_recovery + [1.15 * i for i in total_standard_deviation] + upper_bound75 = target_mean_recovery + [ + 1.15 * i for i in total_standard_deviation + ] # 75% percentile lower bound - lower_bound75 = target_mean_recovery - [1.15 * i for i in total_standard_deviation] + lower_bound75 = target_mean_recovery - [ + 1.15 * i for i in total_standard_deviation + ] # 95% percentile upper bound - upper_bound95 = target_mean_recovery + [1.96 * i for i in total_standard_deviation] + upper_bound95 = target_mean_recovery + [ + 1.96 * i for i in total_standard_deviation + ] # 95% percentile lower bound - lower_bound95 = target_mean_recovery - [1.96 * i for i in total_standard_deviation] + lower_bound95 = target_mean_recovery - [ + 1.96 * i for i in total_standard_deviation + ] for t in range(time_steps): - coet = sp.stats.norm.cdf(0, target_mean_recovery[t], total_standard_deviation[t]) - coet2 = sp.stats.norm.cdf(1, target_mean_recovery[t], total_standard_deviation[t]) + coet = sp.stats.norm.cdf( + 0, target_mean_recovery[t], total_standard_deviation[t] + ) + coet2 = sp.stats.norm.cdf( + 1, target_mean_recovery[t], total_standard_deviation[t] + ) if coet >= 0.000005 and 1 - coet2 < 0.00005: coeAmp = 1 / (1 - coet) - lower_bound95[t] = sp.stats.norm.ppf(0.05 / coeAmp + coet, target_mean_recovery[t], - total_standard_deviation[t]) - upper_bound95[t] = sp.stats.norm.ppf(0.95 / coeAmp + coet, target_mean_recovery[t], - total_standard_deviation[t]) - lower_bound75[t] = sp.stats.norm.ppf(0.25 / coeAmp + coet, target_mean_recovery[t], - total_standard_deviation[t]) - upper_bound75[t] = sp.stats.norm.ppf(0.75 / coeAmp + coet, target_mean_recovery[t], - total_standard_deviation[t]) + lower_bound95[t] = sp.stats.norm.ppf( + 0.05 / coeAmp + coet, + target_mean_recovery[t], + total_standard_deviation[t], + ) + upper_bound95[t] = sp.stats.norm.ppf( + 0.95 / coeAmp + coet, + target_mean_recovery[t], + total_standard_deviation[t], + ) + lower_bound75[t] = sp.stats.norm.ppf( + 0.25 / coeAmp + coet, + target_mean_recovery[t], + total_standard_deviation[t], + ) + upper_bound75[t] = sp.stats.norm.ppf( + 0.75 / coeAmp + coet, + target_mean_recovery[t], + total_standard_deviation[t], + ) pdf_full[t] = pdf_full[t] * coeAmp if coet < 0.00005 and 1 - coet2 >= 0.000005: coeAmp = 1 / coet2 - lower_bound95[t] = sp.stats.norm.ppf((coet2) - 0.95 / coeAmp, target_mean_recovery[t], - total_standard_deviation[t]) - upper_bound95[t] = sp.stats.norm.ppf((coet2) - 0.05 / coeAmp, target_mean_recovery[t], - total_standard_deviation[t]) - lower_bound75[t] = sp.stats.norm.ppf((coet2) - 0.75 / coeAmp, target_mean_recovery[t], - total_standard_deviation[t]) - upper_bound75[t] = sp.stats.norm.ppf((coet2) - 0.25 / coeAmp, target_mean_recovery[t], - total_standard_deviation[t]) + lower_bound95[t] = sp.stats.norm.ppf( + (coet2) - 0.95 / coeAmp, + target_mean_recovery[t], + total_standard_deviation[t], + ) + upper_bound95[t] = sp.stats.norm.ppf( 
+ (coet2) - 0.05 / coeAmp, + target_mean_recovery[t], + total_standard_deviation[t], + ) + lower_bound75[t] = sp.stats.norm.ppf( + (coet2) - 0.75 / coeAmp, + target_mean_recovery[t], + total_standard_deviation[t], + ) + upper_bound75[t] = sp.stats.norm.ppf( + (coet2) - 0.25 / coeAmp, + target_mean_recovery[t], + total_standard_deviation[t], + ) pdf_full[t] = pdf_full[t] * coeAmp if coet >= 0.000005 and 1 - coet2 >= 0.000005: coeAmp = 1 / (coet2 - coet) - lower_bound95[t] = sp.stats.norm.ppf(0.05 / coeAmp + coet, target_mean_recovery[t], - total_standard_deviation[t]) - upper_bound95[t] = sp.stats.norm.ppf(coet2 - 0.05 / coeAmp, target_mean_recovery[t], - total_standard_deviation[t]) - lower_bound75[t] = sp.stats.norm.ppf(0.25 / coeAmp + coet, target_mean_recovery[t], - total_standard_deviation[t]) - upper_bound75[t] = sp.stats.norm.ppf(coet2 - 0.25 / coeAmp, target_mean_recovery[t], - total_standard_deviation[t]) + lower_bound95[t] = sp.stats.norm.ppf( + 0.05 / coeAmp + coet, + target_mean_recovery[t], + total_standard_deviation[t], + ) + upper_bound95[t] = sp.stats.norm.ppf( + coet2 - 0.05 / coeAmp, + target_mean_recovery[t], + total_standard_deviation[t], + ) + lower_bound75[t] = sp.stats.norm.ppf( + 0.25 / coeAmp + coet, + target_mean_recovery[t], + total_standard_deviation[t], + ) + upper_bound75[t] = sp.stats.norm.ppf( + coet2 - 0.25 / coeAmp, + target_mean_recovery[t], + total_standard_deviation[t], + ) # TODO: Confirm with PI if this can be removed. These conditions are never hit if time_steps > 100: @@ -345,35 +454,89 @@ def run(self): upper_bound95[t] = 1 # END: Additional Code for uncertainty Analysis - with open(output_base_name + '_cluster-recovery.csv', 'w+', newline='') as output_file: - spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) - spam_writer.writerow(['Week', 'Recovery_Percent_Func_Probability', '75P_Upper_Bound', - '75P_Lower_Bound', '95P_Upper_Bound', '95P_Lower_Bound', - 'RecPercent_RE', 'RecPercent_RU', 'RecPercent_RO', 'RecPercent_BF', - 'RecPercent_FF', 'Probability_Density_Func']) + with open( + output_base_name + "_cluster-recovery.csv", "w+", newline="" + ) as output_file: + spam_writer = csv.writer( + output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + spam_writer.writerow( + [ + "Week", + "Recovery_Percent_Func_Probability", + "75P_Upper_Bound", + "75P_Lower_Bound", + "95P_Upper_Bound", + "95P_Lower_Bound", + "RecPercent_RE", + "RecPercent_RU", + "RecPercent_RO", + "RecPercent_BF", + "RecPercent_FF", + "Probability_Density_Func", + ] + ) for i in range(time_steps): - spam_writer.writerow([i + 1, mean_recovery_output[i], lower_bound75[i], upper_bound75[i], - lower_bound95[i], upper_bound95[i]] + list(mean_recovery[i]) + - [pdf[i]]) + spam_writer.writerow( + [ + i + 1, + mean_recovery_output[i], + lower_bound75[i], + upper_bound75[i], + lower_bound95[i], + upper_bound95[i], + ] + + list(mean_recovery[i]) + + [pdf[i]] + ) else: - with open(output_base_name + '_cluster-recovery.csv', 'w+', newline='') as output_file: - spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) - spam_writer.writerow(['Week', 'Recovery_Percent_Func_Probability', 'RecPercent_RE', - 'RecPercent_RU', 'RecPercent_RO', 'RecPercent_BF', 'RecPercent_FF']) + with open( + output_base_name + "_cluster-recovery.csv", "w+", newline="" + ) as output_file: + spam_writer = csv.writer( + output_file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL + ) + spam_writer.writerow( + [ + 
"Week", + "Recovery_Percent_Func_Probability", + "RecPercent_RE", + "RecPercent_RU", + "RecPercent_RO", + "RecPercent_BF", + "RecPercent_FF", + ] + ) for i in range(time_steps): - spam_writer.writerow([i + 1, mean_recovery_output[i]] + list(mean_recovery[i])) - - self.set_output_dataset("result", Dataset.from_file(output_base_name + '_cluster-recovery.csv', - data_type=self.output_datasets["result"]["spec"]["type"])) + spam_writer.writerow( + [i + 1, mean_recovery_output[i]] + list(mean_recovery[i]) + ) + + self.set_output_dataset( + "result", + Dataset.from_file( + output_base_name + "_cluster-recovery.csv", + data_type=self.output_datasets["result"]["spec"]["type"], + ), + ) print("INFO: Finished executing Building Portfolio Recovery Analysis") return True - def calculate_transition_probability_matrix(self, time_steps, sample_buildings, repair_mean, - occupancy_map, - uncertainty, impeding_mean, impeding_std, building_damage, - utility, utility2): + def calculate_transition_probability_matrix( + self, + time_steps, + sample_buildings, + repair_mean, + occupancy_map, + uncertainty, + impeding_mean, + impeding_std, + building_damage, + utility, + utility2, + ): sample_size = len(sample_buildings) total_mean = np.zeros((4, 4)) total_var = np.zeros((4, 4)) @@ -397,33 +560,64 @@ def calculate_transition_probability_matrix(self, time_steps, sample_buildings, std = impeding_std[finance_id, j] ** 2 for i in range(j, 4): - mean += repair_mean[occupancy_map[sample_buildings[k].occupation_code]][i] + mean += repair_mean[ + occupancy_map[sample_buildings[k].occupation_code] + ][i] total_mean[j][i] = mean - std += 0.4 * repair_mean[occupancy_map[sample_buildings[k].occupation_code]][i] ** 2 + std += ( + 0.4 + * repair_mean[ + occupancy_map[sample_buildings[k].occupation_code] + ][i] + ** 2 + ) total_var[j][i] = math.sqrt(std) for t in range(time_steps): for i in range(4): for j in range(i, 4): - zeta = math.sqrt(math.log(1 + (total_var[i][j] / total_mean[i][j]) ** 2)) - lambda_log = math.log(total_mean[i][j]) - 1 / 2 * zeta ** 2 - transition_probability[i][j] = self.log_n_cdf(t + 1, lambda_log, zeta) + zeta = math.sqrt( + math.log(1 + (total_var[i][j] / total_mean[i][j]) ** 2) + ) + lambda_log = math.log(total_mean[i][j]) - 1 / 2 * zeta**2 + transition_probability[i][j] = self.log_n_cdf( + t + 1, lambda_log, zeta + ) # tpm = transition probability matrix - tpm = np.matrix([[1 - transition_probability[0, 0], - transition_probability[0, 0] - transition_probability[0, 1], - transition_probability[0, 1] - transition_probability[0, 2], - transition_probability[0, 2] - transition_probability[0, 3], - transition_probability[0, 3]], - [0.0, 1 - transition_probability[1, 1], - transition_probability[1, 1] - transition_probability[1, 2], - transition_probability[1, 2] - transition_probability[1, 3], - transition_probability[1, 3]], - [0.0, 0.0, 1 - transition_probability[2, 2], - transition_probability[2, 2] - transition_probability[2, 3], - transition_probability[2, 3]], - [0.0, 0.0, 0.0, 1 - transition_probability[3, 3], - transition_probability[3, 3]], - [0.0, 0.0, 0.0, 0.0, 1.0]], dtype=float) + tpm = np.matrix( + [ + [ + 1 - transition_probability[0, 0], + transition_probability[0, 0] - transition_probability[0, 1], + transition_probability[0, 1] - transition_probability[0, 2], + transition_probability[0, 2] - transition_probability[0, 3], + transition_probability[0, 3], + ], + [ + 0.0, + 1 - transition_probability[1, 1], + transition_probability[1, 1] - transition_probability[1, 2], + 
transition_probability[1, 2] - transition_probability[1, 3], + transition_probability[1, 3], + ], + [ + 0.0, + 0.0, + 1 - transition_probability[2, 2], + transition_probability[2, 2] - transition_probability[2, 3], + transition_probability[2, 3], + ], + [ + 0.0, + 0.0, + 0.0, + 1 - transition_probability[3, 3], + transition_probability[3, 3], + ], + [0.0, 0.0, 0.0, 0.0, 1.0], + ], + dtype=float, + ) # State Probability vector, pie(t) = initial vector * Transition Probability Matrix state_probabilities[t] = np.matmul(building_damage[k], tpm) if not uncertainty: @@ -432,16 +626,32 @@ def calculate_transition_probability_matrix(self, time_steps, sample_buildings, if uncertainty: # Considering the effect of utility availability # Utility Dependence Matrix - utility_matrix = np.matrix([[1, 0, 0, 0, 0], - [0, 1, 0, 0, 0], - [0, 0, 1, utility[utility_id][t], utility[utility_id][t]], - [0, 0, 0, 1 - utility[utility_id][t], - utility2[utility_id][t]], - [0, 0, 0, 0, - 1 - utility[utility_id][t] - utility2[utility_id][t]] - ], dtype=float) + utility_matrix = np.matrix( + [ + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 1, utility[utility_id][t], utility[utility_id][t]], + [ + 0, + 0, + 0, + 1 - utility[utility_id][t], + utility2[utility_id][t], + ], + [ + 0, + 0, + 0, + 0, + 1 - utility[utility_id][t] - utility2[utility_id][t], + ], + ], + dtype=float, + ) updated_tpm = np.matmul(tpm, utility_matrix.transpose()) - state_probabilities[t] = np.matmul(state_probabilities[t], utility_matrix.transpose()) + state_probabilities[t] = np.matmul( + state_probabilities[t], utility_matrix.transpose() + ) # Calculation functionality statee indicator wheen j=4+5 Conditional mean temporary_correlation1[t][k][0] = updated_tpm[0, 3] @@ -454,20 +664,29 @@ def calculate_transition_probability_matrix(self, time_steps, sample_buildings, temporary_correlation2[t][k][2] = updated_tpm[2, 4] temporary_correlation2[t][k][3] = updated_tpm[3, 4] temporary_correlation2[t][k][4] = updated_tpm[4, 4] - mean_over_time[t][k] = state_probabilities[t][3] + state_probabilities[t][4] - variance_over_time[t][k] = (state_probabilities[t][3] + state_probabilities[t][4]) * \ - (1 - (state_probabilities[t][3] + state_probabilities[t][4])) + mean_over_time[t][k] = ( + state_probabilities[t][3] + state_probabilities[t][4] + ) + variance_over_time[t][k] = ( + state_probabilities[t][3] + state_probabilities[t][4] + ) * (1 - (state_probabilities[t][3] + state_probabilities[t][4])) # Considering the effect of utility availability # Service Area ID of individual buildings # START: Code from only recovery analysis if not uncertainty: - for i in range(len(state_probabilities)): - state_probabilities[i, 2] = state_probabilities[i, 2] + state_probabilities[i, 3] + \ - state_probabilities[i, 4] * (1 - utility[utility_id, i]) - state_probabilities[i, 3] = state_probabilities[i, 3] * utility[utility_id, i] - state_probabilities[i, 4] = state_probabilities[i, 4] * utility[utility_id, i] + state_probabilities[i, 2] = ( + state_probabilities[i, 2] + + state_probabilities[i, 3] + + state_probabilities[i, 4] * (1 - utility[utility_id, i]) + ) + state_probabilities[i, 3] = ( + state_probabilities[i, 3] * utility[utility_id, i] + ) + state_probabilities[i, 4] = ( + state_probabilities[i, 4] * utility[utility_id, i] + ) # END: Code from only recovery analysis @@ -479,14 +698,19 @@ def calculate_transition_probability_matrix(self, time_steps, sample_buildings, print("Transition probability matrix calculation complete.") - return {"temporary_correlation1": 
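# Illustrative sketch with hypothetical numbers: how a repair-phase mean m and
# standard deviation s become the lognormal parameters (zeta, lambda_log) built
# above, and how the probability of finishing the phase by week t is then read
# from a lognormal CDF, which is presumably what self.log_n_cdf evaluates.
import math
from scipy.stats import lognorm

def phase_completion_probability(m, s, t):
    zeta = math.sqrt(math.log(1.0 + (s / m) ** 2))    # log-space standard deviation
    lam = math.log(m) - 0.5 * zeta**2                 # log-space mean
    return lognorm.cdf(t, s=zeta, scale=math.exp(lam))

# e.g. phase_completion_probability(8.0, 3.0, 6.0) is the chance that a phase
# with an 8-week mean and 3-week std duration is complete by week 6.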
temporary_correlation1, - "temporary_correlation2": temporary_correlation2, - "mean_over_time": mean_over_time, "variance_over_time": variance_over_time, - "recovery_fp": recovery_fp, - "mean_recovery": mean_recovery} + return { + "temporary_correlation1": temporary_correlation1, + "temporary_correlation2": temporary_correlation2, + "mean_over_time": mean_over_time, + "variance_over_time": variance_over_time, + "recovery_fp": recovery_fp, + "mean_recovery": mean_recovery, + } # TODO: nS=10000 should be used line:301 - def calculate_sample_total(self, number_of_simulations, sample_size, building_damage, random_samples): + def calculate_sample_total( + self, number_of_simulations, sample_size, building_damage, random_samples + ): sample_total = np.zeros((sample_size, number_of_simulations)) for j in range(number_of_simulations): sample = np.zeros(sample_size) @@ -494,43 +718,78 @@ def calculate_sample_total(self, number_of_simulations, sample_size, building_da threshold = building_damage[i] if random_samples[j][i] <= threshold[0]: sample[i] = 1 - elif random_samples[j][i] <= threshold[0] + threshold[1] and \ - random_samples[j][i] >= threshold[0]: + elif ( + random_samples[j][i] <= threshold[0] + threshold[1] + and random_samples[j][i] >= threshold[0] + ): sample[i] = 2 - elif threshold[0] + threshold[1] <= random_samples[j][i] <= \ - threshold[0] + threshold[1] + threshold[2]: + elif ( + threshold[0] + threshold[1] + <= random_samples[j][i] + <= threshold[0] + threshold[1] + threshold[2] + ): sample[i] = 3 - elif threshold[0] + threshold[1] + threshold[2] <= random_samples[j][i] <= \ - threshold[0] + threshold[1] + threshold[2] + threshold[3]: + elif ( + threshold[0] + threshold[1] + threshold[2] + <= random_samples[j][i] + <= threshold[0] + threshold[1] + threshold[2] + threshold[3] + ): sample[i] = 4 else: sample[i] = 5 sample_total[i][j] = sample[i] return sample_total - def calculate_std_of_mean_concurrent_future(self, function_name, parallelism, *args): - + def calculate_std_of_mean_concurrent_future( + self, function_name, parallelism, *args + ): output = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=parallelism + ) as executor: for ret in executor.map(function_name, *args): output.extend(ret) return output - def calculate_std_of_mean_bulk_input(self, time_steps, sample_size, number_of_simulations, - variance_over_time, mean_over_time, temporary_correlation1, - temporary_correlation2, sample_total): + def calculate_std_of_mean_bulk_input( + self, + time_steps, + sample_size, + number_of_simulations, + variance_over_time, + mean_over_time, + temporary_correlation1, + temporary_correlation2, + sample_total, + ): result = [] for step in time_steps: result.append( - self.calculate_std_of_mean(step, sample_size, number_of_simulations, variance_over_time, - mean_over_time, temporary_correlation1, temporary_correlation2, - sample_total)) + self.calculate_std_of_mean( + step, + sample_size, + number_of_simulations, + variance_over_time, + mean_over_time, + temporary_correlation1, + temporary_correlation2, + sample_total, + ) + ) return result # calculating standard deviation of the mean recovery trajectory - def calculate_std_of_mean(self, t, sample_size, number_of_simulations, variance_over_time, mean_over_time, - temporary_correlation1, temporary_correlation2, sample_total): - + def calculate_std_of_mean( + self, + t, + sample_size, + number_of_simulations, + variance_over_time, + 
mean_over_time, + temporary_correlation1, + temporary_correlation2, + sample_total, + ): print("Calculating std mean for week " + str(t)) output = np.sum(variance_over_time[t]) @@ -543,24 +802,31 @@ def calculate_std_of_mean(self, t, sample_size, number_of_simulations, variance_ # starti = timer() expect1 = 0 # Joint probability of initial functionality state P(S0i=k, S0j=l) - joint_probability = self.joint_probability_calculation(sample_total[i], sample_total[j], - number_of_simulations) + joint_probability = self.joint_probability_calculation( + sample_total[i], sample_total[j], number_of_simulations + ) # Functionality State k for k in range(5): - # Functionality State l - for l in range(5): - expect1 += joint_probability[k][l] * (temporary_correlation1[t][i][k] * - temporary_correlation1[t][j][l] - + temporary_correlation1[t][i][k] * - temporary_correlation1[t][j][l] - + temporary_correlation2[t][i][k] * - temporary_correlation1[t][j][l] - + temporary_correlation2[t][i][k] * - temporary_correlation2[t][j][l]) + # Functionality State m + for m in range(5): + expect1 += joint_probability[k][m] * ( + temporary_correlation1[t][i][k] + * temporary_correlation1[t][j][m] + + temporary_correlation1[t][i][k] + * temporary_correlation1[t][j][m] + + temporary_correlation2[t][i][k] + * temporary_correlation1[t][j][m] + + temporary_correlation2[t][i][k] + * temporary_correlation2[t][j][m] + ) expect2 = mean_over_time[t][i] * mean_over_time[t][j] - if variance_over_time[t][i] > 0 and variance_over_time[t][j] > 0 and expect1 - expect2 > 0: + if ( + variance_over_time[t][i] > 0 + and variance_over_time[t][j] > 0 + and expect1 - expect2 > 0 + ): covariance = expect1 - expect2 output += 2 * covariance diff --git a/pyincore/analyses/buildingclusterrecovery/buildingdamage.py b/pyincore/analyses/buildingclusterrecovery/buildingdamage.py index fe4ec4c72..fb107b9a1 100644 --- a/pyincore/analyses/buildingclusterrecovery/buildingdamage.py +++ b/pyincore/analyses/buildingclusterrecovery/buildingdamage.py @@ -6,9 +6,15 @@ class BuildingDamage(object): - - def __init__(self, distance_to_center, restricted_entry, restricted_use, reoccupancy, best_line_functionality, - full_functionality): + def __init__( + self, + distance_to_center, + restricted_entry, + restricted_use, + reoccupancy, + best_line_functionality, + full_functionality, + ): self.distance_to_center = distance_to_center self.restricted_entry = restricted_entry self.restricted_use = restricted_use diff --git a/pyincore/analyses/buildingclusterrecovery/buildingdata.py b/pyincore/analyses/buildingclusterrecovery/buildingdata.py index cbd5b0e46..13f47587b 100644 --- a/pyincore/analyses/buildingclusterrecovery/buildingdata.py +++ b/pyincore/analyses/buildingclusterrecovery/buildingdata.py @@ -6,13 +6,28 @@ class BuildingData: - def __init__(self, tract_id, lon, lat, structural, code_level, epsa_node_id, pwsa_node_id, tep_id, build_id_x, - epsa_id, pwsa_id, finance, ep_pw_id, occupation_code): + def __init__( + self, + tract_id, + lon, + lat, + structural, + code_level, + epsa_node_id, + pwsa_node_id, + tep_id, + build_id_x, + epsa_id, + pwsa_id, + finance, + ep_pw_id, + occupation_code, + ): self.tract_id = tract_id self.lon = lon self.lat = lat self.structural = structural - self.code_level = code_level, + self.code_level = (code_level,) self.epsa_node_id = epsa_node_id self.pwsa_node_id = pwsa_node_id self.tep_id = tep_id diff --git a/pyincore/analyses/buildingdamage/buildingdamage.py b/pyincore/analyses/buildingdamage/buildingdamage.py index 
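# Illustrative sketch with toy numbers for calculate_std_of_mean above:
# Var(sum_i X_i) = sum_i Var(X_i) + 2 * sum_{i<j} Cov(X_i, X_j), with each
# covariance estimated as E[X_i * X_j] - E[X_i] * E[X_j] from the joint state
# probabilities, and the std of the mean trajectory taken as sqrt(Var) / n.
import math

variances = [0.21, 0.24, 0.16]                           # variance_over_time[t][i]
means = [0.70, 0.60, 0.80]                               # mean_over_time[t][i]
e_products = {(0, 1): 0.45, (0, 2): 0.58, (1, 2): 0.50}  # E[X_i * X_j] (expect1)

total_variance = sum(variances)
for (i, j), e1 in e_products.items():
    cov = e1 - means[i] * means[j]                       # expect1 - expect2
    if cov > 0:                                          # only positive covariances are kept
        total_variance += 2 * cov
std_of_mean = math.sqrt(total_variance) / len(means)     # ~0.289 here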
807aea5ac..b7d30d74e 100755 --- a/pyincore/analyses/buildingdamage/buildingdamage.py +++ b/pyincore/analyses/buildingdamage/buildingdamage.py @@ -5,10 +5,16 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ from deprecated.sphinx import deprecated -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) -@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use BuildingStructuralDamage instead.") -class BuildingDamage(): + +@deprecated( + version="1.19.0", + reason="This class will be deprecated soon. Use BuildingStructuralDamage instead.", +) +class BuildingDamage: def __init__(self, incore_client): self._delegate = BuildingStructuralDamage(incore_client) @@ -16,4 +22,4 @@ def __getattr__(self, name): """ Delegate attribute access to the BuildingStructuralDamage instance. """ - return getattr(self._delegate, name) \ No newline at end of file + return getattr(self._delegate, name) diff --git a/pyincore/analyses/buildingdamage/buildingutil.py b/pyincore/analyses/buildingdamage/buildingutil.py index 07d7a4094..38fa460bd 100644 --- a/pyincore/analyses/buildingdamage/buildingutil.py +++ b/pyincore/analyses/buildingdamage/buildingutil.py @@ -7,6 +7,7 @@ class BuildingUtil: """Utility methods for the building damage analysis.""" + DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code" DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY = "Non-Retrofit Inundation Fragility ID Code" DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY = "Non-Retrofit MomentumFlux Fragility ID Code" diff --git a/pyincore/analyses/buildingeconloss/buildingeconloss.py b/pyincore/analyses/buildingeconloss/buildingeconloss.py index 75b795420..cfefd8614 100755 --- a/pyincore/analyses/buildingeconloss/buildingeconloss.py +++ b/pyincore/analyses/buildingeconloss/buildingeconloss.py @@ -56,12 +56,21 @@ def run(self): occ_type = bldg_item["properties"]["occ_type"] prop_select.append([guid, year_built, occ_type, appr_bldg]) - bldg_set_df = pd.DataFrame(prop_select, columns=["guid", "year_built", "occ_type", "appr_bldg"]) + bldg_set_df = pd.DataFrame( + prop_select, columns=["guid", "year_built", "occ_type", "appr_bldg"] + ) bldg_dmg_set = self.get_input_dataset("building_mean_dmg").get_csv_reader() bldg_dmg_df = pd.DataFrame(list(bldg_dmg_set)) - dmg_set_df = pd.merge(bldg_set_df, bldg_dmg_df, how="outer", left_on="guid", right_on="guid", - sort=True, copy=True) + dmg_set_df = pd.merge( + bldg_set_df, + bldg_dmg_df, + how="outer", + left_on="guid", + right_on="guid", + sort=True, + copy=True, + ) infl_mult = self.get_inflation_mult() dmg_set_df = self.add_multipliers(dmg_set_df, occ_mult_df) @@ -71,10 +80,18 @@ def run(self): lossdev = 0.0 if "appr_bldg" in dmg_set_df: - loss = dmg_set_df["appr_bldg"].astype(float) * dmg_set_df["meandamage"].astype(float) * dmg_set_df[ - "Multiplier"].astype(float) * infl_mult - lossdev = dmg_set_df["appr_bldg"].astype(float) * dmg_set_df["mdamagedev"].astype(float) * dmg_set_df[ - "Multiplier"].astype(float) * infl_mult + loss = ( + dmg_set_df["appr_bldg"].astype(float) + * dmg_set_df["meandamage"].astype(float) + * dmg_set_df["Multiplier"].astype(float) + * infl_mult + ) + lossdev = ( + dmg_set_df["appr_bldg"].astype(float) + * dmg_set_df["mdamagedev"].astype(float) + * dmg_set_df["Multiplier"].astype(float) + * infl_mult + ) bldg_results["loss"] = loss.round(2) bldg_results["loss_dev"] = lossdev.round(2) @@ -110,11 +127,20 @@ 
def add_multipliers(self, dmg_set_df, occ_mult_df): """ if occ_mult_df is not None: # Occupancy multipliers are in percentages, convert to multiplication factors - occ_mult_df["Multiplier"] = (occ_mult_df["Multiplier"].astype(float) / 100.0) + 1.0 + occ_mult_df["Multiplier"] = ( + occ_mult_df["Multiplier"].astype(float) / 100.0 + ) + 1.0 occ_mult_df = occ_mult_df.rename(columns={"Occupancy": "occ_type"}) - dmg_set_df = pd.merge(dmg_set_df, occ_mult_df, how="left", left_on="occ_type", - right_on="occ_type", sort=True, copy=True) + dmg_set_df = pd.merge( + dmg_set_df, + occ_mult_df, + how="left", + left_on="occ_type", + right_on="occ_type", + sort=True, + copy=True, + ) else: dmg_set_df["Multiplier"] = 1.0 @@ -128,55 +154,59 @@ def get_spec(self): """ return { - 'name': 'building-economy-damage', - 'description': 'building economy damage analysis', - 'input_parameters': [ + "name": "building-economy-damage", + "description": "building economy damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'inflation_factor', - 'required': False, - 'description': 'Inflation factor to adjust the appraisal values of buildings. Default 0.0', - 'type': float + "id": "inflation_factor", + "required": False, + "description": "Inflation factor to adjust the appraisal values of buildings. Default 0.0", + "type": float, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventory', 'ergo:buildingInventoryVer4', - 'ergo:buildingInventoryVer5', 'ergo:buildingInventoryVer6', - 'ergo:buildingInventoryVer7'] + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventory", + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'building_mean_dmg', - 'required': True, - 'description': 'A CSV file with building mean damage results for either Structural, ' - 'Drift-Sensitive Nonstructural, Acceleration-Sensitive Nonstructural ' - 'or Contents Damage component.', - 'type': ['ergo:meanDamage'] + "id": "building_mean_dmg", + "required": True, + "description": "A CSV file with building mean damage results for either Structural, " + "Drift-Sensitive Nonstructural, Acceleration-Sensitive Nonstructural " + "or Contents Damage component.", + "type": ["ergo:meanDamage"], }, { - 'id': 'occupancy_multiplier', - 'required': False, - 'description': 'Building occupancy damage multipliers. These percentage multipliers account ' - 'for the value associated with different types of components (structural, ' - 'acceleration-sensitive nonstructural, ' - 'drift-sensitive nonstructural, contents).', - 'type': ['incore:buildingOccupancyMultiplier'] - } + "id": "occupancy_multiplier", + "required": False, + "description": "Building occupancy damage multipliers. 
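# Illustrative sketch with hypothetical figures: how the loss above is composed
# once the occupancy multiplier (given in percent) and the inflation factor are
# converted to factors. The form of get_inflation_mult() is an assumption here.
appraised_value = 250_000.0      # appr_bldg
mean_damage = 0.32               # meandamage from the mean-damage dataset
occupancy_multiplier_pct = 5.0   # "Multiplier" column before conversion
inflation_factor = 2.0           # 'inflation_factor' parameter, in percent

multiplier = occupancy_multiplier_pct / 100.0 + 1.0   # 1.05, as in add_multipliers
infl_mult = 1.0 + inflation_factor / 100.0            # assumed behaviour of get_inflation_mult
loss = round(appraised_value * mean_damage * multiplier * infl_mult, 2)  # 85680.0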
These percentage multipliers account " + "for the value associated with different types of components (structural, " + "acceleration-sensitive nonstructural, " + "drift-sensitive nonstructural, contents).", + "type": ["incore:buildingOccupancyMultiplier"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'buildings', - 'description': 'CSV file of building economy damages', - 'type': 'incore:buildingEconomicLoss' + "id": "result", + "parent_type": "buildings", + "description": "CSV file of building economy damages", + "type": "incore:buildingEconomicLoss", } - ] + ], } diff --git a/pyincore/analyses/buildingfunctionality/__init__.py b/pyincore/analyses/buildingfunctionality/__init__.py index 13a3ca5a8..5a5b40547 100644 --- a/pyincore/analyses/buildingfunctionality/__init__.py +++ b/pyincore/analyses/buildingfunctionality/__init__.py @@ -1 +1,3 @@ -from pyincore.analyses.buildingfunctionality.buildingfunctionality import BuildingFunctionality +from pyincore.analyses.buildingfunctionality.buildingfunctionality import ( + BuildingFunctionality, +) diff --git a/pyincore/analyses/buildingfunctionality/buildingfunctionality.py b/pyincore/analyses/buildingfunctionality/buildingfunctionality.py index cede29948..5348855c3 100644 --- a/pyincore/analyses/buildingfunctionality/buildingfunctionality.py +++ b/pyincore/analyses/buildingfunctionality/buildingfunctionality.py @@ -34,61 +34,65 @@ def get_spec(self): """ return { - 'name': 'functionality_probability', - 'description': 'calculate the functionality probability of each building', - 'input_parameters': [ + "name": "functionality_probability", + "description": "calculate the functionality probability of each building", + "input_parameters": [ { - 'id': 'result_name', - 'required': False, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": False, + "description": "result dataset name", + "type": str, } ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'building_damage_mcs_samples', - 'required': True, - 'description': 'building damage samples', - 'type': ['incore:sampleFailureState'], + "id": "building_damage_mcs_samples", + "required": True, + "description": "building damage samples", + "type": ["incore:sampleFailureState"], }, { - 'id': 'substations_damage_mcs_samples', - 'required': False, - 'description': 'substations damage samples', - 'type': ['incore:sampleFailureState'], + "id": "substations_damage_mcs_samples", + "required": False, + "description": "substations damage samples", + "type": ["incore:sampleFailureState"], }, { - 'id': 'poles_damage_mcs_samples', - 'required': False, - 'description': 'poles damage samples', - 'type': ['incore:sampleFailureState'], + "id": "poles_damage_mcs_samples", + "required": False, + "description": "poles damage samples", + "type": ["incore:sampleFailureState"], }, { - 'id': 'interdependency_dictionary', - 'required': False, - 'description': 'JSON file of interdependency between buildings and substations and poles', - 'type': ['incore:buildingInterdependencyDict'], + "id": "interdependency_dictionary", + "required": False, + "description": "JSON file of interdependency between buildings and substations and poles", + "type": ["incore:buildingInterdependencyDict"], }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'functionality_samples', - 'description': 'CSV file of functionality samples', - 'type': 'incore:funcSample' + "id": "functionality_samples", + "description": "CSV file of functionality samples", + 
"type": "incore:funcSample", }, { - 'id': 'functionality_probability', - 'description': 'CSV file of functionality probability', - 'type': 'incore:funcProbability' - } - ] + "id": "functionality_probability", + "description": "CSV file of functionality probability", + "type": "incore:funcProbability", + }, + ], } def run(self): """Executes building functionality analysis""" # enable index on "guid" column - buildings_df = self.get_input_dataset("building_damage_mcs_samples").get_dataframe_from_csv().set_index("guid") + buildings_df = ( + self.get_input_dataset("building_damage_mcs_samples") + .get_dataframe_from_csv() + .set_index("guid") + ) interdependency_dataset = self.get_input_dataset("interdependency_dictionary") if interdependency_dataset is not None: @@ -98,7 +102,9 @@ def run(self): substations_dataset = self.get_input_dataset("substations_damage_mcs_samples") if substations_dataset is not None: - substations_df = substations_dataset.get_dataframe_from_csv().set_index("guid") + substations_df = substations_dataset.get_dataframe_from_csv().set_index( + "guid" + ) else: substations_df = None @@ -108,35 +114,51 @@ def run(self): else: poles_df = None - if (poles_dataset is not None or substations_dataset is not None) and interdependency_dataset is None: - raise ValueError("Please provide interdependency table if pole or substation damage is " - "considered in the building functionality calculation.") + if ( + poles_dataset is not None or substations_dataset is not None + ) and interdependency_dataset is None: + raise ValueError( + "Please provide interdependency table if pole or substation damage is " + "considered in the building functionality calculation." + ) functionality_probabilities = [] functionality_samples = [] for building_guid in buildings_df.index: - building_guid, sample, probability = self.functionality(building_guid, buildings_df, substations_df, - poles_df, - interdependency_dict) + building_guid, sample, probability = self.functionality( + building_guid, + buildings_df, + substations_df, + poles_df, + interdependency_dict, + ) functionality_probabilities.append([building_guid, probability]) functionality_samples.append([building_guid, sample]) - fp_results = pd.DataFrame(functionality_probabilities, columns=['guid', 'probability']) - fs_results = pd.DataFrame(functionality_samples, columns=['guid', 'failure']) - - self.set_result_csv_data("functionality_probability", - fp_results, - name=self.get_parameter("result_name") + "_functionality_probability", - source='dataframe') - - self.set_result_csv_data("functionality_samples", - fs_results, - name=self.get_parameter("result_name") + "_functionality_samples", - source='dataframe') + fp_results = pd.DataFrame( + functionality_probabilities, columns=["guid", "probability"] + ) + fs_results = pd.DataFrame(functionality_samples, columns=["guid", "failure"]) + + self.set_result_csv_data( + "functionality_probability", + fp_results, + name=self.get_parameter("result_name") + "_functionality_probability", + source="dataframe", + ) + + self.set_result_csv_data( + "functionality_samples", + fs_results, + name=self.get_parameter("result_name") + "_functionality_samples", + source="dataframe", + ) return True - def functionality(self, building_guid, buildings, substations, poles, interdependency): + def functionality( + self, building_guid, buildings, substations, poles, interdependency + ): """ Args: @@ -164,10 +186,11 @@ def functionality(self, building_guid, buildings, substations, poles, interdepen # if building is defined 
in the interdependency lookup table if interdependency is not None: - if building_guid in interdependency.keys(): if substations is not None: - substations_mc_samples = substations.loc[interdependency[building_guid]["substations_guid"]] + substations_mc_samples = substations.loc[ + interdependency[building_guid]["substations_guid"] + ] substation_list = [] try: substation_list = substations_mc_samples["failure"].split(",") @@ -179,7 +202,9 @@ def functionality(self, building_guid, buildings, substations, poles, interdepen substation_list = None if poles is not None: - poles_mc_samples = poles.loc[interdependency[building_guid]["poles_guid"]] + poles_mc_samples = poles.loc[ + interdependency[building_guid]["poles_guid"] + ] pole_list = [] try: pole_list = poles_mc_samples["failure"].split(",") @@ -191,44 +216,71 @@ def functionality(self, building_guid, buildings, substations, poles, interdepen pole_list = None if substation_list is not None and pole_list is not None: - functionality_samples = [BuildingFunctionality._calc_functionality_samples(building_sample, - substation_sample, - pole_sample) - for building_sample, substation_sample, pole_sample in - zip(building_list, substation_list, pole_list)] + functionality_samples = [ + BuildingFunctionality._calc_functionality_samples( + building_sample, substation_sample, pole_sample + ) + for building_sample, substation_sample, pole_sample in zip( + building_list, substation_list, pole_list + ) + ] elif substation_list is not None: - functionality_samples = [BuildingFunctionality._calc_functionality_samples(building_sample, - substation_sample, - None) - for building_sample, substation_sample in - zip(building_list, substation_list)] + functionality_samples = [ + BuildingFunctionality._calc_functionality_samples( + building_sample, substation_sample, None + ) + for building_sample, substation_sample in zip( + building_list, substation_list + ) + ] elif pole_list is not None: - functionality_samples = [BuildingFunctionality._calc_functionality_samples(building_sample, - None, - pole_sample) - for building_sample, pole_sample in - zip(building_list, pole_list)] + functionality_samples = [ + BuildingFunctionality._calc_functionality_samples( + building_sample, None, pole_sample + ) + for building_sample, pole_sample in zip( + building_list, pole_list + ) + ] else: - functionality_samples = [BuildingFunctionality._calc_functionality_samples(building_sample, - None, - None) - for building_sample in building_list] - probability = BuildingFunctionality._calc_functionality_probability(functionality_samples) - return building_guid, ",".join([str(sample) for sample in functionality_samples]), probability + functionality_samples = [ + BuildingFunctionality._calc_functionality_samples( + building_sample, None, None + ) + for building_sample in building_list + ] + probability = BuildingFunctionality._calc_functionality_probability( + functionality_samples + ) + return ( + building_guid, + ",".join([str(sample) for sample in functionality_samples]), + probability, + ) else: return building_guid, "NA", "NA" # else if only building MC failure is available else: - functionality_samples = [BuildingFunctionality._calc_functionality_samples(building_sample) for - building_sample in building_list] - probability = BuildingFunctionality._calc_functionality_probability(functionality_samples) - return building_guid, ",".join([str(sample) for sample in functionality_samples]), probability + functionality_samples = [ + 
BuildingFunctionality._calc_functionality_samples(building_sample) + for building_sample in building_list + ] + probability = BuildingFunctionality._calc_functionality_probability( + functionality_samples + ) + return ( + building_guid, + ",".join([str(sample) for sample in functionality_samples]), + probability, + ) @staticmethod - def _calc_functionality_samples(building_sample, substation_sample=None, pole_sample=None): - """ This function is subject to change. For now, buildings have a 1-to-1 relationship with + def _calc_functionality_samples( + building_sample, substation_sample=None, pole_sample=None + ): + """This function is subject to change. For now, buildings have a 1-to-1 relationship with substations and poles, so it suffices to check that the poles and substations are up. Args: @@ -240,8 +292,11 @@ def _calc_functionality_samples(building_sample, substation_sample=None, pole_sa int: 1 if building is functional, 0 otherwise """ - if building_sample == "1" and (substation_sample == "1" or substation_sample is None) \ - and (pole_sample == "1" or pole_sample is None): + if ( + building_sample == "1" + and (substation_sample == "1" or substation_sample is None) + and (pole_sample == "1" or pole_sample is None) + ): return 1 else: return 0 @@ -252,6 +307,6 @@ def _calc_functionality_probability(functionality_samples): num_samples = len(functionality_samples) probability = 0.0 if functionality_sum > 0: - probability = (functionality_sum / num_samples) + probability = functionality_sum / num_samples return probability diff --git a/pyincore/analyses/buildingnonstructuraldamage/__init__.py b/pyincore/analyses/buildingnonstructuraldamage/__init__.py index bd8682b04..a74b6045e 100644 --- a/pyincore/analyses/buildingnonstructuraldamage/__init__.py +++ b/pyincore/analyses/buildingnonstructuraldamage/__init__.py @@ -5,5 +5,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuraldamage import BuildingNonStructDamage -from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuralutil import BuildingNonStructUtil +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuraldamage import ( + BuildingNonStructDamage, +) +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuralutil import ( + BuildingNonStructUtil, +) diff --git a/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py index 2a50d6623..2497f7456 100644 --- a/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py +++ b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py @@ -9,8 +9,9 @@ from pyincore import AnalysisUtil, GeoUtil from pyincore import BaseAnalysis, HazardService, FragilityService -from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuralutil import \ - BuildingNonStructUtil +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuralutil import ( + BuildingNonStructUtil, +) from pyincore.models.dfr3curve import DFR3Curve from pyincore.utils.datasetutil import DatasetUtil @@ -41,18 +42,26 @@ def run(self): dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") # Update the building inventory dataset if applicable - bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(building_dataset, - add_info_dataset=retrofit_strategy_dataset, - mapping=dfr3_mapping_set) + bldg_dataset, tmpdirname, _ = 
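# Illustrative sketch with made-up samples: the per-realisation AND rule used by
# _calc_functionality_samples and the probability that follows from it. In the
# method itself a missing substation or pole sample (None) counts as functional.
building = "1,0,1,1".split(",")
substation = "1,1,1,0".split(",")
pole = "1,1,0,1".split(",")

samples = [
    1 if b == "1" and s == "1" and p == "1" else 0
    for b, s, p in zip(building, substation, pole)
]                                            # [1, 0, 0, 0]
probability = sum(samples) / len(samples)    # 0.25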
DatasetUtil.construct_updated_inventories( + building_dataset, + add_info_dataset=retrofit_strategy_dataset, + mapping=dfr3_mapping_set, + ) building_set = bldg_dataset.get_inventory_reader() # get input hazard - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() # set Default Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: - self.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS) + self.set_parameter( + "fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS + ) # Set Default Hazard Uncertainty use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") @@ -66,10 +75,15 @@ def run(self): user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(building_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(building_set), user_defined_cpu + ) avg_bulk_input_size = int(len(building_set) / num_workers) inventory_args = [] @@ -77,20 +91,26 @@ def run(self): inventory_list = list(building_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input, - num_workers, - inventory_args, - repeat(hazard), - repeat(hazard_type), - repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("damage_result", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + (ds_results, damage_results) = self.building_damage_concurrent_future( + self.building_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "damage_result", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True def building_damage_concurrent_future(self, function_name, num_workers, *args): @@ -108,14 +128,18 @@ def building_damage_concurrent_future(self, function_name, num_workers, *args): """ output = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output.extend(ret1) output_dmg.extend(ret2) return output, output_dmg - def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, hazard_dataset_id): + def building_damage_analysis_bulk_input( + self, buildings, hazard, hazard_type, hazard_dataset_id + ): """Run analysis for multiple buildings. 
Args: @@ -135,13 +159,18 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") # get allowed demand types for the hazard type - allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( - hazard_type)] + allowed_demand_types = [ + item["demand_type"].lower() + for item in self.hazardsvc.get_allowed_demands(hazard_type) + ] building_results = [] damage_results = [] - fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, - self.get_parameter("fragility_key")) + fragility_sets = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), + buildings, + self.get_parameter("fragility_key"), + ) values_payload = [] values_payload_liq = [] mapped_buildings = [] @@ -154,13 +183,10 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha loc = str(location.y) + "," + str(location.x) # Acceleration-Sensitive - demands, units, _ = AnalysisUtil.get_hazard_demand_types_units(building, fragility_set, hazard_type, - allowed_demand_types) - value = { - "demands": demands, - "units": units, - "loc": loc - } + demands, units, _ = AnalysisUtil.get_hazard_demand_types_units( + building, fragility_set, hazard_type, allowed_demand_types + ) + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) # liquefaction @@ -168,7 +194,7 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha value_liq = { "demands": ["pgd"], # implied... "units": ["in"], - "loc": loc + "loc": loc, } values_payload_liq.append(value_liq) @@ -179,25 +205,29 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha del buildings # get hazard values and liquefaction - if hazard_type == 'earthquake': + if hazard_type == "earthquake": hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) # adjust dmg probability for liquefaction if use_liquefaction: if liq_geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, - liq_geology_dataset_id, - values_payload_liq) + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, liq_geology_dataset_id, values_payload_liq + ) else: - raise ValueError('Hazard does not support liquefaction! Check to make sure you defined the ' - 'liquefaction portion of your scenario earthquake.') - elif hazard_type == 'flood': + raise ValueError( + "Hazard does not support liquefaction! Check to make sure you defined the " + "liquefaction portion of your scenario earthquake." 
+ ) + elif hazard_type == "flood": hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) - elif hazard_type == 'hurricane': + elif hazard_type == "hurricane": # include hurricane flood hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) else: - raise ValueError("The provided hazard type is not supported yet by this analysis") + raise ValueError( + "The provided hazard type is not supported yet by this analysis" + ) # calculate LS and DS for i, building in enumerate(mapped_buildings): @@ -208,67 +238,86 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha # TODO this value needs to come from the hazard service # adjust dmg probability for hazard uncertainty if use_hazard_uncertainty: - raise ValueError('Uncertainty has not yet been implemented!') + raise ValueError("Uncertainty has not yet been implemented!") ############### if isinstance(fragility_set.fragility_curves[0], DFR3Curve): - hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) + hazard_vals = AnalysisUtil.update_precision_of_lists( + hazard_resp[i]["hazardValues"] + ) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] hval_dict = dict() for j, d in enumerate(fragility_set.demand_types): hval_dict[d] = hazard_vals[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): - building_args = fragility_set.construct_expression_args_from_inventory(building) - dmg_probability = fragility_set. \ - calculate_limit_state(hval_dict, inventory_type="building", - **building_args) + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_resp[i]["hazardValues"] + ): + building_args = ( + fragility_set.construct_expression_args_from_inventory(building) + ) + dmg_probability = fragility_set.calculate_limit_state( + hval_dict, inventory_type="building", **building_args + ) # adjust dmg probability for liquefaction - if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None: - liquefaction_dmg = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i][ - "groundFailureProb"]) + if ( + hazard_type == "earthquake" + and use_liquefaction + and liq_geology_dataset_id is not None + ): + liquefaction_dmg = AnalysisUtil.update_precision_of_lists( + liquefaction_resp[i]["groundFailureProb"] + ) dmg_probability = AnalysisUtil.update_precision_of_dicts( - BuildingNonStructUtil.adjust_damage_for_liquefaction(dmg_probability, - liquefaction_dmg)) - - dmg_interval = fragility_set.calculate_damage_interval(dmg_probability, - hazard_type=hazard_type, - inventory_type="building") + BuildingNonStructUtil.adjust_damage_for_liquefaction( + dmg_probability, liquefaction_dmg + ) + ) + + dmg_interval = fragility_set.calculate_damage_interval( + dmg_probability, + hazard_type=hazard_type, + inventory_type="building", + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." 
+ ) # put results in dictionary building_result = dict() - building_result['guid'] = building['properties']['guid'] + building_result["guid"] = building["properties"]["guid"] building_result.update(dmg_probability) building_result.update(dmg_interval) - hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) - building_result['haz_expose'] = hazard_exposure + hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values( + hazard_vals, hazard_type + ) + building_result["haz_expose"] = hazard_exposure # put damage results in dictionary damage_result = dict() - damage_result['guid'] = building['properties']['guid'] - damage_result['fragility_id'] = fragility_set.id - damage_result['demandtypes'] = demand_types - damage_result['demandunits'] = demand_units - damage_result['hazardtype'] = hazard_type - damage_result['hazardvals'] = hazard_vals + damage_result["guid"] = building["properties"]["guid"] + damage_result["fragility_id"] = fragility_set.id + damage_result["demandtypes"] = demand_types + damage_result["demandunits"] = demand_units + damage_result["hazardtype"] = hazard_type + damage_result["hazardvals"] = hazard_vals building_results.append(building_result) damage_results.append(damage_result) for building in unmapped_buildings: building_result = dict() - building_result['guid'] = building['properties']['guid'] + building_result["guid"] = building["properties"]["guid"] damage_result = dict() - damage_result['guid'] = building['properties']['guid'] - damage_result['fragility_id'] = None - damage_result['demandtypes'] = None - damage_result['demandunits'] = None - damage_result['hazardtype'] = None - damage_result['hazardvals'] = None + damage_result["guid"] = building["properties"]["guid"] + damage_result["fragility_id"] = None + damage_result["demandtypes"] = None + damage_result["demandunits"] = None + damage_result["hazardtype"] = None + damage_result["hazardvals"] = None building_results.append(building_result) damage_results.append(damage_result) @@ -283,100 +332,104 @@ def get_spec(self): """ return { - 'name': 'building-damage', - 'description': 'building damage analysis', - 'input_parameters': [ + "name": "building-damage", + "description": "building damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. earthquake, flood, hurricane)', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard Type (e.g. 
earthquake, flood, hurricane)", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Non-structural Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Non-structural Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, }, { - 'id': 'liq_geology_dataset_id', - 'required': False, - 'description': 'liquefaction geology dataset id, \ - if use liquefaction, you have to provide this id', - 'type': str + "id": "liq_geology_dataset_id", + "required": False, + "description": "liquefaction geology dataset id, \ + if use liquefaction, you have to provide this id", + "type": str, }, { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "flood", "hurricane"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "flood", "hurricane"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + "id": "buildings", + "required": True, + "description": "building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], }, { - 'id': 'retrofit_strategy', - 'required': False, - 'description': 'Building retrofit strategy that contains guid and retrofit method', - 'type': ['incore:retrofitStrategy'] - } + "id": "retrofit_strategy", + "required": False, + "description": "Building retrofit strategy that contains guid and retrofit method", + "type": ["incore:retrofitStrategy"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'buildings', - 'description': 'CSV file of damage states for building non-structural damage', - 'type': 'ergo:nsBuildingInventoryDamageVer4' + "id": "result", + "parent_type": "buildings", + "description": "CSV file of damage states for building non-structural damage", + "type": "ergo:nsBuildingInventoryDamageVer4", }, { - 'id': 'damage_result', - 'parent_type': 'buildings', - 'description': 'Json file with information about applied hazard value and fragility', - 'type': 
'incore:nsBuildingInventoryDamageSupplement' - } - ] + "id": "damage_result", + "parent_type": "buildings", + "description": "Json file with information about applied hazard value and fragility", + "type": "incore:nsBuildingInventoryDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py index 2b5775f34..731f76cc0 100644 --- a/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py +++ b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py @@ -9,18 +9,21 @@ class BuildingNonStructUtil: """Utility methods for the non-structural building damage analysis.""" + BUILDING_FRAGILITY_KEYSBUILDING_FRAGILITY_KEYS = { "drift-sensitive fragility id code": ["Drift Sensitive", "DS"], "parametric non-retrofit fragility id code": ["Parametric Non-Retrofit", "PNR"], "acceleration-sensitive fragility id code": ["Acceleration Sensitive", "AS"], - "non-retrofit fragility id code": ["as built", "none"] + "non-retrofit fragility id code": ["as built", "none"], } DEFAULT_FRAGILITY_KEY_DS = "Drift-Sensitive Fragility ID Code" DEFAULT_FRAGILITY_KEY_AS = "Acceleration-Sensitive Fragility ID Code" @staticmethod - def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_probabilities): + def adjust_damage_for_liquefaction( + limit_state_probabilities, ground_failure_probabilities + ): """Adjusts building damage probability based on liquefaction ground failure probability with the liq_dmg, we know that it is 3 values, the first two are the same. The 3rd might be different. @@ -44,19 +47,25 @@ def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_pro # second-to-last probability of ground failure instead. 
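# Illustrative sketch with hypothetical probabilities: the per-limit-state
# adjustment applied below is the union rule P_adj = P_ls + P_gf - P_ls * P_gf,
# pairing each limit state with a ground-failure probability.
limit_states = {"LS_0": 0.60, "LS_1": 0.35, "LS_2": 0.10}
ground_failure = [0.20, 0.20, 0.05]   # last value pairs with the last limit state

adjusted = {
    key: p + gf - p * gf
    for (key, p), gf in zip(limit_states.items(), ground_failure)
}
# {'LS_0': 0.68, 'LS_1': 0.48, 'LS_2': 0.145}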
if i > len(ground_failure_probabilities) - 1: - prob_ground_failure = ground_failure_probabilities[len(ground_failure_probabilities) - 2] + prob_ground_failure = ground_failure_probabilities[ + len(ground_failure_probabilities) - 2 + ] else: prob_ground_failure = ground_failure_probabilities[i] - adjusted_limit_state_probabilities[keys[i]] = \ - limit_state_probabilities[keys[i]] + prob_ground_failure \ + adjusted_limit_state_probabilities[keys[i]] = ( + limit_state_probabilities[keys[i]] + + prob_ground_failure - limit_state_probabilities[keys[i]] * prob_ground_failure + ) # the final one is the last of limitStates should match with the last of ground failures j = len(limit_state_probabilities) - 1 prob_ground_failure = ground_failure_probabilities[-1] - adjusted_limit_state_probabilities[keys[j]] = \ - limit_state_probabilities[keys[j]] \ - + prob_ground_failure - limit_state_probabilities[keys[j]] * prob_ground_failure + adjusted_limit_state_probabilities[keys[j]] = ( + limit_state_probabilities[keys[j]] + + prob_ground_failure + - limit_state_probabilities[keys[j]] * prob_ground_failure + ) return adjusted_limit_state_probabilities diff --git a/pyincore/analyses/buildingstructuraldamage/__init__.py b/pyincore/analyses/buildingstructuraldamage/__init__.py index e46d58b8d..3ac1fa5ac 100644 --- a/pyincore/analyses/buildingstructuraldamage/__init__.py +++ b/pyincore/analyses/buildingstructuraldamage/__init__.py @@ -6,4 +6,6 @@ from pyincore.analyses.buildingstructuraldamage.buildingutil import BuildingUtil -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) diff --git a/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py b/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py index ef3ef2993..14df4a9eb 100755 --- a/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py +++ b/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py @@ -8,8 +8,13 @@ import concurrent.futures from itertools import repeat -from pyincore import BaseAnalysis, HazardService, \ - FragilityService, AnalysisUtil, GeoUtil +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + AnalysisUtil, + GeoUtil, +) from pyincore.analyses.buildingstructuraldamage.buildingutil import BuildingUtil from pyincore.models.dfr3curve import DFR3Curve from pyincore.utils.datasetutil import DatasetUtil @@ -43,9 +48,11 @@ def run(self): dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") # Update the building inventory dataset if applicable - bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(bldg_dataset, - add_info_dataset=retrofit_strategy_dataset, - mapping=dfr3_mapping_set) + bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories( + bldg_dataset, + add_info_dataset=retrofit_strategy_dataset, + mapping=dfr3_mapping_set, + ) bldg_set = bldg_dataset.get_inventory_reader() @@ -60,47 +67,70 @@ def run(self): hazard_dataset_ids = [hazard_object.id] hazards = [hazard_object] # To use remote hazard - elif self.get_parameter("hazard_id") is not None and self.get_parameter("hazard_type") is not None: + elif ( + self.get_parameter("hazard_id") is not None + and self.get_parameter("hazard_type") is not None + ): hazard_dataset_ids = self.get_parameter("hazard_id").split("+") hazard_types = self.get_parameter("hazard_type").split("+") for 
hazard_type, hazard_dataset_id in zip(hazard_types, hazard_dataset_ids): - hazards.append(BaseAnalysis._create_hazard_object(hazard_type, hazard_dataset_id, self.hazardsvc)) + hazards.append( + BaseAnalysis._create_hazard_object( + hazard_type, hazard_dataset_id, self.hazardsvc + ) + ) else: - raise ValueError("Either hazard object or hazard id + hazard type must be provided") + raise ValueError( + "Either hazard object or hazard id + hazard type must be provided" + ) # Get Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: - fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if 'tsunami' in hazard_types else \ - BuildingUtil.DEFAULT_FRAGILITY_KEY + fragility_key = ( + BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY + if "tsunami" in hazard_types + else BuildingUtil.DEFAULT_FRAGILITY_KEY + ) self.set_parameter("fragility_key", fragility_key) user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(bldg_set), user_defined_cpu + ) avg_bulk_input_size = int(len(bldg_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(bldg_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input, - num_workers, - inventory_args, - repeat(hazards), - repeat(hazard_types), - repeat(hazard_dataset_ids)) - - self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("damage_result", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + (ds_results, damage_results) = self.building_damage_concurrent_future( + self.building_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazards), + repeat(hazard_types), + repeat(hazard_dataset_ids), + ) + + self.set_result_csv_data( + "ds_result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "damage_result", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) # clean up temp folder if applicable if tmpdirname is not None: @@ -122,14 +152,18 @@ def building_damage_concurrent_future(self, function_name, parallelism, *args): """ output_ds = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=parallelism + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, hazard_dataset_ids): + def building_damage_analysis_bulk_input( + self, buildings, hazards, hazard_types, hazard_dataset_ids + ): """Run analysis for multiple buildings. 
Args: @@ -144,8 +178,9 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, """ fragility_key = self.get_parameter("fragility_key") - fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, - fragility_key) + fragility_sets = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), buildings, fragility_key + ) use_liquefaction = False liquefaction_resp = None # Get geology dataset id containing liquefaction susceptibility @@ -154,13 +189,20 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, multihazard_vals = {} adjust_demand_types_mapping = {} - for hazard, hazard_type, hazard_dataset_id in zip(hazards, hazard_types, hazard_dataset_ids): + for hazard, hazard_type, hazard_dataset_id in zip( + hazards, hazard_types, hazard_dataset_ids + ): # get allowed demand types for the hazard type - allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( - hazard_type)] + allowed_demand_types = [ + item["demand_type"].lower() + for item in self.hazardsvc.get_allowed_demands(hazard_type) + ] # Liquefaction - if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None: + if ( + hazard_type == "earthquake" + and self.get_parameter("use_liquefaction") is not None + ): use_liquefaction = self.get_parameter("use_liquefaction") values_payload = [] @@ -174,25 +216,19 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, bldg_id = b["id"] location = GeoUtil.get_location(b) loc = str(location.y) + "," + str(location.x) - demands, units, adjusted_to_original = \ - AnalysisUtil.get_hazard_demand_types_units(b, - fragility_sets[bldg_id], - hazard_type, - allowed_demand_types) + ( + demands, + units, + adjusted_to_original, + ) = AnalysisUtil.get_hazard_demand_types_units( + b, fragility_sets[bldg_id], hazard_type, allowed_demand_types + ) adjust_demand_types_mapping.update(adjusted_to_original) - value = { - "demands": demands, - "units": units, - "loc": loc - } + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) if use_liquefaction and geology_dataset_id is not None: - value_liq = { - "demands": [""], - "units": [""], - "loc": loc - } + value_liq = {"demands": [""], "units": [""], "loc": loc} values_payload_liq.append(value_liq) hazard_vals = hazard.read_hazard_values(values_payload, self.hazardsvc) @@ -201,18 +237,27 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, # worst code I have ever written # e.g. 
1.04 Sec Sa --> 1.04 SA --> 1.0 SA for payload, response in zip(values_payload, hazard_vals): - adjust_demand_types_mapping.update({ - response_demand: adjust_demand_types_mapping[payload_demand] - for payload_demand, response_demand in zip(payload["demands"], response["demands"]) - }) + adjust_demand_types_mapping.update( + { + response_demand: adjust_demand_types_mapping[payload_demand] + for payload_demand, response_demand in zip( + payload["demands"], response["demands"] + ) + } + ) # record hazard value for each hazard type for later calcu multihazard_vals[hazard_type] = hazard_vals # Check if liquefaction is applicable - if hazard_type == "earthquake" and use_liquefaction and geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id, - values_payload_liq) + if ( + hazard_type == "earthquake" + and use_liquefaction + and geology_dataset_id is not None + ): + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload_liq + ) # not needed anymore as they are already split into mapped and unmapped del buildings @@ -238,8 +283,9 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, b_demands = dict() b_units = dict() for hazard_type in hazard_types: - b_haz_vals = AnalysisUtil.update_precision_of_lists(multihazard_vals[hazard_type][i][ - "hazardValues"]) + b_haz_vals = AnalysisUtil.update_precision_of_lists( + multihazard_vals[hazard_type][i]["hazardValues"] + ) b_demands[hazard_type] = multihazard_vals[hazard_type][i]["demands"] b_units[hazard_type] = multihazard_vals[hazard_type][i]["units"] b_multihaz_vals[hazard_type] = b_haz_vals @@ -247,7 +293,9 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, # instead of using what the hazard service returns. 
There could be a difference "SA" in DFR3 vs # "1.07 SA" from hazard j = 0 - for adjusted_demand_type in multihazard_vals[hazard_type][i]["demands"]: + for adjusted_demand_type in multihazard_vals[hazard_type][i][ + "demands" + ]: d = adjust_demand_types_mapping[adjusted_demand_type] hval_dict[d] = b_haz_vals[j] j += 1 @@ -255,31 +303,57 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, # catch any of the hazard values error hazard_values_errors = False for hazard_type in hazard_types: - hazard_values_errors = hazard_values_errors or AnalysisUtil.do_hazard_values_have_errors( - b_multihaz_vals[hazard_type]) + hazard_values_errors = ( + hazard_values_errors + or AnalysisUtil.do_hazard_values_have_errors( + b_multihaz_vals[hazard_type] + ) + ) if not hazard_values_errors: - building_args = selected_fragility_set.construct_expression_args_from_inventory(b) - - building_period = selected_fragility_set.fragility_curves[0].get_building_period( - selected_fragility_set.curve_parameters, **building_args) + building_args = ( + selected_fragility_set.construct_expression_args_from_inventory( + b + ) + ) + + building_period = selected_fragility_set.fragility_curves[ + 0 + ].get_building_period( + selected_fragility_set.curve_parameters, **building_args + ) dmg_probability = selected_fragility_set.calculate_limit_state( - hval_dict, **building_args, period=building_period) - - if use_liquefaction and geology_dataset_id is not None and liquefaction_resp is not None: - ground_failure_prob = liquefaction_resp[i][BuildingUtil.GROUND_FAILURE_PROB] + hval_dict, **building_args, period=building_period + ) + + if ( + use_liquefaction + and geology_dataset_id is not None + and liquefaction_resp is not None + ): + ground_failure_prob = liquefaction_resp[i][ + BuildingUtil.GROUND_FAILURE_PROB + ] dmg_probability = AnalysisUtil.update_precision_of_dicts( - AnalysisUtil.adjust_damage_for_liquefaction(dmg_probability, ground_failure_prob)) + AnalysisUtil.adjust_damage_for_liquefaction( + dmg_probability, ground_failure_prob + ) + ) dmg_interval = selected_fragility_set.calculate_damage_interval( - dmg_probability, hazard_type="+".join(hazard_types), inventory_type="building") + dmg_probability, + hazard_type="+".join(hazard_types), + inventory_type="building", + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." 
+ ) - ds_result['guid'] = b['properties']['guid'] - damage_result['guid'] = b['properties']['guid'] + ds_result["guid"] = b["properties"]["guid"] + damage_result["guid"] = b["properties"]["guid"] ds_result.update(dmg_probability) ds_result.update(dmg_interval) @@ -287,14 +361,15 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, # determine expose from multiple hazard haz_expose = False for hazard_type in hazard_types: - haz_expose = haz_expose or AnalysisUtil.get_exposure_from_hazard_values(b_multihaz_vals[ - hazard_type], hazard_type) - ds_result['haz_expose'] = haz_expose + haz_expose = haz_expose or AnalysisUtil.get_exposure_from_hazard_values( + b_multihaz_vals[hazard_type], hazard_type + ) + ds_result["haz_expose"] = haz_expose - damage_result['fragility_id'] = selected_fragility_set.id - damage_result['demandtype'] = b_demands - damage_result['demandunits'] = b_units - damage_result['hazardval'] = b_multihaz_vals + damage_result["fragility_id"] = selected_fragility_set.id + damage_result["demandtype"] = b_demands + damage_result["demandunits"] = b_units + damage_result["hazardval"] = b_multihaz_vals if use_liquefaction and geology_dataset_id is not None: damage_result[BuildingUtil.GROUND_FAILURE_PROB] = ground_failure_prob @@ -306,12 +381,12 @@ def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, for b in unmapped_buildings: ds_result = dict() damage_result = dict() - ds_result['guid'] = b['properties']['guid'] - damage_result['guid'] = b['properties']['guid'] - damage_result['fragility_id'] = None - damage_result['demandtype'] = None - damage_result['demandunits'] = None - damage_result['hazardval'] = None + ds_result["guid"] = b["properties"]["guid"] + damage_result["guid"] = b["properties"]["guid"] + damage_result["fragility_id"] = None + damage_result["demandtype"] = None + damage_result["demandunits"] = None + damage_result["hazardval"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -326,105 +401,109 @@ def get_spec(self): """ return { - 'name': 'building-damage', - 'description': 'building damage analysis', - 'input_parameters': [ + "name": "building-damage", + "description": "building damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. earthquake)', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard Type (e.g. 
earthquake)", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, }, { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the tornado hazard value', - 'type': int + "id": "seed", + "required": False, + "description": "Initial seed for the tornado hazard value", + "type": int, }, { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Geology dataset id', - 'type': str, - } + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Geology dataset id", + "type": str, + }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tornado", "hurricane", "flood", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "tornado", "hurricane", "flood", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], }, { - 'id': 'retrofit_strategy', - 'required': False, - 'description': 'Building retrofit strategy that contains guid and retrofit method', - 'type': ['incore:retrofitStrategy'] - } + "id": "retrofit_strategy", + "required": False, + "description": "Building retrofit strategy that contains guid and retrofit method", + "type": ["incore:retrofitStrategy"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'ds_result', - 'parent_type': 'buildings', - 'description': 'CSV file of damage states for building structural damage', - 'type': 'ergo:buildingDamageVer6' + "id": "ds_result", + "parent_type": "buildings", + "description": "CSV file of damage states for building structural damage", + "type": "ergo:buildingDamageVer6", }, { - 'id': 'damage_result', - 'parent_type': 'buildings', - 'description': 
'Json file with information about applied hazard value and fragility', - 'type': 'incore:buildingDamageSupplement' - } - ] + "id": "damage_result", + "parent_type": "buildings", + "description": "Json file with information about applied hazard value and fragility", + "type": "incore:buildingDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/buildingstructuraldamage/buildingutil.py b/pyincore/analyses/buildingstructuraldamage/buildingutil.py index 07d7a4094..38fa460bd 100644 --- a/pyincore/analyses/buildingstructuraldamage/buildingutil.py +++ b/pyincore/analyses/buildingstructuraldamage/buildingutil.py @@ -7,6 +7,7 @@ class BuildingUtil: """Utility methods for the building damage analysis.""" + DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code" DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY = "Non-Retrofit Inundation Fragility ID Code" DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY = "Non-Retrofit MomentumFlux Fragility ID Code" diff --git a/pyincore/analyses/buyoutdecision/buyoutdecision.py b/pyincore/analyses/buyoutdecision/buyoutdecision.py index 0fa352f77..0b7103fcd 100644 --- a/pyincore/analyses/buyoutdecision/buyoutdecision.py +++ b/pyincore/analyses/buyoutdecision/buyoutdecision.py @@ -22,27 +22,55 @@ def __init__(self, incore_client): def run(self): # Get input parameters - fema_buyout_cap = self.get_parameter('fema_buyout_cap') - residential_archetypes = self.get_parameter('residential_archetypes') + fema_buyout_cap = self.get_parameter("fema_buyout_cap") + residential_archetypes = self.get_parameter("residential_archetypes") # Get input datasets - past_building_damage = self.get_input_dataset('past_building_damage').get_dataframe_from_csv(low_memory=False) - future_building_damage = self.get_input_dataset('future_building_damage').get_dataframe_from_csv( - low_memory=False) + past_building_damage = self.get_input_dataset( + "past_building_damage" + ).get_dataframe_from_csv(low_memory=False) + future_building_damage = self.get_input_dataset( + "future_building_damage" + ).get_dataframe_from_csv(low_memory=False) - building_inventory = self.get_input_dataset('buildings').get_dataframe_from_shapefile() + building_inventory = self.get_input_dataset( + "buildings" + ).get_dataframe_from_shapefile() - hua = (self.get_input_dataset('housing_unit_allocation').get_dataframe_from_csv(low_memory=False)) - pop_dislocation = self.get_input_dataset('population_dislocation').get_dataframe_from_csv(low_memory=False) + hua = self.get_input_dataset("housing_unit_allocation").get_dataframe_from_csv( + low_memory=False + ) + pop_dislocation = self.get_input_dataset( + "population_dislocation" + ).get_dataframe_from_csv(low_memory=False) - buyout_decision_df = self.buyout_decision(past_building_damage, future_building_damage, building_inventory, hua, - pop_dislocation, fema_buyout_cap, residential_archetypes) + buyout_decision_df = self.buyout_decision( + past_building_damage, + future_building_damage, + building_inventory, + hua, + pop_dislocation, + fema_buyout_cap, + residential_archetypes, + ) # Create the result dataset - self.set_result_csv_data("result", buyout_decision_df, self.get_parameter("result_name") + "_loss", - "dataframe") + self.set_result_csv_data( + "result", + buyout_decision_df, + self.get_parameter("result_name") + "_loss", + "dataframe", + ) - def buyout_decision(self, past_building_damage, future_building_damage, building_inventory, hua, pop_dislocation, - fema_buyout_cap, residential_archetpyes): + def buyout_decision( + self, + past_building_damage, + future_building_damage, + 
building_inventory, + hua, + pop_dislocation, + fema_buyout_cap, + residential_archetpyes, + ): """Select households for buyout based on past and future flood damaged. Args: @@ -53,58 +81,131 @@ def buyout_decision(self, past_building_damage, future_building_damage, building pop_dislocation (DataFrame): Population dislocation from past hazard event. fema_buyout_cap (float): FEMA buyout cap. residential_archetpyes (list): Residential archetypes. - + Returns: buyout_decision_df (DataFrame): A dataframe with buyout decision for each household. """ - past_building_max_damage = DataProcessUtil.get_max_damage_state(past_building_damage) - future_building_max_damage = DataProcessUtil.get_max_damage_state(future_building_damage) + past_building_max_damage = DataProcessUtil.get_max_damage_state( + past_building_damage + ) + future_building_max_damage = DataProcessUtil.get_max_damage_state( + future_building_damage + ) # Criterion 1: Filter only residential buildings with damage state DS3 from past building damage - buyout_inventory = pd.merge(building_inventory, past_building_max_damage, on='guid', how='outer') - buyout_inventory = buyout_inventory[buyout_inventory['arch_wind'].isin(residential_archetpyes) - & (buyout_inventory['max_state'] == 'DS_3')] - buyout_inventory.rename(columns={'max_state': 'max_state_past_damage'}, inplace=True) + buyout_inventory = pd.merge( + building_inventory, past_building_max_damage, on="guid", how="outer" + ) + buyout_inventory = buyout_inventory[ + buyout_inventory["arch_wind"].isin(residential_archetpyes) + & (buyout_inventory["max_state"] == "DS_3") + ] + buyout_inventory.rename( + columns={"max_state": "max_state_past_damage"}, inplace=True + ) # Criterion 2: Filter only residential buildings with damage state DS3 from predicted future building damage - buyout_inventory = pd.merge(buyout_inventory, future_building_max_damage, on='guid', how='inner') - buyout_inventory = buyout_inventory[buyout_inventory['max_state'] == 'DS_3'] - buyout_inventory.rename(columns={'max_state': 'max_state_future_damage'}, inplace=True) + buyout_inventory = pd.merge( + buyout_inventory, future_building_max_damage, on="guid", how="inner" + ) + buyout_inventory = buyout_inventory[buyout_inventory["max_state"] == "DS_3"] + buyout_inventory.rename( + columns={"max_state": "max_state_future_damage"}, inplace=True + ) # Criterion 3: Fall within the FEMA buyout cap - buyout_inventory = buyout_inventory[buyout_inventory['appr_bldg'] <= fema_buyout_cap] buyout_inventory = buyout_inventory[ - ["guid", "appr_bldg", "max_state_future_damage", "max_state_past_damage", "geometry"]] + buyout_inventory["appr_bldg"] <= fema_buyout_cap + ] + buyout_inventory = buyout_inventory[ + [ + "guid", + "appr_bldg", + "max_state_future_damage", + "max_state_past_damage", + "geometry", + ] + ] # Criterion 4: Use HUA to filter out buildings with 0 occupants - buyout_inventory = pd.merge(buyout_inventory, hua, on='guid', how='left') - buyout_inventory = buyout_inventory[(buyout_inventory['numprec'] != 0) & (~buyout_inventory['numprec'].isna())] + buyout_inventory = pd.merge(buyout_inventory, hua, on="guid", how="left") + buyout_inventory = buyout_inventory[ + (buyout_inventory["numprec"] != 0) & (~buyout_inventory["numprec"].isna()) + ] # Removing any rows with NAN values in column "Race" - buyout_inventory = buyout_inventory.dropna(subset=['race']) + buyout_inventory = buyout_inventory.dropna(subset=["race"]) # Merging with population dislocation - buyout_inventory = pd.merge(buyout_inventory, 
pop_dislocation[['huid', 'dislocated']], on='huid', how='left') + buyout_inventory = pd.merge( + buyout_inventory, + pop_dislocation[["huid", "dislocated"]], + on="huid", + how="left", + ) # Create a new column showing the appraisal value of each building ('appr_bldg' divided by the number of times # a guid is repeated) # For the instances that a structure has more than one housing units. - buyout_inventory['count'] = buyout_inventory.groupby('guid')['guid'].transform('count') - buyout_inventory['housing_unit_appraisal_value'] = buyout_inventory['appr_bldg'] / buyout_inventory['count'] + buyout_inventory["count"] = buyout_inventory.groupby("guid")["guid"].transform( + "count" + ) + buyout_inventory["housing_unit_appraisal_value"] = ( + buyout_inventory["appr_bldg"] / buyout_inventory["count"] + ) # Cleaning the dataframe - buyout_inventory.drop(['blockid', 'bgid', 'tractid', 'FIPScounty', - 'gqtype', 'BLOCKID10_str', 'placeNAME10', 'geometry_y'], axis=1, inplace=True) - buyout_inventory.rename(columns={'appr_bldg': 'building_appraisal_value', 'ownershp': 'ownership', - 'dislocated_combined_dmg': 'dislocated', 'count': 'number_of_housing_units', - 'geometry_x': 'geometry'}, - inplace=True) + buyout_inventory.drop( + [ + "blockid", + "bgid", + "tractid", + "FIPScounty", + "gqtype", + "BLOCKID10_str", + "placeNAME10", + "geometry_y", + ], + axis=1, + inplace=True, + ) + buyout_inventory.rename( + columns={ + "appr_bldg": "building_appraisal_value", + "ownershp": "ownership", + "dislocated_combined_dmg": "dislocated", + "count": "number_of_housing_units", + "geometry_x": "geometry", + }, + inplace=True, + ) buyout_inventory = buyout_inventory[ - ['guid', 'huid', 'building_appraisal_value', 'housing_unit_appraisal_value', 'geometry', - 'number_of_housing_units', 'numprec', 'ownership', 'race', 'hispan', 'family', 'vacancy', 'incomegroup', - 'hhinc', 'randincome', 'poverty', 'huestimate', 'dislocated', 'max_state_future_damage', - 'max_state_past_damage', 'x', 'y', ]] + [ + "guid", + "huid", + "building_appraisal_value", + "housing_unit_appraisal_value", + "geometry", + "number_of_housing_units", + "numprec", + "ownership", + "race", + "hispan", + "family", + "vacancy", + "incomegroup", + "hhinc", + "randincome", + "poverty", + "huestimate", + "dislocated", + "max_state_future_damage", + "max_state_past_damage", + "x", + "y", + ] + ] return buyout_inventory @@ -114,64 +215,68 @@ def get_spec(self): "description": "Buyout decision framework", "input_parameters": [ { - 'id': 'fema_buyout_cap', - 'required': True, - 'description': 'FEMA buyout cap', - 'type': float, + "id": "fema_buyout_cap", + "required": True, + "description": "FEMA buyout cap", + "type": float, }, { - 'id': 'residential_archetypes', - 'required': True, - 'description': 'Residential archetypes', - 'type': list, + "id": "residential_archetypes", + "required": True, + "description": "Residential archetypes", + "type": list, }, { - 'id': 'result_name', - 'required': True, - 'description': 'Result name', - 'type': str, - } + "id": "result_name", + "required": True, + "description": "Result name", + "type": str, + }, ], "input_datasets": [ { - 'id': 'past_building_damage', - 'required': True, - 'description': 'Building Damage Results', - 'type': ['ergo:buildingDamageVer6'], + "id": "past_building_damage", + "required": True, + "description": "Building Damage Results", + "type": ["ergo:buildingDamageVer6"], }, { - 'id': 'future_building_damage', - 'required': True, - 'description': 'Building Damage Results', - 'type': 
['ergo:buildingDamageVer6'], + "id": "future_building_damage", + "required": True, + "description": "Building Damage Results", + "type": ["ergo:buildingDamageVer6"], }, { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'housing_unit_allocation', - 'required': True, - 'description': 'A csv file with the merged dataset of the inputs, aka Probabilistic' - 'House Unit Allocation', - 'type': ['incore:housingUnitAllocation'] + "id": "housing_unit_allocation", + "required": True, + "description": "A csv file with the merged dataset of the inputs, aka Probabilistic" + "House Unit Allocation", + "type": ["incore:housingUnitAllocation"], }, { - 'id': 'population_dislocation', - 'required': True, - 'description': 'Population Dislocation from past hazard event', - 'type': ['incore:popDislocation'] - } + "id": "population_dislocation", + "required": True, + "description": "Population Dislocation from past hazard event", + "type": ["incore:popDislocation"], + }, ], "output_datasets": [ { - 'id': 'result', - 'label': 'Buyout Decision Results', - 'description': 'Buyout Decision Results', - 'type': ['incore:buyoutDecision'] + "id": "result", + "label": "Buyout Decision Results", + "description": "Buyout Decision Results", + "type": ["incore:buyoutDecision"], } - ] + ], } diff --git a/pyincore/analyses/capitalshocks/capitalshocks.py b/pyincore/analyses/capitalshocks/capitalshocks.py index a413832a7..02a15da8c 100644 --- a/pyincore/analyses/capitalshocks/capitalshocks.py +++ b/pyincore/analyses/capitalshocks/capitalshocks.py @@ -22,80 +22,98 @@ def __init__(self, incore_client): def get_spec(self): return { - 'name': 'Capital Shocks', - 'description': 'Capital Shocks generation for cge models.', - 'input_parameters': [ + "name": "Capital Shocks", + "description": "Capital Shocks generation for cge models.", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'Result dataset name.', - 'type': str + "id": "result_name", + "required": True, + "description": "Result dataset name.", + "type": str, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer5', 'ergo:buildingInventoryVer6', - 'ergo:buildingInventoryVer7'] + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'buildings_to_sectors', - 'required': True, - 'description': 'Mapping of buildings to economic sectors.', - 'type': ['incore:buildingsToSectors'] + "id": "buildings_to_sectors", + "required": True, + "description": "Mapping of buildings to economic sectors.", + "type": ["incore:buildingsToSectors"], }, { - 'id': 'failure_probability', - 'required': True, - 'description': 'Failure probability of buildings.', - 'type': ['incore:failureProbability'] - } + "id": "failure_probability", + "required": True, + "description": "Failure probability of buildings.", + "type": ["incore:failureProbability"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 
'sector_shocks', - 'required': True, - 'description': 'Aggregation of building functionality states to capital shocks per sector', - 'type': 'incore:capitalShocks' + "id": "sector_shocks", + "required": True, + "description": "Aggregation of building functionality states to capital shocks per sector", + "type": "incore:capitalShocks", } - ] + ], } def run(self): buildings = self.get_input_dataset("buildings").get_inventory_reader() buildings_df = pd.DataFrame(list(buildings)) - failure_probability = self.get_input_dataset("failure_probability").get_dataframe_from_csv() - buildings_to_sectors = self.get_input_dataset("buildings_to_sectors").get_dataframe_from_csv() + failure_probability = self.get_input_dataset( + "failure_probability" + ).get_dataframe_from_csv() + buildings_to_sectors = self.get_input_dataset( + "buildings_to_sectors" + ).get_dataframe_from_csv() # drop buildings with no sector - buildings_to_sectors = buildings_to_sectors[pd.notnull(buildings_to_sectors['sector'])] + buildings_to_sectors = buildings_to_sectors[ + pd.notnull(buildings_to_sectors["sector"]) + ] building_inventory = pd.DataFrame.from_records(buildings_df["properties"]) # drop buildings with no appraisal value - building_inventory = building_inventory[pd.notnull(building_inventory['appr_bldg'])] - building_inventory['appr_bldg'] = building_inventory['appr_bldg'].astype(float) + building_inventory = building_inventory[ + pd.notnull(building_inventory["appr_bldg"]) + ] + building_inventory["appr_bldg"] = building_inventory["appr_bldg"].astype(float) # drop buildings with no failure probability - failure_probability = failure_probability[pd.notnull(failure_probability['failure_probability'])] + failure_probability = failure_probability[ + pd.notnull(failure_probability["failure_probability"]) + ] - inventory_failure = pd.merge(building_inventory, failure_probability, on='guid') - inventory_failure = pd.merge(inventory_failure, buildings_to_sectors, on='guid') - inventory_failure['cap_rem'] = inventory_failure.appr_bldg * ( - 1 - inventory_failure.failure_probability) + inventory_failure = pd.merge(building_inventory, failure_probability, on="guid") + inventory_failure = pd.merge(inventory_failure, buildings_to_sectors, on="guid") + inventory_failure["cap_rem"] = inventory_failure.appr_bldg * ( + 1 - inventory_failure.failure_probability + ) sectors = buildings_to_sectors.sector.unique() sector_shocks = {} for sector in sectors: - sector_values = inventory_failure.loc[(inventory_failure['sector'] == sector)] - sector_cap = sector_values['cap_rem'].sum() - sector_total = sector_values['appr_bldg'].sum() + sector_values = inventory_failure.loc[ + (inventory_failure["sector"] == sector) + ] + sector_cap = sector_values["cap_rem"].sum() + sector_total = sector_values["appr_bldg"].sum() if sector_total == 0: continue sector_shock = np.divide(sector_cap, sector_total) sector_shocks[sector] = sector_shock - sector_shocks = pd.DataFrame(sector_shocks.items(), columns=['sector', 'shock']) + sector_shocks = pd.DataFrame(sector_shocks.items(), columns=["sector", "shock"]) result_name = self.get_parameter("result_name") - self.set_result_csv_data("sector_shocks", sector_shocks, name=result_name, source="dataframe") + self.set_result_csv_data( + "sector_shocks", sector_shocks, name=result_name, source="dataframe" + ) return True diff --git a/pyincore/analyses/combinedwindwavesurgebuildingdamage/__init__.py b/pyincore/analyses/combinedwindwavesurgebuildingdamage/__init__.py index 8cd65f609..c82f9e002 100644 --- 
a/pyincore/analyses/combinedwindwavesurgebuildingdamage/__init__.py +++ b/pyincore/analyses/combinedwindwavesurgebuildingdamage/__init__.py @@ -5,5 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.combinedwindwavesurgebuildingdamage.combinedwindwavesurgebuildingdamage import CombinedWindWaveSurgeBuildingDamage - +from pyincore.analyses.combinedwindwavesurgebuildingdamage.combinedwindwavesurgebuildingdamage import ( + CombinedWindWaveSurgeBuildingDamage, +) diff --git a/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py b/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py index a153f40aa..f60a6ce01 100755 --- a/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py +++ b/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py @@ -10,9 +10,9 @@ class CombinedWindWaveSurgeBuildingDamage(BaseAnalysis): - """ Determines overall building maximum damage state from wind, flood and surge-wave damage + """Determines overall building maximum damage state from wind, flood and surge-wave damage and uses the maximum damage probabilities from the 3 damages to determine overall damage - + Args: incore_client (IncoreClient): Service authentication. """ @@ -26,106 +26,214 @@ def run(self): wind_damage = self.get_input_dataset("wind_damage").get_dataframe_from_csv() # Read Building surge-wave damage - surge_wave_damage = self.get_input_dataset("surge_wave_damage").get_dataframe_from_csv() + surge_wave_damage = self.get_input_dataset( + "surge_wave_damage" + ).get_dataframe_from_csv() # Read Building flood damage flood_damage = self.get_input_dataset("flood_damage").get_dataframe_from_csv() wind_max_damage = DataProcessUtil.get_max_damage_state(wind_damage) - wind_max_damage.rename(columns={'max_state': 'w_max_ds', 'max_prob': 'w_maxprob'}, inplace=True) + wind_max_damage.rename( + columns={"max_state": "w_max_ds", "max_prob": "w_maxprob"}, inplace=True + ) surge_wave_max_damage = DataProcessUtil.get_max_damage_state(surge_wave_damage) - surge_wave_max_damage.rename(columns={'max_state': 'sw_max_ds', 'max_prob': 'sw_maxprob'}, inplace=True) + surge_wave_max_damage.rename( + columns={"max_state": "sw_max_ds", "max_prob": "sw_maxprob"}, inplace=True + ) flood_max_damage = DataProcessUtil.get_max_damage_state(flood_damage) - flood_max_damage.rename(columns={'max_state': 'f_max_ds', 'max_prob': 'f_maxprob'}, inplace=True) + flood_max_damage.rename( + columns={"max_state": "f_max_ds", "max_prob": "f_maxprob"}, inplace=True + ) + + combined_output = pd.merge( + pd.merge(wind_max_damage, surge_wave_max_damage, on="guid"), + flood_max_damage, + on="guid", + ) - combined_output = pd.merge(pd.merge(wind_max_damage, surge_wave_max_damage, on='guid'), flood_max_damage, - on='guid') - # Replace DS strings with integers to find maximum damage state - replace_vals_int = {'DS_0': 0, 'DS_1': 1, 'DS_2': 2, 'DS_3': 3} - combined_output = combined_output.apply(lambda x: x.replace(replace_vals_int, regex=True)) + replace_vals_int = {"DS_0": 0, "DS_1": 1, "DS_2": 2, "DS_3": 3} + combined_output = combined_output.apply( + lambda x: x.replace(replace_vals_int, regex=True) + ) # Find maximum among the max_ds columns - max_damage_states = ['w_max_ds', 'sw_max_ds', 'f_max_ds'] + max_damage_states = ["w_max_ds", "sw_max_ds", "f_max_ds"] max_val = combined_output[max_damage_states].max(axis=1) # Add maximum of the max damage states - 
combined_output['max_state'] = max_val + combined_output["max_state"] = max_val # Replace integers with DS strings old_ds_vals = [0, 1, 2, 3] - new_ds_vals = ['DS_0', 'DS_1', 'DS_2', 'DS_3'] + new_ds_vals = ["DS_0", "DS_1", "DS_2", "DS_3"] # Put DS strings back in the final output before storing - combined_output['w_max_ds'] = combined_output['w_max_ds'].replace(old_ds_vals, new_ds_vals) - combined_output['sw_max_ds'] = combined_output['sw_max_ds'].replace(old_ds_vals, new_ds_vals) - combined_output['f_max_ds'] = combined_output['f_max_ds'].replace(old_ds_vals, new_ds_vals) - combined_output['max_state'] = combined_output['max_state'].replace(old_ds_vals, new_ds_vals) + combined_output["w_max_ds"] = combined_output["w_max_ds"].replace( + old_ds_vals, new_ds_vals + ) + combined_output["sw_max_ds"] = combined_output["sw_max_ds"].replace( + old_ds_vals, new_ds_vals + ) + combined_output["f_max_ds"] = combined_output["f_max_ds"].replace( + old_ds_vals, new_ds_vals + ) + combined_output["max_state"] = combined_output["max_state"].replace( + old_ds_vals, new_ds_vals + ) # Find combined damage - combined_bldg_dmg = self.get_combined_damage(wind_damage, surge_wave_damage, flood_damage) + combined_bldg_dmg = self.get_combined_damage( + wind_damage, surge_wave_damage, flood_damage + ) # Create the result containing the 3 combined damages into a single damage - self.set_result_csv_data("ds_result", combined_bldg_dmg, self.get_parameter("result_name") + "_combined_dmg", - "dataframe") + self.set_result_csv_data( + "ds_result", + combined_bldg_dmg, + self.get_parameter("result_name") + "_combined_dmg", + "dataframe", + ) # Create the result dataset - self.set_result_csv_data("result", combined_output, self.get_parameter("result_name") + "_max_state", "dataframe") + self.set_result_csv_data( + "result", + combined_output, + self.get_parameter("result_name") + "_max_state", + "dataframe", + ) return True - - def get_combined_damage(self, wind_dmg: pd.DataFrame, sw_dmg: pd.DataFrame, flood_dmg: pd.DataFrame): - """Calculates overall building damage - Determines the overall building damage probabilities from the 3 hazards by taking the maximum. - Args: - wind_dmg (pd.DataFrame): Table of wind damage for the building inventory - sw_dmg (pd.DataFrame): Table of surge-wave damage for the building inventory - flood_dmg (pd.DataFrame): Table of flood damage for the building inventory - - Returns: - pd.DataFrame: An table of combined damage probabilities for the building inventory - - """ - flood_dmg.rename(columns={'LS_0': 'f_LS_0', 'LS_1': 'f_LS_1', 'LS_2': 'f_LS_2', 'DS_0': 'f_DS_0', - 'DS_1': 'f_DS_1', 'DS_2': 'f_DS_2', 'DS_3': 'f_DS_3', 'haz_expose': 'f_haz_expose'}, - inplace=True) + def get_combined_damage( + self, wind_dmg: pd.DataFrame, sw_dmg: pd.DataFrame, flood_dmg: pd.DataFrame + ): + """Calculates overall building damage + Determines the overall building damage probabilities from the 3 hazards by taking the maximum. 
- sw_dmg.rename(columns={'LS_0': 'sw_LS_0', 'LS_1': 'sw_LS_1', 'LS_2': 'sw_LS_2', 'DS_0': 'sw_DS_0', - 'DS_1': 'sw_DS_1', 'DS_2': 'sw_DS_2', 'DS_3': 'sw_DS_3', 'haz_expose': - 'sw_haz_expose'}, inplace=True) + Args: + wind_dmg (pd.DataFrame): Table of wind damage for the building inventory + sw_dmg (pd.DataFrame): Table of surge-wave damage for the building inventory + flood_dmg (pd.DataFrame): Table of flood damage for the building inventory - wind_dmg.rename(columns={'LS_0': 'w_LS_0', 'LS_1': 'w_LS_1', 'LS_2': 'w_LS_2', 'DS_0': 'w_DS_0', - 'DS_1': 'w_DS_1', 'DS_2': 'w_DS_2', 'DS_3': 'w_DS_3', 'haz_expose': 'w_haz_expose'}, - inplace=True) + Returns: + pd.DataFrame: An table of combined damage probabilities for the building inventory - combined_df = pd.merge(pd.merge(wind_dmg, sw_dmg, on='guid'), flood_dmg, on='guid') + """ + flood_dmg.rename( + columns={ + "LS_0": "f_LS_0", + "LS_1": "f_LS_1", + "LS_2": "f_LS_2", + "DS_0": "f_DS_0", + "DS_1": "f_DS_1", + "DS_2": "f_DS_2", + "DS_3": "f_DS_3", + "haz_expose": "f_haz_expose", + }, + inplace=True, + ) + + sw_dmg.rename( + columns={ + "LS_0": "sw_LS_0", + "LS_1": "sw_LS_1", + "LS_2": "sw_LS_2", + "DS_0": "sw_DS_0", + "DS_1": "sw_DS_1", + "DS_2": "sw_DS_2", + "DS_3": "sw_DS_3", + "haz_expose": "sw_haz_expose", + }, + inplace=True, + ) + + wind_dmg.rename( + columns={ + "LS_0": "w_LS_0", + "LS_1": "w_LS_1", + "LS_2": "w_LS_2", + "DS_0": "w_DS_0", + "DS_1": "w_DS_1", + "DS_2": "w_DS_2", + "DS_3": "w_DS_3", + "haz_expose": "w_haz_expose", + }, + inplace=True, + ) + + combined_df = pd.merge( + pd.merge(wind_dmg, sw_dmg, on="guid"), flood_dmg, on="guid" + ) def find_match(row, col_name): max_finder = { - row['f_DS_3']: 'f_', - row['w_DS_3']: 'w_', - row['sw_DS_3']: 'sw_' + row["f_DS_3"]: "f_", + row["w_DS_3"]: "w_", + row["sw_DS_3"]: "sw_", } return row[max_finder[max(max_finder.keys())] + col_name] - combined_df['LS_0'] = combined_df.apply(lambda x: find_match(x, col_name="LS_0"), axis=1) - combined_df['LS_1'] = combined_df.apply(lambda x: find_match(x, col_name="LS_1"), axis=1) - combined_df['LS_2'] = combined_df.apply(lambda x: find_match(x, col_name="LS_2"), axis=1) - combined_df['DS_0'] = combined_df.apply(lambda x: find_match(x, col_name="DS_0"), axis=1) - combined_df['DS_1'] = combined_df.apply(lambda x: find_match(x, col_name="DS_1"), axis=1) - combined_df['DS_2'] = combined_df.apply(lambda x: find_match(x, col_name="DS_2"), axis=1) - combined_df['DS_3'] = combined_df.apply(lambda x: find_match(x, col_name="DS_3"), axis=1) - combined_df['haz_expose'] = combined_df.apply(lambda x: find_match(x, col_name="haz_expose"), axis=1) + combined_df["LS_0"] = combined_df.apply( + lambda x: find_match(x, col_name="LS_0"), axis=1 + ) + combined_df["LS_1"] = combined_df.apply( + lambda x: find_match(x, col_name="LS_1"), axis=1 + ) + combined_df["LS_2"] = combined_df.apply( + lambda x: find_match(x, col_name="LS_2"), axis=1 + ) + combined_df["DS_0"] = combined_df.apply( + lambda x: find_match(x, col_name="DS_0"), axis=1 + ) + combined_df["DS_1"] = combined_df.apply( + lambda x: find_match(x, col_name="DS_1"), axis=1 + ) + combined_df["DS_2"] = combined_df.apply( + lambda x: find_match(x, col_name="DS_2"), axis=1 + ) + combined_df["DS_3"] = combined_df.apply( + lambda x: find_match(x, col_name="DS_3"), axis=1 + ) + combined_df["haz_expose"] = combined_df.apply( + lambda x: find_match(x, col_name="haz_expose"), axis=1 + ) # Remove extra columns that are no longer needed - combined_df.drop(['w_LS_0', 'w_LS_1', 'w_LS_2', 'sw_LS_0', 'sw_LS_1', 'sw_LS_2', 
'f_LS_0', 'f_LS_1', - 'f_LS_2', 'w_DS_0', 'w_DS_1', 'w_DS_2', 'w_DS_3', 'sw_DS_0', 'sw_DS_1', 'sw_DS_2', 'sw_DS_3', - 'f_DS_0', 'f_DS_1', 'f_DS_2', 'f_DS_3', 'w_haz_expose', 'sw_haz_expose', 'f_haz_expose'], - axis=1, inplace=True) + combined_df.drop( + [ + "w_LS_0", + "w_LS_1", + "w_LS_2", + "sw_LS_0", + "sw_LS_1", + "sw_LS_2", + "f_LS_0", + "f_LS_1", + "f_LS_2", + "w_DS_0", + "w_DS_1", + "w_DS_2", + "w_DS_3", + "sw_DS_0", + "sw_DS_1", + "sw_DS_2", + "sw_DS_3", + "f_DS_0", + "f_DS_1", + "f_DS_2", + "f_DS_3", + "w_haz_expose", + "sw_haz_expose", + "f_haz_expose", + ], + axis=1, + inplace=True, + ) return combined_df @@ -137,49 +245,48 @@ def get_spec(self): """ return { - 'name': 'combined-wind-wave-surge-building-damage', - 'description': 'Combined wind wave and surge building damage analysis', - 'input_parameters': [ + "name": "combined-wind-wave-surge-building-damage", + "description": "Combined wind wave and surge building damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'wind_damage', - 'required': True, - 'description': 'Wind damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer6'] + "id": "wind_damage", + "required": True, + "description": "Wind damage result that has damage intervals in it", + "type": ["ergo:buildingDamageVer6"], }, { - 'id': 'surge_wave_damage', - 'required': True, - 'description': 'Surge-wave damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer6'] + "id": "surge_wave_damage", + "required": True, + "description": "Surge-wave damage result that has damage intervals in it", + "type": ["ergo:buildingDamageVer6"], }, { - 'id': 'flood_damage', - 'required': True, - 'description': 'Flood damage result that has damage intervals in it', - 'type': ['ergo:nsBuildingInventoryDamageVer4'] + "id": "flood_damage", + "required": True, + "description": "Flood damage result that has damage intervals in it", + "type": ["ergo:nsBuildingInventoryDamageVer4"], }, - ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'ds_result', - 'parent_type': 'buildings', - 'description': 'CSV file of damage states for building structural damage', - 'type': 'ergo:buildingDamageVer6' + "id": "ds_result", + "parent_type": "buildings", + "description": "CSV file of damage states for building structural damage", + "type": "ergo:buildingDamageVer6", }, { - 'id': 'result', - 'parent_type': 'buildings', - 'description': 'CSV file of building maximum damage state', - 'type': 'incore:maxDamageState' - } - ] + "id": "result", + "parent_type": "buildings", + "description": "CSV file of building maximum damage state", + "type": "incore:maxDamageState", + }, + ], } diff --git a/pyincore/analyses/combinedwindwavesurgebuildingloss/__init__.py b/pyincore/analyses/combinedwindwavesurgebuildingloss/__init__.py index 47de6aefe..b1e2a0fbc 100644 --- a/pyincore/analyses/combinedwindwavesurgebuildingloss/__init__.py +++ b/pyincore/analyses/combinedwindwavesurgebuildingloss/__init__.py @@ -5,6 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.combinedwindwavesurgebuildingloss.combinedwindwavesurgebuildingloss import \ - CombinedWindWaveSurgeBuildingLoss - +from pyincore.analyses.combinedwindwavesurgebuildingloss.combinedwindwavesurgebuildingloss import ( + 
CombinedWindWaveSurgeBuildingLoss, +) diff --git a/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py b/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py index 4da14a672..227fd2277 100755 --- a/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py +++ b/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py @@ -10,7 +10,7 @@ class CombinedWindWaveSurgeBuildingLoss(BaseAnalysis): - """ + """ This analysis computes the building structural and content loss from wind, flood and surge-wave damage Contributors @@ -21,7 +21,7 @@ class CombinedWindWaveSurgeBuildingLoss(BaseAnalysis): Nofal, Omar & Lindt, John & Do, Trung & Yan, Guirong & Hamideh, Sara & Cox, Daniel & Dietrich, Joel. (2021). Methodology for Regional Multi-Hazard Hurricane Damage and Risk Assessment. Journal of Structural Engineering. 147. 04021185. 10.1061/(ASCE)ST.1943-541X.0003144. - + Args: incore_client (IncoreClient): Service authentication. """ @@ -39,7 +39,9 @@ def run(self): wind_damage = self.get_input_dataset("wind_damage").get_dataframe_from_csv() # Read Building surge-wave damage - surge_wave_damage = self.get_input_dataset("surge_wave_damage").get_dataframe_from_csv() + surge_wave_damage = self.get_input_dataset( + "surge_wave_damage" + ).get_dataframe_from_csv() # Read Building flood damage flood_damage = self.get_input_dataset("flood_damage").get_dataframe_from_csv() @@ -48,98 +50,190 @@ def run(self): content_cost = self.get_input_dataset("content_cost").get_dataframe_from_csv() # Read cumulative replacement cost ratio of structural damage - structure_cost = self.get_input_dataset("structural_cost").get_dataframe_from_csv() - - combined_loss = self.get_combined_loss(wind_damage, surge_wave_damage, flood_damage, buildings, content_cost, - structure_cost) + structure_cost = self.get_input_dataset( + "structural_cost" + ).get_dataframe_from_csv() + + combined_loss = self.get_combined_loss( + wind_damage, + surge_wave_damage, + flood_damage, + buildings, + content_cost, + structure_cost, + ) # Create the result dataset - self.set_result_csv_data("result", combined_loss, self.get_parameter("result_name") + "_loss", - "dataframe") + self.set_result_csv_data( + "result", + combined_loss, + self.get_parameter("result_name") + "_loss", + "dataframe", + ) return True - - def get_combined_loss(self, wind_dmg: pd.DataFrame, sw_dmg: pd.DataFrame, flood_dmg: pd.DataFrame, buildings: - pd.DataFrame, content_cost: pd.DataFrame, structure_cost: pd.DataFrame): + + def get_combined_loss( + self, + wind_dmg: pd.DataFrame, + sw_dmg: pd.DataFrame, + flood_dmg: pd.DataFrame, + buildings: pd.DataFrame, + content_cost: pd.DataFrame, + structure_cost: pd.DataFrame, + ): """Calculates structural and content loss from wind, surge-wave and flood damage - Args: - wind_dmg (pd.DataFrame): Table of wind damage for the building inventory - sw_dmg (pd.DataFrame): Table of surge-wave damage for the building inventory - flood_dmg (pd.DataFrame): Table of flood damage for the building inventory - buildings (pd.DataFrame): Table of building attributes - content_cost (pd.DataFrame): Table of content cost ratios for each archetype - structure_cost (pd.DataFrame): Table of structural cost ratio for each archetype and loss type + Args: + wind_dmg (pd.DataFrame): Table of wind damage for the building inventory + sw_dmg (pd.DataFrame): Table of surge-wave damage for the building inventory + flood_dmg (pd.DataFrame): 
Table of flood damage for the building inventory + buildings (pd.DataFrame): Table of building attributes + content_cost (pd.DataFrame): Table of content cost ratios for each archetype + structure_cost (pd.DataFrame): Table of structural cost ratio for each archetype and loss type - Returns: - pd.DataFrame: An table of structural and content loss for each building + Returns: + pd.DataFrame: An table of structural and content loss for each building - """ + """ # Rename the columns so there are no overlapping names among the hazard damage states - flood_dmg.rename(columns={'LS_0': 'f_LS_0', 'LS_1': 'f_LS_1', 'LS_2': 'f_LS_2', 'DS_0': 'f_DS_0', - 'DS_1': 'f_DS_1', 'DS_2': 'f_DS_2', 'DS_3': 'f_DS_3', 'haz_expose': 'f_haz_expose'}, - inplace=True) - - sw_dmg.rename(columns={'LS_0': 'sw_LS_0', 'LS_1': 'sw_LS_1', 'LS_2': 'sw_LS_2', 'DS_0': 'sw_DS_0', - 'DS_1': 'sw_DS_1', 'DS_2': 'sw_DS_2', 'DS_3': 'sw_DS_3', 'haz_expose': - 'sw_haz_expose'}, inplace=True) - - wind_dmg.rename(columns={'LS_0': 'w_LS_0', 'LS_1': 'w_LS_1', 'LS_2': 'w_LS_2', 'DS_0': 'w_DS_0', - 'DS_1': 'w_DS_1', 'DS_2': 'w_DS_2', 'DS_3': 'w_DS_3', 'haz_expose': 'w_haz_expose'}, - inplace=True) + flood_dmg.rename( + columns={ + "LS_0": "f_LS_0", + "LS_1": "f_LS_1", + "LS_2": "f_LS_2", + "DS_0": "f_DS_0", + "DS_1": "f_DS_1", + "DS_2": "f_DS_2", + "DS_3": "f_DS_3", + "haz_expose": "f_haz_expose", + }, + inplace=True, + ) + + sw_dmg.rename( + columns={ + "LS_0": "sw_LS_0", + "LS_1": "sw_LS_1", + "LS_2": "sw_LS_2", + "DS_0": "sw_DS_0", + "DS_1": "sw_DS_1", + "DS_2": "sw_DS_2", + "DS_3": "sw_DS_3", + "haz_expose": "sw_haz_expose", + }, + inplace=True, + ) + + wind_dmg.rename( + columns={ + "LS_0": "w_LS_0", + "LS_1": "w_LS_1", + "LS_2": "w_LS_2", + "DS_0": "w_DS_0", + "DS_1": "w_DS_1", + "DS_2": "w_DS_2", + "DS_3": "w_DS_3", + "haz_expose": "w_haz_expose", + }, + inplace=True, + ) # Combine all three sets of damages into a single data frame - combined_df = pd.merge(pd.merge(wind_dmg, sw_dmg, on='guid'), flood_dmg, on='guid') + combined_df = pd.merge( + pd.merge(wind_dmg, sw_dmg, on="guid"), flood_dmg, on="guid" + ) # Add the flood archetype to the combined damage since this is used to find loss multipliers - new_combined_df = pd.merge(combined_df, buildings[["guid", "arch_flood"]], on="guid") + new_combined_df = pd.merge( + combined_df, buildings[["guid", "arch_flood"]], on="guid" + ) # Create a result data frame for the loss calculations - loss_df = new_combined_df[['guid']].copy() + loss_df = new_combined_df[["guid"]].copy() # Compute content and structural loss - loss_df['cont_loss'] = new_combined_df.apply(lambda row: np.dot(row[['f_DS_0', 'f_DS_1', 'f_DS_2']], - content_cost.iloc[int(row['arch_flood']-1), - 1:4]), axis=1) - - loss_df['roof_loss'] = new_combined_df.apply(lambda row: np.dot(row[['w_DS_0', 'w_DS_1', 'w_DS_2', 'w_DS_3']], - np.array([0.15, 0.5, 0.75, 1])) * - structure_cost.loc[int(row['arch_flood']-1), "Roofing"], axis=1) - - loss_df['ff_loss'] = new_combined_df.apply(lambda row: np.dot(row[['sw_DS_0', 'sw_DS_1', 'sw_DS_2', - 'sw_DS_3']], np.array([0.1, 0.5, 0.75, 1])) * - structure_cost.loc[int(row['arch_flood'] - 1), - "Flooring and Foundation"], axis=1) + loss_df["cont_loss"] = new_combined_df.apply( + lambda row: np.dot( + row[["f_DS_0", "f_DS_1", "f_DS_2"]], + content_cost.iloc[int(row["arch_flood"] - 1), 1:4], + ), + axis=1, + ) + + loss_df["roof_loss"] = new_combined_df.apply( + lambda row: np.dot( + row[["w_DS_0", "w_DS_1", "w_DS_2", "w_DS_3"]], + np.array([0.15, 0.5, 0.75, 1]), + ) + * 
structure_cost.loc[int(row["arch_flood"] - 1), "Roofing"], + axis=1, + ) + + loss_df["ff_loss"] = new_combined_df.apply( + lambda row: np.dot( + row[["sw_DS_0", "sw_DS_1", "sw_DS_2", "sw_DS_3"]], + np.array([0.1, 0.5, 0.75, 1]), + ) + * structure_cost.loc[int(row["arch_flood"] - 1), "Flooring and Foundation"], + axis=1, + ) # Computes frame loss from the dominant hazard between wind and surge-wave intensities def compute_frame_loss(row): - if row['w_DS_3'] >= row['sw_DS_3']: - return np.dot(row[['w_DS_0', 'w_DS_1', 'w_DS_2', 'w_DS_3']],np.array([0.25, 0.5, 0.75, 1])) * \ - structure_cost.loc[int(row['arch_flood'] - 1), "Wood Framing"] - elif row['sw_DS_3'] > row['w_DS_3']: - return np.dot(row[['sw_DS_0', 'sw_DS_1', 'sw_DS_2', 'sw_DS_3']], np.array([0.25, 0.5, 0.75, 1])) * \ - structure_cost.loc[int(row['arch_flood'] - 1), "Wood Framing"] + if row["w_DS_3"] >= row["sw_DS_3"]: + return ( + np.dot( + row[["w_DS_0", "w_DS_1", "w_DS_2", "w_DS_3"]], + np.array([0.25, 0.5, 0.75, 1]), + ) + * structure_cost.loc[int(row["arch_flood"] - 1), "Wood Framing"] + ) + elif row["sw_DS_3"] > row["w_DS_3"]: + return ( + np.dot( + row[["sw_DS_0", "sw_DS_1", "sw_DS_2", "sw_DS_3"]], + np.array([0.25, 0.5, 0.75, 1]), + ) + * structure_cost.loc[int(row["arch_flood"] - 1), "Wood Framing"] + ) else: # If one or both are nan, they can't be compared - if pd.isnull(row['sw_DS_3']) and pd.isnull(row['w_DS_3']): + if pd.isnull(row["sw_DS_3"]) and pd.isnull(row["w_DS_3"]): return 0 - elif not pd.isnull(row['sw_DS_3']): - return np.dot(row[['sw_DS_0', 'sw_DS_1', 'sw_DS_2', 'sw_DS_3']], np.array([0.25, 0.5, 0.75, 1])) * \ - structure_cost.loc[int(row['arch_flood'] - 1), "Wood Framing"] + elif not pd.isnull(row["sw_DS_3"]): + return ( + np.dot( + row[["sw_DS_0", "sw_DS_1", "sw_DS_2", "sw_DS_3"]], + np.array([0.25, 0.5, 0.75, 1]), + ) + * structure_cost.loc[int(row["arch_flood"] - 1), "Wood Framing"] + ) else: - return np.dot(row[['w_DS_0', 'w_DS_1', 'w_DS_2', 'w_DS_3']], np.array([0.25, 0.5, 0.75, - 1])) * structure_cost.loc[ - int(row['arch_flood'] - 1), "Wood Framing"] - - loss_df['frame_loss'] = new_combined_df.apply(lambda row: compute_frame_loss(row), axis=1) + return ( + np.dot( + row[["w_DS_0", "w_DS_1", "w_DS_2", "w_DS_3"]], + np.array([0.25, 0.5, 0.75, 1]), + ) + * structure_cost.loc[int(row["arch_flood"] - 1), "Wood Framing"] + ) + + loss_df["frame_loss"] = new_combined_df.apply( + lambda row: compute_frame_loss(row), axis=1 + ) # Fill NA values with 0 otherwise we can't compute the total loss since empty are NaN values and won't add loss_df.fillna(0, inplace=True) - loss_df['total_loss'] = loss_df.apply(lambda row: row['cont_loss'] + row['frame_loss'] + row[ - 'roof_loss'] + row['ff_loss'], axis=1) + loss_df["total_loss"] = loss_df.apply( + lambda row: row["cont_loss"] + + row["frame_loss"] + + row["roof_loss"] + + row["ff_loss"], + axis=1, + ) return loss_df - + def get_spec(self): """Get specifications of the combined wind, wave, and surge building loss analysis. 
@@ -148,62 +242,65 @@ def get_spec(self): """ return { - 'name': 'combined-wind-wave-surge-building-loss', - 'description': 'Combined wind wave and surge building loss analysis', - 'input_parameters': [ + "name": "combined-wind-wave-surge-building-loss", + "description": "Combined wind wave and surge building loss analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'wind_damage', - 'required': True, - 'description': 'Wind damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer6'] + "id": "wind_damage", + "required": True, + "description": "Wind damage result that has damage intervals in it", + "type": ["ergo:buildingDamageVer6"], }, { - 'id': 'surge_wave_damage', - 'required': True, - 'description': 'Surge-wave damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer6'] + "id": "surge_wave_damage", + "required": True, + "description": "Surge-wave damage result that has damage intervals in it", + "type": ["ergo:buildingDamageVer6"], }, { - 'id': 'flood_damage', - 'required': True, - 'description': 'Flood damage result that has damage intervals in it', - 'type': ['ergo:nsBuildingInventoryDamageVer4'] + "id": "flood_damage", + "required": True, + "description": "Flood damage result that has damage intervals in it", + "type": ["ergo:nsBuildingInventoryDamageVer4"], }, { - 'id': 'structural_cost', - 'required': True, - 'description': 'Structural cost ratio for each archetype and loss type', - 'type': ['incore:structuralCostRatio'] + "id": "structural_cost", + "required": True, + "description": "Structural cost ratio for each archetype and loss type", + "type": ["incore:structuralCostRatio"], }, { - 'id': 'content_cost', - 'required': True, - 'description': 'Content cost ratio for each archetype and damage state', - 'type': ['incore:contentCostRatio'] - } - + "id": "content_cost", + "required": True, + "description": "Content cost ratio for each archetype and damage state", + "type": ["incore:contentCostRatio"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'buildings', - 'description': 'CSV file of building structural and content loss', - 'type': 'incore:buildingLoss' + "id": "result", + "parent_type": "buildings", + "description": "CSV file of building structural and content loss", + "type": "incore:buildingLoss", } - ] + ], } diff --git a/pyincore/analyses/commercialbuildingrecovery/__init__.py b/pyincore/analyses/commercialbuildingrecovery/__init__.py index f2f9c0732..e5124be63 100644 --- a/pyincore/analyses/commercialbuildingrecovery/__init__.py +++ b/pyincore/analyses/commercialbuildingrecovery/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.commercialbuildingrecovery.commercialbuildingrecovery import CommercialBuildingRecovery +from 
pyincore.analyses.commercialbuildingrecovery.commercialbuildingrecovery import ( + CommercialBuildingRecovery, +) diff --git a/pyincore/analyses/commercialbuildingrecovery/commercialbuildingrecovery.py b/pyincore/analyses/commercialbuildingrecovery/commercialbuildingrecovery.py index 4776ef4af..23eaa6fcd 100644 --- a/pyincore/analyses/commercialbuildingrecovery/commercialbuildingrecovery.py +++ b/pyincore/analyses/commercialbuildingrecovery/commercialbuildingrecovery.py @@ -6,7 +6,6 @@ import numpy as np import pandas as pd -from scipy.stats import lognorm import time from pyincore import BaseAnalysis, RepairService @@ -52,7 +51,7 @@ def run(self): """ # TODO: Start using seed - seed = self.get_parameter("seed") + # seed = self.get_parameter("seed") num_samples = self.get_parameter("num_samples") result_name = self.get_parameter("result_name") @@ -64,23 +63,52 @@ def run(self): buildings = self.get_input_dataset("buildings").get_inventory_reader() buildings = list(buildings) - sample_damage_states = self.get_input_dataset("sample_damage_states").get_dataframe_from_csv(low_memory=False) - mcs_failure = self.get_input_dataset("mcs_failure").get_dataframe_from_csv(low_memory=False) - redi_delay_factors = self.get_input_dataset("delay_factors").get_dataframe_from_csv(low_memory=False) - building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv(low_memory=False) + sample_damage_states = self.get_input_dataset( + "sample_damage_states" + ).get_dataframe_from_csv(low_memory=False) + mcs_failure = self.get_input_dataset("mcs_failure").get_dataframe_from_csv( + low_memory=False + ) + redi_delay_factors = self.get_input_dataset( + "delay_factors" + ).get_dataframe_from_csv(low_memory=False) + building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv( + low_memory=False + ) # Returns dataframe - total_delay, recovery, time_stepping_recovery = self.commercial_recovery(buildings, sample_damage_states, - mcs_failure, redi_delay_factors, - building_dmg, num_samples) - self.set_result_csv_data("total_delay", total_delay, result_name + "_delay", "dataframe") - self.set_result_csv_data("recovery", recovery, result_name + "_recovery", "dataframe") - self.set_result_csv_data("time_stepping_recovery", time_stepping_recovery, - result_name + "_time_stepping_recovery", "dataframe") + total_delay, recovery, time_stepping_recovery = self.commercial_recovery( + buildings, + sample_damage_states, + mcs_failure, + redi_delay_factors, + building_dmg, + num_samples, + ) + self.set_result_csv_data( + "total_delay", total_delay, result_name + "_delay", "dataframe" + ) + self.set_result_csv_data( + "recovery", recovery, result_name + "_recovery", "dataframe" + ) + self.set_result_csv_data( + "time_stepping_recovery", + time_stepping_recovery, + result_name + "_time_stepping_recovery", + "dataframe", + ) return True - def commercial_recovery(self, buildings, sample_damage_states, mcs_failure, redi_delay_factors, building_dmg, num_samples): + def commercial_recovery( + self, + buildings, + sample_damage_states, + mcs_failure, + redi_delay_factors, + building_dmg, + num_samples, + ): """ Calculates commercial building recovery for buildings @@ -98,19 +126,38 @@ def commercial_recovery(self, buildings, sample_damage_states, mcs_failure, redi """ start_total_delay = time.process_time() - total_delay = CommercialBuildingRecovery.total_delay(buildings, sample_damage_states, mcs_failure, - redi_delay_factors, building_dmg, num_samples) + total_delay = CommercialBuildingRecovery.total_delay( + 
buildings, + sample_damage_states, + mcs_failure, + redi_delay_factors, + building_dmg, + num_samples, + ) end_total_delay = time.process_time() - print("Finished executing total_delay() in " + str(end_total_delay - start_total_delay) + " secs") + print( + "Finished executing total_delay() in " + + str(end_total_delay - start_total_delay) + + " secs" + ) recovery = self.recovery_rate(buildings, sample_damage_states, total_delay) end_recovery = time.process_time() - print("Finished executing recovery_rate() in " + str(end_recovery - end_total_delay) + " secs") - - time_stepping_recovery = CommercialBuildingRecovery.time_stepping_recovery(recovery) + print( + "Finished executing recovery_rate() in " + + str(end_recovery - end_total_delay) + + " secs" + ) + + time_stepping_recovery = CommercialBuildingRecovery.time_stepping_recovery( + recovery + ) end_time_stepping_recovery = time.process_time() - print("Finished executing time_stepping_recovery() in " + - str(end_time_stepping_recovery - end_recovery) + " secs") + print( + "Finished executing time_stepping_recovery() in " + + str(end_time_stepping_recovery - end_recovery) + + " secs" + ) end_time = time.process_time() print("Analysis completed in " + str(end_time - start_total_delay) + " secs") @@ -118,8 +165,15 @@ def commercial_recovery(self, buildings, sample_damage_states, mcs_failure, redi return total_delay, recovery, time_stepping_recovery @staticmethod - def total_delay(buildings, sample_damage_states, mcs_failure, redi_delay_factors, damage, num_samples): - """ Calculates total delay by combining financial delay and other factors from REDi framework + def total_delay( + buildings, + sample_damage_states, + mcs_failure, + redi_delay_factors, + damage, + num_samples, + ): + """Calculates total delay by combining financial delay and other factors from REDi framework Args: buildings (list): List of buildings @@ -135,39 +189,50 @@ def total_delay(buildings, sample_damage_states, mcs_failure, redi_delay_factors """ # Obtain the commercial buildings in damage - damage = mcs_failure[damage['haz_expose'] == 'yes'] + damage = mcs_failure[damage["haz_expose"] == "yes"] commercial = [] commercial_archetypes = [6, 7, 8, 15, 16, 18, 19] for i, b in enumerate(buildings): - if b['properties']['archetype'] in commercial_archetypes: - commercial.append(b['properties']['guid']) - commercial_pd = pd.DataFrame(commercial, columns=['guid']) - commercial_damage = pd.merge(damage, commercial_pd, on='guid') + if b["properties"]["archetype"] in commercial_archetypes: + commercial.append(b["properties"]["guid"]) + commercial_pd = pd.DataFrame(commercial, columns=["guid"]) + commercial_damage = pd.merge(damage, commercial_pd, on="guid") # Obtain the column names - colnames = [f'sample_{i}' for i in range(0, num_samples)] + colnames = [f"sample_{i}" for i in range(0, num_samples)] samples = np.zeros((len(commercial_damage), num_samples)) delay_time = pd.DataFrame(samples, columns=colnames) - delay_time.insert(0, 'guid', commercial_damage.reset_index(drop=True)['guid']) + delay_time.insert(0, "guid", commercial_damage.reset_index(drop=True)["guid"]) # Perform an inner join to ensure only buildings with damage states are processed - merged_delay = pd.merge(sample_damage_states, delay_time, on='guid') + merged_delay = pd.merge(sample_damage_states, delay_time, on="guid") # Obtain the guids - merged_delay_guids = merged_delay['guid'] + merged_delay_guids = merged_delay["guid"] # Obtain the damage states - merged_delay_damage_states = 
merged_delay['sample_damage_states'] + merged_delay_damage_states = merged_delay["sample_damage_states"] # Convert to numpy - samples_np = merged_delay.drop(columns=['guid', 'sample_damage_states']).to_numpy() + samples_np = merged_delay.drop( + columns=["guid", "sample_damage_states"] + ).to_numpy() # First, we decompose redi_delay_factors into two dictionaries that can be used to compute vector operations - redi_idx = dict(zip(redi_delay_factors['Building_specific_conditions'], redi_delay_factors.index)) + redi_idx = dict( + zip( + redi_delay_factors["Building_specific_conditions"], + redi_delay_factors.index, + ) + ) # Next, we produce two intermediate numpy matrices: one for med and one for sdv - redi_med = redi_delay_factors[['Ins_med', 'Enmo_med', 'Como_med', 'Per_med', 'Fin_med']].to_numpy() - redi_sdv = redi_delay_factors[['Ins_sdv', 'Enmo_sdv', 'Como_sdv', 'Per_sdv', 'Fin_sdv']].to_numpy() + redi_med = redi_delay_factors[ + ["Ins_med", "Enmo_med", "Como_med", "Per_med", "Fin_med"] + ].to_numpy() + redi_sdv = redi_delay_factors[ + ["Ins_sdv", "Enmo_sdv", "Como_sdv", "Per_sdv", "Fin_sdv"] + ].to_numpy() # Define indices to facilitate interpretation of the code inspection_idx = 0 @@ -195,21 +260,25 @@ def total_delay(buildings, sample_damage_states, mcs_failure, redi_delay_factors delay_vec[4] = np.random.normal(mean_vec[4], sdv_vec[4]) # Compute the delay using that vector, already computed in the prior step - samples_np[i, j] = np.round(delay_vec[inspection_idx] + - np.max([ - delay_vec[engineer_idx], - delay_vec[financing_idx], - delay_vec[contractor_idx] - ]) + - delay_vec[permit_idx]) + samples_np[i, j] = np.round( + delay_vec[inspection_idx] + + np.max( + [ + delay_vec[engineer_idx], + delay_vec[financing_idx], + delay_vec[contractor_idx], + ] + ) + + delay_vec[permit_idx] + ) total_delay = pd.DataFrame(samples_np, columns=colnames) - total_delay.insert(0, 'guid', merged_delay_guids) + total_delay.insert(0, "guid", merged_delay_guids) return total_delay def recovery_rate(self, buildings, sample_damage_states, total_delay): - """ Gets total time required for each commercial building to receive full restoration. Determined by the + """Gets total time required for each commercial building to receive full restoration. 
Determined by the combination of delay time and repair time Args: @@ -222,31 +291,37 @@ def recovery_rate(self, buildings, sample_damage_states, total_delay): """ repair_key = self.get_parameter("repair_key") - repair_sets = self.repairsvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, repair_key) - repair_sets_by_guid = {} # get repair sets by guid so they can be mapped with output of monte carlo + repair_sets = self.repairsvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), buildings, repair_key + ) + repair_sets_by_guid = ( + {} + ) # get repair sets by guid so they can be mapped with output of monte carlo # This is sort of a workaround until we define Repair Curve models and abstract this out there for i, b in enumerate(buildings): # if building id has a matched repair curve set - if b['id'] in repair_sets.keys(): - repair_sets_by_guid[b["properties"]['guid']] = repair_sets[b['id']] + if b["id"] in repair_sets.keys(): + repair_sets_by_guid[b["properties"]["guid"]] = repair_sets[b["id"]] else: - repair_sets_by_guid[b["properties"]['guid']] = None + repair_sets_by_guid[b["properties"]["guid"]] = None # Obtain the column names colnames = list(total_delay.columns)[1:] # Perform an inner join to ensure only buildings with damage states are processed - merged_delay = pd.merge(total_delay, sample_damage_states, on='guid') + merged_delay = pd.merge(total_delay, sample_damage_states, on="guid") # Obtain the guids - merged_delay_guids = merged_delay['guid'] + merged_delay_guids = merged_delay["guid"] # Obtain the damage states - merged_delay_damage_states = merged_delay['sample_damage_states'] + merged_delay_damage_states = merged_delay["sample_damage_states"] # Convert to numpy - samples_np = merged_delay.drop(columns=['guid', 'sample_damage_states']).to_numpy() + samples_np = merged_delay.drop( + columns=["guid", "sample_damage_states"] + ).to_numpy() num_samples = len(colnames) num_buildings = samples_np.shape[0] @@ -265,8 +340,8 @@ def idx(x, y): # Use a lambda to obtain the damage state in numeric form. Note that since damage states are single digits, # it suffices to look at the last character and convert into an integer value. Do this computation once # per building only. - extract_ds = lambda x: int(x[-1]) - samples_mcs_ds = list(map(extract_ds, samples_mcs)) + extract_ds = lambda x: int(x[-1]) # noqa: E731 + samples_mcs_ds = list(map(extract_ds, samples_mcs)) # noqa: E731 # Now, perform the two nested loops, using the indexing function to simplify the syntax. for i in range(0, num_samples): @@ -275,25 +350,36 @@ def idx(x, y): # NOTE: Even though the kwarg name is "repair_time", it actually takes percent of functionality. DFR3 # system currently doesn't have a way to represent the name correctly when calculating the inverse. 
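# Illustrative sketch of the step below, with assumed numbers: the repair curve
# is inverted at a target percent of functionality to get a repair time in
# days, divided by 7 to express it in weeks, and added to the sampled delay.
# A lognormal CDF stands in for the DFR3 repair curve; the median, dispersion,
# and delay values are placeholders, not values from the mapping dataset.
import numpy as np
from scipy.stats import lognorm

median_days, beta = 60.0, 0.5                    # assumed repair-curve parameters
percent_func = np.array([0.25, 0.50, 0.75])      # target percent of functionality

repair_time_weeks = lognorm.ppf(percent_func, s=beta, scale=median_days) / 7
delay_weeks = 12.0                               # a sampled total delay (same unit)
recovery_time = np.round(delay_weeks + repair_time_weeks, 1)
print(recovery_time)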
if mapped_repair is not None: - repair_time = mapped_repair.repair_curves[state].solve_curve_for_inverse( - hazard_values={}, curve_parameters=mapped_repair.curve_parameters, **{"repair_time": percent_func} - ) / 7 + repair_time = ( + mapped_repair.repair_curves[state].solve_curve_for_inverse( + hazard_values={}, + curve_parameters=mapped_repair.curve_parameters, + **{"repair_time": percent_func}, + ) + / 7 + ) else: repair_time = np.full(num_samples, np.nan) - + for j in range(0, num_samples): - samples_n1_n2[build, idx(i, j)] = round(samples_np[build, i] + repair_time[j], 1) + samples_n1_n2[build, idx(i, j)] = round( + samples_np[build, i] + repair_time[j], 1 + ) # Now, generate all the labels using list comprehension outside the loops - colnames = [f'sample_{i}_{j}' for i in range(0, num_samples) for j in range(0, num_samples)] + colnames = [ + f"sample_{i}_{j}" + for i in range(0, num_samples) + for j in range(0, num_samples) + ] recovery_time = pd.DataFrame(samples_n1_n2, columns=colnames) - recovery_time.insert(0, 'guid', merged_delay_guids) + recovery_time.insert(0, "guid", merged_delay_guids) return recovery_time @staticmethod def time_stepping_recovery(recovery_results): - """ Converts results to a time frame. Currently gives results for 16 quarters over 4 year. + """Converts results to a time frame. Currently gives results for 16 quarters over 4 year. Args: recovery_results (pd.DataFrame): Total recovery time of financial delay and other factors from REDi framework. @@ -308,10 +394,10 @@ def time_stepping_recovery(recovery_results): total_time = time_step * np.linspace(0, 4 * year, num=17, endpoint=True) # Save guid's for later - recovery_results_guids = recovery_results['guid'] + recovery_results_guids = recovery_results["guid"] # Convert recovery time results to numpy - samples_n1_n2 = recovery_results.drop(columns=['guid']).to_numpy() + samples_n1_n2 = recovery_results.drop(columns=["guid"]).to_numpy() # Number of guids num_buildings = recovery_results.shape[0] @@ -323,13 +409,16 @@ def time_stepping_recovery(recovery_results): for build in range(0, num_buildings): for i in range(len(total_time)): - fun_state = np.count_nonzero(samples_n1_n2[build, :] < total_time[i]) / num_samples + fun_state = ( + np.count_nonzero(samples_n1_n2[build, :] < total_time[i]) + / num_samples + ) times_np[build, i] = np.round(fun_state, 2) - colnames = [f'quarter_{i}' for i in range(0, num_times)] + colnames = [f"quarter_{i}" for i in range(0, num_times)] time_stepping_recovery = pd.DataFrame(times_np, columns=colnames) - time_stepping_recovery.insert(0, 'guid', recovery_results_guids) + time_stepping_recovery.insert(0, "guid", recovery_results_guids) return time_stepping_recovery @@ -341,89 +430,93 @@ def get_spec(self): """ return { - 'name': 'commercial-building-recovery', - 'description': 'calculate commercial building recovery', - 'input_parameters': [ + "name": "commercial-building-recovery", + "description": "calculate commercial building recovery", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'name of the result', - 'type': str + "id": "result_name", + "required": True, + "description": "name of the result", + "type": str, }, { - 'id': 'num_samples', - 'required': True, - 'description': 'Number of sample scenarios', - 'type': int + "id": "num_samples", + "required": True, + "description": "Number of sample scenarios", + "type": int, }, { - 'id': 'repair_key', - 'required': False, - 'description': 'Repair key to use in mapping dataset', - 'type': str 
+ "id": "repair_key", + "required": False, + "description": "Repair key to use in mapping dataset", + "type": str, }, { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the probabilistic model', - 'type': int - } + "id": "seed", + "required": False, + "description": "Initial seed for the probabilistic model", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', 'ergo:buildingInventoryVer6', - 'ergo:buildingInventoryVer7'] + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], }, { - 'id': 'sample_damage_states', - 'required': True, - 'description': 'Sample damage states', - 'type': ['incore:sampleDamageState'] + "id": "sample_damage_states", + "required": True, + "description": "Sample damage states", + "type": ["incore:sampleDamageState"], }, { - 'id': 'mcs_failure', - 'required': True, - 'description': 'mcs_failure', - 'type': ['incore:failureProbability'] + "id": "mcs_failure", + "required": True, + "description": "mcs_failure", + "type": ["incore:failureProbability"], }, { - 'id': 'delay_factors', - 'required': True, - 'description': 'Delay impeding factors such as post-disaster inspection, insurance claim, ' - 'and government permit based on building\'s damage state. Provided by REDi framework', - 'type': ['incore:buildingRecoveryFactors'] + "id": "delay_factors", + "required": True, + "description": "Delay impeding factors such as post-disaster inspection, insurance claim, " + "and government permit based on building's damage state. 
Provided by REDi framework", + "type": ["incore:buildingRecoveryFactors"], }, { - 'id': 'building_dmg', - 'required': True, - 'description': 'damage result that has damage intervals', - 'type': ['ergo:buildingDamageVer6'] - } + "id": "building_dmg", + "required": True, + "description": "damage result that has damage intervals", + "type": ["ergo:buildingDamageVer6"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'total_delay', - 'description': 'CSV file of commercial building delay time', - 'type': 'incore:buildingRecoveryDelay' + "id": "total_delay", + "description": "CSV file of commercial building delay time", + "type": "incore:buildingRecoveryDelay", }, { - 'id': 'recovery', - 'description': 'CSV file of commercial building recovery time', - 'type': 'incore:buildingRecoveryTime' + "id": "recovery", + "description": "CSV file of commercial building recovery time", + "type": "incore:buildingRecoveryTime", }, { - 'id': 'time_stepping_recovery', - 'description': 'CSV file of commercial building recovery percent', - 'type': 'incore:buildingRecovery' - } - ] + "id": "time_stepping_recovery", + "description": "CSV file of commercial building recovery percent", + "type": "incore:buildingRecovery", + }, + ], } diff --git a/pyincore/analyses/core_cge_ml/__init__.py b/pyincore/analyses/core_cge_ml/__init__.py index 3c5fba791..6888ca641 100644 --- a/pyincore/analyses/core_cge_ml/__init__.py +++ b/pyincore/analyses/core_cge_ml/__init__.py @@ -4,4 +4,4 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.core_cge_ml.corecgeml import CoreCGEML \ No newline at end of file +from pyincore.analyses.core_cge_ml.corecgeml import CoreCGEML diff --git a/pyincore/analyses/core_cge_ml/corecgeml.py b/pyincore/analyses/core_cge_ml/corecgeml.py index 487f8dc3f..2125f6028 100644 --- a/pyincore/analyses/core_cge_ml/corecgeml.py +++ b/pyincore/analyses/core_cge_ml/corecgeml.py @@ -116,7 +116,6 @@ def construct_output(self, predictions: dict) -> Dict[str, Dict[str, list]]: prefd_l = [] postfd_l = [] for grp in self.labor_groups: - if temp_prefd[sector].get(grp, None) is None: prefd_l.append(-1) else: @@ -143,11 +142,11 @@ def run_core_cge_ml( base_cap_factors: List[np.ndarray], ) -> None: """run_core_cge_ml will use the model coefficients to predict the change in capital stock for each sector. - The predicted change will then be added to base_cap_factors to get the final capital stock for each sector + The predicted change will then be added to base_cap_factors to get the final capital stock for each sector after a disaster. - The model requires capital stock loss in dollar amount, hence the base_cap will be used to + The model requires capital stock loss in dollar amount, hence the base_cap will be used to calculate the loss in dollar amount. - The capital_shocks is the percentage of capital stock that remains and hence to get the loss we + The capital_shocks is the percentage of capital stock that remains and hence to get the loss we use 1 - capital_shocks. Some variables for parameters: @@ -168,7 +167,7 @@ def run_core_cge_ml( capital_shocks : (1 X K) np.ndarray This is the capital shock for each sector in percentage. This is a (1, K) array with K elements. model_coeffs : Dict[str, np.ndarray] - This is a dictionary of 2D arrays with shape [n, (k_i, l_i)]. + This is a dictionary of 2D arrays with shape [n, (k_i, l_i)]. 
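# Small numeric sketch of the relationship described in the docstring above,
# using made-up numbers: the models need the capital stock loss in dollars, so
# the remaining fraction (capital_shocks) is turned into a loss via
# 1 - capital_shocks and scaled by base_cap; the predicted change is then
# applied to the base capital factors. The -0.1 factor is only a stand-in for
# the regression output.
import numpy as np

base_cap = np.array([[100.0, 250.0, 80.0]])       # $ per sector, (1 x K), assumed
capital_shocks = np.array([[0.90, 0.75, 1.00]])   # fraction remaining, (1 x K)

capital_loss = base_cap * (1.0 - capital_shocks)  # $ loss fed to the ML models
predicted_change = -0.1 * capital_loss            # stand-in for model predictions
base_cap_factor = np.array([100.0, 250.0, 80.0])
post_disaster_factor = base_cap_factor + predicted_change.flatten()
print(capital_loss, post_disaster_factor)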
Each entry in the dictionary corresponds to a factor and each factor has k_i number of models. It is assumed that the intercept term is included in the model coefficients and is at the 0th column. base_cap_factors : List[np.ndarray] diff --git a/pyincore/analyses/cumulativebuildingdamage/__init__.py b/pyincore/analyses/cumulativebuildingdamage/__init__.py index e541d8cfe..1ef5be12a 100644 --- a/pyincore/analyses/cumulativebuildingdamage/__init__.py +++ b/pyincore/analyses/cumulativebuildingdamage/__init__.py @@ -1 +1,3 @@ -from pyincore.analyses.cumulativebuildingdamage.cumulativebuildingdamage import CumulativeBuildingDamage +from pyincore.analyses.cumulativebuildingdamage.cumulativebuildingdamage import ( + CumulativeBuildingDamage, +) diff --git a/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py b/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py index 5f4be6738..2c3aa5eea 100644 --- a/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py +++ b/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py @@ -24,40 +24,45 @@ def run(self): """Executes Cumulative Building Damage Analysis""" eq_damage_set = self.get_input_dataset("eq_bldg_dmg").get_csv_reader() eq_damage_df = pd.DataFrame(list(eq_damage_set)) - tsunami_damage_set = self.get_input_dataset( - "tsunami_bldg_dmg").get_csv_reader() + tsunami_damage_set = self.get_input_dataset("tsunami_bldg_dmg").get_csv_reader() tsunami_damage_df = pd.DataFrame(list(tsunami_damage_set)) user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len( - eq_damage_df), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(eq_damage_df), user_defined_cpu + ) avg_bulk_input_size = int(len(eq_damage_df) / num_workers) eq_damage_args = [] count = 0 while count < len(eq_damage_df): - eq_damage_args.append( - eq_damage_df[count:count + avg_bulk_input_size]) + eq_damage_args.append(eq_damage_df[count : count + avg_bulk_input_size]) count += avg_bulk_input_size results = self.cumulative_building_damage_concurrent_future( self.cumulative_building_damage_bulk_input, - num_workers, eq_damage_args, - repeat(tsunami_damage_df)) + num_workers, + eq_damage_args, + repeat(tsunami_damage_df), + ) - self.set_result_csv_data("combined-result", results, - name=self.get_parameter("result_name")) + self.set_result_csv_data( + "combined-result", results, name=self.get_parameter("result_name") + ) return True - def cumulative_building_damage_concurrent_future(self, function_name, - num_workers, *args): + def cumulative_building_damage_concurrent_future( + self, function_name, num_workers, *args + ): """Utilizes concurrent.future module. Args: @@ -71,77 +76,92 @@ def cumulative_building_damage_concurrent_future(self, function_name, """ output = [] with concurrent.futures.ProcessPoolExecutor( - max_workers=num_workers) as executor: + max_workers=num_workers + ) as executor: for ret in executor.map(function_name, *args): output.extend(ret) return output - def cumulative_building_damage_bulk_input(self, eq_building_damage_set, - tsunami_building_damage_set): + def cumulative_building_damage_bulk_input( + self, eq_building_damage_set, tsunami_building_damage_set + ): """Run analysis for building damage results. 
- Args: - eq_building_damage_set (obj): A set of earthquake building damage results. - tsunami_building_damage_set (obj): A set of all tsunami building damage results. + Args: + eq_building_damage_set (obj): A set of earthquake building damage results. + tsunami_building_damage_set (obj): A set of all tsunami building damage results. - Returns: - list: A list of ordered dictionaries with multiple damage values and other data/metadata. + Returns: + list: A list of ordered dictionaries with multiple damage values and other data/metadata. """ result = [] for idx, eq_building_damage in eq_building_damage_set.iterrows(): - result.append(self.cumulative_building_damage(eq_building_damage, - tsunami_building_damage_set)) + result.append( + self.cumulative_building_damage( + eq_building_damage, tsunami_building_damage_set + ) + ) return result - def cumulative_building_damage(self, eq_building_damage, - tsunami_building_damage): + def cumulative_building_damage(self, eq_building_damage, tsunami_building_damage): """Run analysis for building damage results. - Args: - eq_building_damage (obj): A JSON description of an earthquake building damage. - tsunami_building_damage (obj): Set of all tsunami building damage results. + Args: + eq_building_damage (obj): A JSON description of an earthquake building damage. + tsunami_building_damage (obj): Set of all tsunami building damage results. - Returns: - OrderedDict: A dictionary with building damage values and other data/metadata. + Returns: + OrderedDict: A dictionary with building damage values and other data/metadata. """ - guid = eq_building_damage['guid'] + guid = eq_building_damage["guid"] tsunami_building = tsunami_building_damage.loc[ - tsunami_building_damage['guid'] == guid] + tsunami_building_damage["guid"] == guid + ] for idy, tsunami_building in tsunami_building.iterrows(): eq_limit_states = collections.OrderedDict() try: - eq_limit_states['LS_0'] = float(eq_building_damage["LS_0"]) - eq_limit_states['LS_1'] = float(eq_building_damage["LS_1"]) - eq_limit_states['LS_2'] = float(eq_building_damage["LS_2"]) + eq_limit_states["LS_0"] = float(eq_building_damage["LS_0"]) + eq_limit_states["LS_1"] = float(eq_building_damage["LS_1"]) + eq_limit_states["LS_2"] = float(eq_building_damage["LS_2"]) tsunami_limit_states = collections.OrderedDict() - tsunami_limit_states['LS_0'] = float(tsunami_building["LS_0"]) - tsunami_limit_states['LS_1'] = float(tsunami_building["LS_1"]) - tsunami_limit_states['LS_2'] = float(tsunami_building["LS_2"]) + tsunami_limit_states["LS_0"] = float(tsunami_building["LS_0"]) + tsunami_limit_states["LS_1"] = float(tsunami_building["LS_1"]) + tsunami_limit_states["LS_2"] = float(tsunami_building["LS_2"]) limit_states = collections.OrderedDict() - limit_states["LS_0"] = \ - eq_limit_states["LS_0"] + tsunami_limit_states["LS_0"] \ + limit_states["LS_0"] = ( + eq_limit_states["LS_0"] + + tsunami_limit_states["LS_0"] - eq_limit_states["LS_0"] * tsunami_limit_states["LS_0"] - - limit_states["LS_1"] = \ - eq_limit_states["LS_1"] + tsunami_limit_states["LS_1"] \ - - eq_limit_states["LS_1"] * tsunami_limit_states["LS_1"] \ - + ((eq_limit_states["LS_0"] - - eq_limit_states["LS_1"]) * (tsunami_limit_states["LS_0"] - tsunami_limit_states["LS_1"])) - - limit_states["LS_2"] = \ - eq_limit_states["LS_2"] + tsunami_limit_states["LS_2"] \ - - eq_limit_states["LS_2"] * tsunami_limit_states["LS_2"] \ - + ((eq_limit_states["LS_1"] - - eq_limit_states["LS_2"]) * (tsunami_limit_states["LS_1"] - tsunami_limit_states["LS_2"])) + ) + + 
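# Sketch of the limit-state combination implemented in this hunk, with assumed
# exceedance probabilities: the two hazards are combined as a union of events
# (P_eq + P_tsu - P_eq * P_tsu), and the higher limit states add a correction
# term built from the adjacent limit states.
eq = {"LS_0": 0.60, "LS_1": 0.35, "LS_2": 0.10}
tsu = {"LS_0": 0.50, "LS_1": 0.20, "LS_2": 0.05}

combined = {
    "LS_0": eq["LS_0"] + tsu["LS_0"] - eq["LS_0"] * tsu["LS_0"],
    "LS_1": eq["LS_1"]
    + tsu["LS_1"]
    - eq["LS_1"] * tsu["LS_1"]
    + (eq["LS_0"] - eq["LS_1"]) * (tsu["LS_0"] - tsu["LS_1"]),
    "LS_2": eq["LS_2"]
    + tsu["LS_2"]
    - eq["LS_2"] * tsu["LS_2"]
    + (eq["LS_1"] - eq["LS_2"]) * (tsu["LS_1"] - tsu["LS_2"]),
}
print(combined)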
limit_states["LS_1"] = ( + eq_limit_states["LS_1"] + + tsunami_limit_states["LS_1"] + - eq_limit_states["LS_1"] * tsunami_limit_states["LS_1"] + + ( + (eq_limit_states["LS_0"] - eq_limit_states["LS_1"]) + * (tsunami_limit_states["LS_0"] - tsunami_limit_states["LS_1"]) + ) + ) + + limit_states["LS_2"] = ( + eq_limit_states["LS_2"] + + tsunami_limit_states["LS_2"] + - eq_limit_states["LS_2"] * tsunami_limit_states["LS_2"] + + ( + (eq_limit_states["LS_1"] - eq_limit_states["LS_2"]) + * (tsunami_limit_states["LS_1"] - tsunami_limit_states["LS_2"]) + ) + ) damage_state = FragilityCurveSet._3ls_to_4ds(limit_states) @@ -180,43 +200,42 @@ def get_spec(self): """ return { - 'name': 'cumulative-building-damage', - 'description': 'cumulative building damage (earthquake + tsunami)', - 'input_parameters': [ + "name": "cumulative-building-damage", + "description": "cumulative building damage (earthquake + tsunami)", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - } + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'eq_bldg_dmg', - 'required': True, - 'description': 'Earthquake Building Damage Results', - 'type': ['ergo:buildingDamageVer5', 'ergo:buildingDamageVer6'] + "id": "eq_bldg_dmg", + "required": True, + "description": "Earthquake Building Damage Results", + "type": ["ergo:buildingDamageVer5", "ergo:buildingDamageVer6"], }, { - 'id': 'tsunami_bldg_dmg', - 'required': True, - 'description': 'Tsunami Building Damage Results', - 'type': ['ergo:buildingDamageVer5', 'ergo:buildingDamageVer6'], - } + "id": "tsunami_bldg_dmg", + "required": True, + "description": "Tsunami Building Damage Results", + "type": ["ergo:buildingDamageVer5", "ergo:buildingDamageVer6"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'combined-result', - 'parent_type': 'buildings', - 'description': 'CSV file of building cumulative damage', - 'type': 'ergo:buildingDamageVer5' + "id": "combined-result", + "parent_type": "buildings", + "description": "CSV file of building cumulative damage", + "type": "ergo:buildingDamageVer5", } - - ] + ], } diff --git a/pyincore/analyses/epfdamage/epfdamage.py b/pyincore/analyses/epfdamage/epfdamage.py index 8fe0b523d..40b43879d 100644 --- a/pyincore/analyses/epfdamage/epfdamage.py +++ b/pyincore/analyses/epfdamage/epfdamage.py @@ -40,7 +40,11 @@ def run(self): self.set_parameter("fragility_key", fragility_key) # get input hazard - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() # Hazard Uncertainty use_hazard_uncertainty = False @@ -52,29 +56,41 @@ def run(self): user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(epf_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(epf_set), 
user_defined_cpu + ) avg_bulk_input_size = int(len(epf_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(epf_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - (ds_results, damage_results) = self.epf_damage_concurrent_future(self.epf_damage_analysis_bulk_input, - num_workers, - inventory_args, - repeat(hazard), - repeat(hazard_type), - repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", damage_results, - name=self.get_parameter("result_name") + "_additional_info") + (ds_results, damage_results) = self.epf_damage_concurrent_future( + self.epf_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True @@ -93,14 +109,18 @@ def epf_damage_concurrent_future(self, function_name, num_workers, *args): output_ds = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_dataset_id): + def epf_damage_analysis_bulk_input( + self, epfs, hazard, hazard_type, hazard_dataset_id + ): """Run analysis for multiple epfs. 
Args: @@ -126,11 +146,14 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas fragility_key = self.get_parameter("fragility_key") - fragility_set = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), epfs, - fragility_key) + fragility_set = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), epfs, fragility_key + ) if hazard_type == "earthquake": - liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key") + liquefaction_fragility_key = self.get_parameter( + "liquefaction_fragility_key" + ) if self.get_parameter("use_liquefaction") is True: if liquefaction_fragility_key is None: liquefaction_fragility_key = self.DEFAULT_LIQ_FRAGILITY_KEY @@ -138,11 +161,16 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas use_liquefaction = self.get_parameter("use_liquefaction") # Obtain the geology dataset - geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id") + geology_dataset_id = self.get_parameter( + "liquefaction_geology_dataset_id" + ) if geology_dataset_id is not None: fragility_sets_liq = self.fragilitysvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), epfs, liquefaction_fragility_key) + self.get_input_dataset("dfr3_mapping_set"), + epfs, + liquefaction_fragility_key, + ) if fragility_sets_liq is not None: liquefaction_available = True @@ -158,11 +186,7 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas loc = str(location.y) + "," + str(location.x) demands = fragility_set[epf_id].demand_types units = fragility_set[epf_id].demand_units - value = { - "demands": demands, - "units": units, - "loc": loc - } + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) mapped_epfs.append(epf) @@ -170,11 +194,7 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas fragility_set_liq = fragility_sets_liq[epf["id"]] demands_liq = fragility_set_liq.demand_types units_liq = fragility_set_liq.demand_units - value_liq = { - "demands": demands_liq, - "units": units_liq, - "loc": loc - } + value_liq = {"demands": demands_liq, "units": units_liq, "loc": loc} values_payload_liq.append(value_liq) else: unmapped_epfs.append(epf) @@ -183,9 +203,9 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas liquefaction_resp = None if liquefaction_available: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, - geology_dataset_id, - values_payload_liq) + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload_liq + ) ds_results = [] damage_results = [] @@ -197,7 +217,9 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas selected_fragility_set = fragility_set[epf["id"]] if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve): - hazard_val = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) + hazard_val = AnalysisUtil.update_precision_of_lists( + hazard_vals[i]["hazardValues"] + ) input_demand_types = hazard_vals[i]["demands"] input_demand_units = hazard_vals[i]["units"] @@ -207,49 +229,69 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas hval_dict[d] = hazard_val[j] j += 1 - epf_args = selected_fragility_set.construct_expression_args_from_inventory(epf) - limit_states = selected_fragility_set.calculate_limit_state(hval_dict, - 
inventory_type='electric_facility', - **epf_args) + epf_args = ( + selected_fragility_set.construct_expression_args_from_inventory(epf) + ) + limit_states = selected_fragility_set.calculate_limit_state( + hval_dict, inventory_type="electric_facility", **epf_args + ) if liquefaction_resp is not None: fragility_set_liq = fragility_sets_liq[epf["id"]] if isinstance(fragility_set_liq.fragility_curves[0], DFR3Curve): - liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"]) + liq_hazard_vals = AnalysisUtil.update_precision_of_lists( + liquefaction_resp[i]["pgdValues"] + ) liq_demand_types = liquefaction_resp[i]["demands"] liq_demand_units = liquefaction_resp[i]["units"] - liquefaction_prob = liquefaction_resp[i]['liqProbability'] + liquefaction_prob = liquefaction_resp[i]["liqProbability"] hval_dict_liq = dict() for j, d in enumerate(fragility_set_liq.demand_types): hval_dict_liq[d] = liq_hazard_vals[j] - facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(epf) - pgd_limit_states = \ - fragility_set_liq.calculate_limit_state( - hval_dict_liq, inventory_type="electric_facility", - **facility_liq_args) + facility_liq_args = ( + fragility_set_liq.construct_expression_args_from_inventory( + epf + ) + ) + pgd_limit_states = fragility_set_liq.calculate_limit_state( + hval_dict_liq, + inventory_type="electric_facility", + **facility_liq_args + ) else: - raise ValueError("One of the fragilities is in deprecated format. " - "This should not happen If you are seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. " + "This should not happen If you are seeing this please report the issue." + ) - limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states) + limit_states = AnalysisUtil.adjust_limit_states_for_pgd( + limit_states, pgd_limit_states + ) dmg_interval = selected_fragility_set.calculate_damage_interval( - limit_states, hazard_type=hazard_type, inventory_type='electric_facility') + limit_states, + hazard_type=hazard_type, + inventory_type="electric_facility", + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." 
+ ) ds_result["guid"] = epf["properties"]["guid"] ds_result.update(limit_states) ds_result.update(dmg_interval) - ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_val, hazard_type) + ds_result["haz_expose"] = AnalysisUtil.get_exposure_from_hazard_values( + hazard_val, hazard_type + ) - damage_result['guid'] = epf['properties']['guid'] - damage_result['fragility_id'] = selected_fragility_set.id + damage_result["guid"] = epf["properties"]["guid"] + damage_result["fragility_id"] = selected_fragility_set.id damage_result["demandtypes"] = input_demand_types damage_result["demandunits"] = input_demand_units damage_result["hazardtype"] = hazard_type @@ -257,17 +299,17 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas if hazard_type == "earthquake" and use_liquefaction is True: if liquefaction_available: - damage_result['liq_fragility_id'] = fragility_sets_liq[epf["id"]].id - damage_result['liqdemandtypes'] = liq_demand_types - damage_result['liqdemandunits'] = liq_demand_units - damage_result['liqhazval'] = liq_hazard_vals - damage_result['liqprobability'] = liquefaction_prob + damage_result["liq_fragility_id"] = fragility_sets_liq[epf["id"]].id + damage_result["liqdemandtypes"] = liq_demand_types + damage_result["liqdemandunits"] = liq_demand_units + damage_result["liqhazval"] = liq_hazard_vals + damage_result["liqprobability"] = liquefaction_prob else: - damage_result['liq_fragility_id'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazval'] = None - damage_result['liqprobability'] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -280,19 +322,19 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard, hazard_type, hazard_datas for epf in unmapped_epfs: ds_result = dict() damage_result = dict() - ds_result['guid'] = epf['properties']['guid'] - damage_result['guid'] = epf['properties']['guid'] - damage_result['fragility_id'] = None + ds_result["guid"] = epf["properties"]["guid"] + damage_result["guid"] = epf["properties"]["guid"] + damage_result["fragility_id"] = None damage_result["demandtypes"] = None - damage_result['demandunits'] = None + damage_result["demandunits"] = None damage_result["hazardtype"] = None - damage_result['hazardval'] = None + damage_result["hazardval"] = None if hazard_type == "earthquake" and use_liquefaction is True: - damage_result['liq_fragility_id'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazval'] = None - damage_result['liqprobability'] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -307,105 +349,102 @@ def get_spec(self): """ return { - 'name': 'epf-damage', - 'description': 'Electric Power Facility damage analysis.', - 'input_parameters': [ + "name": "epf-damage", + "description": "Electric Power Facility damage analysis.", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'A name of the resulting dataset', - 'type': str + "id": "result_name", + "required": True, + "description": "A 
name of the resulting dataset", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard type (e.g. earthquake).', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard type (e.g. earthquake).", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID which defines the particular hazard (e.g. New madrid earthquake ' - 'using Atkinson Boore 1995).', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID which defines the particular hazard (e.g. New madrid earthquake " + "using Atkinson Boore 1995).", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset ()', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset ()", + "type": str, }, { - 'id': 'liquefaction_fragility_key', - 'required': False, - 'description': 'Fragility key to use in liquefaction mapping dataset', - 'type': str + "id": "liquefaction_fragility_key", + "required": False, + "description": "Fragility key to use in liquefaction mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use a ground liquifacition to modify damage interval.', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use a ground liquifacition to modify damage interval.", + "type": bool, }, { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Liquefaction geology/susceptibility dataset id. ' - 'If not provided, liquefaction will be ignored', - 'type': str + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Liquefaction geology/susceptibility dataset id. 
" + "If not provided, liquefaction will be ignored", + "type": str, }, { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request.', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request.", + "type": int, }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tornado", "hurricane", "flood", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "tornado", "hurricane", "flood", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'epfs', - 'required': True, - 'description': 'Electric Power Facility Inventory', - 'type': ['incore:epf', - 'ergo:epf', - 'incore:epfVer2' - ], + "id": "epfs", + "required": True, + "description": "Electric Power Facility Inventory", + "type": ["incore:epf", "ergo:epf", "incore:epfVer2"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'epfs', - 'description': 'A csv file with limit state probabilities and damage states ' - 'for each electric power facility', - 'type': 'incore:epfDamageVer3' + "id": "result", + "parent_type": "epfs", + "description": "A csv file with limit state probabilities and damage states " + "for each electric power facility", + "type": "incore:epfDamageVer3", }, { - 'id': 'metadata', - 'parent_type': 'epfs', - 'description': 'additional metadata in json file about applied hazard value and ' - 'fragility', - 'type': 'incore:epfDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "epfs", + "description": "additional metadata in json file about applied hazard value and " + "fragility", + "type": "incore:epfDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/epfdamage/epfutil.py b/pyincore/analyses/epfdamage/epfutil.py index 9acb15157..ae10e9372 100644 --- a/pyincore/analyses/epfdamage/epfutil.py +++ b/pyincore/analyses/epfdamage/epfutil.py @@ -7,17 +7,30 @@ class EpfUtil: """Utility methods for the electric power facility damage analysis.""" + EPF_HAZUS_FRAGILITY_KEYS = { - "ESS1", "Low Voltage (115 KV) Substation (Anchored/Seismic Components)", - "ESS2", "Low Voltage (115 KV) Substation (Unanchored/Standard Components)", - "ESS3", "Medium Voltage (230 KV) Substation (Anchored/Seismic Components)", - "ESS4", "Medium Voltage (230 KV) Substation (Unanchored/Standard Components)", - "ESS5", "High Voltage (500 KV) Substation (Anchored/Seismic Components)", - "ESS6", "High Voltage (500 KV) Substation (Unanchored/Standard Components)", - "EPP1", "Small Generation Facility (Anchored Components)", - "EPP2", "Small Generation Facility (Unanchored Components)", - "EPP3", "Medium/Large Generation Facility (Anchored Components)", - "EPP4", "Medium/Large Generation Facility (Unanchored Components)", - "EDC1", "Distribution Circuit (Seismic Components)", - "EDC2", "Distribution Circuit (Standard Components)" 
+ "ESS1", + "Low Voltage (115 KV) Substation (Anchored/Seismic Components)", + "ESS2", + "Low Voltage (115 KV) Substation (Unanchored/Standard Components)", + "ESS3", + "Medium Voltage (230 KV) Substation (Anchored/Seismic Components)", + "ESS4", + "Medium Voltage (230 KV) Substation (Unanchored/Standard Components)", + "ESS5", + "High Voltage (500 KV) Substation (Anchored/Seismic Components)", + "ESS6", + "High Voltage (500 KV) Substation (Unanchored/Standard Components)", + "EPP1", + "Small Generation Facility (Anchored Components)", + "EPP2", + "Small Generation Facility (Unanchored Components)", + "EPP3", + "Medium/Large Generation Facility (Anchored Components)", + "EPP4", + "Medium/Large Generation Facility (Unanchored Components)", + "EDC1", + "Distribution Circuit (Seismic Components)", + "EDC2", + "Distribution Circuit (Standard Components)", } diff --git a/pyincore/analyses/epfrepaircost/epfrepaircost.py b/pyincore/analyses/epfrepaircost/epfrepaircost.py index 8ee2bade9..771d60794 100644 --- a/pyincore/analyses/epfrepaircost/epfrepaircost.py +++ b/pyincore/analyses/epfrepaircost/epfrepaircost.py @@ -25,8 +25,12 @@ def run(self): """Executes electric power facility repair cost analysis.""" epf_df = self.get_input_dataset("epfs").get_dataframe_from_shapefile() - sample_damage_states_df = self.get_input_dataset("sample_damage_states").get_dataframe_from_csv() - replacement_cost = self.get_input_dataset("replacement_cost").get_dataframe_from_csv() + sample_damage_states_df = self.get_input_dataset( + "sample_damage_states" + ).get_dataframe_from_csv() + replacement_cost = self.get_input_dataset( + "replacement_cost" + ).get_dataframe_from_csv() # join damage state, replacement cost, with original inventory epf_df = epf_df.merge(sample_damage_states_df, on="guid") @@ -34,22 +38,32 @@ def run(self): epf_set = epf_df.to_dict(orient="records") user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(epf_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(epf_set), user_defined_cpu + ) avg_bulk_input_size = int(len(epf_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(epf_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - repair_costs = self.epf_repair_cost_concurrent_future(self.epf_repair_cost_bulk_input, num_workers, - inventory_args) - self.set_result_csv_data("result", repair_costs, name=self.get_parameter("result_name") + "_repair_cost") + repair_costs = self.epf_repair_cost_concurrent_future( + self.epf_repair_cost_bulk_input, num_workers, inventory_args + ) + self.set_result_csv_data( + "result", + repair_costs, + name=self.get_parameter("result_name") + "_repair_cost", + ) return True @@ -67,7 +81,9 @@ def epf_repair_cost_concurrent_future(self, function_name, num_workers, *args): """ output = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1 in executor.map(function_name, *args): output.extend(ret1) @@ -85,7 +101,9 @@ def 
epf_repair_cost_bulk_input(self, epfs): """ # read in the damage ratio tables epf_dmg_ratios_csv = self.get_input_dataset("epf_dmg_ratios").get_csv_reader() - dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows(epf_dmg_ratios_csv, ignore_first_row=False) + dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows( + epf_dmg_ratios_csv, ignore_first_row=False + ) repair_costs = [] @@ -99,12 +117,15 @@ def epf_repair_cost_bulk_input(self, epfs): for n, ds in enumerate(sample_damage_states): for dmg_ratio_row in dmg_ratio_tbl: # use "in" instead of "==" since some inventory has pending number (e.g. EDC2) - if dmg_ratio_row["Inventory Type"] in epf_type and dmg_ratio_row["Damage State"] == ds: + if ( + dmg_ratio_row["Inventory Type"] in epf_type + and dmg_ratio_row["Damage State"] == ds + ): dr = float(dmg_ratio_row["Best Mean Damage Ratio"]) repair_cost[n] = str(epf["replacement_cost"] * dr) - rc["budget"] = ','.join(repair_cost) - rc["repaircost"] = ','.join(repair_cost) + rc["budget"] = ",".join(repair_cost) + rc["repaircost"] = ",".join(repair_cost) repair_costs.append(rc) @@ -125,13 +146,13 @@ def get_spec(self): "id": "result_name", "required": True, "description": "A name of the resulting dataset", - "type": str + "type": str, }, { "id": "num_cpu", "required": False, "description": "If using parallel execution, the number of cpus to request.", - "type": int + "type": int, }, ], "input_datasets": [ @@ -139,10 +160,7 @@ def get_spec(self): "id": "epfs", "required": True, "description": "Electric Power Facility Inventory", - "type": ["incore:epf", - "ergo:epf", - "incore:epfVer2" - ], + "type": ["incore:epf", "ergo:epf", "incore:epfVer2"], }, { "id": "replacement_cost", @@ -154,13 +172,13 @@ def get_spec(self): "id": "sample_damage_states", "required": True, "description": "sample damage states from Monte Carlo Simulation", - "type": ["incore:sampleDamageState"] + "type": ["incore:sampleDamageState"], }, { "id": "epf_dmg_ratios", "required": True, "description": "Damage Ratios table", - "type": ["incore:epfDamageRatios"] + "type": ["incore:epfDamageRatios"], }, ], "output_datasets": [ @@ -168,7 +186,7 @@ def get_spec(self): "id": "result", "parent_type": "epfs", "description": "A csv file with repair cost for each electric power facility", - "type": "incore:repairCost" + "type": "incore:repairCost", } - ] + ], } diff --git a/pyincore/analyses/epfrestoration/epfrestoration.py b/pyincore/analyses/epfrestoration/epfrestoration.py index 9a9864131..9669bab43 100644 --- a/pyincore/analyses/epfrestoration/epfrestoration.py +++ b/pyincore/analyses/epfrestoration/epfrestoration.py @@ -55,25 +55,62 @@ def run(self): damage = self.get_input_dataset("damage").get_csv_reader() damage_result = AnalysisUtil.get_csv_table_rows(damage, ignore_first_row=False) - (inventory_restoration_map, pf_results, time_results, func_results, - repair_times) = self.electricpowerfacility_restoration( - inventory_list, damage_result, mapping_set, restoration_key, end_time, time_interval, pf_interval, - discretized_days) - - self.set_result_csv_data("inventory_restoration_map", inventory_restoration_map, - name="inventory_restoration_map_" + self.get_parameter("result_name")) - self.set_result_csv_data("pf_results", time_results, name="percentage_of_functionality_" + - self.get_parameter("result_name")) - self.set_result_csv_data("time_results", pf_results, name="reptime_" + self.get_parameter("result_name")) - self.set_result_csv_data("func_results", func_results, - name=self.get_parameter("result_name") + "_discretized_restoration") - 
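# Hypothetical walk-through of the repair-cost lookup in the hunk above: each
# Monte Carlo damage-state sample is matched against a damage-ratio table and
# multiplied by the facility's replacement cost. The table entries, inventory
# type, and cost below are invented for illustration only.
sample_damage_states = "DS_1,DS_3,DS_2".split(",")
replacement_cost = 1_000_000.0
dmg_ratio_tbl = {
    ("ESS1", "DS_0"): 0.00,
    ("ESS1", "DS_1"): 0.05,
    ("ESS1", "DS_2"): 0.11,
    ("ESS1", "DS_3"): 0.55,
}
repair_cost = [
    str(replacement_cost * dmg_ratio_tbl[("ESS1", ds)]) for ds in sample_damage_states
]
budget = ",".join(repair_cost)
print(budget)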
self.set_result_csv_data("repair_times", repair_times, name="full_reptime_" + self.get_parameter( - "result_name")) + ( + inventory_restoration_map, + pf_results, + time_results, + func_results, + repair_times, + ) = self.electricpowerfacility_restoration( + inventory_list, + damage_result, + mapping_set, + restoration_key, + end_time, + time_interval, + pf_interval, + discretized_days, + ) + + self.set_result_csv_data( + "inventory_restoration_map", + inventory_restoration_map, + name="inventory_restoration_map_" + self.get_parameter("result_name"), + ) + self.set_result_csv_data( + "pf_results", + time_results, + name="percentage_of_functionality_" + self.get_parameter("result_name"), + ) + self.set_result_csv_data( + "time_results", + pf_results, + name="reptime_" + self.get_parameter("result_name"), + ) + self.set_result_csv_data( + "func_results", + func_results, + name=self.get_parameter("result_name") + "_discretized_restoration", + ) + self.set_result_csv_data( + "repair_times", + repair_times, + name="full_reptime_" + self.get_parameter("result_name"), + ) return True - def electricpowerfacility_restoration(self, inventory_list, damage_result, mapping_set, restoration_key, end_time, - time_interval, pf_interval, discretized_days): + def electricpowerfacility_restoration( + self, + inventory_list, + damage_result, + mapping_set, + restoration_key, + end_time, + time_interval, + pf_interval, + discretized_days, + ): """Gets applicable restoration curve set and calculates restoration time and functionality Args: @@ -97,21 +134,30 @@ def electricpowerfacility_restoration(self, inventory_list, damage_result, mappi # Obtain classification for each electric facility, used to lookup discretized functionality inventory_class_map = {} restoration_sets = self.restorationsvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), inventory_list, restoration_key) + self.get_input_dataset("dfr3_mapping_set"), inventory_list, restoration_key + ) for inventory in inventory_list: if inventory["id"] in restoration_sets.keys(): restoration_set_id = restoration_sets[inventory["id"]].id else: restoration_set_id = None - inventory_restoration_map.append({"guid": inventory['properties']['guid'], - "restoration_id": restoration_set_id}) + inventory_restoration_map.append( + { + "guid": inventory["properties"]["guid"], + "restoration_id": restoration_set_id, + } + ) restoration_curve_set = restoration_sets[inventory["id"]] # For each facility, get the discretized restoration from the continuous curve - discretized_restoration = AnalysisUtil.get_discretized_restoration(restoration_curve_set, discretized_days) - inventory_class_map[inventory['properties']['guid']] = discretized_restoration + discretized_restoration = AnalysisUtil.get_discretized_restoration( + restoration_curve_set, discretized_days + ) + inventory_class_map[ + inventory["properties"]["guid"] + ] = discretized_restoration time_results = [] pf_results = [] @@ -122,163 +168,187 @@ def electricpowerfacility_restoration(self, inventory_list, damage_result, mappi # if it's string:id; then need to fetch it from remote and cast to restorationcurveset object restoration_curve_set = mapping.entry[restoration_key] if isinstance(restoration_curve_set, str): - restoration_curve_set = RestorationCurveSet(self.restorationsvc.get_dfr3_set(restoration_curve_set)) + restoration_curve_set = RestorationCurveSet( + self.restorationsvc.get_dfr3_set(restoration_curve_set) + ) # given time calculate pf time = np.arange(0, end_time + time_interval, 
time_interval) for t in time: - pf_results.append({ - "restoration_id": restoration_curve_set.id, - "time": t, - **restoration_curve_set.calculate_restoration_rates(time=t) - }) + pf_results.append( + { + "restoration_id": restoration_curve_set.id, + "time": t, + **restoration_curve_set.calculate_restoration_rates(time=t), + } + ) # given pf calculate time pf = np.arange(0, 1 + pf_interval, pf_interval) for p in pf: new_dict = {} - t_res = restoration_curve_set.calculate_inverse_restoration_rates(time=p) + t_res = restoration_curve_set.calculate_inverse_restoration_rates( + time=p + ) for key, value in t_res.items(): new_dict.update({"time_" + key: value}) - time_results.append({ - "restoration_id": restoration_curve_set.id, - "percentage_of_functionality": p, - **new_dict - }) - - repair_time[restoration_curve_set.id] = restoration_curve_set.calculate_inverse_restoration_rates(time=0.99) + time_results.append( + { + "restoration_id": restoration_curve_set.id, + "percentage_of_functionality": p, + **new_dict, + } + ) + + repair_time[ + restoration_curve_set.id + ] = restoration_curve_set.calculate_inverse_restoration_rates(time=0.99) # Compute discretized restoration func_result = [] for dmg in damage_result: - guid = dmg['guid'] - + guid = dmg["guid"] + # Dictionary of discretized restoration functionality rest_dict = inventory_class_map[guid] - ds_0, ds_1, ds_2, ds_3, ds_4 = dmg['DS_0'], dmg['DS_1'], dmg['DS_2'], dmg['DS_3'], dmg['DS_4'] + ds_0, ds_1, ds_2, ds_3, ds_4 = ( + dmg["DS_0"], + dmg["DS_1"], + dmg["DS_2"], + dmg["DS_3"], + dmg["DS_4"], + ) result_dict = {} for time in discretized_days: key = "day" + str(time) # Only compute if we have damage if ds_0: - functionality = (rest_dict[key][0] * float(ds_0) + rest_dict[key][1] * float(ds_1) + rest_dict[ - key][2] * float(ds_2) + rest_dict[key][3] * float(ds_3) + rest_dict[key][4] * float(ds_4)) + functionality = ( + rest_dict[key][0] * float(ds_0) + + rest_dict[key][1] * float(ds_1) + + rest_dict[key][2] * float(ds_2) + + rest_dict[key][3] * float(ds_3) + + rest_dict[key][4] * float(ds_4) + ) result_dict.update({str(key): functionality}) func_result.append({"guid": guid, **result_dict}) repair_times = [] for inventory in inventory_restoration_map: - repair_times.append({"guid": inventory["guid"], **repair_time[inventory["restoration_id"]]}) - - return inventory_restoration_map, pf_results, time_results, func_result, repair_times + repair_times.append( + {"guid": inventory["guid"], **repair_time[inventory["restoration_id"]]} + ) + + return ( + inventory_restoration_map, + pf_results, + time_results, + func_result, + repair_times, + ) def get_spec(self): return { - 'name': 'electric-power-facility-restoration', - 'description': 'electric power facility restoration analysis', - 'input_parameters': [ + "name": "electric-power-facility-restoration", + "description": "electric power facility restoration analysis", + "input_parameters": [ { - 'id': 'restoration_key', - 'required': False, - 'description': 'restoration key to use in mapping dataset', - 'type': str + "id": "restoration_key", + "required": False, + "description": "restoration key to use in mapping dataset", + "type": str, }, { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'end_time', - 'required': False, - 'description': 'end time. 
Default to 365.', - 'type': float + "id": "end_time", + "required": False, + "description": "end time. Default to 365.", + "type": float, }, { - 'id': 'time_interval', - 'required': False, - 'description': 'incremental interval for time in days. Default to 1', - 'type': float + "id": "time_interval", + "required": False, + "description": "incremental interval for time in days. Default to 1", + "type": float, }, { - 'id': 'pf_interval', - 'required': False, - 'description': 'incremental interval for percentage of functionality. Default to 0.1', - 'type': float + "id": "pf_interval", + "required": False, + "description": "incremental interval for percentage of functionality. Default to 0.1", + "type": float, }, { - 'id': 'discretized_days', - 'required': False, - 'description': 'Discretized days to compute functionality', - 'type': List[int] - } + "id": "discretized_days", + "required": False, + "description": "Discretized days to compute functionality", + "type": List[int], + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'epfs', - 'required': True, - 'description': 'Electric Power Facility Inventory', - 'type': ['incore:epf', 'ergo:epf'], + "id": "epfs", + "required": True, + "description": "Electric Power Facility Inventory", + "type": ["incore:epf", "ergo:epf"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], }, { - - 'id': 'damage', - 'required': True, - 'description': 'damage result that has damage intervals in it', - 'type': [ - 'incore:epfDamage', - 'incore:epfDamageVer2', - 'incore:epfDamageVer3'] - } - - - + "id": "damage", + "required": True, + "description": "damage result that has damage intervals in it", + "type": [ + "incore:epfDamage", + "incore:epfDamageVer2", + "incore:epfDamageVer3", + ], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': "inventory_restoration_map", - 'parent_type': '', - 'description': 'A csv file recording the mapping relationship between GUID and restoration id ' - 'applicable.', - 'type': 'incore:inventoryRestorationMap' + "id": "inventory_restoration_map", + "parent_type": "", + "description": "A csv file recording the mapping relationship between GUID and restoration id " + "applicable.", + "type": "incore:inventoryRestorationMap", }, { - 'id': 'pf_results', - 'parent_type': '', - 'description': 'A csv file recording functionality change with time for each class and limit ' - 'state.', - 'type': 'incore:epfRestorationFunc' + "id": "pf_results", + "parent_type": "", + "description": "A csv file recording functionality change with time for each class and limit " + "state.", + "type": "incore:epfRestorationFunc", }, { - 'id': 'time_results', - 'parent_type': '', - 'description': 'A csv file recording repair time at certain functionality recovery for each class ' - 'and limit state.', - 'type': 'incore:epfRestorationTime' + "id": "time_results", + "parent_type": "", + "description": "A csv file recording repair time at certain functionality recovery for each class " + "and limit state.", + "type": "incore:epfRestorationTime", }, { - - 'id': 'func_results', - 'parent_type': '', - 'description': 'A csv file recording discretized functionality over time', - 'type': 'incore:epfDiscretizedRestorationFunc' + "id": "func_results", + "parent_type": "", + "description": "A csv file recording discretized functionality over 
time", + "type": "incore:epfDiscretizedRestorationFunc", }, { - 'id': 'repair_times', - 'parent_type': '', - 'description': 'A csv file recording repair time at full functionality recovery for each guid ' - 'and limit state.', - 'type': 'incore:epfRepairTime' - } - - ] + "id": "repair_times", + "parent_type": "", + "description": "A csv file recording repair time at full functionality recovery for each guid " + "and limit state.", + "type": "incore:epfRepairTime", + }, + ], } diff --git a/pyincore/analyses/epfrestoration/epfrestorationutil.py b/pyincore/analyses/epfrestoration/epfrestorationutil.py index 6e02b9e85..5452845e4 100644 --- a/pyincore/analyses/epfrestoration/epfrestorationutil.py +++ b/pyincore/analyses/epfrestoration/epfrestorationutil.py @@ -1,15 +1,32 @@ -from pyincore.analyses.epfrestoration import EpfRestoration +# Copyright (c) 2024 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ class EpfRestorationUtil: - def __init__(self, inventory_restoration_map, pf_results, time_results, time_interval, pf_interval, end_time): - + def __init__( + self, + inventory_restoration_map, + pf_results, + time_results, + time_interval, + pf_interval, + end_time, + ): # merge inventory_restoration_map with pf and timetables - inventory_restoration_map_df = inventory_restoration_map.get_dataframe_from_csv() + inventory_restoration_map_df = ( + inventory_restoration_map.get_dataframe_from_csv() + ) pf_results_df = pf_results.get_dataframe_from_csv() time_results_df = time_results.get_dataframe_from_csv() - self.pf_results_df = inventory_restoration_map_df.merge(pf_results_df, on="restoration_id").set_index('guid') - self.time_results_df = inventory_restoration_map_df.merge(time_results_df, on="restoration_id").set_index('guid') + self.pf_results_df = inventory_restoration_map_df.merge( + pf_results_df, on="restoration_id" + ).set_index("guid") + self.time_results_df = inventory_restoration_map_df.merge( + time_results_df, on="restoration_id" + ).set_index("guid") self.time_interval = time_interval self.pf_interval = pf_interval @@ -22,18 +39,25 @@ def get_restoration_time(self, guid, damage_state="DS_0", pf=0.99): state = "time_" + damage_state.replace("DS", "PF") df = self.pf_results_df.loc[guid].reset_index(drop=True) # round up and get the closest - time = df.loc[(df["percentage_of_functionality"] >= pf) & (df["percentage_of_functionality"] < - pf+self.pf_interval), state].values[0] + time = df.loc[ + (df["percentage_of_functionality"] >= pf) + & (df["percentage_of_functionality"] < pf + self.pf_interval), + state, + ].values[0] return time def get_percentage_func(self, guid, damage_state="DS_0", time=1): if time > self.end_time: - raise ValueError("restore time should not be larger than end time for restoration model!") + raise ValueError( + "restore time should not be larger than end time for restoration model!" 
+ ) state = damage_state.replace("DS", "PF") df = self.time_results_df.loc[guid].reset_index(drop=True) # round up and get the closest - pf = df.loc[(df["time"] >= time) & df['time'] < time+self.time_interval, state].values[0] + pf = df.loc[ + (df["time"] >= time) & df["time"] < time + self.time_interval, state + ].values[0] return pf diff --git a/pyincore/analyses/epnfunctionality/epnfunctionality.py b/pyincore/analyses/epnfunctionality/epnfunctionality.py index 6b85521eb..4a9a976c9 100644 --- a/pyincore/analyses/epnfunctionality/epnfunctionality.py +++ b/pyincore/analyses/epnfunctionality/epnfunctionality.py @@ -23,56 +23,90 @@ def __init__(self, incore_client): super(EpnFunctionality, self).__init__(incore_client) def run(self): - """Execute electric power facility functionality analysis """ + """Execute electric power facility functionality analysis""" # get network dataset - network_dataset = NetworkDataset.from_dataset(self.get_input_dataset("epn_network")) + network_dataset = NetworkDataset.from_dataset( + self.get_input_dataset("epn_network") + ) links_epl_gdf = network_dataset.links.get_dataframe_from_shapefile() nodes_epf_gdf = network_dataset.nodes.get_dataframe_from_shapefile() - links_epl_gdf['weight'] = links_epl_gdf.loc[:, 'length_km'] + links_epl_gdf["weight"] = links_epl_gdf.loc[:, "length_km"] G_ep = network_dataset.get_graph_networkx() # get epf sample - epf_dmg_fs = self.get_input_dataset('epf_sample_failure_state').get_dataframe_from_csv() + epf_dmg_fs = self.get_input_dataset( + "epf_sample_failure_state" + ).get_dataframe_from_csv() epf_sample_df = pd.DataFrame( - np.array([np.array(epf_dmg_fs.failure.values[i].split(',')).astype('int') - for i in np.arange(epf_dmg_fs.shape[0])]), - index=epf_dmg_fs.guid.values) + np.array( + [ + np.array(epf_dmg_fs.failure.values[i].split(",")).astype("int") + for i in np.arange(epf_dmg_fs.shape[0]) + ] + ), + index=epf_dmg_fs.guid.values, + ) # get the sample number num_samples = epf_sample_df.shape[1] - sampcols = ['s' + samp for samp in np.arange(num_samples).astype(str)] + sampcols = ["s" + samp for samp in np.arange(num_samples).astype(str)] # add column epf_sample_df.columns = sampcols - epf_sample_df1 = nodes_epf_gdf.loc[:, ['guid', 'nodenwid']].set_index('guid').join(epf_sample_df) + epf_sample_df1 = ( + nodes_epf_gdf.loc[:, ["guid", "nodenwid"]] + .set_index("guid") + .join(epf_sample_df) + ) # get gate station nodes gate_station_node_list = self.get_parameter("gate_station_node_list") if gate_station_node_list is None: # default to EPPL - gatestation_nodes_class = 'EPPL' + gatestation_nodes_class = "EPPL" # get the guid from the matching class - gate_station_node_list = nodes_epf_gdf[nodes_epf_gdf["utilfcltyc"] == gatestation_nodes_class]["nodenwid"]\ - .to_list() + gate_station_node_list = nodes_epf_gdf[ + nodes_epf_gdf["utilfcltyc"] == gatestation_nodes_class + ]["nodenwid"].to_list() # calculate the distribution nodes - distribution_sub_nodes = list(set(list(G_ep.nodes)) - set(gate_station_node_list)) - - (fs_results, fp_results) = self.epf_functionality(distribution_sub_nodes, gate_station_node_list, num_samples, - sampcols, epf_sample_df1, G_ep) - - self.set_result_csv_data("sample_failure_state", - fs_results, name=self.get_parameter("result_name") + "_failure_state", - source="dataframe") - self.set_result_csv_data("failure_probability", - fp_results, - name=self.get_parameter("result_name") + "_failure_probability", - source="dataframe") + distribution_sub_nodes = list( + set(list(G_ep.nodes)) - 
set(gate_station_node_list) + ) + + (fs_results, fp_results) = self.epf_functionality( + distribution_sub_nodes, + gate_station_node_list, + num_samples, + sampcols, + epf_sample_df1, + G_ep, + ) + + self.set_result_csv_data( + "sample_failure_state", + fs_results, + name=self.get_parameter("result_name") + "_failure_state", + source="dataframe", + ) + self.set_result_csv_data( + "failure_probability", + fp_results, + name=self.get_parameter("result_name") + "_failure_probability", + source="dataframe", + ) return True - def epf_functionality(self, distribution_sub_nodes, gate_station_node_list, num_samples, sampcols, epf_sample_df1, - G_ep): + def epf_functionality( + self, + distribution_sub_nodes, + gate_station_node_list, + num_samples, + sampcols, + epf_sample_df1, + G_ep, + ): """ Run EPN functionality analysis. @@ -92,36 +126,50 @@ def epf_functionality(self, distribution_sub_nodes, gate_station_node_list, num_ # a distance of M denotes disconnection M = 9999 - func_ep_df = pd.DataFrame(np.zeros((len(distribution_sub_nodes), num_samples)), index=distribution_sub_nodes, - columns=sampcols) + func_ep_df = pd.DataFrame( + np.zeros((len(distribution_sub_nodes), num_samples)), + index=distribution_sub_nodes, + columns=sampcols, + ) for si, scol in enumerate(sampcols): - nodestate_ep = epf_sample_df1.loc[:, ['nodenwid', scol]] + nodestate_ep = epf_sample_df1.loc[:, ["nodenwid", scol]] linkstate_ep = None - badlinks_ep = EpnFunctionalityUtil.get_bad_edges(G_ep, nodestate_ep, linkstate_ep, scol) - badlinkdict_ep = {k: {'weight': M} for k in badlinks_ep} + badlinks_ep = EpnFunctionalityUtil.get_bad_edges( + G_ep, nodestate_ep, linkstate_ep, scol + ) + badlinkdict_ep = {k: {"weight": M} for k in badlinks_ep} G1_ep = copy.deepcopy(G_ep) nx.set_edge_attributes(G1_ep, badlinkdict_ep) - res_ep = EpnFunctionalityUtil.network_shortest_paths(G1_ep, gate_station_node_list, distribution_sub_nodes) + res_ep = EpnFunctionalityUtil.network_shortest_paths( + G1_ep, gate_station_node_list, distribution_sub_nodes + ) func_ep_df.loc[distribution_sub_nodes, scol] = (res_ep < M) * 1 # use nodenwid index to get its guid - fs_temp = pd.merge(func_ep_df, epf_sample_df1["nodenwid"], left_index=True, right_on="nodenwid", - how='left').drop(columns=["nodenwid"]) + fs_temp = pd.merge( + func_ep_df, + epf_sample_df1["nodenwid"], + left_index=True, + right_on="nodenwid", + how="left", + ).drop(columns=["nodenwid"]) fp_temp = fs_temp.copy(deep=True) # shape the dataframe into failure probability and failure samples - fs_temp['failure'] = fs_temp.astype(str).apply(','.join, axis=1) - fs_results = fs_temp.filter(['failure']) + fs_temp["failure"] = fs_temp.astype(str).apply(",".join, axis=1) + fs_results = fs_temp.filter(["failure"]) fs_results.reset_index(inplace=True) - fs_results = fs_results.rename(columns={'index': 'guid'}) + fs_results = fs_results.rename(columns={"index": "guid"}) # calculate failure probability # count of 0s divided by sample size - fp_temp["failure_probability"] = (num_samples - fp_temp.sum(axis=1).astype(int)) / num_samples - fp_results = fp_temp.filter(['failure_probability']) + fp_temp["failure_probability"] = ( + num_samples - fp_temp.sum(axis=1).astype(int) + ) / num_samples + fp_results = fp_temp.filter(["failure_probability"]) fp_results.reset_index(inplace=True) - fp_results = fp_results.rename(columns={'index': 'guid'}) + fp_results = fp_results.rename(columns={"index": "guid"}) return fs_results, fp_results @@ -131,46 +179,46 @@ def get_spec(self): obj: A JSON object of specifications of 
the EPN functionality analysis. """ return { - 'name': 'epn-functionality', - 'description': 'electric power network functionality analysis', - 'input_parameters': [ + "name": "epn-functionality", + "description": "electric power network functionality analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'gate_station_node_list', - 'required': False, - 'description': "list of gate station nodes", - 'type': List[int] + "id": "gate_station_node_list", + "required": False, + "description": "list of gate station nodes", + "type": List[int], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'epn_network', - 'required': True, - 'description': 'EPN Network Dataset', - 'type': ['incore:epnNetwork'], + "id": "epn_network", + "required": True, + "description": "EPN Network Dataset", + "type": ["incore:epnNetwork"], }, { - 'id': 'epf_sample_failure_state', - 'required': True, - 'description': 'CSV file of failure state for each sample. Output from MCS analysis', - 'type': ['incore:sampleFailureState'] + "id": "epf_sample_failure_state", + "required": True, + "description": "CSV file of failure state for each sample. Output from MCS analysis", + "type": ["incore:sampleFailureState"], }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'failure_probability', - 'description': 'CSV file of failure probability', - 'type': 'incore:failureProbability' + "id": "failure_probability", + "description": "CSV file of failure probability", + "type": "incore:failureProbability", }, { - 'id': 'sample_failure_state', - 'description': 'CSV file of failure state for each sample', - 'type': 'incore:sampleFailureState' + "id": "sample_failure_state", + "description": "CSV file of failure state for each sample", + "type": "incore:sampleFailureState", }, - ] + ], } diff --git a/pyincore/analyses/epnfunctionality/epnfunctionalityutil.py b/pyincore/analyses/epnfunctionality/epnfunctionalityutil.py index 25bb300b1..7a71d043a 100644 --- a/pyincore/analyses/epnfunctionality/epnfunctionalityutil.py +++ b/pyincore/analyses/epnfunctionality/epnfunctionalityutil.py @@ -9,12 +9,13 @@ class EpnFunctionalityUtil: - @staticmethod - def get_bad_edges(G, nodestate, linkstate=None, scol='s0'): - badnodes = nodestate.loc[nodestate.loc[:, scol] == 0, 'nodenwid'].values + def get_bad_edges(G, nodestate, linkstate=None, scol="s0"): + badnodes = nodestate.loc[nodestate.loc[:, scol] == 0, "nodenwid"].values if linkstate is not None: - badlinks = linkstate.loc[linkstate.loc[:, scol] == 0, ['fromnode', 'tonode']].values + badlinks = linkstate.loc[ + linkstate.loc[:, scol] == 0, ["fromnode", "tonode"] + ].values badlinks = list(zip(badlinks[:, 0], badlinks[:, 1])) else: badlinks = [] @@ -23,5 +24,9 @@ def get_bad_edges(G, nodestate, linkstate=None, scol='s0'): return list(set(badlinks)) @staticmethod - def network_shortest_paths(G, sources, sinks, weightcol='weight'): - return pd.Series(nx.multi_source_dijkstra_path_length(G, sources, cutoff=None, weight=weightcol))[sinks] + def network_shortest_paths(G, sources, sinks, weightcol="weight"): + return pd.Series( + nx.multi_source_dijkstra_path_length( + G, sources, cutoff=None, weight=weightcol + ) + )[sinks] diff --git a/pyincore/analyses/example/exampleanalysis.py b/pyincore/analyses/example/exampleanalysis.py index de7df7e24..1e141c4e5 100755 --- 
a/pyincore/analyses/example/exampleanalysis.py +++ b/pyincore/analyses/example/exampleanalysis.py @@ -25,7 +25,9 @@ def run(self): results.append(self.building_damage_analysis(building)) # Create the result dataset - self.set_result_csv_data("result", results, name=self.get_parameter("result_name")) + self.set_result_csv_data( + "result", results, name=self.get_parameter("result_name") + ) return True @@ -45,10 +47,10 @@ def building_damage_analysis(self, building): bldg_results = collections.OrderedDict() mean_damage = collections.OrderedDict() - mean_damage['meandamage'] = random.uniform(0.0, 1.0) + mean_damage["meandamage"] = random.uniform(0.0, 1.0) # Add building global id so damage can be linked to building attributes - bldg_results['guid'] = building['properties']['guid'] + bldg_results["guid"] = building["properties"]["guid"] # Damage result bldg_results.update(mean_damage) @@ -68,31 +70,33 @@ def get_spec(self): """ return { - 'name': 'mock-building-damage', - 'description': 'mock-building damage analysis', - 'input_parameters': [ + "name": "mock-building-damage", + "description": "mock-building damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5'], + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + ], }, - ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'buildings', - 'description': 'CSV file of building structural damage', - 'type': 'ergo:buildingDamageVer4' + "id": "result", + "parent_type": "buildings", + "description": "CSV file of building structural damage", + "type": "ergo:buildingDamageVer4", } - ] + ], } diff --git a/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py b/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py index b1e729112..360c2e803 100644 --- a/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py +++ b/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py @@ -38,9 +38,11 @@ def run(self): inventory_set = self.get_input_dataset("gas_facilities").get_inventory_reader() # get input hazard - hazard, hazard_type, hazard_dataset_id = ( - self.create_hazard_object_from_input_params() - ) + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() user_defined_cpu = 1 @@ -310,9 +312,9 @@ def gasfacility_damage_analysis_bulk_input( **limit_states, **dmg_intervals, } - facility_result["haz_expose"] = ( - AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) - ) + facility_result[ + "haz_expose" + ] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) damage_result = dict() damage_result["guid"] = facility["properties"]["guid"] damage_result["fragility_id"] = fragility_set.id diff --git a/pyincore/analyses/housingrecovery/housingrecovery.py b/pyincore/analyses/housingrecovery/housingrecovery.py index 2395b9bc1..ea01d7292 100755 --- a/pyincore/analyses/housingrecovery/housingrecovery.py +++ b/pyincore/analyses/housingrecovery/housingrecovery.py @@ -9,8 +9,11 @@ from pyincore.analyses.housingvaluationrecovery import HousingValuationRecovery 
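# Editor's aside, not part of the patch: in the gasfacilitydamage hunk above,
# the black release used for this patch splits an over-long subscript
# assignment inside the brackets instead of wrapping the right-hand side, so
# the key ends up on its own line. A hand-written, self-contained sketch that
# mirrors that shape (the helper is a hypothetical stand-in, not the pyincore
# AnalysisUtil):
def get_exposure_from_hazard_values(hazard_values, hazard_type):
    # stand-in so the sketch runs on its own
    return "yes" if any(v > 0 for v in hazard_values) else "no"


facility_result = {}
hazard_values, hazard_type = [0.0, 0.8], "earthquake"

# before: facility_result["haz_expose"] = (get_exposure_from_hazard_values(hazard_values, hazard_type))
facility_result[
    "haz_expose"
] = get_exposure_from_hazard_values(hazard_values, hazard_type)
print(facility_result)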
-@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use HousingValuationRecovery instead.") -class HousingRecovery(): +@deprecated( + version="1.19.0", + reason="This class will be deprecated soon. Use HousingValuationRecovery instead.", +) +class HousingRecovery: def __init__(self, incore_client): self._delegate = HousingValuationRecovery(incore_client) diff --git a/pyincore/analyses/housingrecovery/housingrecoveryutil.py b/pyincore/analyses/housingrecovery/housingrecoveryutil.py index d7406a14b..ea8752c14 100644 --- a/pyincore/analyses/housingrecovery/housingrecoveryutil.py +++ b/pyincore/analyses/housingrecovery/housingrecoveryutil.py @@ -4,6 +4,7 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ + class HousingRecoveryUtil: BASEYEAR = 2008 @@ -22,7 +23,9 @@ class HousingRecoveryUtil: # Year indicator dummy variables B_PHM_year = {} B_PHM_year[-1] = 0.000000 # year -1, tax assessment immediately before disaster - B_PHM_year[0] = 0.263500 # year 0, tax assessment immediately after disaster, damage year + B_PHM_year[ + 0 + ] = 0.263500 # year 0, tax assessment immediately after disaster, damage year B_PHM_year[1] = 0.147208 # year +1 B_PHM_year[2] = 0.110004 # year +2 B_PHM_year[3] = 0.122228 # year +3 @@ -47,7 +50,7 @@ class HousingRecoveryUtil: # Owner-occupied and year dummy interactions B_PHM_own_year = {} - B_PHM_own_year[-1] = 0.017153 # base effect + B_PHM_own_year[-1] = 0.017153 # base effect B_PHM_own_year[0] = 0.129077 + B_PHM_own_year[-1] B_PHM_own_year[1] = 0.188217 + B_PHM_own_year[-1] B_PHM_own_year[2] = 0.235435 + B_PHM_own_year[-1] @@ -58,7 +61,7 @@ class HousingRecoveryUtil: # Median household income and year dummy interactions B_PHM_inc_year = {} - B_PHM_inc_year[-1] = 0.002724 # base effect + B_PHM_inc_year[-1] = 0.002724 # base effect B_PHM_inc_year[0] = 0.001190 + B_PHM_inc_year[-1] B_PHM_inc_year[1] = 0.001480 + B_PHM_inc_year[-1] B_PHM_inc_year[2] = 0.001746 + B_PHM_inc_year[-1] @@ -69,7 +72,7 @@ class HousingRecoveryUtil: # Block Group percent Minority and year dummy interactions B_PHM_min_year = {} - B_PHM_min_year[-1] = -0.004783 # base effect + B_PHM_min_year[-1] = -0.004783 # base effect B_PHM_min_year[0] = 0.005609 + B_PHM_min_year[-1] B_PHM_min_year[1] = 0.007343 + B_PHM_min_year[-1] B_PHM_min_year[2] = 0.007459 + B_PHM_min_year[-1] @@ -84,7 +87,9 @@ class HousingRecoveryUtil: # Year indicator dummy variables B_SVHM_year = {} B_SVHM_year[-1] = 0.000000 # year -1, tax assessment immediately before disaster - B_SVHM_year[0] = 1.489008 # year 0, tax assessment immediately after disaster, damage year + B_SVHM_year[ + 0 + ] = 1.489008 # year 0, tax assessment immediately after disaster, damage year B_SVHM_year[1] = 1.858770 # year +1 B_SVHM_year[2] = 2.163492 # year +2 B_SVHM_year[3] = 2.071690 # year +3 @@ -109,7 +114,7 @@ class HousingRecoveryUtil: # Owner-occupied and year dummy interactions B_SVHM_own_year = {} - B_SVHM_own_year[-1] = -0.017167 # base effect + B_SVHM_own_year[-1] = -0.017167 # base effect B_SVHM_own_year[0] = 0.043263 + B_SVHM_own_year[-1] B_SVHM_own_year[1] = 0.003315 + B_SVHM_own_year[-1] B_SVHM_own_year[2] = 0.034372 + B_SVHM_own_year[-1] @@ -120,7 +125,7 @@ class HousingRecoveryUtil: # Median household income and year dummy interactions B_SVHM_inc_year = {} - B_SVHM_inc_year[-1] = 0.003786 # base effect + B_SVHM_inc_year[-1] = 0.003786 # base effect B_SVHM_inc_year[0] = -0.013662 + B_SVHM_inc_year[-1] B_SVHM_inc_year[1] = 
-0.017401 + B_SVHM_inc_year[-1] B_SVHM_inc_year[2] = -0.021541 + B_SVHM_inc_year[-1] diff --git a/pyincore/analyses/housingrecoverysequential/__init__.py b/pyincore/analyses/housingrecoverysequential/__init__.py index 32421d52a..93575d2af 100644 --- a/pyincore/analyses/housingrecoverysequential/__init__.py +++ b/pyincore/analyses/housingrecoverysequential/__init__.py @@ -1 +1,3 @@ -from pyincore.analyses.housingrecoverysequential.housingrecoverysequential import HousingRecoverySequential +from pyincore.analyses.housingrecoverysequential.housingrecoverysequential import ( + HousingRecoverySequential, +) diff --git a/pyincore/analyses/housingrecoverysequential/housingrecoverysequential.py b/pyincore/analyses/housingrecoverysequential/housingrecoverysequential.py index 91fa8d31f..f6b400d93 100644 --- a/pyincore/analyses/housingrecoverysequential/housingrecoverysequential.py +++ b/pyincore/analyses/housingrecoverysequential/housingrecoverysequential.py @@ -37,50 +37,50 @@ class HousingRecoverySequential(BaseAnalysis): # Social vulnerability value generators per zone __sv_generator = { - 'Z1': { - 'threshold_0': 0.95, - 'below_lower': 0.00, - 'below_upper': 0.20, - 'above_lower': 0.20, - 'above_upper': 1.00 + "Z1": { + "threshold_0": 0.95, + "below_lower": 0.00, + "below_upper": 0.20, + "above_lower": 0.20, + "above_upper": 1.00, }, - 'Z2': { - 'threshold_0': 0.85, - 'below_lower': 0.20, - 'below_upper': 0.40, - 'threshold_1': 0.90, - 'middle_lower': 0.00, - 'middle_upper': 0.20, - 'above_lower': 0.40, - 'above_upper': 1.00 + "Z2": { + "threshold_0": 0.85, + "below_lower": 0.20, + "below_upper": 0.40, + "threshold_1": 0.90, + "middle_lower": 0.00, + "middle_upper": 0.20, + "above_lower": 0.40, + "above_upper": 1.00, }, - 'Z3': { - 'threshold_0': 0.80, - 'below_lower': 0.40, - 'below_upper': 0.60, - 'threshold_1': 0.90, - 'middle_lower': 0.00, - 'middle_upper': 0.40, - 'above_lower': 0.60, - 'above_upper': 1.00 + "Z3": { + "threshold_0": 0.80, + "below_lower": 0.40, + "below_upper": 0.60, + "threshold_1": 0.90, + "middle_lower": 0.00, + "middle_upper": 0.40, + "above_lower": 0.60, + "above_upper": 1.00, }, - 'Z4': { - 'threshold_0': 0.85, - 'below_lower': 0.60, - 'below_upper': 0.80, - 'threshold_1': 0.95, - 'middle_lower': 0.00, - 'middle_upper': 0.40, - 'above_lower': 0.80, - 'above_upper': 1.00 + "Z4": { + "threshold_0": 0.85, + "below_lower": 0.60, + "below_upper": 0.80, + "threshold_1": 0.95, + "middle_lower": 0.00, + "middle_upper": 0.40, + "above_lower": 0.80, + "above_upper": 1.00, + }, + "Z5": { + "threshold_0": 0.95, + "below_lower": 0.80, + "below_upper": 1.00, + "above_lower": 0.00, + "above_upper": 0.80, }, - 'Z5': { - 'threshold_0': 0.95, - 'below_lower': 0.80, - 'below_upper': 1.00, - 'above_lower': 0.00, - 'above_upper': 0.80 - } } def __init__(self, incore_client): @@ -89,20 +89,30 @@ def __init__(self, incore_client): def run(self): """Execute the HHRS analysis using parameters and input data.""" # Read parameters - t_delta = self.get_parameter('t_delta') - t_final = self.get_parameter('t_final') + t_delta = self.get_parameter("t_delta") + t_final = self.get_parameter("t_final") # Load population block data from IN-CORE - pop_dis_selectors = ['guid', 'huid', 'blockid', 'race', 'hispan', 'ownershp', 'dislocated'] - households_csv = self.get_input_dataset('population_dislocation_block').get_csv_reader() + pop_dis_selectors = [ + "guid", + "huid", + "blockid", + "race", + "hispan", + "ownershp", + "dislocated", + ] + households_csv = self.get_input_dataset( + 
"population_dislocation_block" + ).get_csv_reader() households_df = (pd.DataFrame(households_csv))[pop_dis_selectors] # Perform conversions across the dataset from object type into the appropriate type - households_df['blockid'] = households_df['blockid'].astype('int64') - households_df['race'] = pd.to_numeric(households_df['race']) - households_df['hispan'] = pd.to_numeric(households_df['hispan']) - households_df['ownershp'] = pd.to_numeric(households_df['ownershp']) - households_df['dislocated'] = (households_df['dislocated'] == 'True') + households_df["blockid"] = households_df["blockid"].astype("int64") + households_df["race"] = pd.to_numeric(households_df["race"]) + households_df["hispan"] = pd.to_numeric(households_df["hispan"]) + households_df["ownershp"] = pd.to_numeric(households_df["ownershp"]) + households_df["dislocated"] = households_df["dislocated"] == "True" # Load the transition probability matrix from IN-CORE tpm_csv = self.get_input_dataset("tpm").get_csv_reader() @@ -116,37 +126,52 @@ def run(self): tpm[:, 0] = np.around(tpm[:, 0], 3) # Load the initial stage probability vector - initial_prob_csv = self.get_input_dataset("initial_stage_probabilities").get_csv_reader() + initial_prob_csv = self.get_input_dataset( + "initial_stage_probabilities" + ).get_csv_reader() initial_prob = pd.DataFrame(list(initial_prob_csv)) - initial_prob['value'] = pd.to_numeric(initial_prob['value']) + initial_prob["value"] = pd.to_numeric(initial_prob["value"]) # Obtain the number of CPUs (cores) to execute the analysis with user_defined_cpu = 4 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(households_df), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(households_df), user_defined_cpu + ) # Chop dataset into `num_size` chunks - max_chunk_size = int(np.ceil(len(households_df)/num_workers)) + max_chunk_size = int(np.ceil(len(households_df) / num_workers)) households_df_list = list( - map(lambda x: households_df[x * max_chunk_size:x * max_chunk_size + max_chunk_size], - list(range(num_workers))) + map( + lambda x: households_df[ + x * max_chunk_size : x * max_chunk_size + max_chunk_size + ], + list(range(num_workers)), + ) ) # Run the analysis - result = self.hhrs_concurrent_future(self.housing_serial_recovery_model, - num_workers, - households_df_list, - repeat(t_delta), - repeat(t_final), - repeat(tpm), - repeat(initial_prob)) + result = self.hhrs_concurrent_future( + self.housing_serial_recovery_model, + num_workers, + households_df_list, + repeat(t_delta), + repeat(t_final), + repeat(tpm), + repeat(initial_prob), + ) result_name = self.get_parameter("result_name") - self.set_result_csv_data("ds_result", result, name=result_name, source="dataframe") + self.set_result_csv_data( + "ds_result", result, name=result_name, source="dataframe" + ) return True @@ -164,13 +189,17 @@ def hhrs_concurrent_future(self, function_name, parallelism, *args): """ output_ds = pd.DataFrame() - with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=parallelism + ) as executor: for ret in executor.map(function_name, *args): output_ds = pd.concat([output_ds, ret], ignore_index=True) return output_ds - def 
housing_serial_recovery_model(self, households_df, t_delta, t_final, tpm, initial_prob): + def housing_serial_recovery_model( + self, households_df, t_delta, t_final, tpm, initial_prob + ): """Performs the computation of the model as indicated in Sutley and Hamide (2020). Args: @@ -183,38 +212,44 @@ def housing_serial_recovery_model(self, households_df, t_delta, t_final, tpm, in Returns: pd.DataFrame: outcome of the HHRS model for a given household dataset. """ - seed = self.get_parameter('seed') + seed = self.get_parameter("seed") rng = np.random.RandomState(seed) - sv_result = self.get_input_dataset("sv_result").get_dataframe_from_csv(low_memory=False) + sv_result = self.get_input_dataset("sv_result").get_dataframe_from_csv( + low_memory=False + ) # turn fips code to string for ease of matching sv_result["FIPS"] = sv_result["FIPS"].astype(str) # Compute the social vulnerability zone using known factors - households_df = self.compute_social_vulnerability_zones(sv_result, households_df) + households_df = self.compute_social_vulnerability_zones( + sv_result, households_df + ) # Set the number of Markov chain stages stages = int(t_final / t_delta) # Data structure for selection operations - initial_prob['cumulative'] = initial_prob['value'].cumsum() + initial_prob["cumulative"] = initial_prob["value"].cumsum() # Obtain number of households with social vulnerability zones num_households = households_df.shape[0] # Obtain a social vulnerability score stochastically per household # We use them later to construct the final output dataset - sv_scores = self.compute_social_vulnerability_values(households_df, num_households, rng) + sv_scores = self.compute_social_vulnerability_values( + households_df, num_households, rng + ) # We store Markov states as a list of numpy arrays for convenience and add each one by one markov_stages = np.zeros((stages, num_households)) for household in range(0, num_households): - if households_df['dislocated'].iat[household]: + if households_df["dislocated"].iat[household]: spin = rng.rand() - if spin < initial_prob['cumulative'][0]: + if spin < initial_prob["cumulative"][0]: markov_stages[0][household] = 1.0 - elif spin < initial_prob['cumulative'][1]: + elif spin < initial_prob["cumulative"][1]: markov_stages[0][household] = 2.0 else: markov_stages[0][household] = 3.0 @@ -280,13 +315,17 @@ def housing_serial_recovery_model(self, households_df, t_delta, t_final, tpm, in if t >= 1: # Check every timestep that occurred prior to the current timestep. - regressions = self.compute_regressions(markov_stages, household, 1, t) + regressions = self.compute_regressions( + markov_stages, household, 1, t + ) if regressions > 10: markov_stages[t][household] = 5 if t >= 12: # Check the previous 12 timesteps that occurred prior to the current timestep. - regressions = self.compute_regressions(markov_stages, household, t - 11, t) + regressions = self.compute_regressions( + markov_stages, household, t - 11, t + ) # If the number of regressive steps in the household's past 12 timesteps is greater than 4, # the household transitions to stage 5. @@ -294,7 +333,9 @@ def housing_serial_recovery_model(self, households_df, t_delta, t_final, tpm, in markov_stages[t][household] = 5 if t >= 24: # Check the previous 24 timesteps that occurred prior to the current timestep. 
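# Editor's aside, not part of the patch: the HHRS loop being reformatted above
# seeds each dislocated household's initial Markov stage by drawing a uniform
# "spin" and comparing it against the cumulative initial-stage probabilities.
# A self-contained sketch of that selection step (the probability values are
# made up for illustration):
import numpy as np

rng = np.random.RandomState(1234)
initial_prob_cumulative = np.cumsum([0.3, 0.4, 0.3])  # stages 1, 2 and 3

spin = rng.rand()
if spin < initial_prob_cumulative[0]:
    stage = 1.0
elif spin < initial_prob_cumulative[1]:
    stage = 2.0
else:
    stage = 3.0
print(round(spin, 3), stage)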
- regressions = self.compute_regressions(markov_stages, household, t - 23, t) + regressions = self.compute_regressions( + markov_stages, household, t - 23, t + ) # If the number of regressive steps in the household's past 24 timesteps is greater than 7, # the household transitions to stage 5. @@ -308,10 +349,10 @@ def housing_serial_recovery_model(self, households_df, t_delta, t_final, tpm, in # We make a copy to be used for numerical purposes, from which drop some of the columns result = pd.DataFrame() - result['guid'] = households_df['guid'] - result['huid'] = households_df['huid'] - result['Zone'] = households_df['Zone'] - result['SV'] = sv_scores + result["guid"] = households_df["guid"] + result["huid"] = households_df["huid"] + result["Zone"] = households_df["Zone"] + result["SV"] = sv_scores column_names = [str(i) for i in range(1, stages + 1)] for i, c in enumerate(column_names): @@ -335,16 +376,22 @@ def compute_social_vulnerability_zones(sv_result, households_df): """ # if FIPS has 11 digits (Tract level) if len(sv_result["FIPS"].iloc[0]) == 11: - households_df['blockfips'] = households_df['blockid'].apply(lambda x: str(x)[:11]).astype(str) + households_df["blockfips"] = ( + households_df["blockid"].apply(lambda x: str(x)[:11]).astype(str) + ) # if FIPS has 12 digits (Block Group level) elif len(sv_result["FIPS"].iloc[0]) == 12: - households_df['blockfips'] = households_df['blockid'].apply(lambda x: str(x)[:12]).astype(str) + households_df["blockfips"] = ( + households_df["blockid"].apply(lambda x: str(x)[:12]).astype(str) + ) - households_df = households_df.merge(sv_result[["FIPS", "zone"]], left_on="blockfips", right_on="FIPS") + households_df = households_df.merge( + sv_result[["FIPS", "zone"]], left_on="blockfips", right_on="FIPS" + ) # e.g.Medium Vulnerable (zone3) extract the number 3 to construct Z3 - households_df["Zone"] = households_df["zone"].apply(lambda row: "Z"+row[-2]) + households_df["Zone"] = households_df["zone"].apply(lambda row: "Z" + row[-2]) - return households_df[households_df['Zone'] != 'missing'] + return households_df[households_df["Zone"] != "missing"] def compute_social_vulnerability_values(self, households_df, num_households, rng): """ @@ -358,24 +405,41 @@ def compute_social_vulnerability_values(self, households_df, num_households, rng """ # Social vulnerability zone generator: this generalizes the code in the first version sv_scores = np.zeros(num_households) - zones = households_df['Zone'].to_numpy() + zones = households_df["Zone"].to_numpy() for household in range(0, num_households): spin = rng.rand() zone = zones[household] - if spin < self.__sv_generator[zone]['threshold_0']: - sv_scores[household] = round(rng.uniform(self.__sv_generator[zone]['below_lower'], - self.__sv_generator[zone]['below_upper']), 3) + if spin < self.__sv_generator[zone]["threshold_0"]: + sv_scores[household] = round( + rng.uniform( + self.__sv_generator[zone]["below_lower"], + self.__sv_generator[zone]["below_upper"], + ), + 3, + ) # for zone 2, 3, 4 there is additional middle range - elif 'threshold_1' in self.__sv_generator[zone].keys() \ - and spin < self.__sv_generator[zone]['threshold_1']: - sv_scores[household] = round(rng.uniform(self.__sv_generator[zone]['middle_lower'], - self.__sv_generator[zone]['middle_upper']), 3) + elif ( + "threshold_1" in self.__sv_generator[zone].keys() + and spin < self.__sv_generator[zone]["threshold_1"] + ): + sv_scores[household] = round( + rng.uniform( + self.__sv_generator[zone]["middle_lower"], + 
self.__sv_generator[zone]["middle_upper"], + ), + 3, + ) else: - sv_scores[household] = round(rng.uniform(self.__sv_generator[zone]['above_lower'], - self.__sv_generator[zone]['above_upper']), 3) + sv_scores[household] = round( + rng.uniform( + self.__sv_generator[zone]["above_lower"], + self.__sv_generator[zone]["above_upper"], + ), + 3, + ) return sv_scores @@ -413,77 +477,77 @@ def get_spec(self): """ return { - 'name': 'housing-recovery-serial', - 'description': 'Household-level housing recovery serial model', - 'input_parameters': [ + "name": "housing-recovery-serial", + "description": "Household-level housing recovery serial model", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'Result CSV dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "Result CSV dataset name", + "type": str, }, { - 'id': 't_delta', - 'required': True, - 'description': 'size of the analysis time step', - 'type': float + "id": "t_delta", + "required": True, + "description": "size of the analysis time step", + "type": float, }, { - 'id': 't_final', - 'required': True, - 'description': 'total duration', - 'type': float + "id": "t_final", + "required": True, + "description": "total duration", + "type": float, }, { - 'id': 'seed', - 'required': False, - 'description': 'Seed to ensure replication of the Markov Chain path' - 'in connection with Population Dislocation.', - 'type': int + "id": "seed", + "required": False, + "description": "Seed to ensure replication of the Markov Chain path" + "in connection with Population Dislocation.", + "type": int, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - } + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'population_dislocation_block', - 'required': True, - 'description': 'A csv file with population dislocation result ' - 'aggregated to the block group level', - 'type': ['incore:popDislocation'] + "id": "population_dislocation_block", + "required": True, + "description": "A csv file with population dislocation result " + "aggregated to the block group level", + "type": ["incore:popDislocation"], }, { - 'id': 'tpm', - 'required': True, - 'description': 'Transition probability matrix in CSV format that specifies ' - 'the corresponding Markov chain per social vulnerability level.', - 'type': ['incore:houseRecTransitionProbMatrix'] + "id": "tpm", + "required": True, + "description": "Transition probability matrix in CSV format that specifies " + "the corresponding Markov chain per social vulnerability level.", + "type": ["incore:houseRecTransitionProbMatrix"], }, { - 'id': 'initial_stage_probabilities', - 'required': True, - 'description': 'initial mass probability function for stage 0 of the Markov Chain', - 'type': ['incore:houseRecInitialStageProbability'] + "id": "initial_stage_probabilities", + "required": True, + "description": "initial mass probability function for stage 0 of the Markov Chain", + "type": ["incore:houseRecInitialStageProbability"], }, { - 'id': 'sv_result', - 'required': True, - 'description': 'A csv file with zones containing demographic factors' - 'qualified by a social vulnerability score', - 'type': ['incore:socialVulnerabilityScore'] - } + "id": "sv_result", + "required": True, + "description": "A csv file with zones containing demographic 
factors" + "qualified by a social vulnerability score", + "type": ["incore:socialVulnerabilityScore"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'ds_result', - 'parent_type': 'housing_recovery_block', - 'description': 'A csv file with housing recovery sequences' - 'at the individual household level', - 'type': 'incore:housingRecoveryHistory' + "id": "ds_result", + "parent_type": "housing_recovery_block", + "description": "A csv file with housing recovery sequences" + "at the individual household level", + "type": "incore:housingRecoveryHistory", } - ] + ], } diff --git a/pyincore/analyses/housingunitallocation/__init__.py b/pyincore/analyses/housingunitallocation/__init__.py index e8c987019..eeec02013 100644 --- a/pyincore/analyses/housingunitallocation/__init__.py +++ b/pyincore/analyses/housingunitallocation/__init__.py @@ -3,4 +3,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.housingunitallocation.housingunitallocation import HousingUnitAllocation +from pyincore.analyses.housingunitallocation.housingunitallocation import ( + HousingUnitAllocation, +) diff --git a/pyincore/analyses/housingunitallocation/housingunitallocation.py b/pyincore/analyses/housingunitallocation/housingunitallocation.py index 5e3222e5e..a96a329ba 100644 --- a/pyincore/analyses/housingunitallocation/housingunitallocation.py +++ b/pyincore/analyses/housingunitallocation/housingunitallocation.py @@ -13,59 +13,63 @@ def __init__(self, incore_client): def get_spec(self): return { - 'name': 'housing-unit-allocation', - 'description': 'Probabilistic Housing Unit Allocation Analysis', - 'input_parameters': [ + "name": "housing-unit-allocation", + "description": "Probabilistic Housing Unit Allocation Analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'Result CSV dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "Result CSV dataset name", + "type": str, }, { - 'id': 'seed', - 'required': True, - 'description': 'Initial seed for the probabilistic model', - 'type': int + "id": "seed", + "required": True, + "description": "Initial seed for the probabilistic model", + "type": int, }, { - 'id': 'iterations', - 'required': True, - 'description': 'No of iterations to perform the probabilistic model on', - 'type': int - } + "id": "iterations", + "required": True, + "description": "No of iterations to perform the probabilistic model on", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'housing_unit_inventory', - 'required': True, - 'description': 'Housing Unit Inventory CSV data, aka Census Block data. Corresponds to a possible ' - 'occupied housing unit, vacant housing unit, or a group quarters', - 'type': ['incore:housingUnitInventory'] + "id": "housing_unit_inventory", + "required": True, + "description": "Housing Unit Inventory CSV data, aka Census Block data. 
Corresponds to a possible " + "occupied housing unit, vacant housing unit, or a group quarters", + "type": ["incore:housingUnitInventory"], }, { - 'id': 'address_point_inventory', - 'required': True, - 'description': 'CSV dataset of address locations available in a block. Corresponds to a ' - 'specific address where a housing unit or group quarters could be assigned', - 'type': ['incore:addressPoints'] - } + "id": "address_point_inventory", + "required": True, + "description": "CSV dataset of address locations available in a block. Corresponds to a " + "specific address where a housing unit or group quarters could be assigned", + "type": ["incore:addressPoints"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'description': 'A csv file with the merged dataset of the inputs, aka Probabilistic' - 'Housing Unit Allocation', - 'type': 'incore:housingUnitAllocation' + "id": "result", + "description": "A csv file with the merged dataset of the inputs, aka Probabilistic" + "Housing Unit Allocation", + "type": "incore:housingUnitAllocation", } - ] + ], } def run(self): @@ -89,22 +93,33 @@ def run(self): # Datasets bg_inv = self.get_input_dataset("buildings").get_dataframe_from_shapefile() - pop_inv = self.get_input_dataset("housing_unit_inventory").get_dataframe_from_csv(low_memory=False) - addr_point_inv = self.get_input_dataset("address_point_inventory").get_dataframe_from_csv(low_memory=False) + pop_inv = self.get_input_dataset( + "housing_unit_inventory" + ).get_dataframe_from_csv(low_memory=False) + addr_point_inv = self.get_input_dataset( + "address_point_inventory" + ).get_dataframe_from_csv(low_memory=False) for i in range(iterations): seed_i = seed + i hua_inventory = self.get_iteration_probabilistic_allocation( - pop_inv, addr_point_inv, bg_inv, seed_i) + pop_inv, addr_point_inv, bg_inv, seed_i + ) temp_output_file = result_name + "_" + str(seed_i) + ".csv" # first column guid - hua_inventory = hua_inventory[["addrptid"] + [col for col in hua_inventory.columns if col != "addrptid"]] + hua_inventory = hua_inventory[ + ["addrptid"] + + [col for col in hua_inventory.columns if col != "addrptid"] + ] # last column geometry - hua_inventory = hua_inventory[[col for col in hua_inventory.columns if col != "geometry"] + ["geometry"]] - self.set_result_csv_data("result", - hua_inventory, - temp_output_file, "dataframe") + hua_inventory = hua_inventory[ + [col for col in hua_inventory.columns if col != "geometry"] + + ["geometry"] + ] + self.set_result_csv_data( + "result", hua_inventory, temp_output_file, "dataframe" + ) return True def prepare_housing_unit_inventory(self, housing_unit_inventory, seed): @@ -129,17 +144,24 @@ def prepare_housing_unit_inventory(self, housing_unit_inventory, seed): sorted_housing_unit0["randomhu"] = random_order_housing_unit # gsort BlockID -LiveTypeUnit Tenure randomaorderpop - sorted_housing_unit1 = sorted_housing_unit0.sort_values(by=["blockid", "ownershp", "vacancy", "randomhu"], - ascending=[True, True, True, True]) + sorted_housing_unit1 = sorted_housing_unit0.sort_values( + by=["blockid", "ownershp", "vacancy", "randomhu"], + ascending=[True, True, True, True], + ) # by BlockID: gen RandomMergeOrder = _n (+1 to be consistent with STATA starting from 1) - sorted_housing_unit1["randommergeorder"] = sorted_housing_unit1.groupby(["blockid"]).cumcount() + 1 + sorted_housing_unit1["randommergeorder"] = ( + sorted_housing_unit1.groupby(["blockid"]).cumcount() + 1 + ) - sorted_housing_unit2 = sorted_housing_unit1.sort_values(by=["blockid", 
"randommergeorder"], - ascending=[True, True]) + sorted_housing_unit2 = sorted_housing_unit1.sort_values( + by=["blockid", "randommergeorder"], ascending=[True, True] + ) return sorted_housing_unit2 - def merge_infrastructure_inventory(self, address_point_inventory, building_inventory): + def merge_infrastructure_inventory( + self, address_point_inventory, building_inventory + ): """Merge order to Building and Address inventories. Args: @@ -153,11 +175,18 @@ def merge_infrastructure_inventory(self, address_point_inventory, building_inven sorted_pnt_0 = address_point_inventory.sort_values(by=["strctid"]) sorted_bld_0 = building_inventory.sort_values(by=["strctid"]) - addresspt_building_inv = pd.merge(sorted_bld_0, sorted_pnt_0, - how='outer', on="strctid", - left_index=False, right_index=False, - sort=True, copy=True, indicator=True, - validate="1:m") + addresspt_building_inv = pd.merge( + sorted_bld_0, + sorted_pnt_0, + how="outer", + on="strctid", + left_index=False, + right_index=False, + sort=True, + copy=True, + indicator=True, + validate="1:m", + ) # addresspt_building_inv = self.compare_merges(sorted_pnt_0.columns, sorted_bld_0.columns, # addresspt_building_inv) @@ -165,15 +194,22 @@ def merge_infrastructure_inventory(self, address_point_inventory, building_inven match_column = set(sorted_pnt_0.columns).intersection(sorted_bld_0.columns) for col in match_column: # Compare two columns, keep one from the address, rename and drop - if col + "_x" in addresspt_building_inv.columns and col + "_y" in addresspt_building_inv.columns: + if ( + col + "_x" in addresspt_building_inv.columns + and col + "_y" in addresspt_building_inv.columns + ): addresspt_building_inv[col] = addresspt_building_inv[col + "_y"] - addresspt_building_inv = addresspt_building_inv.drop(columns=[col + "_x", col + "_y"]) + addresspt_building_inv = addresspt_building_inv.drop( + columns=[col + "_x", col + "_y"] + ) addresspt_building_inv = addresspt_building_inv.drop(columns=["_merge"]) return addresspt_building_inv - def prepare_infrastructure_inventory(self, seed_i: int, critical_bld_inv: pd.DataFrame): + def prepare_infrastructure_inventory( + self, seed_i: int, critical_bld_inv: pd.DataFrame + ): """Assign Random merge order to Building and Address inventories. Use main seed value. 
@@ -194,17 +230,24 @@ def prepare_infrastructure_inventory(self, seed_i: int, critical_bld_inv: pd.Dat randomap = random_generator.uniform(0, 1, size_row) sort_critical_bld_0["randomap"] = randomap - sort_critical_bld_1 = sort_critical_bld_0.sort_values(by=["blockid", "residential", "huestimate", "randomap"], - ascending=[True, False, True, True]) + sort_critical_bld_1 = sort_critical_bld_0.sort_values( + by=["blockid", "residential", "huestimate", "randomap"], + ascending=[True, False, True, True], + ) # +1 to be consistent with STATA starting from 1 - sort_critical_bld_1["randommergeorder"] = sort_critical_bld_1.groupby(["blockid"]).cumcount() + 1 + sort_critical_bld_1["randommergeorder"] = ( + sort_critical_bld_1.groupby(["blockid"]).cumcount() + 1 + ) - sort_critical_bld_2 = sort_critical_bld_1.sort_values(by=["blockid", "randommergeorder"], - ascending=[True, False]) + sort_critical_bld_2 = sort_critical_bld_1.sort_values( + by=["blockid", "randommergeorder"], ascending=[True, False] + ) return sort_critical_bld_2 - def merge_inventories(self, sorted_housing_unit: pd.DataFrame, sorted_infrastructure: pd.DataFrame): + def merge_inventories( + self, sorted_housing_unit: pd.DataFrame, sorted_infrastructure: pd.DataFrame + ): """Merge (Sorted) Housing Unit Inventory and (Sorted) Infrastructure Inventory. Args: @@ -216,23 +259,32 @@ def merge_inventories(self, sorted_housing_unit: pd.DataFrame, sorted_infrastruc pd.DataFrame: Final merge of all four inventories """ - huap_inventory = pd.merge(sorted_infrastructure, sorted_housing_unit, - how='outer', left_on=["blockid", "randommergeorder"], - right_on=["blockid", "randommergeorder"], - sort=True, suffixes=("_x", "_y"), - copy=True, indicator=True) + huap_inventory = pd.merge( + sorted_infrastructure, + sorted_housing_unit, + how="outer", + left_on=["blockid", "randommergeorder"], + right_on=["blockid", "randommergeorder"], + sort=True, + suffixes=("_x", "_y"), + copy=True, + indicator=True, + ) huap_inventory = huap_inventory.rename(columns={"_merge": "aphumerge"}) # check for duplicate columns from merge - huap_inventory = self.compare_merges(sorted_housing_unit.columns, - sorted_infrastructure.columns, - huap_inventory) + huap_inventory = self.compare_merges( + sorted_housing_unit.columns, sorted_infrastructure.columns, huap_inventory + ) - output = huap_inventory.sort_values(by=["aphumerge", "blockid"], ascending=[False, True]) + output = huap_inventory.sort_values( + by=["aphumerge", "blockid"], ascending=[False, True] + ) return output - def get_iteration_probabilistic_allocation(self, housing_unit_inventory, address_point_inventory, - building_inventory, seed): + def get_iteration_probabilistic_allocation( + self, housing_unit_inventory, address_point_inventory, building_inventory, seed + ): """Merge inventories Args: @@ -245,10 +297,16 @@ def get_iteration_probabilistic_allocation(self, housing_unit_inventory, address pd.DataFrame: Merged table """ - sorted_housing_unit = self.prepare_housing_unit_inventory(housing_unit_inventory, seed) + sorted_housing_unit = self.prepare_housing_unit_inventory( + housing_unit_inventory, seed + ) - critical_building_inv = self.merge_infrastructure_inventory(address_point_inventory, building_inventory) - sorted_infrastructure = self.prepare_infrastructure_inventory(seed, critical_building_inv) + critical_building_inv = self.merge_infrastructure_inventory( + address_point_inventory, building_inventory + ) + sorted_infrastructure = self.prepare_infrastructure_inventory( + seed, critical_building_inv + 
) output = self.merge_inventories(sorted_housing_unit, sorted_infrastructure) return output @@ -270,8 +328,13 @@ def compare_merges(self, table1_cols, table2_cols, table_merged): match_column = set(table1_cols).intersection(table2_cols) for col in match_column: # Compare two columns and marked similarity or rename and drop - if col + "_x" in table_merged.columns and col + "_y" in table_merged.columns: - table_merged = self.compare_columns(table_merged, col + "_x", col + "_y", True) + if ( + col + "_x" in table_merged.columns + and col + "_y" in table_merged.columns + ): + table_merged = self.compare_columns( + table_merged, col + "_x", col + "_y", True + ) return table_merged diff --git a/pyincore/analyses/housingvaluationrecovery/__init__.py b/pyincore/analyses/housingvaluationrecovery/__init__.py index ad70e3263..efb1679a1 100644 --- a/pyincore/analyses/housingvaluationrecovery/__init__.py +++ b/pyincore/analyses/housingvaluationrecovery/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.housingvaluationrecovery.housingvaluationrecovery import HousingValuationRecovery +from pyincore.analyses.housingvaluationrecovery.housingvaluationrecovery import ( + HousingValuationRecovery, +) diff --git a/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py index 36ff94667..5ffa6f7c0 100755 --- a/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py +++ b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py @@ -8,11 +8,13 @@ import pandas as pd from pyincore import BaseAnalysis -from pyincore.analyses.housingvaluationrecovery.housingvaluationrecoveryutil import HousingValuationRecoveryUtil +from pyincore.analyses.housingvaluationrecovery.housingvaluationrecoveryutil import ( + HousingValuationRecoveryUtil, +) class HousingValuationRecovery(BaseAnalysis): - """ The analysis predicts building values and value changes over time following a disaster event. + """The analysis predicts building values and value changes over time following a disaster event. The model is calibrated with respect to demographics, parcel data, and building value trajectories following Hurricane Ike (2008) in Galveston, Texas. The model predicts building value at the parcel level for 8 years of observation. The models rely on Census (Decennial or American Community Survey, ACS) @@ -37,6 +39,7 @@ class HousingValuationRecovery(BaseAnalysis): incore_client (IncoreClient): Service authentication. """ + def __init__(self, incore_client): super(HousingValuationRecovery, self).__init__(incore_client) @@ -49,59 +52,59 @@ def get_spec(self): "id": "base_year", "required": False, "description": "Base year is used to calculate improvement age. It needs to be set to the tax " - "assessment year representing pre-disaster building values. For example for GCAD " - "data which represents improvement valuation before Hurricane Ike impacts." - "Deafult 2008", - "type": int + "assessment year representing pre-disaster building values. For example for GCAD " + "data which represents improvement valuation before Hurricane Ike impacts." 
+ "Deafult 2008", + "type": int, }, { - "id": "result_name", - "required": True, - "description": "Result CSV dataset name", - "type": str - } + "id": "result_name", + "required": True, + "description": "Result CSV dataset name", + "type": str, + }, ], "input_datasets": [ { - "id": "population_dislocation", - "required": True, - "description": "Population Dislocation aggregated to the block group level", - "type": ["incore:popDislocation"] + "id": "population_dislocation", + "required": True, + "description": "Population Dislocation aggregated to the block group level", + "type": ["incore:popDislocation"], }, { - "id": "building_area", - "required": True, - "description": "Building square footage and damage. Damage is the actual building value loss " - "in percentage terms observed through the County Appraisal District (GCAD) data", - "type": ["incore:buildingInventoryArea"] + "id": "building_area", + "required": True, + "description": "Building square footage and damage. Damage is the actual building value loss " + "in percentage terms observed through the County Appraisal District (GCAD) data", + "type": ["incore:buildingInventoryArea"], }, { - "id": "census_block_groups_data", - "required": True, - "description": "Census ACS data, 2010 5yr data for block groups available at IPUMS NHGIS " - "website.", - "type": ["incore:censusBlockGroupsData"] + "id": "census_block_groups_data", + "required": True, + "description": "Census ACS data, 2010 5yr data for block groups available at IPUMS NHGIS " + "website.", + "type": ["incore:censusBlockGroupsData"], }, { - "id": "census_appraisal_data", - "required": True, - "description": "Census data, 2010 Decennial Census District (GCAD) Census data", - "type": ["incore:censusAppraisalData"] - } + "id": "census_appraisal_data", + "required": True, + "description": "Census data, 2010 Decennial Census District (GCAD) Census data", + "type": ["incore:censusAppraisalData"], + }, ], "output_datasets": [ { - "id": "result", - "description": "A csv file with the building values for the 6 years following the disaster" - "event (year -1 denotes pre-impact conditions and 0 being the impact year). " - "Index year values represent building values against a base, pre-impact value.", - "type": "incore:buildingValues" + "id": "result", + "description": "A csv file with the building values for the 6 years following the disaster" + "event (year -1 denotes pre-impact conditions and 0 being the impact year). " + "Index year values represent building values against a base, pre-impact value.", + "type": "incore:buildingValues", } - ] + ], } def run(self): - """ Executes the housing valuation recovery analysis. + """Executes the housing valuation recovery analysis. Returns: bool: True if successful, False otherwise. 
@@ -117,12 +120,20 @@ def run(self): result_name = self.get_parameter("result_name") # Datasets - pop_disl = self.get_input_dataset("population_dislocation").get_dataframe_from_csv(low_memory=False) - addl_structure_info = self.get_input_dataset("building_area").get_dataframe_from_csv(low_memory=False) - bg_mhhinc = self.get_input_dataset("census_block_groups_data").get_dataframe_from_csv(low_memory=False) + pop_disl = self.get_input_dataset( + "population_dislocation" + ).get_dataframe_from_csv(low_memory=False) + addl_structure_info = self.get_input_dataset( + "building_area" + ).get_dataframe_from_csv(low_memory=False) + bg_mhhinc = self.get_input_dataset( + "census_block_groups_data" + ).get_dataframe_from_csv(low_memory=False) # Census data - vac_status = self.get_input_dataset("census_appraisal_data").get_dataframe_from_csv(low_memory=False) + vac_status = self.get_input_dataset( + "census_appraisal_data" + ).get_dataframe_from_csv(low_memory=False) # Calculate the percent vacation or seasonal housing of all housing units within a census tract vac_status = self.get_vac_season_housing(vac_status) @@ -139,7 +150,9 @@ def run(self): # Read in and clean additional building information, NOTE add to building inventory # Create structure id for merging data - addl_structure_info["strctid"] = addl_structure_info["xref"].apply(lambda x: "XREF"+x) + addl_structure_info["strctid"] = addl_structure_info["xref"].apply( + lambda x: "XREF" + x + ) hse_recov = self.merge_add_inv(single_family, addl_structure_info) # Merge with seasonal/vacation housing Census ACS data @@ -178,24 +191,30 @@ def run(self): hse_rec_fin2 = pd.DataFrame(hse_rec_fin_index, columns=index_yr) hse_recov = pd.concat([hse_recov, hse_rec_fin1, hse_rec_fin2], axis=1) - columns_to_save = ["guid", "d_vacationct", "mhhinck", "pminoritybg", "dmg", "value_loss"] + bval_yr + index_yr - self.set_result_csv_data("result", hse_recov[columns_to_save], result_name, "dataframe") + columns_to_save = ( + ["guid", "d_vacationct", "mhhinck", "pminoritybg", "dmg", "value_loss"] + + bval_yr + + index_yr + ) + self.set_result_csv_data( + "result", hse_recov[columns_to_save], result_name, "dataframe" + ) return True def get_owneship(self, popd): - """ Filter ownership based on the vacancy codes - Assumption: - Where ownershp is "missing", let vacancy codes 0/3/4 be considered owner-occupied, - and 1/2/5/6/7 be considered renter-occupied. - It is uncertain whether vacancy codes 3,4,5,6,7 will become owner- or renter-occupied or primarily - one or the other. - . - Args: - popd (pd.DataFrame): Population dislocation results with ownership information. - - Returns: - pd.DataFrame: Ownership data. + """Filter ownership based on the vacancy codes + Assumption: + Where ownershp is "missing", let vacancy codes 0/3/4 be considered owner-occupied, + and 1/2/5/6/7 be considered renter-occupied. + It is uncertain whether vacancy codes 3,4,5,6,7 will become owner- or renter-occupied or primarily + one or the other. + . + Args: + popd (pd.DataFrame): Population dislocation results with ownership information. + + Returns: + pd.DataFrame: Ownership data. """ # Create ownership dummy variable from popd.ownership @@ -213,14 +232,14 @@ def get_owneship(self, popd): return own def get_vac_season_housing(self, vac_status): - """ Calculate the percent vacation or seasonal housing of all housing units within a census tract and - add dummy variable for census tract as a seasonal/vacation housing submarket. -. 
- Args: - vac_status (obj): Seasonal/vacation housing Census ACS data from json reader. + """Calculate the percent vacation or seasonal housing of all housing units within a census tract and + add dummy variable for census tract as a seasonal/vacation housing submarket. + . + Args: + vac_status (obj): Seasonal/vacation housing Census ACS data from json reader. - Returns: - pd.DataFrame: Seasonal/vacation housing data. + Returns: + pd.DataFrame: Seasonal/vacation housing data. """ vac_status["B25004_006E"] = vac_status["B25004_006E"].astype(int) @@ -229,11 +248,19 @@ def get_vac_season_housing(self, vac_status): vac_status["B25002_001M"] = vac_status["B25002_001M"].astype(int) # Calculate the percent vacation or seasonal housing of all housing units within a census tract - vac_status["pvacationct_moe"] = vac_status["B25004_006E"] / vac_status["B25002_001E"] + vac_status["pvacationct_moe"] = ( + vac_status["B25004_006E"] / vac_status["B25002_001E"] + ) vac_status["pvacationct"] = 100 * vac_status["pvacationct_moe"] - vac_status["pvacationct_moe"] = vac_status["pvacationct_moe"] ** 2 * vac_status["B25002_001M"] ** 2 - vac_status["pvacationct_moe"] = vac_status["B25004_006M"] ** 2 - vac_status["pvacationct_moe"] - vac_status["pvacationct_moe"] = 100 * (1 / vac_status["B25002_001E"]) * vac_status["pvacationct_moe"] ** 0.5 + vac_status["pvacationct_moe"] = ( + vac_status["pvacationct_moe"] ** 2 * vac_status["B25002_001M"] ** 2 + ) + vac_status["pvacationct_moe"] = ( + vac_status["B25004_006M"] ** 2 - vac_status["pvacationct_moe"] + ) + vac_status["pvacationct_moe"] = ( + 100 * (1 / vac_status["B25002_001E"]) * vac_status["pvacationct_moe"] ** 0.5 + ) # dummy variable for census tract as a seasonal/vacation housing submarket vac_status["d_vacationct"] = np.where(vac_status["pvacationct"] >= 50, 1, 0) @@ -244,141 +271,182 @@ def get_vac_season_housing(self, vac_status): def merge_add_inv(self, hse_rec, addl_struct): """Merge study area and additional structure information. -. - Args: - hse_rec (pd.DataFrame): Area inventory. - addl_struct (pd.DataFrame): Additional infrastructure inventory. + . + Args: + hse_rec (pd.DataFrame): Area inventory. + addl_struct (pd.DataFrame): Additional infrastructure inventory. - Returns: - pd.DataFrame: Final merge of two inventories. + Returns: + pd.DataFrame: Final merge of two inventories. """ hse_rec_merged = pd.merge(hse_rec, addl_struct, on="strctid", how="inner") return hse_rec_merged def merge_seasonal_data(self, hse_rec, vac_status): - """ Merge study area and with seasonal/vacation housing Census ACS data. -. - Args: - hse_rec (pd.DataFrame): Area inventory. - vac_status (pd.DataFrame): Seasonal/vacation housing Census ACS data. + """Merge study area and with seasonal/vacation housing Census ACS data. + . + Args: + hse_rec (pd.DataFrame): Area inventory. + vac_status (pd.DataFrame): Seasonal/vacation housing Census ACS data. - Returns: - pd.DataFrame: Final merge of two inventories. + Returns: + pd.DataFrame: Final merge of two inventories. 
""" hse_rec["tractid"] = hse_rec["tractid"].astype(str) # Add county and state to trac to match hse_rec tracid (Galveston - 723900 to 48167723900) - vac_status["tractid"] = \ - vac_status["state"].astype(str) + vac_status["county"].astype(str) + vac_status["tract"].astype(str) - - hse_rec_merged = pd.merge(hse_rec, vac_status, left_on="tractid", right_on="tractid", how='inner') + vac_status["tractid"] = ( + vac_status["state"].astype(str) + + vac_status["county"].astype(str) + + vac_status["tract"].astype(str) + ) + + hse_rec_merged = pd.merge( + hse_rec, vac_status, left_on="tractid", right_on="tractid", how="inner" + ) return hse_rec_merged def merge_block_data(self, hse_rec, bg_mhhinc): - """ Merge block group level median household income. -. - Args: - hse_rec (pd.DataFrame): Area inventory. - bg_mhhinc (pd.DataFrame): Block data. + """Merge block group level median household income. + . + Args: + hse_rec (pd.DataFrame): Area inventory. + bg_mhhinc (pd.DataFrame): Block data. - Returns: - pd.DataFrame: Final merge of two inventories. + Returns: + pd.DataFrame: Final merge of two inventories. """ - hse_rec_merged = pd.merge(hse_rec, bg_mhhinc, left_on="bgidstr", right_on="bgidstr", how="inner") + hse_rec_merged = pd.merge( + hse_rec, bg_mhhinc, left_on="bgidstr", right_on="bgidstr", how="inner" + ) return hse_rec_merged def value_loss(self, hse_rec): - """ Estimate value_loss for each parcel based on parameters from Bai, Hueste, & Gardoni (2009). - . - Args: - hse_rec (pd.DataFrame): Area inventory. + """Estimate value_loss for each parcel based on parameters from Bai, Hueste, & Gardoni (2009). + . + Args: + hse_rec (pd.DataFrame): Area inventory. - Returns: - pd.DataFrame: Inventory with value losses. + Returns: + pd.DataFrame: Inventory with value losses. """ - hse_rec["value_loss"] = 100 * (hse_rec["DS_0"] * hse_rec["rploss_0"] + - hse_rec["DS_1"] * hse_rec["rploss_1"] + - hse_rec["DS_2"] * hse_rec["rploss_2"] + - hse_rec["DS_3"] * hse_rec["rploss_3"]) + hse_rec["value_loss"] = 100 * ( + hse_rec["DS_0"] * hse_rec["rploss_0"] + + hse_rec["DS_1"] * hse_rec["rploss_1"] + + hse_rec["DS_2"] * hse_rec["rploss_2"] + + hse_rec["DS_3"] * hse_rec["rploss_3"] + ) return hse_rec def assemble_phm_coefs(self, hru, hse_rec): - """ Assemble Primary Housing Market (PHM) data for full inventory and all damage-related years. - . - Args: - hru (obj): Housing valuation recovery utility. - hse_rec (pd.DataFrame): Area inventory including losses. + """Assemble Primary Housing Market (PHM) data for full inventory and all damage-related years. + . + Args: + hru (obj): Housing valuation recovery utility. + hse_rec (pd.DataFrame): Area inventory including losses. - Returns: - np.array: Final coefficients for all damage years. + Returns: + np.array: Final coefficients for all damage years. 
""" dmg_years = np.array(hru.DMG_YEARS) dmg_years_size = len(hru.DMG_YEARS) coef_fin = np.empty((hse_rec.shape[0], dmg_years_size)) - coef_fin[:] = hru.B_PHM_intercept + np.fromiter(hru.B_PHM_year.values(), dtype=float) + coef_fin[:] = hru.B_PHM_intercept + np.fromiter( + hru.B_PHM_year.values(), dtype=float + ) # Adjust build year year with damage years yrbl_all = np.empty((hse_rec.shape[0], dmg_years_size)) yrbl_all[:] = self.base_year + dmg_years + 1 - yrbuilt = hru.B_PHM_age * (yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis]) + yrbuilt = hru.B_PHM_age * ( + yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis] + ) # Square meters, use vector (1x8) with B_PHM_sqm - sqmeter = np.full((1, dmg_years_size), hru.B_PHM_sqm) * hse_rec["sqmeter"].to_numpy()[:, np.newaxis] + sqmeter = ( + np.full((1, dmg_years_size), hru.B_PHM_sqm) + * hse_rec["sqmeter"].to_numpy()[:, np.newaxis] + ) if "dmg" in hse_rec.columns: - dmg_loss = np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) * \ - hse_rec["dmg"].to_numpy()[:, np.newaxis] + dmg_loss = ( + np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) + * hse_rec["dmg"].to_numpy()[:, np.newaxis] + ) else: - dmg_loss = np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) * \ - hse_rec["value_loss"].to_numpy()[:, np.newaxis] - d_owner = \ - np.fromiter(hru.B_PHM_own_year.values(), dtype=float) * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis] - mhhinck = \ - np.fromiter(hru.B_PHM_inc_year.values(), dtype=float) * hse_rec["mhhinck"].to_numpy()[:, np.newaxis] - pminrbg = \ - np.fromiter(hru.B_PHM_min_year.values(), dtype=float) * hse_rec["pminoritybg"].to_numpy()[:, np.newaxis] + dmg_loss = ( + np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) + * hse_rec["value_loss"].to_numpy()[:, np.newaxis] + ) + d_owner = ( + np.fromiter(hru.B_PHM_own_year.values(), dtype=float) + * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis] + ) + mhhinck = ( + np.fromiter(hru.B_PHM_inc_year.values(), dtype=float) + * hse_rec["mhhinck"].to_numpy()[:, np.newaxis] + ) + pminrbg = ( + np.fromiter(hru.B_PHM_min_year.values(), dtype=float) + * hse_rec["pminoritybg"].to_numpy()[:, np.newaxis] + ) return coef_fin + yrbuilt + sqmeter + d_owner + dmg_loss + mhhinck + pminrbg def assemble_svhm_coefs(self, hru, hse_rec): - """ Assemble Seasonal/Vacation housing market (SVHM) data for full inventory and all damage-related years. - . - Args: - hru (obj): Housing valution recovery utility. - hse_rec (pd.DataFrame): Area inventory including losses. + """Assemble Seasonal/Vacation housing market (SVHM) data for full inventory and all damage-related years. + . + Args: + hru (obj): Housing valution recovery utility. + hse_rec (pd.DataFrame): Area inventory including losses. - Returns: - np.array: Final coefficients for all damage years. + Returns: + np.array: Final coefficients for all damage years. 
""" dmg_years = np.array(hru.DMG_YEARS) dmg_years_size = len(hru.DMG_YEARS) coef_fin = np.empty((hse_rec.shape[0], dmg_years_size)) - coef_fin[:] = hru.B_SVHM_intercept + np.fromiter(hru.B_SVHM_year.values(), dtype=float) + coef_fin[:] = hru.B_SVHM_intercept + np.fromiter( + hru.B_SVHM_year.values(), dtype=float + ) # Adjust build year year with damage years yrbl_all = np.empty((hse_rec.shape[0], dmg_years_size)) yrbl_all[:] = self.base_year + dmg_years + 1 - yrbuilt = hru.B_SVHM_age * (yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis]) + yrbuilt = hru.B_SVHM_age * ( + yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis] + ) # Square meters, use vector (1x8) with B_PHM_sqm - sqmeter = np.full((1, dmg_years_size), hru.B_SVHM_sqm) * hse_rec["sqmeter"].to_numpy()[:, np.newaxis] + sqmeter = ( + np.full((1, dmg_years_size), hru.B_SVHM_sqm) + * hse_rec["sqmeter"].to_numpy()[:, np.newaxis] + ) if "dmg" in hse_rec.columns: - dmg_loss = np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) * \ - hse_rec["dmg"].to_numpy()[:, np.newaxis] + dmg_loss = ( + np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) + * hse_rec["dmg"].to_numpy()[:, np.newaxis] + ) else: - dmg_loss = np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) * \ - hse_rec["value_loss"].to_numpy()[:, np.newaxis] - d_owner = \ - np.fromiter(hru.B_SVHM_own_year.values(), dtype=float) * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis] - mhhinck = \ - np.fromiter(hru.B_SVHM_inc_year.values(), dtype=float) * hse_rec["mhhinck"].to_numpy()[:, np.newaxis] + dmg_loss = ( + np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) + * hse_rec["value_loss"].to_numpy()[:, np.newaxis] + ) + d_owner = ( + np.fromiter(hru.B_SVHM_own_year.values(), dtype=float) + * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis] + ) + mhhinck = ( + np.fromiter(hru.B_SVHM_inc_year.values(), dtype=float) + * hse_rec["mhhinck"].to_numpy()[:, np.newaxis] + ) return coef_fin + yrbuilt + sqmeter + dmg_loss + d_owner + mhhinck diff --git a/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py index 01c3078e8..ec27d2eff 100644 --- a/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py +++ b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py @@ -4,6 +4,7 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ + class HousingValuationRecoveryUtil: BASEYEAR = 2008 @@ -22,7 +23,9 @@ class HousingValuationRecoveryUtil: # Year indicator dummy variables B_PHM_year = {} B_PHM_year[-1] = 0.000000 # year -1, tax assessment immediately before disaster - B_PHM_year[0] = 0.263500 # year 0, tax assessment immediately after disaster, damage year + B_PHM_year[ + 0 + ] = 0.263500 # year 0, tax assessment immediately after disaster, damage year B_PHM_year[1] = 0.147208 # year +1 B_PHM_year[2] = 0.110004 # year +2 B_PHM_year[3] = 0.122228 # year +3 @@ -47,7 +50,7 @@ class HousingValuationRecoveryUtil: # Owner-occupied and year dummy interactions B_PHM_own_year = {} - B_PHM_own_year[-1] = 0.017153 # base effect + B_PHM_own_year[-1] = 0.017153 # base effect B_PHM_own_year[0] = 0.129077 + B_PHM_own_year[-1] B_PHM_own_year[1] = 0.188217 + B_PHM_own_year[-1] B_PHM_own_year[2] = 0.235435 + B_PHM_own_year[-1] @@ -58,7 +61,7 @@ class HousingValuationRecoveryUtil: # Median household income and year dummy interactions B_PHM_inc_year = {} - B_PHM_inc_year[-1] = 
0.002724 # base effect + B_PHM_inc_year[-1] = 0.002724 # base effect B_PHM_inc_year[0] = 0.001190 + B_PHM_inc_year[-1] B_PHM_inc_year[1] = 0.001480 + B_PHM_inc_year[-1] B_PHM_inc_year[2] = 0.001746 + B_PHM_inc_year[-1] @@ -69,7 +72,7 @@ class HousingValuationRecoveryUtil: # Block Group percent Minority and year dummy interactions B_PHM_min_year = {} - B_PHM_min_year[-1] = -0.004783 # base effect + B_PHM_min_year[-1] = -0.004783 # base effect B_PHM_min_year[0] = 0.005609 + B_PHM_min_year[-1] B_PHM_min_year[1] = 0.007343 + B_PHM_min_year[-1] B_PHM_min_year[2] = 0.007459 + B_PHM_min_year[-1] @@ -84,7 +87,9 @@ class HousingValuationRecoveryUtil: # Year indicator dummy variables B_SVHM_year = {} B_SVHM_year[-1] = 0.000000 # year -1, tax assessment immediately before disaster - B_SVHM_year[0] = 1.489008 # year 0, tax assessment immediately after disaster, damage year + B_SVHM_year[ + 0 + ] = 1.489008 # year 0, tax assessment immediately after disaster, damage year B_SVHM_year[1] = 1.858770 # year +1 B_SVHM_year[2] = 2.163492 # year +2 B_SVHM_year[3] = 2.071690 # year +3 @@ -109,7 +114,7 @@ class HousingValuationRecoveryUtil: # Owner-occupied and year dummy interactions B_SVHM_own_year = {} - B_SVHM_own_year[-1] = -0.017167 # base effect + B_SVHM_own_year[-1] = -0.017167 # base effect B_SVHM_own_year[0] = 0.043263 + B_SVHM_own_year[-1] B_SVHM_own_year[1] = 0.003315 + B_SVHM_own_year[-1] B_SVHM_own_year[2] = 0.034372 + B_SVHM_own_year[-1] @@ -120,7 +125,7 @@ class HousingValuationRecoveryUtil: # Median household income and year dummy interactions B_SVHM_inc_year = {} - B_SVHM_inc_year[-1] = 0.003786 # base effect + B_SVHM_inc_year[-1] = 0.003786 # base effect B_SVHM_inc_year[0] = -0.013662 + B_SVHM_inc_year[-1] B_SVHM_inc_year[1] = -0.017401 + B_SVHM_inc_year[-1] B_SVHM_inc_year[2] = -0.021541 + B_SVHM_inc_year[-1] diff --git a/pyincore/analyses/indp/dislocationutils.py b/pyincore/analyses/indp/dislocationutils.py index 75b14a133..5b9bb12b9 100644 --- a/pyincore/analyses/indp/dislocationutils.py +++ b/pyincore/analyses/indp/dislocationutils.py @@ -12,7 +12,6 @@ class DislocationUtil: - @staticmethod def create_dynamic_param(params, pop_dislocation, dt_params, T=1, N=None): """ @@ -33,123 +32,181 @@ def create_dynamic_param(params, pop_dislocation, dt_params, T=1, N=None): dynamic_params : dict Dictionary of dynamic demand value for nodes """ - dynamic_param_dict = params['DYNAMIC_PARAMS'] - return_type = dynamic_param_dict['RETURN'] - dp_dict_col = ['time', 'node', 'current pop', 'total pop'] - net_names = {'WATER': 1, 'GAS': 2, 'POWER': 3, 'TELECOME': 4} + dynamic_param_dict = params["DYNAMIC_PARAMS"] + return_type = dynamic_param_dict["RETURN"] + dp_dict_col = ["time", "node", "current pop", "total pop"] + net_names = {"WATER": 1, "GAS": 2, "POWER": 3, "TELECOME": 4} dynamic_params = {} - output_file = dynamic_param_dict['TESTBED'] + '_pop_dislocation_demands_' + str(params['MAGNITUDE']) + 'yr.pkl' + output_file = ( + dynamic_param_dict["TESTBED"] + + "_pop_dislocation_demands_" + + str(params["MAGNITUDE"]) + + "yr.pkl" + ) if os.path.exists(output_file): print("\nReading from file...") - with open(output_file, 'rb') as f: + with open(output_file, "rb") as f: dynamic_params = pickle.load(f) return dynamic_params - for net in dynamic_param_dict['MAPPING'].keys(): + for net in dynamic_param_dict["MAPPING"].keys(): nn = net_names[net] - mapping_data = dynamic_param_dict['MAPPING'][net] + mapping_data = dynamic_param_dict["MAPPING"][net] dynamic_params[nn] = pd.DataFrame(columns=dp_dict_col) - if 
net == 'POWER': + if net == "POWER": for n, d in N.G.nodes(data=True): if n[1] != nn: continue - guid = d['data']['inf_data'].guid + guid = d["data"]["inf_data"].guid # Find building in the service area of the node/substation try: - serv_area = mapping_data[mapping_data['substations_guid'] == guid] + serv_area = mapping_data[ + mapping_data["substations_guid"] == guid + ] except KeyError: - serv_area = mapping_data[mapping_data['node_guid'] == guid] + serv_area = mapping_data[mapping_data["node_guid"] == guid] # compute dynamic_params num_dilocated = {t: 0 for t in range(T + 1)} total_pop_node = 0 for _, bldg in serv_area.iterrows(): try: - pop_bldg_dict = pop_dislocation[pop_dislocation['guid'] == bldg['buildings_guid']] + pop_bldg_dict = pop_dislocation[ + pop_dislocation["guid"] == bldg["buildings_guid"] + ] except KeyError: - pop_bldg_dict = pop_dislocation[pop_dislocation['guid'] == bldg['bldg_guid']] + pop_bldg_dict = pop_dislocation[ + pop_dislocation["guid"] == bldg["bldg_guid"] + ] for _, hh in pop_bldg_dict.iterrows(): - total_pop_node += hh['numprec'] if ~np.isnan(hh['numprec']) else 0 - if hh['dislocated']: + total_pop_node += ( + hh["numprec"] if ~np.isnan(hh["numprec"]) else 0 + ) + if hh["dislocated"]: # ..todo Lumebrton dislocation time model. Replace with that of Seaside when available - return_time = DislocationUtil.disloc_time_mode(hh, dt_params) + return_time = DislocationUtil.disloc_time_mode( + hh, dt_params + ) for t in range(return_time): - if t <= T and return_type == 'step_function': - num_dilocated[t] += hh['numprec'] if ~np.isnan(hh['numprec']) else 0 - elif t <= T and return_type == 'linear': + if t <= T and return_type == "step_function": + num_dilocated[t] += ( + hh["numprec"] + if ~np.isnan(hh["numprec"]) + else 0 + ) + elif t <= T and return_type == "linear": pass # ..todo Add linear return here for t in range(T + 1): - values = [t, n[0], total_pop_node - num_dilocated[t], total_pop_node] - dynamic_params[n[1]] = dynamic_params[n[1]].append(dict(zip(dp_dict_col, values)), - ignore_index=True) - elif net == 'WATER': + values = [ + t, + n[0], + total_pop_node - num_dilocated[t], + total_pop_node, + ] + dynamic_params[n[1]] = dynamic_params[n[1]].append( + dict(zip(dp_dict_col, values)), ignore_index=True + ) + elif net == "WATER": node_pop = {} for u, v, a in N.G.edges(data=True): if u[1] != nn or v[1] != nn: continue - guid = a['data']['inf_data'].guid + guid = a["data"]["inf_data"].guid # Find building in the service area of the pipe - serv_area = mapping_data[mapping_data['edge_guid'] == guid] + serv_area = mapping_data[mapping_data["edge_guid"] == guid] start_node = u[0] if start_node not in node_pop.keys(): - node_pop[start_node] = {'total_pop_node': 0, - 'num_dilocated': {t: 0 for t in range(T + 1)}} + node_pop[start_node] = { + "total_pop_node": 0, + "num_dilocated": {t: 0 for t in range(T + 1)}, + } end_node = v[0] if end_node not in node_pop.keys(): - node_pop[end_node] = {'total_pop_node': 0, - 'num_dilocated': {t: 0 for t in range(T + 1)}} + node_pop[end_node] = { + "total_pop_node": 0, + "num_dilocated": {t: 0 for t in range(T + 1)}, + } # compute dynamic_params for _, bldg in serv_area.iterrows(): - pop_bldg_dict = pop_dislocation[pop_dislocation['guid'] == bldg['bldg_guid']] + pop_bldg_dict = pop_dislocation[ + pop_dislocation["guid"] == bldg["bldg_guid"] + ] for _, hh in pop_bldg_dict.iterrows(): # half of the arc's demand is assigned to each node # also, each arc is counted twice both as (u,v) and (v,u) - node_pop[start_node]['total_pop_node'] 
+= hh['numprec'] / 4 \ - if ~np.isnan(hh['numprec']) else 0 - node_pop[end_node]['total_pop_node'] += hh['numprec'] / 4 \ - if ~np.isnan(hh['numprec']) else 0 - if hh['dislocated']: + node_pop[start_node]["total_pop_node"] += ( + hh["numprec"] / 4 if ~np.isnan(hh["numprec"]) else 0 + ) + node_pop[end_node]["total_pop_node"] += ( + hh["numprec"] / 4 if ~np.isnan(hh["numprec"]) else 0 + ) + if hh["dislocated"]: # ..todo Lumebrton dislocation time model. Replace with that of Seaside when available - return_time = DislocationUtil.disloc_time_mode(hh, dt_params) + return_time = DislocationUtil.disloc_time_mode( + hh, dt_params + ) for t in range(return_time): - if t <= T and return_type == 'step_function': - node_pop[start_node]['num_dilocated'][t] += hh['numprec'] / 4 \ - if ~np.isnan(hh['numprec']) else 0 - node_pop[end_node]['num_dilocated'][t] += hh['numprec'] / 4 \ - if ~np.isnan(hh['numprec']) else 0 - elif t <= T and return_type == 'linear': + if t <= T and return_type == "step_function": + node_pop[start_node]["num_dilocated"][t] += ( + hh["numprec"] / 4 + if ~np.isnan(hh["numprec"]) + else 0 + ) + node_pop[end_node]["num_dilocated"][t] += ( + hh["numprec"] / 4 + if ~np.isnan(hh["numprec"]) + else 0 + ) + elif t <= T and return_type == "linear": pass # ..todo Add linear return here for n, val in node_pop.items(): for t in range(T + 1): - values = [t, n, val['total_pop_node'] - val['num_dilocated'][t], - val['total_pop_node']] - dynamic_params[nn] = dynamic_params[nn].append(dict(zip(dp_dict_col, values)), - ignore_index=True) - with open(output_file, 'wb') as f: + values = [ + t, + n, + val["total_pop_node"] - val["num_dilocated"][t], + val["total_pop_node"], + ] + dynamic_params[nn] = dynamic_params[nn].append( + dict(zip(dp_dict_col, values)), ignore_index=True + ) + with open(output_file, "wb") as f: pickle.dump(dynamic_params, f) return dynamic_params @staticmethod def disloc_time_mode(household_data, dt_params): - race_white = 1 if household_data['race'] == 1 else 0 - race_black = 1 if household_data['race'] == 2 else 0 - hispan = household_data['hispan'] if ~np.isnan(household_data['hispan']) else 0 + race_white = 1 if household_data["race"] == 1 else 0 + race_black = 1 if household_data["race"] == 2 else 0 + hispan = household_data["hispan"] if ~np.isnan(household_data["hispan"]) else 0 # ..todo verify that the explanatory variable correspond to columns in dt_params # ..todo Replace random insurance assumption - linear_term = household_data['DS_0'] * dt_params['DS0'] + household_data['DS_1'] * dt_params['DS1'] + \ - household_data['DS_2'] * dt_params['DS2'] + household_data['DS_3'] * dt_params[ - 'DS3'] + race_white * dt_params['white'] + race_black * dt_params['black'] + hispan * \ - dt_params['hispanic'] + np.random.choice([0, 1], p=[.15, .85]) * dt_params['insurance'] + linear_term = ( + household_data["DS_0"] * dt_params["DS0"] + + household_data["DS_1"] * dt_params["DS1"] + + household_data["DS_2"] * dt_params["DS2"] + + household_data["DS_3"] * dt_params["DS3"] + + race_white * dt_params["white"] + + race_black * dt_params["black"] + + hispan * dt_params["hispanic"] + + np.random.choice([0, 1], p=[0.15, 0.85]) * dt_params["insurance"] + ) # household_data['randincome']/1000*dt_params['income']+\#!!! income data disloc_time = np.exp(linear_term) - return_time = math.ceil(disloc_time / 7) # !!! assuming each time step is one week + return_time = math.ceil( + disloc_time / 7 + ) # !!! 
assuming each time step is one week return return_time @staticmethod def dynamic_parameters(N, original_N, t, dynamic_params): for n, d in N.G.nodes(data=True): - data = dynamic_params[d['data']['inf_data'].net_id] - if d['data']['inf_data'].demand < 0: - current_pop = data.loc[(data['node'] == n[0]) & (data['time'] == t), 'current pop'].iloc[0] - total_pop = data.loc[(data['node'] == n[0]) & (data['time'] == t), 'total pop'].iloc[0] - original_demand = original_N.G.nodes[n]['data']['inf_data'].demand - d['data']['inf_data'].demand = original_demand * current_pop / total_pop + data = dynamic_params[d["data"]["inf_data"].net_id] + if d["data"]["inf_data"].demand < 0: + current_pop = data.loc[ + (data["node"] == n[0]) & (data["time"] == t), "current pop" + ].iloc[0] + total_pop = data.loc[ + (data["node"] == n[0]) & (data["time"] == t), "total pop" + ].iloc[0] + original_demand = original_N.G.nodes[n]["data"]["inf_data"].demand + d["data"]["inf_data"].demand = original_demand * current_pop / total_pop diff --git a/pyincore/analyses/indp/indp.py b/pyincore/analyses/indp/indp.py index 81da4cacd..811e8db82 100644 --- a/pyincore/analyses/indp/indp.py +++ b/pyincore/analyses/indp/indp.py @@ -26,18 +26,18 @@ class INDP(BaseAnalysis): """ - This class runs INDP or td-INDP for a given number of time steps and input parameters.This analysis takes a - decentralized approach to solve the Interdependent Network Design Problem (INDP), a family of - centralized Mixed-Integer Programming (MIP) models, which find the optimal restoration strategy of disrupted - networked systems subject to budget and operational constraints. + This class runs INDP or td-INDP for a given number of time steps and input parameters.This analysis takes a + decentralized approach to solve the Interdependent Network Design Problem (INDP), a family of + centralized Mixed-Integer Programming (MIP) models, which find the optimal restoration strategy of disrupted + networked systems subject to budget and operational constraints. - Contributors - | Science: Hesam Talebiyan - | Implementation: Chen Wang and NCSA IN-CORE Dev Team + Contributors + | Science: Hesam Talebiyan + | Implementation: Chen Wang and NCSA IN-CORE Dev Team - Args: - incore_client (IncoreClient): Service authentication. - """ + Args: + incore_client (IncoreClient): Service authentication. 
+ """ def __init__(self, incore_client): super(INDP, self).__init__(incore_client) @@ -49,17 +49,17 @@ def run(self): MAGS = self.get_parameter("MAGS") filter_sce = None fail_sce_param = { - 'TYPE': network_type, - 'SAMPLE_RANGE': sample_range, - 'MAGS': MAGS, - 'FILTER_SCE': filter_sce, + "TYPE": network_type, + "SAMPLE_RANGE": sample_range, + "MAGS": MAGS, + "FILTER_SCE": filter_sce, } - RC = self.get_parameter('RC') - layers = self.get_parameter('layers') - method = self.get_parameter('method') + RC = self.get_parameter("RC") + layers = self.get_parameter("layers") + method = self.get_parameter("method") - t_steps = self.get_parameter('t_steps') + t_steps = self.get_parameter("t_steps") if t_steps is None: t_steps = 10 @@ -73,14 +73,25 @@ def run(self): bldgs2elec_dataset = self.get_input_dataset("bldgs2elec") bldgs2wter_dataset = self.get_input_dataset("bldgs2wter") - if dislocation_data_type is not None and return_model is not None and testbed_name is not \ - None and bldgs2elec_dataset is not None and bldgs2wter_dataset is not None: + if ( + dislocation_data_type is not None + and return_model is not None + and testbed_name is not None + and bldgs2elec_dataset is not None + and bldgs2wter_dataset is not None + ): dynamic_params = { "TYPE": dislocation_data_type, "RETURN": return_model, "TESTBED": testbed_name, - "MAPPING": {'POWER': bldgs2elec_dataset.get_dataframe_from_csv(low_memory=False), - 'WATER': bldgs2wter_dataset.get_dataframe_from_csv(low_memory=False)} + "MAPPING": { + "POWER": bldgs2elec_dataset.get_dataframe_from_csv( + low_memory=False + ), + "WATER": bldgs2wter_dataset.get_dataframe_from_csv( + low_memory=False + ), + }, } extra_commodity = self.get_parameter("extra_commodity") @@ -93,18 +104,34 @@ def run(self): if save_model is None: save_model = False - action_result, cost_result, runtime_result = self.run_method(fail_sce_param, RC, layers, method=method, - t_steps=t_steps, - misc={'DYNAMIC_PARAMS': dynamic_params, - 'EXTRA_COMMODITY': extra_commodity, - 'TIME_RESOURCE': time_resource}, - save_model=save_model) + action_result, cost_result, runtime_result = self.run_method( + fail_sce_param, + RC, + layers, + method=method, + t_steps=t_steps, + misc={ + "DYNAMIC_PARAMS": dynamic_params, + "EXTRA_COMMODITY": extra_commodity, + "TIME_RESOURCE": time_resource, + }, + save_model=save_model, + ) self.set_result_csv_data("action", action_result, name="actions.csv") self.set_result_csv_data("cost", cost_result, name="costs.csv") self.set_result_csv_data("runtime", runtime_result, name="run_time.csv") - def run_method(self, fail_sce_param, v_r, layers, method, t_steps=10, misc=None, save_model=False): + def run_method( + self, + fail_sce_param, + v_r, + layers, + method, + t_steps=10, + misc=None, + save_model=False, + ): """ This function runs restoration analysis based on INDP or td-INDP for different numbers of resources. 
@@ -124,206 +151,369 @@ def run_method(self, fail_sce_param, v_r, layers, method, t_steps=10, misc=None, """ # input files - wf_repair_cost = self.get_input_dataset("wf_repair_cost").get_dataframe_from_csv(low_memory=False) - wf_repair_cost['budget'] = wf_repair_cost['budget'].str.split(',') - wf_repair_cost['repaircost'] = wf_repair_cost['repaircost'].str.split(',') - epf_repair_cost = self.get_input_dataset("epf_repair_cost").get_dataframe_from_csv(low_memory=False) - epf_repair_cost['budget'] = epf_repair_cost['budget'].str.split(',') - epf_repair_cost['repaircost'] = epf_repair_cost['repaircost'].str.split(',') - - pipeline_restoration_time = self.get_input_dataset("pipeline_restoration_time").get_dataframe_from_csv( - low_memory=False) - pipeline_repair_cost = self.get_input_dataset("pipeline_repair_cost").get_dataframe_from_csv(low_memory=False) - - powerline_supply_demand_info = self.get_input_dataset("powerline_supply_demand_info").get_dataframe_from_csv( - low_memory=False) - epf_supply_demand_info = self.get_input_dataset("epf_supply_demand_info").get_dataframe_from_csv( - low_memory=False) - power_network = NetworkDataset.from_dataset(self.get_input_dataset("power_network")) - power_arcs = power_network.links.get_dataframe_from_shapefile().merge(powerline_supply_demand_info, on="guid") - power_nodes = power_network.nodes.get_dataframe_from_shapefile().merge(epf_supply_demand_info, on="guid") - - pipeline_supply_demand_info = self.get_input_dataset("pipeline_supply_demand_info").get_dataframe_from_csv( - low_memory=False) - wf_supply_demand_info = self.get_input_dataset("wf_supply_demand_info").get_dataframe_from_csv(low_memory=False) - water_network = NetworkDataset.from_dataset(self.get_input_dataset("water_network")) - water_arcs = water_network.links.get_dataframe_from_shapefile().merge(pipeline_supply_demand_info, on="guid") + wf_repair_cost = self.get_input_dataset( + "wf_repair_cost" + ).get_dataframe_from_csv(low_memory=False) + wf_repair_cost["budget"] = wf_repair_cost["budget"].str.split(",") + wf_repair_cost["repaircost"] = wf_repair_cost["repaircost"].str.split(",") + epf_repair_cost = self.get_input_dataset( + "epf_repair_cost" + ).get_dataframe_from_csv(low_memory=False) + epf_repair_cost["budget"] = epf_repair_cost["budget"].str.split(",") + epf_repair_cost["repaircost"] = epf_repair_cost["repaircost"].str.split(",") + + pipeline_restoration_time = self.get_input_dataset( + "pipeline_restoration_time" + ).get_dataframe_from_csv(low_memory=False) + pipeline_repair_cost = self.get_input_dataset( + "pipeline_repair_cost" + ).get_dataframe_from_csv(low_memory=False) + + powerline_supply_demand_info = self.get_input_dataset( + "powerline_supply_demand_info" + ).get_dataframe_from_csv(low_memory=False) + epf_supply_demand_info = self.get_input_dataset( + "epf_supply_demand_info" + ).get_dataframe_from_csv(low_memory=False) + power_network = NetworkDataset.from_dataset( + self.get_input_dataset("power_network") + ) + power_arcs = power_network.links.get_dataframe_from_shapefile().merge( + powerline_supply_demand_info, on="guid" + ) + power_nodes = power_network.nodes.get_dataframe_from_shapefile().merge( + epf_supply_demand_info, on="guid" + ) + + pipeline_supply_demand_info = self.get_input_dataset( + "pipeline_supply_demand_info" + ).get_dataframe_from_csv(low_memory=False) + wf_supply_demand_info = self.get_input_dataset( + "wf_supply_demand_info" + ).get_dataframe_from_csv(low_memory=False) + water_network = NetworkDataset.from_dataset( + 
self.get_input_dataset("water_network") + ) + water_arcs = water_network.links.get_dataframe_from_shapefile().merge( + pipeline_supply_demand_info, on="guid" + ) water_nodes = water_network.nodes.get_dataframe_from_shapefile() water_nodes = water_nodes.merge(wf_supply_demand_info, on="guid") - interdep = self.get_input_dataset("interdep").get_dataframe_from_csv(low_memory=False) + interdep = self.get_input_dataset("interdep").get_dataframe_from_csv( + low_memory=False + ) # get rid of distribution nodes - wf_failure_state_df = self.get_input_dataset("wf_failure_state").get_dataframe_from_csv( - low_memory=False).dropna() - wf_damage_state_df = self.get_input_dataset("wf_damage_state").get_dataframe_from_csv(low_memory=False).dropna() - pipeline_failure_state_df = self.get_input_dataset("pipeline_failure_state").get_dataframe_from_csv( - low_memory=False).dropna() - epf_failure_state_df = self.get_input_dataset("epf_failure_state").get_dataframe_from_csv( - low_memory=False).dropna() - epf_damage_state_df = self.get_input_dataset("epf_damage_state").get_dataframe_from_csv( - low_memory=False).dropna() + wf_failure_state_df = ( + self.get_input_dataset("wf_failure_state") + .get_dataframe_from_csv(low_memory=False) + .dropna() + ) + wf_damage_state_df = ( + self.get_input_dataset("wf_damage_state") + .get_dataframe_from_csv(low_memory=False) + .dropna() + ) + pipeline_failure_state_df = ( + self.get_input_dataset("pipeline_failure_state") + .get_dataframe_from_csv(low_memory=False) + .dropna() + ) + epf_failure_state_df = ( + self.get_input_dataset("epf_failure_state") + .get_dataframe_from_csv(low_memory=False) + .dropna() + ) + epf_damage_state_df = ( + self.get_input_dataset("epf_damage_state") + .get_dataframe_from_csv(low_memory=False) + .dropna() + ) sample_range = self.get_parameter("sample_range") - initial_node = INDPUtil.generate_intial_node_failure_state(wf_failure_state_df, epf_failure_state_df, - water_nodes, power_nodes, sample_range) - initial_link = INDPUtil.generate_intial_link_failure_state(pipeline_failure_state_df, - water_arcs, power_arcs, sample_range) - - pop_dislocation = self.get_input_dataset("pop_dislocation").get_dataframe_from_csv(low_memory=False) - - wf_restoration_time = self.get_input_dataset("wf_restoration_time").get_dataframe_from_csv(low_memory=False) + initial_node = INDPUtil.generate_intial_node_failure_state( + wf_failure_state_df, + epf_failure_state_df, + water_nodes, + power_nodes, + sample_range, + ) + initial_link = INDPUtil.generate_intial_link_failure_state( + pipeline_failure_state_df, water_arcs, power_arcs, sample_range + ) + + pop_dislocation = self.get_input_dataset( + "pop_dislocation" + ).get_dataframe_from_csv(low_memory=False) + + wf_restoration_time = self.get_input_dataset( + "wf_restoration_time" + ).get_dataframe_from_csv(low_memory=False) wf_restoration_time = wf_restoration_time.merge(wf_damage_state_df, on="guid") - epf_restoration_time = self.get_input_dataset("epf_restoration_time").get_dataframe_from_csv(low_memory=False) - epf_restoration_time = epf_restoration_time.merge(epf_damage_state_df, on="guid") + epf_restoration_time = self.get_input_dataset( + "epf_restoration_time" + ).get_dataframe_from_csv(low_memory=False) + epf_restoration_time = epf_restoration_time.merge( + epf_damage_state_df, on="guid" + ) dt_params_dataset = self.get_input_dataset("dt_params") if dt_params_dataset is not None: dt_params = dt_params_dataset.get_json_reader() else: - dt_params = {'DS0': 1.00, 'DS1': 2.33, 'DS2': 2.49, 'DS3': 3.62, 'white': 
0.78, 'black': 0.88, - 'hispanic': 0.83, 'income': -0.00, 'insurance': 1.06} + dt_params = { + "DS0": 1.00, + "DS1": 2.33, + "DS2": 2.49, + "DS3": 3.62, + "white": 0.78, + "black": 0.88, + "hispanic": 0.83, + "income": -0.00, + "insurance": 1.06, + } # results action_result = [] cost_result = [] runtime_result = [] for v_i, v in enumerate(v_r): - if method == 'INDP': - params = {"NUM_ITERATIONS": t_steps, "OUTPUT_DIR": 'indp_results', "V": v, - "T": 1, 'L': layers, "ALGORITHM": "INDP"} - elif method == 'TDINDP': - params = {"NUM_ITERATIONS": t_steps, "OUTPUT_DIR": 'tdindp_results', "V": v, - "T": t_steps, 'L': layers, "ALGORITHM": "INDP"} - if 'WINDOW_LENGTH' in misc.keys(): - params["WINDOW_LENGTH"] = misc['WINDOW_LENGTH'] + if method == "INDP": + params = { + "NUM_ITERATIONS": t_steps, + "OUTPUT_DIR": "indp_results", + "V": v, + "T": 1, + "L": layers, + "ALGORITHM": "INDP", + } + elif method == "TDINDP": + params = { + "NUM_ITERATIONS": t_steps, + "OUTPUT_DIR": "tdindp_results", + "V": v, + "T": t_steps, + "L": layers, + "ALGORITHM": "INDP", + } + if "WINDOW_LENGTH" in misc.keys(): + params["WINDOW_LENGTH"] = misc["WINDOW_LENGTH"] else: - raise ValueError('Wrong method name: ' + method + '. We currently only support INDP and TDINDP as ' - 'method name') - - params['EXTRA_COMMODITY'] = misc['EXTRA_COMMODITY'] - params['TIME_RESOURCE'] = misc['TIME_RESOURCE'] - params['DYNAMIC_PARAMS'] = misc['DYNAMIC_PARAMS'] - if misc['DYNAMIC_PARAMS']: - params['OUTPUT_DIR'] = 'dp_' + params['OUTPUT_DIR'] - - print('----Running for resources: ' + str(params['V'])) - for m in fail_sce_param['MAGS']: - for i in fail_sce_param['SAMPLE_RANGE']: + raise ValueError( + "Wrong method name: " + + method + + ". We currently only support INDP and TDINDP as " + "method name" + ) + + params["EXTRA_COMMODITY"] = misc["EXTRA_COMMODITY"] + params["TIME_RESOURCE"] = misc["TIME_RESOURCE"] + params["DYNAMIC_PARAMS"] = misc["DYNAMIC_PARAMS"] + if misc["DYNAMIC_PARAMS"]: + params["OUTPUT_DIR"] = "dp_" + params["OUTPUT_DIR"] + + print("----Running for resources: " + str(params["V"])) + for m in fail_sce_param["MAGS"]: + for i in fail_sce_param["SAMPLE_RANGE"]: params["SIM_NUMBER"] = i params["MAGNITUDE"] = m - print('---Running Magnitude ' + str(m) + ' sample ' + str(i) + '...') - if params['TIME_RESOURCE']: - print('Computing repair times...') + print( + "---Running Magnitude " + str(m) + " sample " + str(i) + "..." 
+ ) + if params["TIME_RESOURCE"]: + print("Computing repair times...") wf_repair_cost_sample = wf_repair_cost.copy() - wf_repair_cost_sample["budget"] = wf_repair_cost_sample['budget'].apply(lambda x: float(x[i])) - wf_repair_cost_sample["repaircost"] = \ - wf_repair_cost_sample['repaircost'].apply(lambda x: float(x[i])) + wf_repair_cost_sample["budget"] = wf_repair_cost_sample[ + "budget" + ].apply(lambda x: float(x[i])) + wf_repair_cost_sample["repaircost"] = wf_repair_cost_sample[ + "repaircost" + ].apply(lambda x: float(x[i])) epf_repair_cost_sample = epf_repair_cost.copy() - epf_repair_cost_sample["budget"] = epf_repair_cost_sample['budget'].apply(lambda x: float(x[i])) - epf_repair_cost_sample["repaircost"] = \ - epf_repair_cost_sample['repaircost'].apply(lambda x: float(x[i])) + epf_repair_cost_sample["budget"] = epf_repair_cost_sample[ + "budget" + ].apply(lambda x: float(x[i])) + epf_repair_cost_sample["repaircost"] = epf_repair_cost_sample[ + "repaircost" + ].apply(lambda x: float(x[i])) # logic to read repair time wf_restoration_time_sample = pd.DataFrame() for index, row in wf_restoration_time.iterrows(): - failure_state = int(row["sample_damage_states"].split(",")[i].split("_")[1]) # DS_0,1,2,3,4 + failure_state = int( + row["sample_damage_states"].split(",")[i].split("_")[1] + ) # DS_0,1,2,3,4 if failure_state == 0: repairtime = 0 else: repairtime = row["PF_" + str(failure_state - 1)] - wf_restoration_time_sample = pd.concat([wf_restoration_time_sample, - pd.DataFrame([{"guid": row["guid"], - "repairtime": repairtime}])], - ignore_index=True) + wf_restoration_time_sample = pd.concat( + [ + wf_restoration_time_sample, + pd.DataFrame( + [ + { + "guid": row["guid"], + "repairtime": repairtime, + } + ] + ), + ], + ignore_index=True, + ) epf_restoration_time_sample = pd.DataFrame() for index, row in epf_restoration_time.iterrows(): - failure_state = int(row["sample_damage_states"].split(",")[i].split("_")[1]) # DS_0,1,2,3,4 + failure_state = int( + row["sample_damage_states"].split(",")[i].split("_")[1] + ) # DS_0,1,2,3,4 if failure_state == 0: repairtime = 0 else: repairtime = row["PF_" + str(failure_state - 1)] - epf_restoration_time_sample = pd.concat([epf_restoration_time_sample, - pd.DataFrame([{"guid": row["guid"], - "repairtime":repairtime}])], - ignore_index=True) - - water_nodes, water_arcs, power_nodes, power_arcs = \ - INDPUtil.time_resource_usage_curves(power_arcs, power_nodes, water_arcs, water_nodes, - wf_restoration_time_sample, wf_repair_cost_sample, - pipeline_restoration_time, pipeline_repair_cost, - epf_restoration_time_sample, epf_repair_cost_sample) + epf_restoration_time_sample = pd.concat( + [ + epf_restoration_time_sample, + pd.DataFrame( + [ + { + "guid": row["guid"], + "repairtime": repairtime, + } + ] + ), + ], + ignore_index=True, + ) + + ( + water_nodes, + water_arcs, + power_nodes, + power_arcs, + ) = INDPUtil.time_resource_usage_curves( + power_arcs, + power_nodes, + water_arcs, + water_nodes, + wf_restoration_time_sample, + wf_repair_cost_sample, + pipeline_restoration_time, + pipeline_repair_cost, + epf_restoration_time_sample, + epf_repair_cost_sample, + ) print("Initializing network...") - params["N"] = INDPUtil.initialize_network(power_nodes, power_arcs, water_nodes, water_arcs, - interdep, extra_commodity=params["EXTRA_COMMODITY"]) - - if params['DYNAMIC_PARAMS']: + params["N"] = INDPUtil.initialize_network( + power_nodes, + power_arcs, + water_nodes, + water_arcs, + interdep, + extra_commodity=params["EXTRA_COMMODITY"], + ) + + if 
params["DYNAMIC_PARAMS"]: print("Computing dynamic demand based on dislocation data...") - dyn_dmnd = DislocationUtil.create_dynamic_param(params, pop_dislocation, dt_params, - N=params["N"], T=params["NUM_ITERATIONS"]) - params['DYNAMIC_PARAMS']['DEMAND_DATA'] = dyn_dmnd - - if fail_sce_param['TYPE'] == 'from_csv': - InfrastructureUtil.add_from_csv_failure_scenario(params["N"], sample=i, - initial_node=initial_node, - initial_link=initial_link) + dyn_dmnd = DislocationUtil.create_dynamic_param( + params, + pop_dislocation, + dt_params, + N=params["N"], + T=params["NUM_ITERATIONS"], + ) + params["DYNAMIC_PARAMS"]["DEMAND_DATA"] = dyn_dmnd + + if fail_sce_param["TYPE"] == "from_csv": + InfrastructureUtil.add_from_csv_failure_scenario( + params["N"], + sample=i, + initial_node=initial_node, + initial_link=initial_link, + ) else: - raise ValueError('Wrong failure scenario data type.') + raise ValueError("Wrong failure scenario data type.") if params["ALGORITHM"] == "INDP": - indp_results = self.run_indp(params, layers=params['L'], controlled_layers=params['L'], - T=params["T"], save_model=save_model, print_cmd_line=False, - co_location=False) + indp_results = self.run_indp( + params, + layers=params["L"], + controlled_layers=params["L"], + T=params["T"], + save_model=save_model, + print_cmd_line=False, + co_location=False, + ) for t in indp_results.results: - actions = indp_results[t]['actions'] - costs = indp_results[t]['costs'] - runtimes = indp_results[t]['run_time'] + actions = indp_results[t]["actions"] + costs = indp_results[t]["costs"] + runtimes = indp_results[t]["run_time"] for a in actions: - action_result.append({ + action_result.append( + { + "RC": str(v_i), + "layers": "L" + str(len(layers)), + "magnitude": "m" + str(m), + "sample_num": str(i), + "t": str(t), + "action": a, + } + ) + + runtime_result.append( + { "RC": str(v_i), - "layers": 'L' + str(len(layers)), - "magnitude": 'm' + str(m), + "layers": "L" + str(len(layers)), + "magnitude": "m" + str(m), "sample_num": str(i), "t": str(t), - "action": a, - }) - - runtime_result.append({ - "RC": str(v_i), - "layers": 'L' + str(len(layers)), - "magnitude": 'm' + str(m), - "sample_num": str(i), - "t": str(t), - "runtime": runtimes, - }) - - cost_result.append({ - "RC": str(v_i), - "layers": 'L' + str(len(layers)), - "magnitude": 'm' + str(m), - "sample_num": str(i), - "t": str(t), - "Space Prep": str(costs["Space Prep"]), - "Arc": str(costs["Arc"]), - "Node": str(costs["Node"]), - "Over Supply": str(costs["Over Supply"]), - "Under Supply": str(costs["Under Supply"]), - "Flow": str(costs["Flow"]), - "Total": str(costs["Total"]), - "Under Supply Perc": str(costs["Under Supply Perc"]), - }) + "runtime": runtimes, + } + ) + + cost_result.append( + { + "RC": str(v_i), + "layers": "L" + str(len(layers)), + "magnitude": "m" + str(m), + "sample_num": str(i), + "t": str(t), + "Space Prep": str(costs["Space Prep"]), + "Arc": str(costs["Arc"]), + "Node": str(costs["Node"]), + "Over Supply": str(costs["Over Supply"]), + "Under Supply": str(costs["Under Supply"]), + "Flow": str(costs["Flow"]), + "Total": str(costs["Total"]), + "Under Supply Perc": str( + costs["Under Supply Perc"] + ), + } + ) else: - raise ValueError('Wrong algorithm type.') + raise ValueError("Wrong algorithm type.") return action_result, cost_result, runtime_result - def run_indp(self, params, layers=None, controlled_layers=None, functionality=None, T=1, save=True, suffix="", - forced_actions=False, save_model=False, print_cmd_line=True, co_location=True): + def run_indp( + 
self, + params, + layers=None, + controlled_layers=None, + functionality=None, + T=1, + save=True, + suffix="", + forced_actions=False, + save_model=False, + print_cmd_line=True, + co_location=True, + ): """ This function runs iINDP (T=1) or td-INDP for a given number of time steps and input parameters. @@ -378,29 +568,60 @@ def run_indp(self, params, layers=None, controlled_layers=None, functionality=No print("Num iters=", params["NUM_ITERATIONS"]) # Run INDP for 1 time step (original INDP). - output_dir = params["OUTPUT_DIR"] + '_L' + str(len(layers)) + '_m' + str( - params["MAGNITUDE"]) + "_v" + out_dir_suffix_res + output_dir = ( + params["OUTPUT_DIR"] + + "_L" + + str(len(layers)) + + "_m" + + str(params["MAGNITUDE"]) + + "_v" + + out_dir_suffix_res + ) # Initial calculations. - if params['DYNAMIC_PARAMS']: + if params["DYNAMIC_PARAMS"]: original_N = copy.deepcopy(interdependent_net) # !!! deepcopy - DislocationUtil.dynamic_parameters(interdependent_net, original_N, 0, - params['DYNAMIC_PARAMS']['DEMAND_DATA']) + DislocationUtil.dynamic_parameters( + interdependent_net, + original_N, + 0, + params["DYNAMIC_PARAMS"]["DEMAND_DATA"], + ) v_0 = {x: 0 for x in params["V"].keys()} - results = self.indp(interdependent_net, v_0, 1, layers, controlled_layers=controlled_layers, - functionality=functionality, co_location=co_location) + results = self.indp( + interdependent_net, + v_0, + 1, + layers, + controlled_layers=controlled_layers, + functionality=functionality, + co_location=co_location, + ) indp_results = results[1] if save_model: INDPUtil.save_indp_model_to_file(results[0], output_dir + "/Model", 0) for i in range(params["NUM_ITERATIONS"]): print("-Time Step (iINDP)", i + 1, "/", params["NUM_ITERATIONS"]) - if params['DYNAMIC_PARAMS']: - DislocationUtil.dynamic_parameters(interdependent_net, original_N, i + 1, - params['DYNAMIC_PARAMS']['DEMAND_DATA']) - results = self.indp(interdependent_net, params["V"], T, layers, controlled_layers=controlled_layers, - co_location=co_location, functionality=functionality) + if params["DYNAMIC_PARAMS"]: + DislocationUtil.dynamic_parameters( + interdependent_net, + original_N, + i + 1, + params["DYNAMIC_PARAMS"]["DEMAND_DATA"], + ) + results = self.indp( + interdependent_net, + params["V"], + T, + layers, + controlled_layers=controlled_layers, + co_location=co_location, + functionality=functionality, + ) indp_results.extend(results[1], t_offset=i + 1) if save_model: - INDPUtil.save_indp_model_to_file(results[0], output_dir + "/Model", i + 1) + INDPUtil.save_indp_model_to_file( + results[0], output_dir + "/Model", i + 1 + ) # Modify network to account for recovery and calculate components. INDPUtil.apply_recovery(interdependent_net, indp_results, i + 1) else: @@ -411,14 +632,34 @@ def run_indp(self, params, layers=None, controlled_layers=None, functionality=No if "WINDOW_LENGTH" in params: time_window_length = params["WINDOW_LENGTH"] num_time_windows = T - output_dir = params["OUTPUT_DIR"] + '_L' + str(len(layers)) + "_m" + str( - params["MAGNITUDE"]) + "_v" + out_dir_suffix_res - - print("Running td-INDP (T=" + str(T) + ", Window size=" + str(time_window_length) + ")") + output_dir = ( + params["OUTPUT_DIR"] + + "_L" + + str(len(layers)) + + "_m" + + str(params["MAGNITUDE"]) + + "_v" + + out_dir_suffix_res + ) + + print( + "Running td-INDP (T=" + + str(T) + + ", Window size=" + + str(time_window_length) + + ")" + ) # Initial percolation calculations. 
v_0 = {x: 0 for x in params["V"].keys()} - results = self.indp(interdependent_net, v_0, 1, layers, controlled_layers=controlled_layers, - functionality=functionality, co_location=co_location) + results = self.indp( + interdependent_net, + v_0, + 1, + layers, + controlled_layers=controlled_layers, + functionality=functionality, + co_location=co_location, + ) indp_results = results[1] if save_model: INDPUtil.save_indp_model_to_file(results[0], output_dir + "/Model", 0) @@ -436,11 +677,19 @@ def run_indp(self, params, layers=None, controlled_layers=None, functionality=No for d in range(diff): functionality_t[max_t + d + 1] = functionality_t[max_t] # Run td-INDP. - results = self.indp(interdependent_net, params["V"], time_window_length + 1, layers, - controlled_layers=controlled_layers, functionality=functionality_t, - co_location=co_location) + results = self.indp( + interdependent_net, + params["V"], + time_window_length + 1, + layers, + controlled_layers=controlled_layers, + functionality=functionality_t, + co_location=co_location, + ) if save_model: - INDPUtil.save_indp_model_to_file(results[0], output_dir + "/Model", n + 1) + INDPUtil.save_indp_model_to_file( + results[0], output_dir + "/Model", n + 1 + ) if "WINDOW_LENGTH" in params: indp_results.extend(results[1], t_offset=n + 1, t_start=1, t_end=2) # Modify network for recovery actions and calculate components. @@ -452,14 +701,26 @@ def run_indp(self, params, layers=None, controlled_layers=None, functionality=No INDPUtil.apply_recovery(interdependent_net, indp_results, t) # Save results of current simulation. if save: - if not os.path.exists(output_dir + '/agents'): - os.makedirs(output_dir + '/agents') - indp_results.to_csv_layer(output_dir + '/agents', params["SIM_NUMBER"], suffix=suffix) + if not os.path.exists(output_dir + "/agents"): + os.makedirs(output_dir + "/agents") + indp_results.to_csv_layer( + output_dir + "/agents", params["SIM_NUMBER"], suffix=suffix + ) return indp_results - def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=None, fixed_nodes=None, - print_cmd=True, co_location=True): + def indp( + self, + N, + v_r, + T=1, + layers=None, + controlled_layers=None, + functionality=None, + fixed_nodes=None, + print_cmd=True, + co_location=True, + ): """ INDP optimization problem in Pyomo. It also solves td-INDP if T > 1. @@ -513,29 +774,69 @@ def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=N m.v_r = v_r m.functionality = functionality - '''Sets and Dictionaries''' - g_prime_nodes = [n[0] for n in N.G.nodes(data=True) if n[1]['data']['inf_data'].net_id in layers] + """Sets and Dictionaries""" + g_prime_nodes = [ + n[0] + for n in N.G.nodes(data=True) + if n[1]["data"]["inf_data"].net_id in layers + ] g_prime = N.G.subgraph(g_prime_nodes) # Nodes in controlled network. m.n_hat_nodes = pyo.Set( - initialize=[n[0] for n in g_prime.nodes(data=True) if n[1]['data']['inf_data'].net_id in controlled_layers]) + initialize=[ + n[0] + for n in g_prime.nodes(data=True) + if n[1]["data"]["inf_data"].net_id in controlled_layers + ] + ) m.n_hat = g_prime.subgraph(m.n_hat_nodes.ordered_data()) # Damaged nodes in controlled network. 
m.n_hat_prime_nodes = pyo.Set( - initialize=[n[0] for n in m.n_hat.nodes(data=True) if n[1]['data']['inf_data'].repaired == 0.0]) - n_hat_prime = [n for n in m.n_hat.nodes(data=True) if n[1]['data']['inf_data'].repaired == 0.0] + initialize=[ + n[0] + for n in m.n_hat.nodes(data=True) + if n[1]["data"]["inf_data"].repaired == 0.0 + ] + ) + n_hat_prime = [ + n + for n in m.n_hat.nodes(data=True) + if n[1]["data"]["inf_data"].repaired == 0.0 + ] # Arcs in controlled network. m.a_hat = pyo.Set( - initialize=[(u, v) for u, v, a in g_prime.edges(data=True) if - a['data']['inf_data'].layer in controlled_layers]) + initialize=[ + (u, v) + for u, v, a in g_prime.edges(data=True) + if a["data"]["inf_data"].layer in controlled_layers + ] + ) # Damaged arcs in whole network m.a_prime = pyo.Set( - initialize=[(u, v) for u, v, a in g_prime.edges(data=True) if a['data']['inf_data'].functionality == 0.0]) - a_prime = [(u, v, a) for u, v, a in g_prime.edges(data=True) if a['data']['inf_data'].functionality == 0.0] + initialize=[ + (u, v) + for u, v, a in g_prime.edges(data=True) + if a["data"]["inf_data"].functionality == 0.0 + ] + ) + a_prime = [ + (u, v, a) + for u, v, a in g_prime.edges(data=True) + if a["data"]["inf_data"].functionality == 0.0 + ] # Damaged arcs in controlled network. - m.a_hat_prime = pyo.Set(initialize=[(u, v) for u, v, _ in a_prime if m.n_hat.has_node(u) - and m.n_hat.has_node(v)]) - a_hat_prime = [(u, v, a) for u, v, a in a_prime if m.n_hat.has_node(u) and m.n_hat.has_node(v)] + m.a_hat_prime = pyo.Set( + initialize=[ + (u, v) + for u, v, _ in a_prime + if m.n_hat.has_node(u) and m.n_hat.has_node(v) + ] + ) + a_hat_prime = [ + (u, v, a) + for u, v, a in a_prime + if m.n_hat.has_node(u) and m.n_hat.has_node(v) + ] # Sub-spaces m.S = pyo.Set(initialize=N.S) @@ -543,24 +844,29 @@ def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=N m.interdep_nodes = {} for u, v, a in g_prime.edges(data=True): if not functionality: - if a['data']['inf_data'].is_interdep and g_prime.nodes[u]['data']['inf_data'].functionality == 0.0: + if ( + a["data"]["inf_data"].is_interdep + and g_prime.nodes[u]["data"]["inf_data"].functionality == 0.0 + ): # print "Dependency edge goes from:",u,"to",v if v not in m.interdep_nodes: m.interdep_nodes[v] = [] - m.interdep_nodes[v].append((u, a['data']['inf_data'].gamma)) + m.interdep_nodes[v].append((u, a["data"]["inf_data"].gamma)) else: # Should populate m.n_hat with layers that are controlled. Then go through m.n_hat.edges(data=True) # to find interdependencies. for t in range(T): if t not in m.interdep_nodes: m.interdep_nodes[t] = {} - if m.n_hat.has_node(v) and a['data']['inf_data'].is_interdep: + if m.n_hat.has_node(v) and a["data"]["inf_data"].is_interdep: if functionality[t][u] == 0.0: if v not in m.interdep_nodes[t]: m.interdep_nodes[t][v] = [] - m.interdep_nodes[t][v].append((u, a['data']['inf_data'].gamma)) + m.interdep_nodes[t][v].append( + (u, a["data"]["inf_data"].gamma) + ) - '''Variables''' + """Variables""" m.time_step = pyo.Set(initialize=range(T)) # Add geographical space variables. 
if co_location: @@ -577,13 +883,13 @@ def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=N # Add variables considering extra commodity in addition to the base one node_com_idx = [] for n, d in m.n_hat.nodes(data=True): - node_com_idx.append((n, 'b')) - for key, val in d['data']['inf_data'].extra_com.items(): + node_com_idx.append((n, "b")) + for key, val in d["data"]["inf_data"].extra_com.items(): node_com_idx.append((n, key)) arc_com_idx = [] for u, v, a in m.n_hat.edges(data=True): - arc_com_idx.append((u, v, 'b')) - for key, val in a['data']['inf_data'].extra_com.items(): + arc_com_idx.append((u, v, "b")) + for key, val in a["data"]["inf_data"].extra_com.items(): arc_com_idx.append((u, v, key)) # Add over/under-supply variables for each node. m.delta_p = pyo.Var(node_com_idx, m.time_step, domain=pyo.NonNegativeReals) @@ -599,7 +905,7 @@ def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=N else: m.w_tilde[key].fix(val) - '''Populate objective function''' + """Populate objective function""" obj_func = 0 for t in range(m.T): if co_location: @@ -607,78 +913,137 @@ def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=N obj_func += s.cost * m.z[s.id, t] for u, v, a in a_hat_prime: if T == 1: - obj_func += (float(a['data']['inf_data'].reconstruction_cost) / 2.0) * m.y[u, v, t] + obj_func += ( + float(a["data"]["inf_data"].reconstruction_cost) / 2.0 + ) * m.y[u, v, t] else: - obj_func += (float(a['data']['inf_data'].reconstruction_cost) / 2.0) * m.y_tilde[u, v, t] + obj_func += ( + float(a["data"]["inf_data"].reconstruction_cost) / 2.0 + ) * m.y_tilde[u, v, t] for n, d in n_hat_prime: if T == 1: - obj_func += d['data']['inf_data'].reconstruction_cost * m.w[n, t] + obj_func += d["data"]["inf_data"].reconstruction_cost * m.w[n, t] else: - obj_func += d['data']['inf_data'].reconstruction_cost * m.w_tilde[n, t] + obj_func += ( + d["data"]["inf_data"].reconstruction_cost * m.w_tilde[n, t] + ) for n, d in m.n_hat.nodes(data=True): - obj_func += d['data']['inf_data'].oversupply_penalty * m.delta_p[n, 'b', t] - obj_func += d['data']['inf_data'].undersupply_penalty * m.delta_m[n, 'b', t] - for layer, val in d['data']['inf_data'].extra_com.items(): - obj_func += val['oversupply_penalty'] * m.delta_p[n, layer, t] - obj_func += val['undersupply_penalty'] * m.delta_m[n, layer, t] + obj_func += ( + d["data"]["inf_data"].oversupply_penalty * m.delta_p[n, "b", t] + ) + obj_func += ( + d["data"]["inf_data"].undersupply_penalty * m.delta_m[n, "b", t] + ) + for layer, val in d["data"]["inf_data"].extra_com.items(): + obj_func += val["oversupply_penalty"] * m.delta_p[n, layer, t] + obj_func += val["undersupply_penalty"] * m.delta_m[n, layer, t] for u, v, a in m.n_hat.edges(data=True): - obj_func += a['data']['inf_data'].flow_cost * m.x[u, v, 'b', t] - for layer, val in a['data']['inf_data'].extra_com.items(): - obj_func += val['flow_cost'] * m.x[u, v, layer, t] + obj_func += a["data"]["inf_data"].flow_cost * m.x[u, v, "b", t] + for layer, val in a["data"]["inf_data"].extra_com.items(): + obj_func += val["flow_cost"] * m.x[u, v, layer, t] m.Obj = pyo.Objective(rule=obj_func, sense=pyo.minimize) - '''Constraints''' + """Constraints""" # Time-dependent constraints. 
if m.T > 1: - m.initial_state_node = pyo.Constraint(m.n_hat_prime_nodes, - rule=INDPUtil.initial_state_node_rule, - doc='Initialstate at node') - m.initial_state_arc = pyo.Constraint(m.a_hat_prime, rule=INDPUtil.initial_state_arc_rule, - doc='Initial state at arc') - m.time_dependent_node = pyo.Constraint(m.n_hat_prime_nodes, - m.time_step, - rule=INDPUtil.time_dependent_node_rule, - doc='Time dependent recovery constraint at node') - m.time_dependent_arc = pyo.Constraint(m.a_hat_prime, - m.time_step, - rule=INDPUtil.time_dependent_arc_rule, - doc='Time dependent recovery constraint at arc') + m.initial_state_node = pyo.Constraint( + m.n_hat_prime_nodes, + rule=INDPUtil.initial_state_node_rule, + doc="Initialstate at node", + ) + m.initial_state_arc = pyo.Constraint( + m.a_hat_prime, + rule=INDPUtil.initial_state_arc_rule, + doc="Initial state at arc", + ) + m.time_dependent_node = pyo.Constraint( + m.n_hat_prime_nodes, + m.time_step, + rule=INDPUtil.time_dependent_node_rule, + doc="Time dependent recovery constraint at node", + ) + m.time_dependent_arc = pyo.Constraint( + m.a_hat_prime, + m.time_step, + rule=INDPUtil.time_dependent_arc_rule, + doc="Time dependent recovery constraint at arc", + ) # Enforce a_i,j to be fixed if a_j,i is fixed (and vice versa). - m.arc_equality = pyo.Constraint(m.a_hat_prime, - m.time_step, - rule=INDPUtil.arc_equality_rule, - doc='Arc reconstruction equality') + m.arc_equality = pyo.Constraint( + m.a_hat_prime, + m.time_step, + rule=INDPUtil.arc_equality_rule, + doc="Arc reconstruction equality", + ) # Conservation of flow constraint. (2) in INDP paper. - m.flow_conserv_node = pyo.Constraint(m.delta_p_index_0, - m.time_step, - rule=INDPUtil.flow_conserv_node_rule, - doc='Flow conservation') + m.flow_conserv_node = pyo.Constraint( + m.delta_p_index_0, + m.time_step, + rule=INDPUtil.flow_conserv_node_rule, + doc="Flow conservation", + ) # Flow functionality constraints. - m.flow_in_functionality = pyo.Constraint(m.a_hat, m.time_step, rule=INDPUtil.flow_in_functionality_rule, - doc='Flow In Functionality') - m.flow_out_functionality = pyo.Constraint(m.a_hat, m.time_step, rule=INDPUtil.flow_out_functionality_rule, - doc='Flow Out Functionality') - m.flow_arc_functionality = pyo.Constraint(m.a_hat, m.time_step, rule=INDPUtil.flow_arc_functionality_rule, - doc='Flow Arc Functionality') + m.flow_in_functionality = pyo.Constraint( + m.a_hat, + m.time_step, + rule=INDPUtil.flow_in_functionality_rule, + doc="Flow In Functionality", + ) + m.flow_out_functionality = pyo.Constraint( + m.a_hat, + m.time_step, + rule=INDPUtil.flow_out_functionality_rule, + doc="Flow Out Functionality", + ) + m.flow_arc_functionality = pyo.Constraint( + m.a_hat, + m.time_step, + rule=INDPUtil.flow_arc_functionality_rule, + doc="Flow Arc Functionality", + ) # Resource availability constraints. 
- m.resource = pyo.Constraint(list(m.v_r.keys()), m.time_step, rule=INDPUtil.resource_rule, - doc='Resource availability') + m.resource = pyo.Constraint( + list(m.v_r.keys()), + m.time_step, + rule=INDPUtil.resource_rule, + doc="Resource availability", + ) # Interdependency constraints - m.interdependency = pyo.Constraint(m.n_hat_nodes, m.time_step, rule=INDPUtil.interdependency_rule, - doc='Interdependency') + m.interdependency = pyo.Constraint( + m.n_hat_nodes, + m.time_step, + rule=INDPUtil.interdependency_rule, + doc="Interdependency", + ) # Geographic space constraints if co_location: - m.node_geographic_space = pyo.Constraint(m.S_ids, m.n_hat_prime_nodes, m.time_step, - rule=INDPUtil.node_geographic_space_rule, - doc='Node Geographic space') - m.arc_geographic_space = pyo.Constraint(m.S_ids, m.a_hat_prime, m.time_step, - rule=INDPUtil.arc_geographic_space_rule, - doc='Arc Geographic space') - - '''Solve''' - num_cont_vars = len([v for v in m.component_data_objects(pyo.Var) if v.domain == pyo.NonNegativeReals]) - num_integer_vars = len([v for v in m.component_data_objects(pyo.Var) if v.domain == pyo.Binary]) + m.node_geographic_space = pyo.Constraint( + m.S_ids, + m.n_hat_prime_nodes, + m.time_step, + rule=INDPUtil.node_geographic_space_rule, + doc="Node Geographic space", + ) + m.arc_geographic_space = pyo.Constraint( + m.S_ids, + m.a_hat_prime, + m.time_step, + rule=INDPUtil.arc_geographic_space_rule, + doc="Arc Geographic space", + ) + + """Solve""" + num_cont_vars = len( + [ + v + for v in m.component_data_objects(pyo.Var) + if v.domain == pyo.NonNegativeReals + ] + ) + num_integer_vars = len( + [v for v in m.component_data_objects(pyo.Var) if v.domain == pyo.Binary] + ) solver_engine = self.get_parameter("solver_engine") if solver_engine is None: @@ -691,304 +1056,318 @@ def indp(self, N, v_r, T=1, layers=None, controlled_layers=None, functionality=N solver_time_limit = self.get_parameter("solver_time_limit") print( - "Solving... using %s solver (%d cont. vars, %d binary vars)" % - (solver_engine, num_cont_vars, num_integer_vars)) + "Solving... using %s solver (%d cont. vars, %d binary vars)" + % (solver_engine, num_cont_vars, num_integer_vars) + ) if solver_engine == "gurobi": solver = SolverFactory(solver_engine, timelimit=solver_time_limit) else: - solver = SolverFactory(solver_engine, timelimit=solver_time_limit, executable=solver_path) + solver = SolverFactory( + solver_engine, timelimit=solver_time_limit, executable=solver_path + ) solution = solver.solve(m) run_time = time.time() - start_time # Save results. - if solution.solver.termination_condition in [TerminationCondition.optimal, TerminationCondition.maxTimeLimit]: - if solution.solver.termination_condition == TerminationCondition.maxTimeLimit: - print('\nOptimizer time limit, gap = %1.3f\n' % solution.a.solution(0).gap) + if solution.solver.termination_condition in [ + TerminationCondition.optimal, + TerminationCondition.maxTimeLimit, + ]: + if ( + solution.solver.termination_condition + == TerminationCondition.maxTimeLimit + ): + print( + "\nOptimizer time limit, gap = %1.3f\n" % solution.a.solution(0).gap + ) results = INDPUtil.collect_results(m, controlled_layers, coloc=co_location) results.add_run_time(t, run_time) return [m, results] else: log_infeasible_constraints(m, log_expression=True, log_variables=True) if solution.solver.termination_condition == TerminationCondition.infeasible: - print(solution.solver.termination_condition, ": SOLUTION NOT FOUND. 
(Check data and/or violated " - "constraints in the infeasible_model.log).") + print( + solution.solver.termination_condition, + ": SOLUTION NOT FOUND. (Check data and/or violated " + "constraints in the infeasible_model.log).", + ) sys.exit() def get_spec(self): return { - 'name': 'INDP', - 'description': 'Interdependent Network Design Problem that models the restoration', - 'input_parameters': [ + "name": "INDP", + "description": "Interdependent Network Design Problem that models the restoration", + "input_parameters": [ { - 'id': 'network_type', - 'required': True, - 'description': 'type of the network, which is set to `from_csv` for Seaside networks. ' - 'e.g. from_csv, incore', - 'type': str + "id": "network_type", + "required": True, + "description": "type of the network, which is set to `from_csv` for Seaside networks. " + "e.g. from_csv, incore", + "type": str, }, { - 'id': 'MAGS', - 'required': True, - 'description': 'sets the earthquake return period.', - 'type': list + "id": "MAGS", + "required": True, + "description": "sets the earthquake return period.", + "type": list, }, { - 'id': 'sample_range', - 'required': True, - 'description': 'sets the range of sample scenarios to be analyzed', - 'type': range + "id": "sample_range", + "required": True, + "description": "sets the range of sample scenarios to be analyzed", + "type": range, }, { - 'id': 'dislocation_data_type', - 'required': False, - 'description': 'type of the dislocation data.', - 'type': str + "id": "dislocation_data_type", + "required": False, + "description": "type of the dislocation data.", + "type": str, }, { - 'id': 'return_model', - 'required': False, - 'description': 'type of the model for the return of the dislocated population. ' - 'Options: *step_function* and *linear*.', - 'type': str + "id": "return_model", + "required": False, + "description": "type of the model for the return of the dislocated population. " + "Options: *step_function* and *linear*.", + "type": str, }, { - 'id': 'testbed_name', - 'required': False, - 'description': 'sets the name of the testbed in analysis', - 'type': str + "id": "testbed_name", + "required": False, + "description": "sets the name of the testbed in analysis", + "type": str, }, { - 'id': 'extra_commodity', - 'required': True, - 'description': 'multi-commodity parameters dict', - 'type': dict + "id": "extra_commodity", + "required": True, + "description": "multi-commodity parameters dict", + "type": dict, }, { - 'id': 'RC', - 'required': True, - 'description': 'list of resource caps or the number of available resources in each step of the ' - 'analysis. Each item of the list is a dictionary whose items show the type of ' - 'resource and the available number of that type of resource. 
For example: ' - '* If `network_type`=*from_csv*, you have two options:* if, for example, ' - '`R_c`= [{"budget": 3}, {"budget": 6}], then the analysis is done for the cases ' - 'when there are 3 and 6 resources available of type "budget" ' - '(total resource assignment).* if, for example, `R_c`= [{"budget": {1:1, 2:1}}, ' - '{"budget": {1:1, 2:2}}, {"budget": {1:3, 2:3}}] and given there are 2 layers,' - ' then the analysis is done for the case where each layer gets 1 resource of ' - 'type "budget", AND the case where layer 1 gets 1 and layer 2 gets 2 resources of ' - 'type "budget", AND the case where each layer gets 3 resources of type ' - '"budget" (Prescribed resource for each layer).', - 'type': list + "id": "RC", + "required": True, + "description": "list of resource caps or the number of available resources in each step of the " + "analysis. Each item of the list is a dictionary whose items show the type of " + "resource and the available number of that type of resource. For example: " + "* If `network_type`=*from_csv*, you have two options:* if, for example, " + '`R_c`= [{"budget": 3}, {"budget": 6}], then the analysis is done for the cases ' + 'when there are 3 and 6 resources available of type "budget" ' + '(total resource assignment).* if, for example, `R_c`= [{"budget": {1:1, 2:1}}, ' + '{"budget": {1:1, 2:2}}, {"budget": {1:3, 2:3}}] and given there are 2 layers,' + " then the analysis is done for the case where each layer gets 1 resource of " + 'type "budget", AND the case where layer 1 gets 1 and layer 2 gets 2 resources of ' + 'type "budget", AND the case where each layer gets 3 resources of type ' + '"budget" (Prescribed resource for each layer).', + "type": list, }, { - 'id': 'layers', - 'required': True, - 'description': 'list of layers in the analysis', - 'type': list + "id": "layers", + "required": True, + "description": "list of layers in the analysis", + "type": list, }, { - 'id': 'method', - 'required': True, - 'description': 'There are two choices of method: 1. `INDP`: runs Interdependent Network ' - 'Restoration Problem (INDP). 2. `TDINDP`: runs time-dependent INDP (td-INDP). In ' - 'both cases, if "TIME_RESOURCE" is True, then the repair time for each element ' - 'is considered in devising the restoration plans', - 'type': str, + "id": "method", + "required": True, + "description": "There are two choices of method: 1. `INDP`: runs Interdependent Network " + "Restoration Problem (INDP). 2. `TDINDP`: runs time-dependent INDP (td-INDP). In " + 'both cases, if "TIME_RESOURCE" is True, then the repair time for each element ' + "is considered in devising the restoration plans", + "type": str, }, { - 'id': 't_steps', - 'required': False, - 'description': 'Number of time steps of the analysis', - 'type': int + "id": "t_steps", + "required": False, + "description": "Number of time steps of the analysis", + "type": int, }, { - 'id': 'time_resource', - 'required': False, - 'description': 'if TIME_RESOURCE is True, then the repair time for each element is ' - 'considered in devising the restoration plans', - 'type': bool + "id": "time_resource", + "required": False, + "description": "if TIME_RESOURCE is True, then the repair time for each element is " + "considered in devising the restoration plans", + "type": bool, }, { - 'id': 'save_model', - 'required': False, - 'description': 'If the optimization model should be saved to file. 
The default is False.', - 'type': bool + "id": "save_model", + "required": False, + "description": "If the optimization model should be saved to file. The default is False.", + "type": bool, }, { - 'id': 'solver_engine', - 'required': False, - 'description': "Solver to use for optimization model. Such as gurobi/glpk/scip, default to scip.", - 'type': str + "id": "solver_engine", + "required": False, + "description": "Solver to use for optimization model. Such as gurobi/glpk/scip, default to scip.", + "type": str, }, { - 'id': 'solver_path', - 'required': False, - 'description': "Solver to use for optimization model. Such as gurobi/glpk/scip, default to scip.", - 'type': str + "id": "solver_path", + "required": False, + "description": "Solver to use for optimization model. Such as gurobi/glpk/scip, default to scip.", + "type": str, }, { - 'id': 'solver_time_limit', - 'required': False, - 'description': "solver time limit in seconds", - 'type': int + "id": "solver_time_limit", + "required": False, + "description": "solver time limit in seconds", + "type": int, }, ], - 'input_datasets': [ + "input_datasets": [ { "id": "wf_repair_cost", "required": True, "description": "repair cost for each water facility", - "type": ["incore:repairCost"] + "type": ["incore:repairCost"], }, { - 'id': 'wf_restoration_time', + "id": "wf_restoration_time", "required": True, - 'description': 'recording repair time at certain functionality recovery for each class ' - 'and limit state.', - 'type': ['incore:waterFacilityRepairTime'] + "description": "recording repair time at certain functionality recovery for each class " + "and limit state.", + "type": ["incore:waterFacilityRepairTime"], }, { "id": "epf_repair_cost", "required": True, "description": "repair cost for each electric power facility", - "type": ["incore:repairCost"] + "type": ["incore:repairCost"], }, { - 'id': 'epf_restoration_time', + "id": "epf_restoration_time", "required": True, - 'description': 'recording repair time at certain functionality recovery for each class ' - 'and limit state.', - 'type': ['incore:epfRepairTime'] + "description": "recording repair time at certain functionality recovery for each class " + "and limit state.", + "type": ["incore:epfRepairTime"], }, { "id": "pipeline_repair_cost", "required": True, "description": "repair cost for each pipeline", - "type": ["incore:pipelineRepairCost"] + "type": ["incore:pipelineRepairCost"], }, { - 'id': 'pipeline_restoration_time', + "id": "pipeline_restoration_time", "required": True, - 'description': 'pipeline restoration times', - 'type': ['incore:pipelineRestorationVer1'] + "description": "pipeline restoration times", + "type": ["incore:pipelineRestorationVer1"], }, { - 'id': 'power_network', - 'required': True, - 'description': 'EPN Network Dataset', - 'type': ['incore:epnNetwork'], + "id": "power_network", + "required": True, + "description": "EPN Network Dataset", + "type": ["incore:epnNetwork"], }, { - 'id': 'water_network', - 'required': True, - 'description': 'Water Network Dataset', - 'type': ['incore:waterNetwork'], + "id": "water_network", + "required": True, + "description": "Water Network Dataset", + "type": ["incore:waterNetwork"], }, { - 'id': 'powerline_supply_demand_info', - 'required': True, - 'description': 'Supply and demand information for powerlines', - 'type': ['incore:powerLineSupplyDemandInfo'], + "id": "powerline_supply_demand_info", + "required": True, + "description": "Supply and demand information for powerlines", + "type": ["incore:powerLineSupplyDemandInfo"], 
}, { - 'id': 'epf_supply_demand_info', - 'required': True, - 'description': 'Supply and demand information for epfs', - 'type': ['incore:epfSupplyDemandInfo'], + "id": "epf_supply_demand_info", + "required": True, + "description": "Supply and demand information for epfs", + "type": ["incore:epfSupplyDemandInfo"], }, { - 'id': 'wf_supply_demand_info', - 'required': True, - 'description': 'Supply and demand information for water facilities', - 'type': ['incore:waterFacilitySupplyDemandInfo'], + "id": "wf_supply_demand_info", + "required": True, + "description": "Supply and demand information for water facilities", + "type": ["incore:waterFacilitySupplyDemandInfo"], }, { - 'id': 'pipeline_supply_demand_info', - 'required': True, - 'description': 'Supply and demand information for water pipelines', - 'type': ['incore:pipelineSupplyDemandInfo'], + "id": "pipeline_supply_demand_info", + "required": True, + "description": "Supply and demand information for water pipelines", + "type": ["incore:pipelineSupplyDemandInfo"], }, { "id": "interdep", "required": True, "description": "Interdepenency between water and electric power facilities", - "type": ["incore:interdep"] + "type": ["incore:interdep"], }, { "id": "wf_failure_state", "required": True, "description": "MCS failure state of water facilities", - "type": ["incore:sampleFailureState"] + "type": ["incore:sampleFailureState"], }, { "id": "wf_damage_state", "required": True, "description": "MCS damage state of water facilities", - "type": ["incore:sampleDamageState"] + "type": ["incore:sampleDamageState"], }, { "id": "pipeline_failure_state", "required": True, "description": "failure state of pipeline from pipeline functionality", - "type": ["incore:sampleFailureState"] + "type": ["incore:sampleFailureState"], }, { "id": "epf_failure_state", "required": True, "description": "MCS failure state of electric power facilities", - "type": ["incore:sampleFailureState"] + "type": ["incore:sampleFailureState"], }, { "id": "epf_damage_state", "required": True, "description": "MCS damage state of electric power facilities", - "type": ["incore:sampleDamageState"] + "type": ["incore:sampleDamageState"], }, { "id": "dt_params", "required": False, "description": "Parameters for population dislocation time", - "type": ["incore:dTParams"] + "type": ["incore:dTParams"], }, { "id": "pop_dislocation", "required": True, "description": "Population dislocation output", - "type": ["incore:popDislocation"] + "type": ["incore:popDislocation"], }, { "id": "bldgs2elec", "required": False, "description": "relation between building and electric power facility", - "type": ["incore:bldgs2elec"] + "type": ["incore:bldgs2elec"], }, { "id": "bldgs2wter", "required": False, "description": "relation between building and water facility", - "type": ["incore:bldgs2wter"] + "type": ["incore:bldgs2wter"], }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'action', - 'parent_type': '', - 'description': 'Restoration action plans', - 'type': 'incore:indpAction' + "id": "action", + "parent_type": "", + "description": "Restoration action plans", + "type": "incore:indpAction", }, { - 'id': 'cost', - 'parent_type': '', - 'description': 'Restoration cost plans', - 'type': 'incore:indpCost' + "id": "cost", + "parent_type": "", + "description": "Restoration cost plans", + "type": "incore:indpCost", }, { - 'id': 'runtime', - 'parent_type': '', - 'description': 'Restoration runtime plans', - 'type': 'incore:indpRuntime' - } - ] + "id": "runtime", + "parent_type": "", + "description": 
"Restoration runtime plans", + "type": "incore:indpRuntime", + }, + ], } diff --git a/pyincore/analyses/indp/indpcomponents.py b/pyincore/analyses/indp/indpcomponents.py index bc0b27f69..654d46fdf 100644 --- a/pyincore/analyses/indp/indpcomponents.py +++ b/pyincore/analyses/indp/indpcomponents.py @@ -56,8 +56,6 @@ def to_csv_string(self): """ comp_strings = [] for c in self.components: - comp = c[0] - supp = c[1] comp_string = "/".join(c[0]) comp_string += ":" + str(c[1]) comp_strings.append(comp_string) @@ -97,8 +95,8 @@ def calculate_components(clss, m, net, t=0, layers=None): for n in c: members.append(str(n[0]) + "." + str(n[1])) excess_supply = 0.0 - excess_supply += m.getVarByName('delta+_' + str(n) + "," + str(t)).x - excess_supply += -m.getVarByName('delta-_' + str(n) + "," + str(t)).x + excess_supply += m.getVarByName("delta+_" + str(n) + "," + str(t)).x + excess_supply += -m.getVarByName("delta-_" + str(n) + "," + str(t)).x total_excess_supply += excess_supply indp_components.add_component(members, total_excess_supply) return indp_components diff --git a/pyincore/analyses/indp/indpresults.py b/pyincore/analyses/indp/indpresults.py index 43067d836..fdd9e5f89 100644 --- a/pyincore/analyses/indp/indpresults.py +++ b/pyincore/analyses/indp/indpresults.py @@ -21,7 +21,17 @@ class INDPResults: results_layer : int Dictionary containing INDP results for each layer including actions, costs, run time, and components """ - cost_types = ["Space Prep", "Arc", "Node", "Over Supply", "Under Supply", "Flow", "Total", "Under Supply Perc"] + + cost_types = [ + "Space Prep", + "Arc", + "Node", + "Over Supply", + "Under Supply", + "Flow", + "Total", + "Under Supply Perc", + ] def __init__(self, layers=None): if layers is None: @@ -58,16 +68,22 @@ def extend(self, indp_result, t_offset=0, t_start=0, t_end=0): """ if t_end == 0: t_end = len(indp_result) - for new_t, t in zip([x + t_offset for x in range(t_end - t_start)], - [y + t_start for y in range(t_end - t_start)]): + for new_t, t in zip( + [x + t_offset for x in range(t_end - t_start)], + [y + t_start for y in range(t_end - t_start)], + ): self.results[new_t] = indp_result.results[t] if self.layers: if t_end == 0: t_end = len(indp_result[self.layers[0]]) for layer in indp_result.results_layer.keys(): - for new_t, t in zip([x + t_offset for x in range(t_end - t_start)], - [y + t_start for y in range(t_end - t_start)]): - self.results_layer[layer][new_t] = indp_result.results_layer[layer][t] + for new_t, t in zip( + [x + t_offset for x in range(t_end - t_start)], + [y + t_start for y in range(t_end - t_start)], + ): + self.results_layer[layer][new_t] = indp_result.results_layer[layer][ + t + ] def add_cost(self, t, cost_type, cost, cost_layer=None): """ @@ -94,18 +110,44 @@ def add_cost(self, t, cost_type, cost, cost_layer=None): cost_layer = {} if t not in self.results: self.results[t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results[t]['costs'][cost_type] = cost + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results[t]["costs"][cost_type] = cost if self.layers: for layer in 
cost_layer.keys(): if t not in self.results_layer[layer]: self.results_layer[layer][t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results_layer[layer][t]['costs'][cost_type] = cost_layer[layer] + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results_layer[layer][t]["costs"][cost_type] = cost_layer[layer] def add_run_time(self, t, run_time, save_layer=True): """ @@ -127,18 +169,44 @@ def add_run_time(self, t, run_time, save_layer=True): """ if t not in self.results: self.results[t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results[t]['run_time'] = run_time + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results[t]["run_time"] = run_time if self.layers and save_layer: for layer in self.layers: if t not in self.results_layer[layer]: self.results_layer[layer][t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results_layer[layer][t]['run_time'] = run_time + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results_layer[layer][t]["run_time"] = run_time def add_action(self, t, action, save_layer=True): """ @@ -160,18 +228,44 @@ def add_action(self, t, action, save_layer=True): """ if t not in self.results: self.results[t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results[t]['actions'].append(action) + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results[t]["actions"].append(action) if self.layers and save_layer: action_layer = int(action[-1]) if t not in self.results_layer[action_layer]: self.results_layer[action_layer][t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 
0.0} - self.results_layer[action_layer][t]['actions'].append(action) + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results_layer[action_layer][t]["actions"].append(action) def add_gc_size(self, t, gc_size): """ @@ -191,10 +285,23 @@ def add_gc_size(self, t, gc_size): """ if t not in self.results: self.results[t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results[t]['gc_size'] = gc_size + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results[t]["gc_size"] = gc_size def add_num_components(self, t, num_components): """ @@ -214,10 +321,23 @@ def add_num_components(self, t, num_components): """ if t not in self.results: self.results[t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results[t]['num_components'] = num_components + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results[t]["num_components"] = num_components def add_components(self, t, components): """ @@ -237,10 +357,23 @@ def add_components(self, t, components): """ if t not in self.results: self.results[t] = { - 'costs': {"Space Prep": 0.0, "Arc": 0.0, "Node": 0.0, "Over Supply": 0.0, "Under Supply": 0.0, - "Under Supply Perc": 0.0, "Flow": 0.0, "Total": 0.0}, 'actions': [], 'gc_size': 0, - 'num_components': 0, 'components': INDPComponents(), 'run_time': 0.0} - self.results[t]['components'] = components + "costs": { + "Space Prep": 0.0, + "Arc": 0.0, + "Node": 0.0, + "Over Supply": 0.0, + "Under Supply": 0.0, + "Under Supply Perc": 0.0, + "Flow": 0.0, + "Total": 0.0, + }, + "actions": [], + "gc_size": 0, + "num_components": 0, + "components": INDPComponents(), + "run_time": 0.0, + } + self.results[t]["components"] = components self.add_num_components(t, components.num_components) self.add_gc_size(t, components.gc_size) @@ -264,25 +397,76 @@ def to_csv_layer(self, out_dir, sample_num=1, suffix=""): """ for layer in self.layers: - action_file = out_dir + "/actions_" + str(sample_num) + "_L" + str(layer) + "_" + suffix + ".csv" - costs_file = out_dir + "/costs_" + str(sample_num) + "_L" + str(layer) + "_" + suffix + ".csv" - run_time_file = out_dir + "/run_time_" + str(sample_num) + "_L" + str(layer) + "_" + suffix + ".csv" - with open(action_file, 'w') as f: + action_file = ( + out_dir + + "/actions_" + + str(sample_num) + + "_L" + + str(layer) + + "_" + + suffix + + ".csv" + ) + costs_file = ( + out_dir + + "/costs_" + + str(sample_num) + + "_L" + + str(layer) + + "_" + + suffix + 
+ ".csv" + ) + run_time_file = ( + out_dir + + "/run_time_" + + str(sample_num) + + "_L" + + str(layer) + + "_" + + suffix + + ".csv" + ) + with open(action_file, "w") as f: f.write("t,action\n") for t in self.results_layer[layer]: - for a in self.results_layer[layer][t]['actions']: + for a in self.results_layer[layer][t]["actions"]: f.write(str(t) + "," + a + "\n") - with open(run_time_file, 'w') as f: + with open(run_time_file, "w") as f: f.write("t,run_time\n") for t in self.results_layer[layer]: - f.write(str(t) + "," + str(self.results_layer[layer][t]['run_time']) + "\n") - with open(costs_file, 'w') as f: - f.write("t,Space Prep,Arc,Node,Over Supply,Under Supply,Flow,Total,Under Supply Perc\n") + f.write( + str(t) + + "," + + str(self.results_layer[layer][t]["run_time"]) + + "\n" + ) + with open(costs_file, "w") as f: + f.write( + "t,Space Prep,Arc,Node,Over Supply,Under Supply,Flow,Total,Under Supply Perc\n" + ) for t in self.results_layer[layer]: - costs = self.results_layer[layer][t]['costs'] - f.write(str(t) + "," + str(costs["Space Prep"]) + "," + str(costs["Arc"]) + "," + str( - costs["Node"]) + "," + str(costs["Over Supply"]) + "," + str(costs["Under Supply"]) + "," + str( - costs["Flow"]) + "," + str(costs["Total"]) + "," + str(costs["Under Supply Perc"]) + "\n") + costs = self.results_layer[layer][t]["costs"] + f.write( + str(t) + + "," + + str(costs["Space Prep"]) + + "," + + str(costs["Arc"]) + + "," + + str(costs["Node"]) + + "," + + str(costs["Over Supply"]) + + "," + + str(costs["Under Supply"]) + + "," + + str(costs["Flow"]) + + "," + + str(costs["Total"]) + + "," + + str(costs["Under Supply Perc"]) + + "\n" + ) @classmethod def from_csv(clss, out_dir, sample_num=1, suffix=""): @@ -306,23 +490,25 @@ def from_csv(clss, out_dir, sample_num=1, suffix=""): """ action_file = out_dir + "/actions_" + str(sample_num) + "_" + suffix + ".csv" costs_file = out_dir + "/costs_" + str(sample_num) + "_" + suffix + ".csv" - perc_file = out_dir + "/percolation_" + str(sample_num) + "_" + suffix + ".csv" - comp_file = out_dir + "/components_" + str(sample_num) + "_" + suffix + ".csv" + # perc_file = out_dir + "/percolation_" + str(sample_num) + "_" + suffix + ".csv" + # comp_file = out_dir + "/components_" + str(sample_num) + "_" + suffix + ".csv" run_time_file = out_dir + "/run_time_" + str(sample_num) + "_" + suffix + ".csv" indp_result = INDPResults() - if os.path.isfile(action_file): # ..todo: component-related results are not currently added to the results. + if os.path.isfile( + action_file + ): # ..todo: component-related results are not currently added to the results. 
with open(action_file) as f: lines = f.readlines()[1:] for line in lines: - data = line.strip().split(',') + data = line.strip().split(",") t = int(data[0]) action = str.strip(data[1]) indp_result.add_action(t, action) with open(costs_file) as f: lines = f.readlines() - cost_types = lines[0].strip().split(',')[1:] + cost_types = lines[0].strip().split(",")[1:] for line in lines[1:]: - data = line.strip().split(',') + data = line.strip().split(",") t = int(data[0]) costs = data[1:] for ct in range(len(cost_types)): @@ -330,7 +516,7 @@ def from_csv(clss, out_dir, sample_num=1, suffix=""): with open(run_time_file) as f: lines = f.readlines() for line in lines[1:]: - data = line.strip().split(',') + data = line.strip().split(",") t = int(data[0]) run_time = data[1] indp_result.add_run_time(t, run_time) @@ -352,5 +538,5 @@ def from_csv(clss, out_dir, sample_num=1, suffix=""): # else: # print("Caution: No component.") else: - raise ValueError('File does not exist: ' + action_file) + raise ValueError("File does not exist: " + action_file) return indp_result diff --git a/pyincore/analyses/indp/indputil.py b/pyincore/analyses/indp/indputil.py index 96a67b0af..df8c3dc53 100644 --- a/pyincore/analyses/indp/indputil.py +++ b/pyincore/analyses/indp/indputil.py @@ -13,8 +13,8 @@ from pyincore.analyses.indp.indpresults import INDPResults import pandas as pd -class INDPUtil: +class INDPUtil: @staticmethod def get_resource_suffix(params): """ @@ -27,21 +27,34 @@ def get_resource_suffix(params): out_dir_suffix_res (str): The part of the suffix of result folders that pertains to resource cap(s). """ - out_dir_suffix_res = '' + out_dir_suffix_res = "" for rc, val in params["V"].items(): if isinstance(val, int): - if rc != '': + if rc != "": out_dir_suffix_res += rc[0] + str(val) else: out_dir_suffix_res += str(val) else: - out_dir_suffix_res += rc[0] + str(sum([lval for _, lval in val.items()])) + '_fixed_layer_Cap' + out_dir_suffix_res += ( + rc[0] + + str(sum([lval for _, lval in val.items()])) + + "_fixed_layer_Cap" + ) return out_dir_suffix_res @staticmethod - def time_resource_usage_curves(power_arcs, power_nodes, water_arcs, water_nodes, wf_restoration_time_sample, - wf_repair_cost_sample, pipeline_restoration_time, pipeline_repair_cost, - epf_restoration_time_sample, epf_repair_cost_sample): + def time_resource_usage_curves( + power_arcs, + power_nodes, + water_arcs, + water_nodes, + wf_restoration_time_sample, + wf_repair_cost_sample, + pipeline_restoration_time, + pipeline_repair_cost, + epf_restoration_time_sample, + epf_repair_cost_sample, + ): """ This module calculates the repair time for nodes and arcs for the current scenario based on their damage state, and writes them to the input files of INDP. Currently, it is only compatible with NIST testbeds. 
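A minimal sketch of the merge-and-fill pattern that the next hunk re-wraps in time_resource_usage_curves: each facility table is left-joined to the sampled repair cost and restoration time tables on "guid", and facilities with no match fall back to zero. The toy DataFrames and values below are hypothetical; only the join key and the column names ("guid", "repaircost", "repairtime", "budget") are taken from the code in this hunk.

import pandas as pd

# Hypothetical node table and per-sample repair outputs (illustration only;
# the real inputs come from the water facility repair cost / restoration time datasets).
water_nodes = pd.DataFrame({"guid": ["a", "b", "c"]})
wf_repair_cost_sample = pd.DataFrame(
    {"guid": ["a", "b"], "repaircost": [10.0, 5.0], "budget": [3.0, 2.0]}
)
wf_restoration_time_sample = pd.DataFrame({"guid": ["a"], "repairtime": [7.0]})

# Left-join both samples onto the node table, then copy the merged columns back,
# replacing unmatched rows with 0 (the analysis reaches the same result with
# fillna(0, inplace=True) after the assignment).
_water_nodes = water_nodes.merge(wf_repair_cost_sample, on="guid", how="left").merge(
    wf_restoration_time_sample, on="guid", how="left"
)
water_nodes["p_time"] = _water_nodes["repairtime"].fillna(0)
water_nodes["p_budget"] = _water_nodes["budget"].fillna(0)
water_nodes["q"] = _water_nodes["repaircost"].fillna(0)

print(water_nodes)  # node "c" ends up with p_time == p_budget == q == 0

The same pattern is applied to power nodes and water arcs in the hunk below; power arcs are simply assigned zero repair time, budget, and cost.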
@@ -65,42 +78,52 @@ def time_resource_usage_curves(power_arcs, power_nodes, water_arcs, water_nodes, power_arcs: """ - _water_nodes = water_nodes.merge(wf_repair_cost_sample, on='guid', how='left').merge( - wf_restoration_time_sample, on='guid', how='left') - water_nodes['p_time'] = _water_nodes['repairtime'] - water_nodes['p_time'].fillna(0, inplace=True) - water_nodes['p_budget'] = _water_nodes['budget'] - water_nodes['p_budget'].fillna(0, inplace=True) - water_nodes['q'] = _water_nodes['repaircost'] - water_nodes['q'].fillna(0, inplace=True) - - _power_nodes = power_nodes.merge(epf_repair_cost_sample, on='guid', how='left').merge( - epf_restoration_time_sample, on='guid', how='left') - power_nodes['p_time'] = _power_nodes['repairtime'] - power_nodes['p_time'].fillna(0, inplace=True) - power_nodes['p_budget'] = _power_nodes['budget'] - power_nodes['p_budget'].fillna(0, inplace=True) - power_nodes['q'] = _power_nodes['repaircost'] - power_nodes['q'].fillna(0, inplace=True) - - _water_arcs = water_arcs.merge(pipeline_repair_cost, on='guid', how='left').merge( - pipeline_restoration_time, on='guid', how='left') - water_arcs['h_time'] = _water_arcs["repair_time"] - water_arcs['h_time'].fillna(0, inplace=True) - water_arcs['h_budget'] = _water_arcs['budget'].astype(float) - water_arcs['h_budget'].fillna(0, inplace=True) - water_arcs['f'] = _water_arcs['repaircost'].astype(float) - water_arcs['f'].fillna(0, inplace=True) - - power_arcs['h_time'] = 0 - power_arcs['h_budget'] = 0 - power_arcs['f'] = 0 + _water_nodes = water_nodes.merge( + wf_repair_cost_sample, on="guid", how="left" + ).merge(wf_restoration_time_sample, on="guid", how="left") + water_nodes["p_time"] = _water_nodes["repairtime"] + water_nodes["p_time"].fillna(0, inplace=True) + water_nodes["p_budget"] = _water_nodes["budget"] + water_nodes["p_budget"].fillna(0, inplace=True) + water_nodes["q"] = _water_nodes["repaircost"] + water_nodes["q"].fillna(0, inplace=True) + + _power_nodes = power_nodes.merge( + epf_repair_cost_sample, on="guid", how="left" + ).merge(epf_restoration_time_sample, on="guid", how="left") + power_nodes["p_time"] = _power_nodes["repairtime"] + power_nodes["p_time"].fillna(0, inplace=True) + power_nodes["p_budget"] = _power_nodes["budget"] + power_nodes["p_budget"].fillna(0, inplace=True) + power_nodes["q"] = _power_nodes["repaircost"] + power_nodes["q"].fillna(0, inplace=True) + + _water_arcs = water_arcs.merge( + pipeline_repair_cost, on="guid", how="left" + ).merge(pipeline_restoration_time, on="guid", how="left") + water_arcs["h_time"] = _water_arcs["repair_time"] + water_arcs["h_time"].fillna(0, inplace=True) + water_arcs["h_budget"] = _water_arcs["budget"].astype(float) + water_arcs["h_budget"].fillna(0, inplace=True) + water_arcs["f"] = _water_arcs["repaircost"].astype(float) + water_arcs["f"].fillna(0, inplace=True) + + power_arcs["h_time"] = 0 + power_arcs["h_budget"] = 0 + power_arcs["f"] = 0 return water_nodes, water_arcs, power_nodes, power_arcs @staticmethod - def initialize_network(power_nodes, power_arcs, water_nodes, water_arcs, interdep, cost_scale=1.0, - extra_commodity=None): + def initialize_network( + power_nodes, + power_arcs, + water_nodes, + water_arcs, + interdep, + cost_scale=1.0, + extra_commodity=None, + ): """ This function initializes a :class:`~infrastructure.InfrastructureNetwork` object based on network data. 
@@ -113,17 +136,19 @@ def initialize_network(power_nodes, power_arcs, water_nodes, water_arcs, interde interdep_net (class):`~infrastructure.InfrastructureNetwork` The object containing the network data. """ - interdep_net = InfrastructureUtil.load_infrastructure_array_format_extended(power_nodes, - power_arcs, - water_nodes, - water_arcs, - interdep, - cost_scale=cost_scale, - extra_commodity=extra_commodity) + interdep_net = InfrastructureUtil.load_infrastructure_array_format_extended( + power_nodes, + power_arcs, + water_nodes, + water_arcs, + interdep, + cost_scale=cost_scale, + extra_commodity=extra_commodity, + ) return interdep_net @staticmethod - def save_indp_model_to_file(model, out_model_dir, t, layer=0, suffix=''): + def save_indp_model_to_file(model, out_model_dir, t, layer=0, suffix=""): """ This function saves pyomo optimization model to file. @@ -149,18 +174,18 @@ def save_indp_model_to_file(model, out_model_dir, t, layer=0, suffix=''): os.makedirs(out_model_dir) # Write models to file l_name = "/Model_t%d_l%d_%s.txt" % (t, layer, suffix) - file_id = open(out_model_dir + l_name, 'w') + file_id = open(out_model_dir + l_name, "w") model.pprint(ostream=file_id) file_id.close() # Write solution to file s_name = "/Solution_t%d_l%d_%s.txt" % (t, layer, suffix) - file_id = open(out_model_dir + s_name, 'w') + file_id = open(out_model_dir + s_name, "w") for vv in model.component_data_objects(pyo.Var): if vv.value: - file_id.write('%s %g\n' % (str(vv), vv.value)) + file_id.write("%s %g\n" % (str(vv), vv.value)) else: - file_id.write('%s NONE\n' % (str(vv))) - file_id.write('Obj: %g' % value(model.Obj)) + file_id.write("%s NONE\n" % (str(vv))) + file_id.write("Obj: %g" % value(model.Obj)) file_id.close() @staticmethod @@ -183,19 +208,19 @@ def apply_recovery(N, indp_results, t): None. """ - for action in indp_results[t]['actions']: + for action in indp_results[t]["actions"]: if "/" in action: # Edge recovery action. data = action.split("/") src = tuple([int(x) for x in data[0].split(".")]) dst = tuple([int(x) for x in data[1].split(".")]) - N.G[src][dst]['data']['inf_data'].functionality = 1.0 + N.G[src][dst]["data"]["inf_data"].functionality = 1.0 else: # Node recovery action. node = tuple([int(x) for x in action.split(".")]) # print "Applying recovery:",node - N.G.nodes[node]['data']['inf_data'].repaired = 1.0 - N.G.nodes[node]['data']['inf_data'].functionality = 1.0 + N.G.nodes[node]["data"]["inf_data"].repaired = 1.0 + N.G.nodes[node]["data"]["inf_data"].functionality = 1.0 @staticmethod def collect_results(model, controlled_layers, coloc=True): @@ -224,7 +249,7 @@ def collect_results(model, controlled_layers, coloc=True): total_demand = 0.0 total_demand_layer = {layer: 0.0 for layer in layers} for n, d in model.n_hat.nodes(data=True): - demand_value = d['data']['inf_data'].demand + demand_value = d["data"]["inf_data"].demand if demand_value < 0: total_demand += demand_value total_demand_layer[n[1]] += demand_value @@ -242,7 +267,9 @@ def collect_results(model, controlled_layers, coloc=True): over_supp_cost_layer = {layer: 0.0 for layer in layers} under_supp_cost_layer = {layer: 0.0 for layer in layers} under_supp_layer = {layer: 0.0 for layer in layers} - space_prep_cost_layer = {layer: 0.0 for layer in layers} # !!! populate this for each layer + space_prep_cost_layer = { + layer: 0.0 for layer in layers + } # !!! populate this for each layer # Record node recovery actions. 
for n in model.n_hat_prime_nodes: if model.T == 1: @@ -266,10 +293,12 @@ def collect_results(model, controlled_layers, coloc=True): for s in model.S.value: if model.z[s.id, t].value: space_prep_cost += s.cost * model.z[s.id, t].value - indp_results.add_cost(t, "Space Prep", space_prep_cost, space_prep_cost_layer) + indp_results.add_cost( + t, "Space Prep", space_prep_cost, space_prep_cost_layer + ) # Calculate arc preparation costs. for i, k, j, kb in model.a_hat_prime: - a = model.n_hat[i, k][j, kb]['data']['inf_data'] + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] if model.T == 1: arc_state = model.y[i, k, j, kb, t].value else: @@ -279,7 +308,7 @@ def collect_results(model, controlled_layers, coloc=True): indp_results.add_cost(t, "Arc", arc_cost, arc_cost_layer) # Calculate node preparation costs. for n in model.n_hat_prime_nodes: - d = model.n_hat.nodes[n]['data']['inf_data'] + d = model.n_hat.nodes[n]["data"]["inf_data"] if model.T == 1: node_state = model.w[n, t].value else: @@ -289,35 +318,76 @@ def collect_results(model, controlled_layers, coloc=True): indp_results.add_cost(t, "Node", node_cost, node_cost_layer) # Calculate under/oversupply costs. for n, d in model.n_hat.nodes(data=True): - over_supp_cost += d['data']['inf_data'].oversupply_penalty * model.delta_p[n, 'b', t].value - over_supp_cost_layer[n[1]] += d['data']['inf_data'].oversupply_penalty * model.delta_p[n, 'b', t].value - under_supp += model.delta_m[n, 'b', t].value - under_supp_layer[n[1]] += model.delta_m[n, 'b', t].value / total_demand_layer[n[1]] - under_supp_cost += d['data']['inf_data'].undersupply_penalty * model.delta_m[n, 'b', t].value - under_supp_cost_layer[n[1]] += d['data']['inf_data'].undersupply_penalty * model.delta_m[ - n, 'b', t].value - indp_results.add_cost(t, "Over Supply", over_supp_cost, over_supp_cost_layer) - indp_results.add_cost(t, "Under Supply", under_supp_cost, under_supp_cost_layer) - indp_results.add_cost(t, "Under Supply Perc", under_supp / total_demand, under_supp_layer) + over_supp_cost += ( + d["data"]["inf_data"].oversupply_penalty + * model.delta_p[n, "b", t].value + ) + over_supp_cost_layer[n[1]] += ( + d["data"]["inf_data"].oversupply_penalty + * model.delta_p[n, "b", t].value + ) + under_supp += model.delta_m[n, "b", t].value + under_supp_layer[n[1]] += ( + model.delta_m[n, "b", t].value / total_demand_layer[n[1]] + ) + under_supp_cost += ( + d["data"]["inf_data"].undersupply_penalty + * model.delta_m[n, "b", t].value + ) + under_supp_cost_layer[n[1]] += ( + d["data"]["inf_data"].undersupply_penalty + * model.delta_m[n, "b", t].value + ) + indp_results.add_cost( + t, "Over Supply", over_supp_cost, over_supp_cost_layer + ) + indp_results.add_cost( + t, "Under Supply", under_supp_cost, under_supp_cost_layer + ) + indp_results.add_cost( + t, "Under Supply Perc", under_supp / total_demand, under_supp_layer + ) # Calculate flow costs. for i, k, j, kb in model.a_hat: - a = model.n_hat[i, k][j, kb]['data']['inf_data'] - flow_cost += a.flow_cost * model.x[i, k, j, kb, 'b', t].value - flow_cost_layer[k] += a.flow_cost * model.x[i, k, j, kb, 'b', t].value + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] + flow_cost += a.flow_cost * model.x[i, k, j, kb, "b", t].value + flow_cost_layer[k] += a.flow_cost * model.x[i, k, j, kb, "b", t].value indp_results.add_cost(t, "Flow", flow_cost, flow_cost_layer) # Calculate total costs. 
total_lyr = {} total_nd_lyr = {} for layer in layers: - total_lyr[layer] = flow_cost_layer[layer] + arc_cost_layer[layer] + node_cost_layer[layer] + \ - over_supp_cost_layer[layer] + under_supp_cost_layer[layer] + \ - space_prep_cost_layer[layer] - total_nd_lyr[layer] = space_prep_cost_layer[layer] + arc_cost_layer[layer] + flow_cost \ - + node_cost_layer[layer] - indp_results.add_cost(t, "Total", flow_cost + arc_cost + node_cost + over_supp_cost + under_supp_cost + - space_prep_cost, total_lyr) - indp_results.add_cost(t, "Total no disconnection", space_prep_cost + arc_cost + flow_cost + node_cost, - total_nd_lyr) + total_lyr[layer] = ( + flow_cost_layer[layer] + + arc_cost_layer[layer] + + node_cost_layer[layer] + + over_supp_cost_layer[layer] + + under_supp_cost_layer[layer] + + space_prep_cost_layer[layer] + ) + total_nd_lyr[layer] = ( + space_prep_cost_layer[layer] + + arc_cost_layer[layer] + + flow_cost + + node_cost_layer[layer] + ) + indp_results.add_cost( + t, + "Total", + flow_cost + + arc_cost + + node_cost + + over_supp_cost + + under_supp_cost + + space_prep_cost, + total_lyr, + ) + indp_results.add_cost( + t, + "Total no disconnection", + space_prep_cost + arc_cost + flow_cost + node_cost, + total_nd_lyr, + ) return indp_results @staticmethod @@ -331,14 +401,20 @@ def initial_state_arc_rule(model, i, k, j, kb): @staticmethod def time_dependent_node_rule(model, i, k, t): if t > 0: - return sum(model.w_tilde[i, k, t_p] for t_p in range(1, t + 1) if t > 0) >= model.w[i, k, t] + return ( + sum(model.w_tilde[i, k, t_p] for t_p in range(1, t + 1) if t > 0) + >= model.w[i, k, t] + ) else: return pyo.Constraint.Skip @staticmethod def time_dependent_arc_rule(model, i, k, j, kb, t): if t > 0: - return sum(model.y_tilde[i, k, j, kb, t_p] for t_p in range(1, t + 1) if t > 0) >= model.y[i, k, j, kb, t] + return ( + sum(model.y_tilde[i, k, j, kb, t_p] for t_p in range(1, t + 1) if t > 0) + >= model.y[i, k, j, kb, t] + ) else: return pyo.Constraint.Skip @@ -348,8 +424,10 @@ def arc_equality_rule(model, i, k, j, kb, t): if model.T == 1: return model.y[i, k, j, kb, t] == model.y[j, kb, i, k, t] else: - return model.y[i, k, j, kb, t] == model.y[j, kb, i, k, t], \ - model.y_tilde[i, k, j, kb, t] == model.y_tilde[j, kb, i, k, t] + return ( + model.y[i, k, j, kb, t] == model.y[j, kb, i, k, t], + model.y_tilde[i, k, j, kb, t] == model.y_tilde[j, kb, i, k, t], + ) except KeyError: return pyo.Constraint.Skip @@ -360,63 +438,93 @@ def flow_conserv_node_rule(model, i, k, layer, t): in_flow_constr = 0 demand_constr = 0 for u, v, a in model.n_hat.out_edges((i, k), data=True): - if layer == 'b' or layer in a['data']['inf_data'].extra_com.keys(): + if layer == "b" or layer in a["data"]["inf_data"].extra_com.keys(): out_flow_constr += model.x[u, v, layer, t] for u, v, a in model.n_hat.in_edges((i, k), data=True): - if layer == 'b' or layer in a['data']['inf_data'].extra_com.keys(): + if layer == "b" or layer in a["data"]["inf_data"].extra_com.keys(): in_flow_constr += model.x[u, v, layer, t] - if layer == 'b': - demand_constr += d['data']['inf_data'].demand - model.delta_p[i, k, layer, t] \ - + model.delta_m[i, k, layer, t] + if layer == "b": + demand_constr += ( + d["data"]["inf_data"].demand + - model.delta_p[i, k, layer, t] + + model.delta_m[i, k, layer, t] + ) else: - demand_constr += d['data']['inf_data'].extra_com[layer]['demand'] - model.delta_p[i, k, layer, t] + \ - model.delta_m[i, k, layer, t] + demand_constr += ( + d["data"]["inf_data"].extra_com[layer]["demand"] + - model.delta_p[i, k, layer, t] 
+ + model.delta_m[i, k, layer, t] + ) return out_flow_constr - in_flow_constr == demand_constr @staticmethod def flow_in_functionality_rule(model, i, k, j, kb, t): if not model.functionality: - interdep_nodes_list = model.interdep_nodes.keys() # Interdependent nodes with a damaged dependee node + interdep_nodes_list = ( + model.interdep_nodes.keys() + ) # Interdependent nodes with a damaged dependee node else: - interdep_nodes_list = model.interdep_nodes[t].keys() # Interdependent nodes with a damaged dependee node - a = model.n_hat[i, k][j, kb]['data']['inf_data'] - lhs = model.x[i, k, j, kb, 'b', t] + interdep_nodes_list = model.interdep_nodes[ + t + ].keys() # Interdependent nodes with a damaged dependee node + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] + lhs = model.x[i, k, j, kb, "b", t] for layer in a.extra_com.keys(): lhs += model.x[i, k, j, kb, layer, t] if ((i, k) in model.n_hat_prime_nodes) | ((i, k) in interdep_nodes_list): return lhs <= a.capacity * model.w[i, k, t] else: - return lhs <= a.capacity * model.N.G.nodes[(i, k)]['data']['inf_data'].functionality + return ( + lhs + <= a.capacity + * model.N.G.nodes[(i, k)]["data"]["inf_data"].functionality + ) @staticmethod def flow_out_functionality_rule(model, i, k, j, kb, t): if not model.functionality: - interdep_nodes_list = model.interdep_nodes.keys() # Interdependent nodes with a damaged dependee node + interdep_nodes_list = ( + model.interdep_nodes.keys() + ) # Interdependent nodes with a damaged dependee node else: - interdep_nodes_list = model.interdep_nodes[t].keys() # Interdependent nodes with a damaged dependee node - a = model.n_hat[i, k][j, kb]['data']['inf_data'] - lhs = model.x[i, k, j, kb, 'b', t] + interdep_nodes_list = model.interdep_nodes[ + t + ].keys() # Interdependent nodes with a damaged dependee node + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] + lhs = model.x[i, k, j, kb, "b", t] for layer in a.extra_com.keys(): lhs += model.x[i, k, j, kb, layer, t] if ((j, kb) in model.n_hat_prime_nodes) | ((j, kb) in interdep_nodes_list): return lhs <= a.capacity * model.w[j, kb, t] else: - return lhs <= a.capacity * model.N.G.nodes[(j, kb)]['data']['inf_data'].functionality + return ( + lhs + <= a.capacity + * model.N.G.nodes[(j, kb)]["data"]["inf_data"].functionality + ) @staticmethod def flow_arc_functionality_rule(model, i, k, j, kb, t): if not model.functionality: - interdep_nodes_list = model.interdep_nodes.keys() # Interdependent nodes with a damaged dependee node + _ = ( + model.interdep_nodes.keys() + ) # Interdependent nodes with a damaged dependee node else: - interdep_nodes_list = model.interdep_nodes[t].keys() # Interdependent nodes with a damaged dependee node - a = model.n_hat[i, k][j, kb]['data']['inf_data'] - lhs = model.x[i, k, j, kb, 'b', t] + _ = model.interdep_nodes[ + t + ].keys() # Interdependent nodes with a damaged dependee node + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] + lhs = model.x[i, k, j, kb, "b", t] for layer in a.extra_com.keys(): lhs += model.x[i, k, j, kb, layer, t] if (i, k, j, kb) in model.a_hat_prime: return lhs <= a.capacity * model.y[i, k, j, kb, t] else: - return lhs <= a.capacity * model.N.G[(i, k)][(j, kb)]['data']['inf_data'].functionality + return ( + lhs + <= a.capacity + * model.N.G[(i, k)][(j, kb)]["data"]["inf_data"].functionality + ) @staticmethod def resource_rule(model, rc, t): @@ -427,15 +535,17 @@ def resource_rule(model, rc, t): else: is_sep_res = True total_resource = sum([lval for _, lval in resource_dict.items()]) - assert 
len(resource_dict.keys()) == len(model.layers), "The number of resource values does not match the \ + assert len(resource_dict.keys()) == len( + model.layers + ), "The number of resource values does not match the \ number of layers." resource_left_constr = 0 if is_sep_res: res_left_constr_sep = {key: 0 for key in resource_dict.keys()} for i, k, j, kb in model.a_hat_prime: - a = model.n_hat[i, k][j, kb]['data']['inf_data'] + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] idx_lyr = a.layer - res_use = 0.5 * a.resource_usage['h_' + rc] + res_use = 0.5 * a.resource_usage["h_" + rc] if model.T == 1: resource_left_constr += res_use * model.y[i, k, j, kb, t] if is_sep_res: @@ -443,11 +553,13 @@ def resource_rule(model, rc, t): else: resource_left_constr += res_use * model.y_tilde[i, k, j, kb, t] if is_sep_res: - res_left_constr_sep[idx_lyr] += res_use * model.y_tilde[i, k, j, kb, t] + res_left_constr_sep[idx_lyr] += ( + res_use * model.y_tilde[i, k, j, kb, t] + ) for n in model.n_hat_prime_nodes: idx_lyr = n[1] - d = model.n_hat.nodes[n]['data']['inf_data'] - res_use = d.resource_usage['p_' + rc] + d = model.n_hat.nodes[n]["data"]["inf_data"] + res_use = d.resource_usage["p_" + rc] if model.T == 1: resource_left_constr += res_use * model.w[n, t] if is_sep_res: @@ -460,8 +572,9 @@ def resource_rule(model, rc, t): if not is_sep_res: return resource_left_constr <= total_resource else: - return resource_left_constr <= total_resource, [res_left_constr_sep[key] <= lval for key, lval in - resource_dict] + return resource_left_constr <= total_resource, [ + res_left_constr_sep[key] <= lval for key, lval in resource_dict + ] else: return pyo.Constraint.Skip @@ -499,7 +612,7 @@ def interdependency_rule(model, i, k, t): @staticmethod def node_geographic_space_rule(model, s, i, k, t): - d = model.n_hat.nodes[(i, k)]['data']['inf_data'] + d = model.n_hat.nodes[(i, k)]["data"]["inf_data"] if d.in_space(s): if model.T == 1: return model.w[(i, k), t] * d.in_space(s) <= model.z[s, t] @@ -509,7 +622,7 @@ def node_geographic_space_rule(model, s, i, k, t): @staticmethod def arc_geographic_space_rule(model, s, i, k, j, kb, t): - a = model.n_hat[i, k][j, kb]['data']['inf_data'] + a = model.n_hat[i, k][j, kb]["data"]["inf_data"] if a.in_space(s): if model.T == 1: return model.y[i, k, j, kb, t] * a.in_space(s) <= model.z[s, t] @@ -544,24 +657,24 @@ def collect_solution_pool(m, T, n_hat_prime, a_hat_prime): sol_pool_results = {} current_sol_count = 0 for sol in range(m.SolCount): - m.setParam('SolutionNumber', sol) + m.setParam("SolutionNumber", sol) # print(m.PoolObjVal) - sol_pool_results[sol] = {'nodes': [], 'arcs': []} + sol_pool_results[sol] = {"nodes": [], "arcs": []} for t in range(T): # Record node recovery actions. for n, d in n_hat_prime: - node_var = 'w_tilde_' + str(n) + "," + str(t) + node_var = "w_tilde_" + str(n) + "," + str(t) if T == 1: - node_var = 'w_' + str(n) + "," + str(t) + node_var = "w_" + str(n) + "," + str(t) if round(m.getVarByName(node_var).xn) == 1: - sol_pool_results[sol]['nodes'].append(n) + sol_pool_results[sol]["nodes"].append(n) # Record edge recovery actions. 
for u, v, a in a_hat_prime: - arc_var = 'y_tilde_' + str(u) + "," + str(v) + "," + str(t) + arc_var = "y_tilde_" + str(u) + "," + str(v) + "," + str(t) if T == 1: - arc_var = 'y_' + str(u) + "," + str(v) + "," + str(t) + arc_var = "y_" + str(u) + "," + str(v) + "," + str(t) if round(m.getVarByName(arc_var).x) == 1: - sol_pool_results[sol]['arcs'].append((u, v)) + sol_pool_results[sol]["arcs"].append((u, v)) if sol > 0 and sol_pool_results[sol] == sol_pool_results[current_sol_count]: del sol_pool_results[sol] elif sol > 0: @@ -569,75 +682,144 @@ def collect_solution_pool(m, T, n_hat_prime, a_hat_prime): return sol_pool_results @staticmethod - def generate_intial_node_failure_state(wf_failure_state_df, epf_failure_state_df, water_nodes, - power_nodes, sample_range): + def generate_intial_node_failure_state( + wf_failure_state_df, + epf_failure_state_df, + water_nodes, + power_nodes, + sample_range, + ): # todo change those hard coding - network_name = {'Water': 1, 'Power': 3} + network_name = {"Water": 1, "Power": 3} combined_node_failed_states = pd.DataFrame() - for node_fail_state, node_data, network_code in zip([wf_failure_state_df, epf_failure_state_df], - [water_nodes, power_nodes], - network_name.values()): - node_fail_state[[str(x) for x in sample_range]] = \ - node_fail_state['failure'].str.split(',', expand=True).iloc[:, sample_range.start:sample_range.stop] - node_fail_state = node_fail_state.drop(columns=['failure']) - node_fail_state['name'] = 'nan' + for node_fail_state, node_data, network_code in zip( + [wf_failure_state_df, epf_failure_state_df], + [water_nodes, power_nodes], + network_name.values(), + ): + node_fail_state[[str(x) for x in sample_range]] = ( + node_fail_state["failure"] + .str.split(",", expand=True) + .iloc[:, sample_range.start : sample_range.stop] + ) + node_fail_state = node_fail_state.drop(columns=["failure"]) + node_fail_state["name"] = "nan" for index, row in node_data.iterrows(): - node_name = '(' + str(int(row['nodenwid'])) + ',' + str(network_code) + ')' - if not pd.isna(row['guid']): - node_fail_state.loc[node_fail_state['guid'] == row['guid'], 'name'] = node_name + node_name = ( + "(" + str(int(row["nodenwid"])) + "," + str(network_code) + ")" + ) + if not pd.isna(row["guid"]): + node_fail_state.loc[ + node_fail_state["guid"] == row["guid"], "name" + ] = node_name else: - temp_dict = {**{'name': node_name, 'guid': 'nan'}, **{str(x): 1 for x in sample_range}} - node_fail_state = pd.concat([node_fail_state, pd.DataFrame([temp_dict])], ignore_index=True) - combined_node_failed_states = pd.concat([combined_node_failed_states, node_fail_state]) + temp_dict = { + **{"name": node_name, "guid": "nan"}, + **{str(x): 1 for x in sample_range}, + } + node_fail_state = pd.concat( + [node_fail_state, pd.DataFrame([temp_dict])], ignore_index=True + ) + combined_node_failed_states = pd.concat( + [combined_node_failed_states, node_fail_state] + ) return combined_node_failed_states.dropna(subset=["name"]) @staticmethod - def generate_intial_link_failure_state(pipeline_failure_state_df, water_arcs, power_arcs, sample_range): + def generate_intial_link_failure_state( + pipeline_failure_state_df, water_arcs, power_arcs, sample_range + ): # TODO remove hardcoded - network_name = {'Water': 1, 'Power': 3} + network_name = {"Water": 1, "Power": 3} combined_arc_failed_states = pd.DataFrame() powerline_failure_state_df = pd.DataFrame() - powerline_failure_state_df['name'] = 'nan' + powerline_failure_state_df["name"] = "nan" for index, row in power_arcs.iterrows(): temp_dict 
= {str(x): 1 for x in sample_range} - temp_dict['guid'] = row['guid'] - powerline_failure_state_df = pd.concat([powerline_failure_state_df, pd.DataFrame([temp_dict])], - ignore_index=True) - s_node = int(row['fromnode']) - e_node = int(row['tonode']) - arc_name = '((' + str(s_node) + ',' + str(network_name["Power"]) + '),(' + str(e_node) + ',' + str( - network_name["Power"]) + '))' - powerline_failure_state_df.loc[powerline_failure_state_df['guid'] == row['guid'], 'name'] = arc_name - - pipeline_failure_state_df[[str(x) for x in sample_range]] = \ - pipeline_failure_state_df['failure'].str.split(',', expand=True).iloc[:, - sample_range.start:sample_range.stop] - pipeline_failure_state_df['name'] = 'nan' - pipeline_failure_state_df = pipeline_failure_state_df.drop(columns=['failure']) + temp_dict["guid"] = row["guid"] + powerline_failure_state_df = pd.concat( + [powerline_failure_state_df, pd.DataFrame([temp_dict])], + ignore_index=True, + ) + s_node = int(row["fromnode"]) + e_node = int(row["tonode"]) + arc_name = ( + "((" + + str(s_node) + + "," + + str(network_name["Power"]) + + "),(" + + str(e_node) + + "," + + str(network_name["Power"]) + + "))" + ) + powerline_failure_state_df.loc[ + powerline_failure_state_df["guid"] == row["guid"], "name" + ] = arc_name + + pipeline_failure_state_df[[str(x) for x in sample_range]] = ( + pipeline_failure_state_df["failure"] + .str.split(",", expand=True) + .iloc[:, sample_range.start : sample_range.stop] + ) + pipeline_failure_state_df["name"] = "nan" + pipeline_failure_state_df = pipeline_failure_state_df.drop(columns=["failure"]) for index, row in water_arcs.iterrows(): - s_node = int(row['fromnode']) - e_node = int(row['tonode']) - arc_name = '((' + str(s_node) + ',' + str(network_name["Water"]) + '),(' + str(e_node) + ',' + str( - network_name["Water"]) + '))' - if not pd.isna(row['guid']): - pipeline_failure_state_df.loc[pipeline_failure_state_df['guid'] == row['guid'], 'name'] = arc_name + s_node = int(row["fromnode"]) + e_node = int(row["tonode"]) + arc_name = ( + "((" + + str(s_node) + + "," + + str(network_name["Water"]) + + "),(" + + str(e_node) + + "," + + str(network_name["Water"]) + + "))" + ) + if not pd.isna(row["guid"]): + pipeline_failure_state_df.loc[ + pipeline_failure_state_df["guid"] == row["guid"], "name" + ] = arc_name else: - temp_dict = {**{'name': arc_name, 'guid': 'nan'}, **{str(x): 1 for x in sample_range}} - pipeline_failure_state_df = pd.concat([pipeline_failure_state_df, pd.DataFrame([temp_dict])], - ignore_index=True) - - combined_arc_failed_states = pd.concat([combined_arc_failed_states, powerline_failure_state_df]) - combined_arc_failed_states = pd.concat([combined_arc_failed_states, pipeline_failure_state_df]) + temp_dict = { + **{"name": arc_name, "guid": "nan"}, + **{str(x): 1 for x in sample_range}, + } + pipeline_failure_state_df = pd.concat( + [pipeline_failure_state_df, pd.DataFrame([temp_dict])], + ignore_index=True, + ) + + combined_arc_failed_states = pd.concat( + [combined_arc_failed_states, powerline_failure_state_df] + ) + combined_arc_failed_states = pd.concat( + [combined_arc_failed_states, pipeline_failure_state_df] + ) return combined_arc_failed_states.dropna(subset=["name"]) @staticmethod def generate_distribution_nodes(water_arc_df, water_nodes_df): - dist_nodes = list(set().union(water_arc_df['tonode'].unique(), water_arc_df['fromnode'].unique())) + dist_nodes = list( + set().union( + water_arc_df["tonode"].unique(), water_arc_df["fromnode"].unique() + ) + ) for node in dist_nodes: if node not in 
water_nodes_df["nodenwid"]: - water_nodes_df = pd.concat([water_nodes_df, pd.DataFrame([{'utilfcltyc': 'Distribution Node', - 'nodenwid': node}])], ignore_index=True) + water_nodes_df = pd.concat( + [ + water_nodes_df, + pd.DataFrame( + [{"utilfcltyc": "Distribution Node", "nodenwid": node}] + ), + ], + ignore_index=True, + ) return water_nodes_df diff --git a/pyincore/analyses/indp/infrastructurearc.py b/pyincore/analyses/indp/infrastructurearc.py index 95cd6b00a..3f21ae777 100644 --- a/pyincore/analyses/indp/infrastructurearc.py +++ b/pyincore/analyses/indp/infrastructurearc.py @@ -70,7 +70,7 @@ def set_extra_commodity(self, extra_commodity): """ for ec in extra_commodity: - self.extra_com[ec] = {'flow_cost': 0} + self.extra_com[ec] = {"flow_cost": 0} def set_resource_usage(self, resource_names): """ diff --git a/pyincore/analyses/indp/infrastructureinterdeparc.py b/pyincore/analyses/indp/infrastructureinterdeparc.py index 37466d052..e1423d554 100644 --- a/pyincore/analyses/indp/infrastructureinterdeparc.py +++ b/pyincore/analyses/indp/infrastructureinterdeparc.py @@ -24,7 +24,9 @@ class InfrastructureInterdepArc(InfrastructureArc): """ def __init__(self, source, dest, source_layer, dest_layer, gamma): - super(InfrastructureInterdepArc, self).__init__(source, dest, source_layer, True) + super(InfrastructureInterdepArc, self).__init__( + source, dest, source_layer, True + ) self.source_layer = source_layer self.dest_layer = dest_layer self.gamma = gamma diff --git a/pyincore/analyses/indp/infrastructurenetwork.py b/pyincore/analyses/indp/infrastructurenetwork.py index 9ec1fc315..e292a4a0f 100644 --- a/pyincore/analyses/indp/infrastructurenetwork.py +++ b/pyincore/analyses/indp/infrastructurenetwork.py @@ -55,16 +55,15 @@ def update_with_strategy(self, player_strategy): """ for q in player_strategy[0]: - node = q strat = player_strategy[0][q] - self.G.node[q]['data']['inf_data'].repaired = round(strat['repair']) - self.G.node[q]['data']['inf_data'].functionality = round(strat['w']) + self.G.node[q]["data"]["inf_data"].repaired = round(strat["repair"]) + self.G.node[q]["data"]["inf_data"].functionality = round(strat["w"]) for q in player_strategy[1]: src = q[0] dst = q[1] strat = player_strategy[1][q] - self.G[src][dst]['data']['inf_data'].repaired = round(strat['repair']) - self.G[src][dst]['data']['inf_data'].functionality = round(strat['y']) + self.G[src][dst]["data"]["inf_data"].repaired = round(strat["repair"]) + self.G[src][dst]["data"]["inf_data"].functionality = round(strat["y"]) def get_clusters(self, layer): """ @@ -81,11 +80,20 @@ def get_clusters(self, layer): List of layer components """ - g_prime_nodes = [n[0] for n in self.G.nodes(data=True) if - n[1]['data']['inf_data'].net_id == layer and n[1]['data']['inf_data'].functionality == 1.0] + g_prime_nodes = [ + n[0] + for n in self.G.nodes(data=True) + if n[1]["data"]["inf_data"].net_id == layer + and n[1]["data"]["inf_data"].functionality == 1.0 + ] g_prime = nx.DiGraph(self.G.subgraph(g_prime_nodes).copy()) g_prime.remove_edges_from( - [(u, v) for u, v, a in g_prime.edges(data=True) if a['data']['inf_data'].functionality == 0.0]) + [ + (u, v) + for u, v, a in g_prime.edges(data=True) + if a["data"]["inf_data"].functionality == 0.0 + ] + ) # print nx.connected_components(g_prime.to_undirected()) return list(nx.connected_components(g_prime.to_undirected())) @@ -103,11 +111,20 @@ def gc_size(self, layer): : int Size of the largest component in the layer """ - g_prime_nodes = [n[0] for n in self.G.nodes(data=True) if - 
n[1]['data']['inf_data'].net_id == layer and n[1]['data']['inf_data'].functionality == 1.0] + g_prime_nodes = [ + n[0] + for n in self.G.nodes(data=True) + if n[1]["data"]["inf_data"].net_id == layer + and n[1]["data"]["inf_data"].functionality == 1.0 + ] g_prime = nx.Graph(self.G.subgraph(g_prime_nodes)) g_prime.remove_edges_from( - [(u, v) for u, v, a in g_prime.edges(data=True) if a['data']['inf_data'].functionality == 0.0]) + [ + (u, v) + for u, v, a in g_prime.edges(data=True) + if a["data"]["inf_data"].functionality == 0.0 + ] + ) cc = nx.connected_components(g_prime.to_undirected()) if cc: # if len(list(cc)) == 1: @@ -139,13 +156,23 @@ def to_game_file(self, layers=None): layers = [1, 3] with open("../results/indp_gamefile.game") as f: num_players = len(layers) - num_targets = len([n for n in self.G.nodes(data=True) if n[1]['data']['inf_data'].net_id in layers]) + num_targets = len( + [ + n + for n in self.G.nodes(data=True) + if n[1]["data"]["inf_data"].net_id in layers + ] + ) f.write(str(num_players) + "," + str(num_targets) + ",2\n") for layer in range(len(layers)): - layer_nodes = [n for n in self.G.nodes(data=True) if n[1]['data']['inf_data'].net_id == layers[layer]] + layer_nodes = [ + n + for n in self.G.nodes(data=True) + if n[1]["data"]["inf_data"].net_id == layers[layer] + ] for node in layer_nodes: def_values = [0.0] * len(layers) - def_values[layer] = abs(node[1]['data']['inf_data'].demand) + def_values[layer] = abs(node[1]["data"]["inf_data"].demand) atk_values = sum(def_values) f.write(str(layer) + "\n") # f.write("0.0,1.0") @@ -167,6 +194,15 @@ def to_csv(self, filename="infrastructure_adj.csv"): None. """ - with open(filename, 'w') as f: + with open(filename, "w") as f: for u, v, a in self.G.edges(data=True): - f.write(str(u[0]) + "." + str(u[1]) + "," + str(v[0]) + "." + str(v[1]) + "\n") + f.write( + str(u[0]) + + "." + + str(u[1]) + + "," + + str(v[0]) + + "." + + str(v[1]) + + "\n" + ) diff --git a/pyincore/analyses/indp/infrastructurenode.py b/pyincore/analyses/indp/infrastructurenode.py index 35278a043..dd123b4f1 100644 --- a/pyincore/analyses/indp/infrastructurenode.py +++ b/pyincore/analyses/indp/infrastructurenode.py @@ -87,8 +87,12 @@ def set_extra_commodity(self, extra_commodity): None. """ - for l in extra_commodity: - self.extra_com[l] = {'demand': 0, 'oversupply_penalty': 0, 'undersupply_penalty': 0} + for m in extra_commodity: + self.extra_com[m] = { + "demand": 0, + "oversupply_penalty": 0, + "undersupply_penalty": 0, + } def set_resource_usage(self, resource_names): """ diff --git a/pyincore/analyses/indp/infrastructureutil.py b/pyincore/analyses/indp/infrastructureutil.py index 9375981b7..8b5ae23f7 100644 --- a/pyincore/analyses/indp/infrastructureutil.py +++ b/pyincore/analyses/indp/infrastructureutil.py @@ -11,11 +11,16 @@ class InfrastructureUtil: - @staticmethod - def load_infrastructure_array_format_extended(power_nodes, power_arcs, water_nodes, water_arcs, - interdep, cost_scale=1.0, - extra_commodity=None): + def load_infrastructure_array_format_extended( + power_nodes, + power_arcs, + water_nodes, + water_arcs, + interdep, + cost_scale=1.0, + extra_commodity=None, + ): """ This function reads the infrastructure network from file in the extended format @@ -33,7 +38,7 @@ def load_infrastructure_array_format_extended(power_nodes, power_arcs, water_nod G (class:`~infrastructure.InfrastructureNetwork`): The object containing the network data. """ - net_names = {'Water': 1, 'Gas': 2, 'Power': 3, 'Telecommunication': 4} # !!! 
+ net_names = {"Water": 1, "Gas": 2, "Power": 3, "Telecommunication": 4} # !!! G = InfrastructureNetwork("Test") global_index = 0 @@ -44,33 +49,39 @@ def load_infrastructure_array_format_extended(power_nodes, power_arcs, water_nod net = net_names["Water"] for v in data.iterrows(): try: - node_id = v[1]['ID'] + node_id = v[1]["ID"] except KeyError: - node_id = v[1]['nodenwid'] + node_id = v[1]["nodenwid"] n = InfrastructureNode(global_index, net, int(node_id)) - G.G.add_node((n.local_id, n.net_id), data={'inf_data': n}) + G.G.add_node((n.local_id, n.net_id), data={"inf_data": n}) global_index += 1 - node_main_data = G.G.nodes[(n.local_id, n.net_id)]['data']['inf_data'] - node_main_data.reconstruction_cost = float(v[1]['q_ds_3']) * cost_scale - node_main_data.oversupply_penalty = float(v[1]['Mp']) * cost_scale - node_main_data.undersupply_penalty = float(v[1]['Mm']) * cost_scale - node_main_data.demand = float(v[1]['Demand']) - if 'guid' in v[1].index.values: - node_main_data.guid = v[1]['guid'] - resource_names = [x for x in list(v[1].index.values) if x[:2] == 'p_'] + node_main_data = G.G.nodes[(n.local_id, n.net_id)]["data"]["inf_data"] + node_main_data.reconstruction_cost = float(v[1]["q_ds_3"]) * cost_scale + node_main_data.oversupply_penalty = float(v[1]["Mp"]) * cost_scale + node_main_data.undersupply_penalty = float(v[1]["Mm"]) * cost_scale + node_main_data.demand = float(v[1]["Demand"]) + if "guid" in v[1].index.values: + node_main_data.guid = v[1]["guid"] + resource_names = [x for x in list(v[1].index.values) if x[:2] == "p_"] if len(resource_names) > 0: n.set_resource_usage(resource_names) for rc in resource_names: n.resource_usage[rc] = v[1][rc] else: - n.resource_usage['p_'] = 1 + n.resource_usage["p_"] = 1 if extra_commodity: n.set_extra_commodity(extra_commodity[net]) for layer in extra_commodity[net]: - ext_com_data = G.G.nodes[(n.local_id, n.net_id)]['data']['inf_data'].extra_com[layer] - ext_com_data['oversupply_penalty'] = float(v[1]['Mp_' + layer]) * cost_scale - ext_com_data['undersupply_penalty'] = float(v[1]['Mm_' + layer]) * cost_scale - ext_com_data['demand'] = float(v[1]['Demand_' + layer]) + ext_com_data = G.G.nodes[(n.local_id, n.net_id)]["data"][ + "inf_data" + ].extra_com[layer] + ext_com_data["oversupply_penalty"] = ( + float(v[1]["Mp_" + layer]) * cost_scale + ) + ext_com_data["undersupply_penalty"] = ( + float(v[1]["Mm_" + layer]) * cost_scale + ) + ext_com_data["demand"] = float(v[1]["Demand_" + layer]) for index, data in enumerate([power_arcs, water_arcs]): if index == 0: @@ -79,45 +90,58 @@ def load_infrastructure_array_format_extended(power_nodes, power_arcs, water_nod net = net_names["Water"] for v in data.iterrows(): try: - start_id = v[1]['Start Node'] - end_id = v[1]['End Node'] + start_id = v[1]["Start Node"] + end_id = v[1]["End Node"] except KeyError: - start_id = v[1]['fromnode'] - end_id = v[1]['tonode'] + start_id = v[1]["fromnode"] + end_id = v[1]["tonode"] for duplicate in range(2): if duplicate == 0: a = InfrastructureArc(int(start_id), int(end_id), net) elif duplicate == 1: a = InfrastructureArc(int(end_id), int(start_id), net) - G.G.add_edge((a.source, a.layer), (a.dest, a.layer), data={'inf_data': a}) - arc_main_data = G.G[(a.source, a.layer)][(a.dest, a.layer)]['data']['inf_data'] - arc_main_data.flow_cost = float(v[1]['c']) * cost_scale - arc_main_data.reconstruction_cost = float(v[1]['f']) * cost_scale - arc_main_data.capacity = float(v[1]['u']) - if 'guid' in v[1].index.values: - arc_main_data.guid = v[1]['guid'] - resource_names = [x 
for x in list(v[1].index.values) if x[:2] == 'h_'] + G.G.add_edge( + (a.source, a.layer), (a.dest, a.layer), data={"inf_data": a} + ) + arc_main_data = G.G[(a.source, a.layer)][(a.dest, a.layer)]["data"][ + "inf_data" + ] + arc_main_data.flow_cost = float(v[1]["c"]) * cost_scale + arc_main_data.reconstruction_cost = float(v[1]["f"]) * cost_scale + arc_main_data.capacity = float(v[1]["u"]) + if "guid" in v[1].index.values: + arc_main_data.guid = v[1]["guid"] + resource_names = [ + x for x in list(v[1].index.values) if x[:2] == "h_" + ] if len(resource_names) > 0: a.set_resource_usage(resource_names) for rc in resource_names: a.resource_usage[rc] = v[1][rc] else: - a.resource_usage['h_'] = 1 + a.resource_usage["h_"] = 1 if extra_commodity: a.set_extra_commodity(extra_commodity[net]) for layer in extra_commodity[net]: - ext_com_data = \ - G.G[(a.source, a.layer)][(a.dest, a.layer)]['data']['inf_data'].extra_com[layer] - ext_com_data['flow_cost'] = float(v[1]['c_' + layer]) * cost_scale + ext_com_data = G.G[(a.source, a.layer)][(a.dest, a.layer)][ + "data" + ]["inf_data"].extra_com[layer] + ext_com_data["flow_cost"] = ( + float(v[1]["c_" + layer]) * cost_scale + ) for v in interdep.iterrows(): - if v[1]['Type'] == 'Physical': - i = int(v[1]['Dependee Node']) - net_i = net_names[v[1]['Dependee Network']] - j = int(v[1]['Depender Node']) - net_j = net_names[v[1]['Depender Network']] + if v[1]["Type"] == "Physical": + i = int(v[1]["Dependee Node"]) + net_i = net_names[v[1]["Dependee Network"]] + j = int(v[1]["Depender Node"]) + net_j = net_names[v[1]["Depender Network"]] a = InfrastructureInterdepArc(i, j, net_i, net_j, gamma=1.0) - G.G.add_edge((a.source, a.source_layer), (a.dest, a.dest_layer), data={'inf_data': a}) + G.G.add_edge( + (a.source, a.source_layer), + (a.dest, a.dest_layer), + data={"inf_data": a}, + ) if extra_commodity: a.set_extra_commodity(extra_commodity[net_i]) a.set_extra_commodity(extra_commodity[net_j]) @@ -144,19 +168,19 @@ def add_from_csv_failure_scenario(G, sample, initial_node, initial_link): """ for index, row in initial_node.iterrows(): raw_n = row["name"].split(",") - n = (int(raw_n[0].strip(' )(')), int(raw_n[1].strip(' )('))) + n = (int(raw_n[0].strip(" )(")), int(raw_n[1].strip(" )("))) state = float(row[sample + 1]) - G.G.nodes[n]['data']['inf_data'].functionality = state - G.G.nodes[n]['data']['inf_data'].repaired = state + G.G.nodes[n]["data"]["inf_data"].functionality = state + G.G.nodes[n]["data"]["inf_data"].repaired = state for index, row in initial_link.iterrows(): raw_uv = row["name"].split(",") - u = (int(raw_uv[0].strip(' )(')), int(raw_uv[1].strip(' )('))) - v = (int(raw_uv[2].strip(' )(')), int(raw_uv[3].strip(' )('))) + u = (int(raw_uv[0].strip(" )(")), int(raw_uv[1].strip(" )("))) + v = (int(raw_uv[2].strip(" )(")), int(raw_uv[3].strip(" )("))) state = float(row[sample + 1]) if state == 0.0: - G.G[u][v]['data']['inf_data'].functionality = state - G.G[u][v]['data']['inf_data'].repaired = state + G.G[u][v]["data"]["inf_data"].functionality = state + G.G[u][v]["data"]["inf_data"].repaired = state - G.G[v][u]['data']['inf_data'].functionality = state - G.G[v][u]['data']['inf_data'].repaired = state + G.G[v][u]["data"]["inf_data"].functionality = state + G.G[v][u]["data"]["inf_data"].repaired = state diff --git a/pyincore/analyses/joplinempiricalbuildingrestoration/__init__.py b/pyincore/analyses/joplinempiricalbuildingrestoration/__init__.py index d48607533..300abf995 100644 --- a/pyincore/analyses/joplinempiricalbuildingrestoration/__init__.py +++ 
b/pyincore/analyses/joplinempiricalbuildingrestoration/__init__.py @@ -5,5 +5,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.joplinempiricalbuildingrestoration.joplinempiricalbuildingrestoration import JoplinEmpiricalBuildingRestoration -from pyincore.analyses.joplinempiricalbuildingrestoration.joplinempirrestor_util import JoplinEmpirRestorUtil \ No newline at end of file +from pyincore.analyses.joplinempiricalbuildingrestoration.joplinempiricalbuildingrestoration import ( + JoplinEmpiricalBuildingRestoration, +) +from pyincore.analyses.joplinempiricalbuildingrestoration.joplinempirrestor_util import ( + JoplinEmpirRestorUtil, +) diff --git a/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py b/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py index fd835eac5..71ac8bdaf 100755 --- a/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py +++ b/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py @@ -8,11 +8,13 @@ import pandas as pd from pyincore import BaseAnalysis -from pyincore.analyses.joplinempiricalbuildingrestoration.joplinempirrestor_util import JoplinEmpirRestorUtil +from pyincore.analyses.joplinempiricalbuildingrestoration.joplinempirrestor_util import ( + JoplinEmpirRestorUtil, +) class JoplinEmpiricalBuildingRestoration(BaseAnalysis): - """ Joplin Empirical Building Restoration Model generates a random realization for the restoration time of + """Joplin Empirical Building Restoration Model generates a random realization for the restoration time of a building damaged in a tornado event to be restored to a certain functionality level. Functionality levels in this model are defined according to Koliou and van de Lindt (2020) and range from Functionality Level 4 (FL4, the lowest functionality) to Functionality Level 0 (FL0, full functionality). @@ -26,7 +28,7 @@ def __init__(self, incore_client): super(JoplinEmpiricalBuildingRestoration, self).__init__(incore_client) def run(self): - """ Executes Joplin empirical building restoration model analysis. + """Executes Joplin empirical building restoration model analysis. Returns: bool: True if successful, False otherwise. 
@@ -39,45 +41,69 @@ def run(self): result_name = self.get_parameter("result_name") # Building dataset - building_set = self.get_input_dataset("buildings").get_dataframe_from_shapefile() + building_set = self.get_input_dataset( + "buildings" + ).get_dataframe_from_shapefile() # Building damage dataset - building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv(low_memory=False) + building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv( + low_memory=False + ) # Building functionality target level building_target_fl = self.get_input_dataset("building_functionality_level") if building_target_fl is not None: - building_target_fl = building_target_fl.get_dataframe_from_csv(low_memory=False) + building_target_fl = building_target_fl.get_dataframe_from_csv( + low_memory=False + ) else: building_target_fl = None # merge and filter out archetypes > 5 - building_dmg_all = pd.merge(building_dmg, building_set, how="left", on="guid", copy=True, validate="1:1") - building_dmg_5 = building_dmg_all[["guid", "archetype", "LS_0", "LS_1", "LS_2", "haz_expose"]].copy() + building_dmg_all = pd.merge( + building_dmg, building_set, how="left", on="guid", copy=True, validate="1:1" + ) + building_dmg_5 = building_dmg_all[ + ["guid", "archetype", "LS_0", "LS_1", "LS_2", "haz_expose"] + ].copy() building_func_5 = building_dmg_5[building_dmg_all["archetype"] <= 5] - building_func = building_func_5[["guid", "LS_0", "LS_1", "LS_2", "haz_expose"]].copy() + building_func = building_func_5[ + ["guid", "LS_0", "LS_1", "LS_2", "haz_expose"] + ].copy() building_func["targetFL"] = target_fl if building_target_fl is not None: - building_func = pd.merge(building_func, building_target_fl, - how="left", on="guid", copy=True, validate="1:1") + building_func = pd.merge( + building_func, + building_target_fl, + how="left", + on="guid", + copy=True, + validate="1:1", + ) # Replace NaN value from targetFL_y with targetFL_x value - building_func["targetFL"] = building_func["targetFL_y"].fillna(building_func["targetFL_x"]) + building_func["targetFL"] = building_func["targetFL_y"].fillna( + building_func["targetFL_x"] + ) # Drop merged columns building_func = building_func.drop(["targetFL_x", "targetFL_y"], axis=1) building_func = building_func.astype({"targetFL": "int64"}) - initial_func_level, restoration_days = self.get_restoration_days(seed_i, building_func) + initial_func_level, restoration_days = self.get_restoration_days( + seed_i, building_func + ) building_func["initialFL"] = initial_func_level building_func["restorDays"] = restoration_days - building_func_fin = building_func[["guid", "initialFL", "targetFL", "restorDays"]] + building_func_fin = building_func[ + ["guid", "initialFL", "targetFL", "restorDays"] + ] csv_source = "dataframe" self.set_result_csv_data("result", building_func_fin, result_name, csv_source) return True def get_restoration_days(self, seed_i, building_func): - """ Calculates restoration days. + """Calculates restoration days. 
Args: seed_i (int): Seed for random number generator to ensure replication if run as part @@ -99,11 +125,31 @@ def get_restoration_days(self, seed_i, building_func): # generate a random number between 0 and 1 and see where in boundaries it locates and use it to assign FL, # for each building - rnd_num = np.random.uniform(0, 1, (len(building_func.index, ))) - bdnp_init = np.zeros(len(building_func.index, )).astype(int) # first, set all to 0 - bdnp_init = np.where(rnd_num < bdnp[:, 0], 1, bdnp_init) # if rnd_num < LS_0 set to 1 - bdnp_init = np.where(rnd_num < bdnp[:, 1], 2, bdnp_init) # if rnd_num < LS_0 set to 2 - bdnp_init = np.where(rnd_num < bdnp[:, 2], 3, bdnp_init) # if rnd_num < LS_0 set to 3 + rnd_num = np.random.uniform( + 0, + 1, + ( + len( + building_func.index, + ) + ), + ) + bdnp_init = np.zeros( + len( + building_func.index, + ) + ).astype( + int + ) # first, set all to 0 + bdnp_init = np.where( + rnd_num < bdnp[:, 0], 1, bdnp_init + ) # if rnd_num < LS_0 set to 1 + bdnp_init = np.where( + rnd_num < bdnp[:, 1], 2, bdnp_init + ) # if rnd_num < LS_0 set to 2 + bdnp_init = np.where( + rnd_num < bdnp[:, 2], 3, bdnp_init + ) # if rnd_num < LS_0 set to 3 bdnp_target = building_func["targetFL"].to_numpy() @@ -138,58 +184,63 @@ def get_spec(self): "id": "result_name", "required": True, "description": "result dataset name", - "type": str + "type": str, }, { "id": "target_functionality_level", "required": False, "description": "Target functionality level for all infrastructure", - "type": int + "type": int, }, { "id": "seed", "required": False, "description": "Initial seed for the tornado hazard value", - "type": int - } + "type": int, + }, ], "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { "id": "building_dmg", "required": True, "description": "Building damage results CSV file", - "type": ["ergo:buildingDamageVer4", - "ergo:buildingDamageVer5", - "ergo:buildingDamageVer6", - "ergo:buildingInventory", - "ergo:nsBuildingInventoryDamage", - "ergo:nsBuildingInventoryDamageVer2", - 'ergo:nsBuildingInventoryDamageVer3', - 'ergo:nsBuildingInventoryDamageVer4' - ] + "type": [ + "ergo:buildingDamageVer4", + "ergo:buildingDamageVer5", + "ergo:buildingDamageVer6", + "ergo:buildingInventory", + "ergo:nsBuildingInventoryDamage", + "ergo:nsBuildingInventoryDamageVer2", + "ergo:nsBuildingInventoryDamageVer3", + "ergo:nsBuildingInventoryDamageVer4", + ], }, { "id": "building_functionality_level", "required": False, "description": "Functionality level per building. 
The target level defaults " - "to target_functionality_level parameter if building not in the dataset", - "type": ["incore:buildingFuncTargetVer1"] - } + "to target_functionality_level parameter if building not in the dataset", + "type": ["incore:buildingFuncTargetVer1"], + }, ], "output_datasets": [ { "id": "result", "parent_type": "buildings", "description": "A dataset containing results (format: CSV) with values (in days) for the predicted " - "restoration time of the building.", - "type": "incore:restorationTime" + "restoration time of the building.", + "type": "incore:restorationTime", } - ] + ], } diff --git a/pyincore/analyses/meandamage/meandamage.py b/pyincore/analyses/meandamage/meandamage.py index 483011d88..f11dd1a1b 100644 --- a/pyincore/analyses/meandamage/meandamage.py +++ b/pyincore/analyses/meandamage/meandamage.py @@ -28,68 +28,70 @@ def get_spec(self): """ return { - 'name': 'mean-damage', - 'description': 'calculate the mean and expected damage using damage ratio table', - 'input_parameters': [ + "name": "mean-damage", + "description": "calculate the mean and expected damage using damage ratio table", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'damage_interval_keys', - 'required': True, - 'description': 'Column name of the damage interval must be four and ranged in order', - 'type': List[str] + "id": "damage_interval_keys", + "required": True, + "description": "Column name of the damage interval must be four and ranged in order", + "type": List[str], }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - } + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'damage', - 'required': True, - 'description': 'damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer4', - 'ergo:buildingDamageVer5', - 'ergo:buildingDamageVer6', - 'ergo:nsBuildingInventoryDamage', - 'ergo:nsBuildingInventoryDamageVer2', - 'ergo:nsBuildingInventoryDamageVer3', - 'ergo:nsBuildingInventoryDamageVer4', - 'ergo:bridgeDamage', - 'ergo:bridgeDamageVer2', - 'ergo:bridgeDamageVer3', - 'ergo:roadDamage', - 'ergo:roadDamageVer2', - 'ergo:roadDamageVer3' - ] + "id": "damage", + "required": True, + "description": "damage result that has damage intervals in it", + "type": [ + "ergo:buildingDamageVer4", + "ergo:buildingDamageVer5", + "ergo:buildingDamageVer6", + "ergo:nsBuildingInventoryDamage", + "ergo:nsBuildingInventoryDamageVer2", + "ergo:nsBuildingInventoryDamageVer3", + "ergo:nsBuildingInventoryDamageVer4", + "ergo:bridgeDamage", + "ergo:bridgeDamageVer2", + "ergo:bridgeDamageVer3", + "ergo:roadDamage", + "ergo:roadDamageVer2", + "ergo:roadDamageVer3", + ], }, { - 'id': 'dmg_ratios', - 'required': True, - 'description': 'Damage Ratios table', - 'type': ['ergo:buildingDamageRatios', - 'ergo:bridgeDamageRatios', - 'ergo:buildingContentDamageRatios', - 'ergo:buildingASDamageRatios', - 'ergo:buildingDSDamageRatios', - 'ergo:roadDamageRatios'] + "id": "dmg_ratios", + "required": True, + "description": "Damage Ratios table", + "type": [ + "ergo:buildingDamageRatios", + "ergo:bridgeDamageRatios", + "ergo:buildingContentDamageRatios", + "ergo:buildingASDamageRatios", + 
"ergo:buildingDSDamageRatios", + "ergo:roadDamageRatios", + ], }, - ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'description': 'CSV file of mean damage', - 'type': 'ergo:meanDamage' + "id": "result", + "description": "CSV file of mean damage", + "type": "ergo:meanDamage", } - ] + ], } def run(self): @@ -104,27 +106,33 @@ def run(self): # setting number of cpus to use user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len( - damage_result), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(damage_result), user_defined_cpu + ) avg_bulk_input_size = int(len(damage_result) / num_workers) inventory_args = [] count = 0 inventory_list = damage_result while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size results = self.mean_damage_concurrent_future( - self.mean_damage_bulk_input, num_workers, - inventory_args, repeat(dmg_ratio_tbl)) - self.set_result_csv_data("result", results, - name=self.get_parameter("result_name")) + self.mean_damage_bulk_input, + num_workers, + inventory_args, + repeat(dmg_ratio_tbl), + ) + self.set_result_csv_data( + "result", results, name=self.get_parameter("result_name") + ) return True def mean_damage_concurrent_future(self, function_name, parallelism, *args): @@ -141,7 +149,8 @@ def mean_damage_concurrent_future(self, function_name, parallelism, *args): """ output = [] with concurrent.futures.ProcessPoolExecutor( - max_workers=parallelism) as executor: + max_workers=parallelism + ) as executor: for ret in executor.map(function_name, *args): output.extend(ret) @@ -170,8 +179,8 @@ def mean_damage_bulk_input(self, damage, dmg_ratio_tbl): result = [] for dmg in damage: result.append( - self.mean_damage(dmg, dmg_ratio_tbl, damage_interval_keys, - is_bridge)) + self.mean_damage(dmg, dmg_ratio_tbl, damage_interval_keys, is_bridge) + ) return result @@ -194,39 +203,42 @@ def mean_damage(self, dmg, dmg_ratio_tbl, damage_interval_keys, is_bridge): if is_bridge: # need to calculate bridge span - if "spans" in dmg.keys() and dmg['spans'] is not None \ - and dmg["spans"].isdigit(): + if ( + "spans" in dmg.keys() + and dmg["spans"] is not None + and dmg["spans"].isdigit() + ): bridge_spans = int(dmg["spans"]) else: bridge_spans = 1 if bridge_spans > 10: bridge_spans = 10 - print("A bridge was found with greater than 10 spans: " - + dmg['guid'] + ". Default to 10 bridge spans.") - - mean_damage = AnalysisUtil.calculate_mean_damage(dmg_ratio_tbl, - dmg, - damage_interval_keys, - is_bridge, - bridge_spans) + print( + "A bridge was found with greater than 10 spans: " + + dmg["guid"] + + ". Default to 10 bridge spans." 
+ ) + + mean_damage = AnalysisUtil.calculate_mean_damage( + dmg_ratio_tbl, dmg, damage_interval_keys, is_bridge, bridge_spans + ) else: - mean_damage = AnalysisUtil.calculate_mean_damage(dmg_ratio_tbl, - dmg, - damage_interval_keys, - is_bridge) + mean_damage = AnalysisUtil.calculate_mean_damage( + dmg_ratio_tbl, dmg, damage_interval_keys, is_bridge + ) results.update(mean_damage) # bridge doesn't calculates deviation if not is_bridge: mean_damage_dev = AnalysisUtil.calculate_mean_damage_std_deviation( - dmg_ratio_tbl, dmg, mean_damage['meandamage'], - damage_interval_keys) + dmg_ratio_tbl, dmg, mean_damage["meandamage"], damage_interval_keys + ) results.update(mean_damage_dev) else: expected_damage = AnalysisUtil.get_expected_damage( - mean_damage['meandamage'], - dmg_ratio_tbl) - results['expectval'] = expected_damage + mean_damage["meandamage"], dmg_ratio_tbl + ) + results["expectval"] = expected_damage return results diff --git a/pyincore/analyses/mlenabledcgeslc/__init__.py b/pyincore/analyses/mlenabledcgeslc/__init__.py index b251e805f..7af6b47c1 100644 --- a/pyincore/analyses/mlenabledcgeslc/__init__.py +++ b/pyincore/analyses/mlenabledcgeslc/__init__.py @@ -4,4 +4,4 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.mlenabledcgeslc.mlcgeslc import MlEnabledCgeSlc \ No newline at end of file +from pyincore.analyses.mlenabledcgeslc.mlcgeslc import MlEnabledCgeSlc diff --git a/pyincore/analyses/mlenabledcgeslc/mlcgeslc.py b/pyincore/analyses/mlenabledcgeslc/mlcgeslc.py index ec0d69ac5..33d5362cb 100644 --- a/pyincore/analyses/mlenabledcgeslc/mlcgeslc.py +++ b/pyincore/analyses/mlenabledcgeslc/mlcgeslc.py @@ -17,7 +17,6 @@ class MlEnabledCgeSlc(CoreCGEML): - model = "Machine Learning Enabled Computable General Equilibrium - Salt Lake City " # Coefficients files @@ -83,9 +82,13 @@ class MlEnabledCgeSlc(CoreCGEML): ] def __init__(self, incore_client: IncoreClient): - sectors, base_cap_factors, base_cap, model_coeffs, cap_shock_sectors = ( - CGEMLFileUtil.parse_files(self.model_filenames, self.filenames) - ) + ( + sectors, + base_cap_factors, + base_cap, + model_coeffs, + cap_shock_sectors, + ) = CGEMLFileUtil.parse_files(self.model_filenames, self.filenames) self.base_cap_factors = base_cap_factors self.base_cap = base_cap self.model_coeffs = model_coeffs @@ -95,8 +98,8 @@ def __init__(self, incore_client: IncoreClient): ) # 4 labor groups def run(self) -> bool: - """Executes the ML enabled CGE model for Salt Lake City """ - + """Executes the ML enabled CGE model for Salt Lake City""" + logger.info(f"Running {self.model} model...") sector_shocks = pd.read_csv( self.get_input_dataset("sector_shocks").get_file_path("csv") @@ -107,8 +110,8 @@ def run(self) -> bool: for sector in self.cap_shock_sectors: if sector.upper() not in [v.upper() for v in sector_shocks["sector"]]: raise ValueError( - f"Sector {sector} not found in the sector shocks file with\n {sector_shocks['sector']} sectors.\n" + - "Please make sure you have used the correct capital shocks" + f"Sector {sector} not found in the sector shocks file with\n {sector_shocks['sector']} sectors.\n" + + "Please make sure you have used the correct capital shocks" ) shocks.append( sector_shocks.loc[sector_shocks["sector"] == sector.upper()]["shock"] diff --git a/pyincore/analyses/montecarlofailureprobability/__init__.py b/pyincore/analyses/montecarlofailureprobability/__init__.py index 0574eab9c..11d1ab054 100644 --- 
a/pyincore/analyses/montecarlofailureprobability/__init__.py +++ b/pyincore/analyses/montecarlofailureprobability/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.montecarlofailureprobability.montecarlofailureprobability import MonteCarloFailureProbability +from pyincore.analyses.montecarlofailureprobability.montecarlofailureprobability import ( + MonteCarloFailureProbability, +) diff --git a/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py b/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py index 2467a56da..0c5f84d44 100644 --- a/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py +++ b/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py @@ -4,11 +4,16 @@ from deprecated.sphinx import deprecated -from pyincore.analyses.montecarlolimitstateprobability import MonteCarloLimitStateProbability +from pyincore.analyses.montecarlolimitstateprobability import ( + MonteCarloLimitStateProbability, +) -@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use MonteCarloLimitStateProbability instead.") -class MonteCarloFailureProbability(): +@deprecated( + version="1.19.0", + reason="This class will be deprecated soon. Use MonteCarloLimitStateProbability instead.", +) +class MonteCarloFailureProbability: def __init__(self, incore_client): self._delegate = MonteCarloLimitStateProbability(incore_client) diff --git a/pyincore/analyses/montecarlolimitstateprobability/__init__.py b/pyincore/analyses/montecarlolimitstateprobability/__init__.py index a358ba14c..f6446f7f3 100644 --- a/pyincore/analyses/montecarlolimitstateprobability/__init__.py +++ b/pyincore/analyses/montecarlolimitstateprobability/__init__.py @@ -5,5 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.montecarlolimitstateprobability.montecarlolimitstateprobability import \ - MonteCarloLimitStateProbability +from pyincore.analyses.montecarlolimitstateprobability.montecarlolimitstateprobability import ( + MonteCarloLimitStateProbability, +) diff --git a/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py index 1352b3a66..23abd975d 100644 --- a/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py +++ b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py @@ -30,97 +30,98 @@ def get_spec(self): """ return { - 'name': 'monte-carlo-limit-state-probability', - 'description': 'calculate the probability of limit state in monte-carlo simulation', - 'input_parameters': [ + "name": "monte-carlo-limit-state-probability", + "description": "calculate the probability of limit state in monte-carlo simulation", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'basename of the result datasets. This analysis will create two outputs: failure ' - 'proability and failure state with the basename in the filename. ' - 'For example: "result_name = joplin_mcs_building" will result in ' - '"joplin_mcs_building_failure_state.csv" and ' - '"joplin_mcs_building_failure_probability.csv"', - 'type': str + "id": "result_name", + "required": True, + "description": "basename of the result datasets. This analysis will create two outputs: failure " + "proability and failure state with the basename in the filename. 
" + 'For example: "result_name = joplin_mcs_building" will result in ' + '"joplin_mcs_building_failure_state.csv" and ' + '"joplin_mcs_building_failure_probability.csv"', + "type": str, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'num_samples', - 'required': True, - 'description': 'Number of MC samples', - 'type': int + "id": "num_samples", + "required": True, + "description": "Number of MC samples", + "type": int, }, { - 'id': 'damage_interval_keys', - 'required': True, - 'description': 'Column name of the damage interval', - 'type': List[str] + "id": "damage_interval_keys", + "required": True, + "description": "Column name of the damage interval", + "type": List[str], }, { - 'id': 'failure_state_keys', - 'required': True, - 'description': 'Column name of the damage interval that considered as damaged', - 'type': List[str] + "id": "failure_state_keys", + "required": True, + "description": "Column name of the damage interval that considered as damaged", + "type": List[str], }, { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the probabilistic model', - 'type': int + "id": "seed", + "required": False, + "description": "Initial seed for the probabilistic model", + "type": int, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'damage', - 'required': True, - 'description': 'damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer4', - 'ergo:buildingDamageVer5', - 'ergo:buildingDamageVer6', - 'ergo:nsBuildingInventoryDamage', - 'ergo:nsBuildingInventoryDamageVer2', - 'ergo:nsBuildingInventoryDamageVer3', - 'ergo:nsBuildingInventoryDamageVer4', - 'ergo:bridgeDamage', - 'ergo:bridgeDamageVer2', - 'ergo:bridgeDamageVer3', - 'ergo:waterFacilityDamageVer4', - 'ergo:waterFacilityDamageVer5', - 'ergo:waterFacilityDamageVer6', - 'ergo:roadDamage', - 'ergo:roadDamageVer2', - 'ergo:roadDamageVer3', - 'incore:epfDamage', - 'incore:epfDamageVer2', - 'incore:epfDamageVer3', - 'incore:pipelineDamage', - 'incore:pipelineDamageVer2', - 'incore:pipelineDamageVer3'] + "id": "damage", + "required": True, + "description": "damage result that has damage intervals in it", + "type": [ + "ergo:buildingDamageVer4", + "ergo:buildingDamageVer5", + "ergo:buildingDamageVer6", + "ergo:nsBuildingInventoryDamage", + "ergo:nsBuildingInventoryDamageVer2", + "ergo:nsBuildingInventoryDamageVer3", + "ergo:nsBuildingInventoryDamageVer4", + "ergo:bridgeDamage", + "ergo:bridgeDamageVer2", + "ergo:bridgeDamageVer3", + "ergo:waterFacilityDamageVer4", + "ergo:waterFacilityDamageVer5", + "ergo:waterFacilityDamageVer6", + "ergo:roadDamage", + "ergo:roadDamageVer2", + "ergo:roadDamageVer3", + "incore:epfDamage", + "incore:epfDamageVer2", + "incore:epfDamageVer3", + "incore:pipelineDamage", + "incore:pipelineDamageVer2", + "incore:pipelineDamageVer3", + ], }, - ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'failure_probability', - 'description': 'CSV file of failure probability', - 'type': 'incore:failureProbability' + "id": "failure_probability", + "description": "CSV file of failure probability", + "type": "incore:failureProbability", }, { - 'id': 'sample_failure_state', - 'description': 'CSV file of failure state for each sample', - 'type': 'incore:sampleFailureState' + "id": "sample_failure_state", + "description": "CSV file 
of failure state for each sample", + "type": "incore:sampleFailureState", }, { - 'id': 'sample_damage_states', - 'description': 'CSV file of simulated damage states for each sample', - 'type': 'incore:sampleDamageState' - } - ] + "id": "sample_damage_states", + "description": "CSV file of simulated damage states for each sample", + "type": "incore:sampleDamageState", + }, + ], } def run(self): @@ -132,14 +133,15 @@ def run(self): # setting number of cpus to use user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, - len( - damage_result), - user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(damage_result), user_defined_cpu + ) avg_bulk_input_size = int(len(damage_result) / num_workers) inventory_args = [] @@ -151,29 +153,55 @@ def run(self): if seed is not None: while count < len(inventory_list): inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) - seed_list.append([seed + i for i in range(count - 1, count + avg_bulk_input_size - 1)]) + inventory_list[count : count + avg_bulk_input_size] + ) + seed_list.append( + [ + seed + i + for i in range(count - 1, count + avg_bulk_input_size - 1) + ] + ) count += avg_bulk_input_size else: while count < len(inventory_list): inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) - seed_list.append([None for i in range(count - 1, count + avg_bulk_input_size - 1)]) + inventory_list[count : count + avg_bulk_input_size] + ) + seed_list.append( + [None for i in range(count - 1, count + avg_bulk_input_size - 1)] + ) count += avg_bulk_input_size - fs_results, fp_results, samples_results = self.monte_carlo_failure_probability_concurrent_future( - self.monte_carlo_failure_probability_bulk_input, num_workers, - inventory_args, seed_list) - self.set_result_csv_data("sample_failure_state", - fs_results, name=self.get_parameter("result_name") + "_failure_state") - self.set_result_csv_data("failure_probability", - fp_results, name=self.get_parameter("result_name") + "_failure_probability") - self.set_result_csv_data("sample_damage_states", - samples_results, name=self.get_parameter("result_name") + "_sample_damage_states") + ( + fs_results, + fp_results, + samples_results, + ) = self.monte_carlo_failure_probability_concurrent_future( + self.monte_carlo_failure_probability_bulk_input, + num_workers, + inventory_args, + seed_list, + ) + self.set_result_csv_data( + "sample_failure_state", + fs_results, + name=self.get_parameter("result_name") + "_failure_state", + ) + self.set_result_csv_data( + "failure_probability", + fp_results, + name=self.get_parameter("result_name") + "_failure_probability", + ) + self.set_result_csv_data( + "sample_damage_states", + samples_results, + name=self.get_parameter("result_name") + "_sample_damage_states", + ) return True - def monte_carlo_failure_probability_concurrent_future(self, function_name, - parallelism, *args): + def monte_carlo_failure_probability_concurrent_future( + self, function_name, parallelism, *args + ): """Utilizes concurrent.future module. 
Args: @@ -190,7 +218,8 @@ def monte_carlo_failure_probability_concurrent_future(self, function_name, fp_output = [] samples_output = [] with concurrent.futures.ProcessPoolExecutor( - max_workers=parallelism) as executor: + max_workers=parallelism + ) as executor: for fs_ret, fp_ret, samples_ret in executor.map(function_name, *args): fs_output.extend(fs_ret) fp_output.extend(fp_ret) @@ -220,8 +249,9 @@ def monte_carlo_failure_probability_bulk_input(self, damage, seed_list): i = 0 for dmg in damage: - fs, fp, samples_result = self.monte_carlo_failure_probability(dmg, damage_interval_keys, failure_state_keys, - num_samples, seed_list[i]) + fs, fp, samples_result = self.monte_carlo_failure_probability( + dmg, damage_interval_keys, failure_state_keys, num_samples, seed_list[i] + ) fs_result.append(fs) fp_result.append(fp) samples_output.append(samples_result) @@ -229,8 +259,9 @@ def monte_carlo_failure_probability_bulk_input(self, damage, seed_list): return fs_result, fp_result, samples_output - def monte_carlo_failure_probability(self, dmg, damage_interval_keys, - failure_state_keys, num_samples, seed): + def monte_carlo_failure_probability( + self, dmg, damage_interval_keys, failure_state_keys, num_samples, seed + ): """Calculates building damage results for a single building. Args: @@ -253,28 +284,29 @@ def monte_carlo_failure_probability(self, dmg, damage_interval_keys, samples_result = collections.OrderedDict() # copying guid/id column to the sample damage failure table - if 'guid' in dmg.keys(): - fs_result['guid'] = dmg['guid'] - samples_result['guid'] = dmg['guid'] + if "guid" in dmg.keys(): + fs_result["guid"] = dmg["guid"] + samples_result["guid"] = dmg["guid"] - elif 'id' in dmg.keys(): - fs_result['id'] = dmg['id'] - samples_result['id'] = dmg['id'] + elif "id" in dmg.keys(): + fs_result["id"] = dmg["id"] + samples_result["id"] = dmg["id"] else: - fs_result['id'] = 'NA' - samples_result['id'] = 'NA' + fs_result["id"] = "NA" + samples_result["id"] = "NA" # failure probability fp_result = collections.OrderedDict() - fp_result['guid'] = dmg['guid'] + fp_result["guid"] = dmg["guid"] - ds_sample = self.sample_damage_interval(dmg, damage_interval_keys, - num_samples, seed) + ds_sample = self.sample_damage_interval( + dmg, damage_interval_keys, num_samples, seed + ) func, fp = self.calc_probability_failure_value(ds_sample, failure_state_keys) - fs_result['failure'] = ",".join(func.values()) - fp_result['failure_probability'] = fp - samples_result['sample_damage_states'] = ','.join(ds_sample.values()) + fs_result["failure"] = ",".join(func.values()) + fp_result["failure_probability"] = fp + samples_result["sample_damage_states"] = ",".join(ds_sample.values()) return fs_result, fp_result, samples_result @@ -300,9 +332,8 @@ def sample_damage_interval(self, dmg, damage_interval_keys, num_samples, seed): prob_val = 0 flag = True for ds_name in damage_interval_keys: - if rnd_num < prob_val + AnalysisUtil.float_to_decimal(dmg[ds_name]): - ds['sample_{}'.format(i)] = ds_name + ds["sample_{}".format(i)] = ds_name flag = False break else: diff --git a/pyincore/analyses/multiobjectiveretrofitoptimization/__init__.py b/pyincore/analyses/multiobjectiveretrofitoptimization/__init__.py index aa4ac7f2d..f74c50e82 100644 --- a/pyincore/analyses/multiobjectiveretrofitoptimization/__init__.py +++ b/pyincore/analyses/multiobjectiveretrofitoptimization/__init__.py @@ -1,2 +1,3 @@ -from pyincore.analyses.multiobjectiveretrofitoptimization.multiobjectiveretrofitoptimization import \ - 
MultiObjectiveRetrofitOptimization +from pyincore.analyses.multiobjectiveretrofitoptimization.multiobjectiveretrofitoptimization import ( + MultiObjectiveRetrofitOptimization, +) diff --git a/pyincore/analyses/multiobjectiveretrofitoptimization/multiobjectiveretrofitoptimization.py b/pyincore/analyses/multiobjectiveretrofitoptimization/multiobjectiveretrofitoptimization.py index 169d1f05b..d4b332f7d 100644 --- a/pyincore/analyses/multiobjectiveretrofitoptimization/multiobjectiveretrofitoptimization.py +++ b/pyincore/analyses/multiobjectiveretrofitoptimization/multiobjectiveretrofitoptimization.py @@ -9,7 +9,6 @@ import numpy as np import time from typing import List -from pandas import DataFrame from pyomo.environ import ConcreteModel, Set, Var, Param, Objective, Constraint from pyomo.environ import quicksum, minimize, maximize, NonNegativeReals from pyomo.environ import sum_product @@ -20,32 +19,32 @@ class MultiObjectiveRetrofitOptimization(BaseAnalysis): """ - This analysis computes a series of linear programming models for single- and multi-objective - optimization related to the effect of extreme weather on a community in terms of three objective functions. - The three objectives used in this program are to minimize economic loss, minimize population dislocation, - and maximize building functionality. The computation proceeds by iteratively solving constrained linear - models using epsilon steps. + This analysis computes a series of linear programming models for single- and multi-objective + optimization related to the effect of extreme weather on a community in terms of three objective functions. + The three objectives used in this program are to minimize economic loss, minimize population dislocation, + and maximize building functionality. The computation proceeds by iteratively solving constrained linear + models using epsilon steps. - The output of the computation a collection of optimal resource allocations. + The output of the computation a collection of optimal resource allocations. - Contributors - | Science: Charles Nicholson, Yunjie Wen - | Implementation: Dale Cochran , Tarun Adluri , Jorge Duarte, Santiago Núñez-Corrales, Diego Calderon - and NCSA IN-CORE Dev Team + Contributors + | Science: Charles Nicholson, Yunjie Wen + | Implementation: Dale Cochran , Tarun Adluri , Jorge Duarte, Santiago Núñez-Corrales, Diego Calderon + and NCSA IN-CORE Dev Team - Related publications + Related publications - Args: + Args: - incore_client (IncoreClient): Service authentication. + incore_client (IncoreClient): Service authentication. 
- """ + """ # Column descriptors - __Q_col = 'Q_t_hat' - __Q_rs_col = 'Q_t_hat_rs' - __SC_col = 'Sc' - __SC_rs_col = 'Sc_rs' + __Q_col = "Q_t_hat" + __Q_rs_col = "Q_t_hat_rs" + __SC_col = "Sc" + __SC_rs_col = "Sc_rs" __budget_default = 0.2 @@ -55,39 +54,56 @@ def __init__(self, incore_client): def run(self): """Execute the multiobjective retrofit optimization analysis using parameters and input data.""" # Read parameters - model_solver = self.get_parameter('model_solver') - num_epsilon_steps = self.get_parameter('num_epsilon_steps') + model_solver = self.get_parameter("model_solver") + num_epsilon_steps = self.get_parameter("num_epsilon_steps") budget_available = self.__budget_default - if self.get_parameter('max_budget') != 'default': - budget_available = self.get_parameter('budget_available') + if self.get_parameter("max_budget") != "default": + budget_available = self.get_parameter("budget_available") inactive_submodels = [] - in_subm = self.get_parameter('inactive_submodels') + in_subm = self.get_parameter("inactive_submodels") if in_subm is not None: inactive_submodels = in_subm # Perform code scaling scaling_factor = 1.0 - if self.get_parameter('scale_data'): - scaling_factor = self.get_parameter('scaling_factor') + if self.get_parameter("scale_data"): + scaling_factor = self.get_parameter("scaling_factor") - building_related_data = self.get_input_dataset('building_related_data').get_dataframe_from_csv() - strategy_costs = self.get_input_dataset('strategy_costs_data').get_dataframe_from_csv() + building_related_data = self.get_input_dataset( + "building_related_data" + ).get_dataframe_from_csv() + strategy_costs = self.get_input_dataset( + "strategy_costs_data" + ).get_dataframe_from_csv() # Convert Z columns to text in both datasets - building_related_data['Z'] = building_related_data['Z'].astype(str) - strategy_costs['Z'] = strategy_costs['Z'].astype(str) - - self.multiobjective_retrofit_optimization_model(model_solver, num_epsilon_steps, budget_available, - scaling_factor, inactive_submodels, building_related_data, - strategy_costs) - - def multiobjective_retrofit_optimization_model(self, model_solver, num_epsilon_steps, budget_available, - scaling_factor, inactive_submodels, building_related_data, - strategy_costs): + building_related_data["Z"] = building_related_data["Z"].astype(str) + strategy_costs["Z"] = strategy_costs["Z"].astype(str) + + self.multiobjective_retrofit_optimization_model( + model_solver, + num_epsilon_steps, + budget_available, + scaling_factor, + inactive_submodels, + building_related_data, + strategy_costs, + ) + + def multiobjective_retrofit_optimization_model( + self, + model_solver, + num_epsilon_steps, + budget_available, + scaling_factor, + inactive_submodels, + building_related_data, + strategy_costs, + ): """Performs the computation of the model. 
Args: @@ -102,18 +118,19 @@ def multiobjective_retrofit_optimization_model(self, model_solver, num_epsilon_s """ # Setup the stream that will collect data - self.ostr = open('pyincore.txt', 'w') + self.ostr = open("pyincore.txt", "w") # Setup the model - model, sum_sc = self.configure_model(budget_available, scaling_factor, building_related_data, - strategy_costs) + model, sum_sc = self.configure_model( + budget_available, scaling_factor, building_related_data, strategy_costs + ) self.configure_model_objectives(model) print("With constraints model") - self.configure_model_retrofit_costs(model) # Suspicious + self.configure_model_retrofit_costs(model) # Suspicious # Choose the solver setting if model_solver == "gurobi" or model_solver is None: - model_solver_setting = pyo.SolverFactory('gurobi', solver_io="python") + model_solver_setting = pyo.SolverFactory("gurobi", solver_io="python") else: model_solver_setting = pyo.SolverFactory(model_solver) @@ -124,18 +141,32 @@ def multiobjective_retrofit_optimization_model(self, model_solver, num_epsilon_s self.configure_min_max_epsilon_values(model, obj_list, num_epsilon_steps) print("Epsilon model") - xresults_df, yresults_df = self.solve_epsilon_models(model, model_solver_setting, inactive_submodels) - - df_list = self.compute_optimal_results(inactive_submodels, xresults_df, yresults_df) - - self.set_result_csv_data("optimal_solution_dv_x", df_list[0], name="optimal_solution_dv_x", - source="dataframe") - self.set_result_csv_data("optimal_solution_dv_y", df_list[1], name="optimal_solution_dv_y", - source="dataframe") + xresults_df, yresults_df = self.solve_epsilon_models( + model, model_solver_setting, inactive_submodels + ) + + df_list = self.compute_optimal_results( + inactive_submodels, xresults_df, yresults_df + ) + + self.set_result_csv_data( + "optimal_solution_dv_x", + df_list[0], + name="optimal_solution_dv_x", + source="dataframe", + ) + self.set_result_csv_data( + "optimal_solution_dv_y", + df_list[1], + name="optimal_solution_dv_y", + source="dataframe", + ) return True - def configure_model(self, budget_available, scaling_factor, building_related_data, strategy_costs): - """ Configure the base model to perform the multiobjective optimization. + def configure_model( + self, budget_available, scaling_factor, building_related_data, strategy_costs + ): + """Configure the base model to perform the multiobjective optimization. Args: @@ -150,9 +181,12 @@ def configure_model(self, budget_available, scaling_factor, building_related_dat """ # Rescale data if scaling_factor != 1.0: - building_related_data[self.__Q_col] = \ - building_related_data[self.__Q_col].map(lambda a: a / scaling_factor) - strategy_costs[self.__SC_col] = strategy_costs[self.__SC_col].map(lambda a: a / scaling_factor) + building_related_data[self.__Q_col] = building_related_data[ + self.__Q_col + ].map(lambda a: a / scaling_factor) + strategy_costs[self.__SC_col] = strategy_costs[self.__SC_col].map( + lambda a: a / scaling_factor + ) # Setup pyomo model = ConcreteModel() @@ -164,49 +198,65 @@ def configure_model(self, budget_available, scaling_factor, building_related_dat zsk = [] for y in range(len(building_related_data)): - i = building_related_data.loc[y, 'Z'] # Identify the i ∈ Z value. - j = building_related_data.loc[y, 'S'] # Identify the j ∈ S value. - k = building_related_data.loc[y, 'K'] # Identify the k ∈ K value. + i = building_related_data.loc[y, "Z"] # Identify the i ∈ Z value. + j = building_related_data.loc[y, "S"] # Identify the j ∈ S value. 
+ k = building_related_data.loc[y, "K"] # Identify the k ∈ K value. zsk.append((i, j, k)) # Add the combination to the list. - zsk = sorted(set(zsk), key=zsk.index) # Convert the list to an ordered set for Pyomo. + zsk = sorted( + set(zsk), key=zsk.index + ) # Convert the list to an ordered set for Pyomo. model.ZSK = Set(initialize=zsk) # Define and initialize the ZSK set in Pyomo. zs = [] for y in range(len(building_related_data)): - i = building_related_data.loc[y, 'Z'] # Identify the i ∈ Z value. - j = building_related_data.loc[y, 'S'] # Identify the j ∈ S value. + i = building_related_data.loc[y, "Z"] # Identify the i ∈ Z value. + j = building_related_data.loc[y, "S"] # Identify the j ∈ S value. zs.append((i, j)) # Add the combination to the list. - zs = sorted(set(zs), key=zs.index) # Convert the list to an ordered set for Pyomo. + zs = sorted( + set(zs), key=zs.index + ) # Convert the list to an ordered set for Pyomo. model.ZS = Set(initialize=zs) # Define and initialize the ZS set in Pyomo. kk_prime = [] for y in range(len(strategy_costs)): - k = strategy_costs.loc[y, 'K'] # Identify the k ∈ K value. + k = strategy_costs.loc[y, "K"] # Identify the k ∈ K value. k_prime = strategy_costs.loc[y, "K'"] # Identify the k ∈ K value. kk_prime.append((k, k_prime)) # Add the combination to the list. - kk_prime = sorted(set(kk_prime), key=kk_prime.index) # Convert the list to an ordered set for Pyomo. - model.KK_prime = Set(initialize=kk_prime) # Define and initialize the KK_prime set in Pyomo. + kk_prime = sorted( + set(kk_prime), key=kk_prime.index + ) # Convert the list to an ordered set for Pyomo. + model.KK_prime = Set( + initialize=kk_prime + ) # Define and initialize the KK_prime set in Pyomo. k_primek = [] for y in range(len(strategy_costs)): - k = strategy_costs.loc[y, 'K'] # Identify the k ∈ K value. + k = strategy_costs.loc[y, "K"] # Identify the k ∈ K value. k_prime = strategy_costs.loc[y, "K'"] # Identify the k ∈ K value. if k_prime <= k: k_primek.append((k_prime, k)) # Add the combination to the list. - k_primek = sorted(set(k_primek), key=k_primek.index) # Convert the list to an ordered set for Pyomo. - model.K_primeK = Set(initialize=k_primek) # Define and initialize the K_primeK set in Pyomo. + k_primek = sorted( + set(k_primek), key=k_primek.index + ) # Convert the list to an ordered set for Pyomo. + model.K_primeK = Set( + initialize=k_primek + ) # Define and initialize the K_primeK set in Pyomo. # Define the set of all ZSKK' combinations: zskk_prime = [] for y in range(len(strategy_costs)): - i = strategy_costs.loc[y, 'Z'] # Identify the i ∈ Z value. - j = strategy_costs.loc[y, 'S'] # Identify the j ∈ S value. - k = strategy_costs.loc[y, 'K'] # Identify the k ∈ K value. + i = strategy_costs.loc[y, "Z"] # Identify the i ∈ Z value. + j = strategy_costs.loc[y, "S"] # Identify the j ∈ S value. + k = strategy_costs.loc[y, "K"] # Identify the k ∈ K value. k_prime = strategy_costs.loc[y, "K'"] # Identify the k ∈ K value. zskk_prime.append((i, j, k, k_prime)) # Add the combination to the list. - zskk_prime = sorted(set(zskk_prime), key=zskk_prime.index) # Convert the list to an ordered set for Pyomo. - model.ZSKK_prime = Set(initialize=zskk_prime) # Define and initialize the ZSKK_prime set in Pyomo. - model.zskk_prime = zskk_prime # Use the redundant index for later reference + zskk_prime = sorted( + set(zskk_prime), key=zskk_prime.index + ) # Convert the list to an ordered set for Pyomo. 
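As an aside (not part of the patch): configure_model repeatedly builds ordered, de-duplicated index tuples from dataframe columns, wraps them in Pyomo Sets, and fills mutable Params row by row. A small self-contained sketch of that pattern with a made-up dataframe; dict.fromkeys is a linear-time equivalent of the sorted(set(...), key=list.index) idiom used above:

# --- Illustrative sketch, not part of the patch; the dataframe is fabricated ---
import pandas as pd
from pyomo.environ import ConcreteModel, Set, Param, NonNegativeReals

df = pd.DataFrame(
    {"Z": ["1", "1", "2"], "S": ["A", "A", "B"], "K": [1, 2, 1], "l": [5.0, 3.0, 7.5]}
)

# Unique (Z, S, K) tuples, keeping first-seen order.
zsk = list(dict.fromkeys(zip(df["Z"], df["S"], df["K"])))

model = ConcreteModel()
model.ZSK = Set(initialize=zsk, dimen=3)

# Mutable Param indexed by ZSK, filled row by row from the dataframe.
model.l_ijk = Param(model.ZSK, within=NonNegativeReals, mutable=True)
for _, row in df.iterrows():
    model.l_ijk[row["Z"], row["S"], row["K"]] = row["l"]

model.l_ijk.pprint()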
+ model.ZSKK_prime = Set( + initialize=zskk_prime + ) # Define and initialize the ZSKK_prime set in Pyomo. + model.zskk_prime = zskk_prime # Use the redundant index for later reference #################################################################################################### # DEFINE VARIABLES AND PARAMETERS: @@ -222,43 +272,45 @@ def configure_model(self, budget_available, scaling_factor, building_related_dat # Declare economic loss cost parameter l_ijk: model.l_ijk = Param(model.ZSK, within=NonNegativeReals, mutable=True) for y in range(len(building_related_data)): - i = building_related_data.loc[y, 'Z'] - j = building_related_data.loc[y, 'S'] - k = building_related_data.loc[y, 'K'] - model.l_ijk[i, j, k] = building_related_data.loc[y, 'l'] + i = building_related_data.loc[y, "Z"] + j = building_related_data.loc[y, "S"] + k = building_related_data.loc[y, "K"] + model.l_ijk[i, j, k] = building_related_data.loc[y, "l"] # Declare dislocation parameter d_ijk: model.d_ijk = Param(model.ZSK, within=NonNegativeReals, mutable=True) for y in range(len(building_related_data)): - i = building_related_data.loc[y, 'Z'] - j = building_related_data.loc[y, 'S'] - k = building_related_data.loc[y, 'K'] - model.d_ijk[i, j, k] = building_related_data.loc[y, 'd_ijk'] + i = building_related_data.loc[y, "Z"] + j = building_related_data.loc[y, "S"] + k = building_related_data.loc[y, "K"] + model.d_ijk[i, j, k] = building_related_data.loc[y, "d_ijk"] # Declare the number of buildings parameter b_ijk: model.b_ijk = Param(model.ZSK, within=NonNegativeReals, mutable=True) for y in range(len(building_related_data)): - i = building_related_data.loc[y, 'Z'] - j = building_related_data.loc[y, 'S'] - k = building_related_data.loc[y, 'K'] - model.b_ijk[i, j, k] = building_related_data.loc[y, 'b'] + i = building_related_data.loc[y, "Z"] + j = building_related_data.loc[y, "S"] + k = building_related_data.loc[y, "K"] + model.b_ijk[i, j, k] = building_related_data.loc[y, "b"] # Declare the building functionality parameter Q_t_hat: model.Q_t_hat = Param(model.ZSK, within=NonNegativeReals, mutable=True) for y in range(len(building_related_data)): - i = building_related_data.loc[y, 'Z'] - j = building_related_data.loc[y, 'S'] - k = building_related_data.loc[y, 'K'] - model.Q_t_hat[i, j, k] = building_related_data.loc[y, 'Q_t_hat'] + i = building_related_data.loc[y, "Z"] + j = building_related_data.loc[y, "S"] + k = building_related_data.loc[y, "K"] + model.Q_t_hat[i, j, k] = building_related_data.loc[y, "Q_t_hat"] # Declare the retrofit cost parameter Sc_ijkk': - model.Sc_ijkk_prime = Param(model.ZSKK_prime, within=NonNegativeReals, mutable=True) + model.Sc_ijkk_prime = Param( + model.ZSKK_prime, within=NonNegativeReals, mutable=True + ) for y in range(len(strategy_costs)): - i = strategy_costs.loc[y, 'Z'] - j = strategy_costs.loc[y, 'S'] - k = strategy_costs.loc[y, 'K'] + i = strategy_costs.loc[y, "Z"] + j = strategy_costs.loc[y, "S"] + k = strategy_costs.loc[y, "K"] k_prime = strategy_costs.loc[y, "K'"] - model.Sc_ijkk_prime[i, j, k, k_prime] = strategy_costs.loc[y, 'Sc'] + model.Sc_ijkk_prime[i, j, k, k_prime] = strategy_costs.loc[y, "Sc"] #################################################################################################### # DECLARE THE TOTAL MAX BUDGET AND TOTAL AVAILABLE BUDGET: @@ -268,7 +320,10 @@ def configure_model(self, budget_available, scaling_factor, building_related_dat if budget_available == self.__budget_default: sumSc = quicksum( - pyo.value(model.Sc_ijkk_prime[i, j, k, 3]) * 
pyo.value(model.b_ijk[i, j, k]) for i, j, k in model.ZSK) + pyo.value(model.Sc_ijkk_prime[i, j, k, 3]) + * pyo.value(model.b_ijk[i, j, k]) + for i, j, k in model.ZSK + ) else: sumSc = budget_available # Define the total available budget based on user's input: @@ -277,7 +332,7 @@ def configure_model(self, budget_available, scaling_factor, building_related_dat return model, sumSc def configure_model_objectives(self, model): - """ Configure the model by adding objectives + """Configure the model by adding objectives Args: model (ConcreteModel): a base cost/functionality model @@ -287,20 +342,30 @@ def configure_model_objectives(self, model): """ model.objective_1 = Objective(rule=self.obj_economic, sense=minimize) - model.econ_loss = Param(mutable=True, within=NonNegativeReals) # ,default=10000000000) + model.econ_loss = Param( + mutable=True, within=NonNegativeReals + ) # ,default=10000000000) model.objective_2 = Objective(rule=self.obj_dislocation, sense=minimize) - model.dislocation = Param(mutable=True, within=NonNegativeReals) # ,default=30000) + model.dislocation = Param( + mutable=True, within=NonNegativeReals + ) # ,default=30000) model.objective_3 = Objective(rule=self.obj_functionality, sense=maximize) - model.functionality = Param(mutable=True, within=NonNegativeReals) # ,default=1) + model.functionality = Param( + mutable=True, within=NonNegativeReals + ) # ,default=1) def configure_model_retrofit_costs(self, model): model.retrofit_budget_constraint = Constraint(rule=self.retrofit_cost_rule) - model.number_buildings_ij_constraint = Constraint(model.ZS, rule=self.number_buildings_ij_rule) + model.number_buildings_ij_constraint = Constraint( + model.ZS, rule=self.number_buildings_ij_rule + ) model.a = Param(mutable=True) model.c = Param(mutable=True) - model.building_level_constraint = Constraint(model.ZSK, rule=self.building_level_rule) + model.building_level_constraint = Constraint( + model.ZSK, rule=self.building_level_rule + ) def solve_individual_models(self, model, model_solver_setting, sum_sc): print("Max Budget: $", sum_sc) @@ -311,16 +376,31 @@ def solve_individual_models(self, model, model_solver_setting, sum_sc): rlist_obj_2 = self.solve_model_2(model, model_solver_setting) rlist_obj_3 = self.solve_model_3(model, model_solver_setting) - values_list = [sum_sc, pyo.value(model.B)] + rlist_obj_1 + rlist_obj_2 + rlist_obj_3 - - no_epsilon_constr_init_results = pd.DataFrame(data={ - "Label": ["Max Budget: $", "Budget (20% of max)", "Economic loss min epsilon (optimal value)", - "Dislocation when optimizing Economic Loss", "Functionality when optimizing Economic Loss", - "Dislocation min epsilon (optimal value)", "Economic Loss when optimizing Dislocation", - "Functionality when optimizing Dislocation", "Functionality max epsilon (optimal value)", - "Economic Loss when optimizing Functionality", "Dislocation when optimizing Functionality"] - , "Value": values_list}) - filename = 'no_epsilon_constr_init_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + values_list = ( + [sum_sc, pyo.value(model.B)] + rlist_obj_1 + rlist_obj_2 + rlist_obj_3 + ) + + no_epsilon_constr_init_results = pd.DataFrame( + data={ + "Label": [ + "Max Budget: $", + "Budget (20% of max)", + "Economic loss min epsilon (optimal value)", + "Dislocation when optimizing Economic Loss", + "Functionality when optimizing Economic Loss", + "Dislocation min epsilon (optimal value)", + "Economic Loss when optimizing Dislocation", + "Functionality when optimizing Dislocation", + "Functionality max epsilon (optimal 
value)", + "Economic Loss when optimizing Functionality", + "Dislocation when optimizing Functionality", + ], + "Value": values_list, + } + ) + filename = ( + "no_epsilon_constr_init_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" + ) no_epsilon_constr_init_results.to_csv(filename) return rlist_obj_1 + rlist_obj_2 + rlist_obj_3 @@ -338,16 +418,27 @@ def solve_model_1(self, model, model_solver_setting): # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) - obj_1_min_epsilon = pyo.value(model.objective_1) # Save the optimal economic loss value. - obj_2_value_1 = pyo.value(model.dislocation) # Save the dislocation value when optimizing economic loss. + obj_1_min_epsilon = pyo.value( + model.objective_1 + ) # Save the optimal economic loss value. + obj_2_value_1 = pyo.value( + model.dislocation + ) # Save the dislocation value when optimizing economic loss. obj_3_value_1 = pyo.value( - model.functionality) # Save the functionality value when optimizing economic loss. + model.functionality + ) # Save the functionality value when optimizing economic loss. print("Initial solve for objective function 1 complete.") - print("Economic Loss: ", pyo.value(model.econ_loss), - "Dislocation: ", pyo.value(model.dislocation), - "Functionality: ", pyo.value(model.functionality)) + print( + "Economic Loss: ", + pyo.value(model.econ_loss), + "Dislocation: ", + pyo.value(model.dislocation), + "Functionality: ", + pyo.value(model.functionality), + ) print("Economic loss min epsilon (optimal value):", obj_1_min_epsilon) print("Dislocation when optimizing Economic Loss:", obj_2_value_1) @@ -378,15 +469,27 @@ def solve_model_2(self, model, model_solver_setting): # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) - obj_2_min_epsilon = pyo.value(model.objective_2) # Save the optimal dislocation value. - obj_1_value_2 = pyo.value(model.econ_loss) # Save the economic loss value when optimizing dislocation. - obj_3_value_2 = pyo.value(model.functionality) # Save the functionality value when optimizing dislocation. + obj_2_min_epsilon = pyo.value( + model.objective_2 + ) # Save the optimal dislocation value. + obj_1_value_2 = pyo.value( + model.econ_loss + ) # Save the economic loss value when optimizing dislocation. + obj_3_value_2 = pyo.value( + model.functionality + ) # Save the functionality value when optimizing dislocation. 
print("Initial solve for objective function 2 complete.") - print("Economic Loss: ", pyo.value(model.econ_loss), - "Dislocation: ", pyo.value(model.dislocation), - "Functionality: ", pyo.value(model.functionality)) + print( + "Economic Loss: ", + pyo.value(model.econ_loss), + "Dislocation: ", + pyo.value(model.dislocation), + "Functionality: ", + pyo.value(model.functionality), + ) print("Dislocation min epsilon (optimal value):", obj_2_min_epsilon) print("Economic Loss when optimizing Dislocation:", obj_1_value_2) @@ -417,15 +520,27 @@ def solve_model_3(self, model, model_solver_setting): # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) - obj_3_max_epsilon = pyo.value(model.objective_3) # Save the optimal functionality value. - obj_1_value_3 = pyo.value(model.econ_loss) # Save the economic loss value when optimizing functionality. - obj_2_value_3 = pyo.value(model.dislocation) # Save the dislocation value when optimizing functionality. + obj_3_max_epsilon = pyo.value( + model.objective_3 + ) # Save the optimal functionality value. + obj_1_value_3 = pyo.value( + model.econ_loss + ) # Save the economic loss value when optimizing functionality. + obj_2_value_3 = pyo.value( + model.dislocation + ) # Save the dislocation value when optimizing functionality. print("Initial solve for objective function 3 complete.") - print("Economic Loss: ", pyo.value(model.econ_loss), - "Dislocation: ", pyo.value(model.dislocation), - "Functionality: ", pyo.value(model.functionality)) + print( + "Economic Loss: ", + pyo.value(model.econ_loss), + "Dislocation: ", + pyo.value(model.dislocation), + "Functionality: ", + pyo.value(model.functionality), + ) print("Functionality max epsilon (optimal value):", obj_3_max_epsilon) print("Economic Loss when optimizing Functionality:", obj_1_value_3) @@ -452,30 +567,54 @@ def configure_min_max_epsilon_values(self, model, objs, num_epsilon_steps): __obj_3_value_1_pos = 2 __obj_3_value_2_pos = 5 __obj_3_max_epsilon_pos = 6 - model.econ_loss_max = Param(within=NonNegativeReals, initialize=max(objs[__obj_1_value_2_pos], - objs[__obj_1_value_3_pos])) - model.econ_loss_min = Param(within=NonNegativeReals, initialize=objs[__obj_1_min_epsilon_pos]) - model.dislocation_max = Param(within=NonNegativeReals, initialize=max(objs[__obj_2_value_1_pos], - objs[__obj_2_value_3_pos])) - model.dislocation_min = Param(within=NonNegativeReals, initialize=objs[__obj_2_min_epsilon_pos]) - model.functionality_max = Param(within=NonNegativeReals, initialize=objs[__obj_3_max_epsilon_pos]) - model.functionality_min = Param(within=NonNegativeReals, initialize=min(objs[__obj_3_value_1_pos], - objs[__obj_3_value_2_pos])) - - model.econ_loss_step = Param(within=NonNegativeReals, - initialize=(pyo.value(model.econ_loss_max) - pyo.value(model.econ_loss_min)) * ( - 1 / (num_epsilon_steps - 1))) - model.dislocation_step = Param(within=NonNegativeReals, initialize=(pyo.value( - model.dislocation_max) - pyo.value(model.dislocation_min)) * (1 / (num_epsilon_steps - 1))) - model.functionality_step = Param(within=NonNegativeReals, initialize=(pyo.value( - model.functionality_max) - pyo.value(model.functionality_min)) * (1 / (num_epsilon_steps - 1))) + model.econ_loss_max = Param( + within=NonNegativeReals, + initialize=max(objs[__obj_1_value_2_pos], 
objs[__obj_1_value_3_pos]), + ) + model.econ_loss_min = Param( + within=NonNegativeReals, initialize=objs[__obj_1_min_epsilon_pos] + ) + model.dislocation_max = Param( + within=NonNegativeReals, + initialize=max(objs[__obj_2_value_1_pos], objs[__obj_2_value_3_pos]), + ) + model.dislocation_min = Param( + within=NonNegativeReals, initialize=objs[__obj_2_min_epsilon_pos] + ) + model.functionality_max = Param( + within=NonNegativeReals, initialize=objs[__obj_3_max_epsilon_pos] + ) + model.functionality_min = Param( + within=NonNegativeReals, + initialize=min(objs[__obj_3_value_1_pos], objs[__obj_3_value_2_pos]), + ) + + model.econ_loss_step = Param( + within=NonNegativeReals, + initialize=(pyo.value(model.econ_loss_max) - pyo.value(model.econ_loss_min)) + * (1 / (num_epsilon_steps - 1)), + ) + model.dislocation_step = Param( + within=NonNegativeReals, + initialize=( + pyo.value(model.dislocation_max) - pyo.value(model.dislocation_min) + ) + * (1 / (num_epsilon_steps - 1)), + ) + model.functionality_step = Param( + within=NonNegativeReals, + initialize=( + pyo.value(model.functionality_max) - pyo.value(model.functionality_min) + ) + * (1 / (num_epsilon_steps - 1)), + ) def solve_epsilon_models(self, model, model_solver_setting, inactive_submodels): xresults_df = pd.DataFrame() yresults_df = pd.DataFrame() if 1 not in inactive_submodels: - self.solve_epsilon_model_1(model, model_solver_setting) + self.solve_epsilon_model_1(model, model_solver_setting) if 2 not in inactive_submodels: self.solve_epsilon_model_2(model, model_solver_setting) @@ -493,22 +632,27 @@ def solve_epsilon_models(self, model, model_solver_setting, inactive_submodels): self.solve_epsilon_model_6(model, model_solver_setting) if 7 not in inactive_submodels: - xresults_df, yresults_df = self.solve_epsilon_model_7(model, model_solver_setting, xresults_df, - yresults_df) + xresults_df, yresults_df = self.solve_epsilon_model_7( + model, model_solver_setting, xresults_df, yresults_df + ) if 8 not in inactive_submodels: - xresults_df, yresults_df = self.solve_epsilon_model_8(model, model_solver_setting, xresults_df, - yresults_df) + xresults_df, yresults_df = self.solve_epsilon_model_8( + model, model_solver_setting, xresults_df, yresults_df + ) if 9 not in inactive_submodels: - xresults_df, yresults_df = self.solve_epsilon_model_9(model, model_solver_setting, xresults_df, - yresults_df) + xresults_df, yresults_df = self.solve_epsilon_model_9( + model, model_solver_setting, xresults_df, yresults_df + ) return xresults_df, yresults_df def solve_epsilon_model_1(self, model, model_solver_setting): starttime = time.time() - print("****OPTIMIZING ECONOMIC LOSS SUBJECT TO POPULATION DISLOCATION EPSILON CONSTRAINTS****") + print( + "****OPTIMIZING ECONOMIC LOSS SUBJECT TO POPULATION DISLOCATION EPSILON CONSTRAINTS****" + ) # Activate objective function 1 and deactivate others: model.objective_1.activate() model.objective_2.deactivate() @@ -517,35 +661,57 @@ def solve_epsilon_model_1(self, model, model_solver_setting): # Add objective function 2 as epsilon constraint of objective function 1: # Dataframe to store optimization results: obj_1_2_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 2 (dislocation) epsilon value: model.obj_2_e = Param(mutable=True, within=NonNegativeReals) # For each dislocation epsilon value (starting at min) set as 
constraint and solve model: counter = 0 - for e in np.arange(pyo.value(model.dislocation_min), pyo.value(model.dislocation_max) + 0.000001, - pyo.value(model.dislocation_step)): + for e in np.arange( + pyo.value(model.dislocation_min), + pyo.value(model.dislocation_max) + 0.000001, + pyo.value(model.dislocation_step), + ): counter += 1 print("Step ", counter, ": ", e) model.obj_2_e = e # Set the model parameter to the epsilon value. - model.add_component("objective_2_constraint", - Constraint(expr=sum_product(model.d_ijk, model.x_ijk) <= pyo.value( - model.obj_2_e))) # Add the epsilon constraint. + model.add_component( + "objective_2_constraint", + Constraint( + expr=sum_product(model.d_ijk, model.x_ijk) + <= pyo.value(model.obj_2_e) + ), + ) # Add the epsilon constraint. # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) # Add objective (economic loss), dislocation, and functionality values to results dataframe: - obj_1_2_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) # Save the optimal economic loss value. - obj_1_2_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value( - model.dislocation) # Save the resulting dislocation value. - obj_1_2_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value( - model.functionality) # Save the resulting functionality value. + obj_1_2_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value( + model.econ_loss + ) # Save the optimal economic loss value. + obj_1_2_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value( + model.dislocation + ) # Save the resulting dislocation value. + obj_1_2_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value( + model.functionality + ) # Save the resulting functionality value. 
else: print(results.solver.termination_condition) log_infeasible_constraints(model) @@ -558,7 +724,7 @@ def solve_epsilon_model_1(self, model, model_solver_setting): # Display and save the results of optimizing economic loss subject to dislocation epsilon constraints: print(obj_1_2_epsilon_results) - filename = 'obj_1_2_epsilon_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + filename = "obj_1_2_epsilon_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" obj_1_2_epsilon_results.to_csv(filename) endtime = time.time() elapsedtime = endtime - starttime @@ -566,7 +732,9 @@ def solve_epsilon_model_1(self, model, model_solver_setting): def solve_epsilon_model_2(self, model, model_solver_setting): starttime = time.time() - print("****OPTIMIZING ECONOMIC LOSS SUBJECT TO BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****") + print( + "****OPTIMIZING ECONOMIC LOSS SUBJECT TO BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****" + ) # Activate objective function 1 and deactivate others: model.objective_1.activate() model.objective_2.deactivate() @@ -575,37 +743,58 @@ def solve_epsilon_model_2(self, model, model_solver_setting): # Add objective function 3 as epsilon constraint of objective function 1: # Dataframe to store optimization results: obj_1_3_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 3 (functionality) epsilon value: model.obj_3_e = Param(mutable=True, within=NonNegativeReals) counter = 0 # For each functionality epsilon value (starting at min) set as constraint and solve model: # Adding 0.0000000000001 to the maximum value allows np.arange() to include the maximum functionality. - for e in np.arange(pyo.value(model.functionality_min), - pyo.value(model.functionality_max) + 0.000000000000000001, - pyo.value(model.functionality_step)): + for e in np.arange( + pyo.value(model.functionality_min), + pyo.value(model.functionality_max) + 0.000000000000000001, + pyo.value(model.functionality_step), + ): counter += 1 print("Step ", counter, ": ", e) model.obj_3_e = e # Set the model parameter to the epsilon value. - model.add_component("objective_3_constraint", - Constraint(expr=sum_product(model.Q_t_hat, model.x_ijk) >= pyo.value( - model.obj_3_e))) # Add the epsilon constraint. + model.add_component( + "objective_3_constraint", + Constraint( + expr=sum_product(model.Q_t_hat, model.x_ijk) + >= pyo.value(model.obj_3_e) + ), + ) # Add the epsilon constraint. # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) # Add objective (economic loss), dislocation, and functionality values to results dataframe: - obj_1_3_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) # Save the optimal economic loss value. - obj_1_3_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value( - model.dislocation) # Save the resulting dislocation value. - obj_1_3_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value( - model.functionality) # Save the resulting functionality value. 
+ obj_1_3_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value( + model.econ_loss + ) # Save the optimal economic loss value. + obj_1_3_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value( + model.dislocation + ) # Save the resulting dislocation value. + obj_1_3_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value( + model.functionality + ) # Save the resulting functionality value. else: print(results.solver.termination_condition) log_infeasible_constraints(model) @@ -618,7 +807,7 @@ def solve_epsilon_model_2(self, model, model_solver_setting): # Display and save the results of optimizing economic loss subject to functionality epsilon constraints: print(obj_1_3_epsilon_results) - filename = 'obj_1_3_epsilon_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + filename = "obj_1_3_epsilon_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" obj_1_3_epsilon_results.to_csv(filename) endtime = time.time() elapsedtime = endtime - starttime @@ -626,7 +815,9 @@ def solve_epsilon_model_2(self, model, model_solver_setting): def solve_epsilon_model_3(self, model, model_solver_setting): starttime = time.time() - print("****OPTIMIZING POPULATION DISLOCATION SUBJECT TO ECONOMIC LOSS EPSILON CONSTRAINTS****") + print( + "****OPTIMIZING POPULATION DISLOCATION SUBJECT TO ECONOMIC LOSS EPSILON CONSTRAINTS****" + ) # Activate objective function 2 and deactivate others: model.objective_1.deactivate() model.objective_2.activate() @@ -635,36 +826,58 @@ def solve_epsilon_model_3(self, model, model_solver_setting): # Add objective function 1 as epsilon constraint of objective function 2: # Dataframe to store optimization results: obj_2_1_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 1 (economic loss) epsilon value: model.obj_1_e = Param(mutable=True, within=NonNegativeReals) # For each economic loss epsilon value (starting at min) set as constraint and solve model: # Adding 1 to the maximum value allows np.arange() to include the maximum economic loss. counter = 0 - for e in np.arange(pyo.value(model.econ_loss_min), pyo.value(model.econ_loss_max) + 0.1, - pyo.value(model.econ_loss_step)): + for e in np.arange( + pyo.value(model.econ_loss_min), + pyo.value(model.econ_loss_max) + 0.1, + pyo.value(model.econ_loss_step), + ): counter += 1 print("Step ", counter, ": ", e) model.obj_1_e = e # Set the model parameter to the epsilon value. - model.add_component("objective_1_constraint", - Constraint(expr=sum_product(model.l_ijk, model.x_ijk) <= pyo.value( - model.obj_1_e))) # Set epsilon constraint. + model.add_component( + "objective_1_constraint", + Constraint( + expr=sum_product(model.l_ijk, model.x_ijk) + <= pyo.value(model.obj_1_e) + ), + ) # Set epsilon constraint. 
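As an aside (not part of the patch): the epsilon constraints added above are built with sum_product over a shared index, which yields the same linear expression as an explicit quicksum. A tiny runnable check with made-up data, no solver required:

# --- Illustrative sketch, not part of the patch; index and data are fabricated ---
from pyomo.environ import ConcreteModel, Set, Param, Var, NonNegativeReals
from pyomo.environ import quicksum, sum_product, value

model = ConcreteModel()
model.I = Set(initialize=[("a", 1), ("a", 2), ("b", 1)], dimen=2)
model.d = Param(model.I, initialize={("a", 1): 2.0, ("a", 2): 4.0, ("b", 1): 1.0})
model.x = Var(model.I, within=NonNegativeReals, initialize=1.0)

expr_sp = sum_product(model.d, model.x)                       # d[i] * x[i] summed over I
expr_qs = quicksum(model.d[i] * model.x[i] for i in model.I)  # explicit equivalent

# With every x initialized to 1.0, both expressions evaluate to 2 + 4 + 1 = 7.
print(value(expr_sp), value(expr_qs))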
# Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) # Add objective (dislocation), economic loss, and functionality values to results dataframe: - obj_2_1_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value( - model.dislocation) # Save the optimal dislocation value. - obj_2_1_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) # Save the resulting economic loss value. - obj_2_1_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value( - model.functionality) # Save the resulting functionality value. + obj_2_1_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value( + model.dislocation + ) # Save the optimal dislocation value. + obj_2_1_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value( + model.econ_loss + ) # Save the resulting economic loss value. + obj_2_1_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value( + model.functionality + ) # Save the resulting functionality value. else: print(results.solver.termination_condition) log_infeasible_constraints(model) @@ -677,7 +890,7 @@ def solve_epsilon_model_3(self, model, model_solver_setting): # Display the results of optimizing dislocation subject to economic loss epsilon constraints: print(obj_2_1_epsilon_results) - filename = 'obj_2_1_epsilon_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + filename = "obj_2_1_epsilon_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" obj_2_1_epsilon_results.to_csv(filename) endtime = time.time() elapsedtime = endtime - starttime @@ -685,7 +898,9 @@ def solve_epsilon_model_3(self, model, model_solver_setting): def solve_epsilon_model_4(self, model, model_solver_setting): starttime = time.time() - print("****OPTIMIZING POPULATION DISLOCATION SUBJECT TO BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****") + print( + "****OPTIMIZING POPULATION DISLOCATION SUBJECT TO BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****" + ) # Activate objective function 2 and deactivate others: model.objective_1.deactivate() model.objective_2.activate() @@ -694,36 +909,58 @@ def solve_epsilon_model_4(self, model, model_solver_setting): # Add objective function 3 as epsilon constraint of objective function 2: # Dataframe to store optimization results: obj_2_3_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 3 (functionality) epsilon value: model.obj_3_e = Param(mutable=True, within=NonNegativeReals) # For each functionality epsilon value (starting at min) set as constraint and solve model: # Adding 0.0000000000001 to the maximum value allows np.arange() to include the maximum functionality. 
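As an aside (not part of the patch): the comments above note that a small overshoot is added to the upper bound so np.arange does not drop the final epsilon value. A short illustration with arbitrary numbers; np.linspace is shown as the endpoint-inclusive alternative:

# --- Illustrative sketch, not part of the patch; numbers are arbitrary ---
import numpy as np

lo, hi, steps = 10.0, 50.0, 5
step = (hi - lo) * (1 / (steps - 1))

without_overshoot = np.arange(lo, hi, step)      # stops short, drops the max epsilon
with_overshoot = np.arange(lo, hi + 1e-6, step)  # keeps it, as in the code above
equivalent = np.linspace(lo, hi, steps)          # same grid, endpoint included

print(without_overshoot)  # [10. 20. 30. 40.]
print(with_overshoot)     # [10. 20. 30. 40. 50.]
print(equivalent)         # [10. 20. 30. 40. 50.]

The differing overshoot constants in these loops (0.1, 0.000001, 0.0000000000001) appear to exist only so the maximum value survives floating-point accumulation of the step; linspace would sidestep that concern.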
counter = 0 - for e in np.arange(pyo.value(model.functionality_min), pyo.value(model.functionality_max) + 0.0000000000001, - pyo.value(model.functionality_step)): + for e in np.arange( + pyo.value(model.functionality_min), + pyo.value(model.functionality_max) + 0.0000000000001, + pyo.value(model.functionality_step), + ): counter += 1 print("Step ", counter, ": ", e) model.obj_3_e = e # Set the model parameter to the epsilon value. - model.add_component("objective_3_constraint", - Constraint(expr=sum_product(model.Q_t_hat, model.x_ijk) >= ( - pyo.value(model.obj_3_e)))) # Set the epsilon constraint. + model.add_component( + "objective_3_constraint", + Constraint( + expr=sum_product(model.Q_t_hat, model.x_ijk) + >= (pyo.value(model.obj_3_e)) + ), + ) # Set the epsilon constraint. # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) # Add objective (dislocation), economic loss, and functionality values to results dataframe: - obj_2_3_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value( - model.dislocation) # Save the optimal dislocation value. - obj_2_3_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) # Save the resulting economic loss value. - obj_2_3_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value( - model.functionality) # Save the resulting functionality value. + obj_2_3_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value( + model.dislocation + ) # Save the optimal dislocation value. + obj_2_3_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value( + model.econ_loss + ) # Save the resulting economic loss value. + obj_2_3_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value( + model.functionality + ) # Save the resulting functionality value. 
else: print(results.solver.termination_condition) log_infeasible_constraints(model) @@ -736,7 +973,7 @@ def solve_epsilon_model_4(self, model, model_solver_setting): # Display the results of optimizing dislocation subject to functionality epsilon constraints: print(obj_2_3_epsilon_results) - filename = 'obj_2_3_epsilon_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + filename = "obj_2_3_epsilon_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" obj_2_3_epsilon_results.to_csv(filename) endtime = time.time() elapsedtime = endtime - starttime @@ -744,7 +981,9 @@ def solve_epsilon_model_4(self, model, model_solver_setting): def solve_epsilon_model_5(self, model, model_solver_setting): starttime = time.time() - print("****OPTIMIZING BUILDING FUNCTIONALITY SUBJECT TO ECONOMIC LOSS EPSILON CONSTRAINTS****") + print( + "****OPTIMIZING BUILDING FUNCTIONALITY SUBJECT TO ECONOMIC LOSS EPSILON CONSTRAINTS****" + ) # Activate objective function 3 and deactivate others: model.objective_1.deactivate() model.objective_2.deactivate() @@ -753,36 +992,58 @@ def solve_epsilon_model_5(self, model, model_solver_setting): # Add objective function 1 as epsilon constraint of objective function 3: # Dataframe to store optimization results: obj_3_1_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 1 (economic loss) epsilon value: model.obj_1_e = Param(mutable=True, within=NonNegativeReals) # For each economic loss epsilon value (starting at min) set as constraint and solve model: # Adding 1 to the maximum value allows np.arange() to include the maximum economic loss. counter = 0 - for e in np.arange(pyo.value(model.econ_loss_min), pyo.value(model.econ_loss_max) + 0.000001, - pyo.value(model.econ_loss_step)): + for e in np.arange( + pyo.value(model.econ_loss_min), + pyo.value(model.econ_loss_max) + 0.000001, + pyo.value(model.econ_loss_step), + ): counter += 1 print("Step ", counter, ": ", e) model.obj_1_e = e - model.add_component("objective_1_constraint", - Constraint(expr=sum_product(model.l_ijk, model.x_ijk) <= pyo.value( - model.obj_1_e))) # Set the epsilon constraint. + model.add_component( + "objective_1_constraint", + Constraint( + expr=sum_product(model.l_ijk, model.x_ijk) + <= pyo.value(model.obj_1_e) + ), + ) # Set the epsilon constraint. # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) # Add objective (functionality), economic loss, and dislocation values to results dataframe: - obj_3_1_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value( - model.functionality) # Save the optimal functionality value. - obj_3_1_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) # Save the resulting economic loss value. - obj_3_1_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value( - model.dislocation) # Save the resulting dislocation value. + obj_3_1_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value( + model.functionality + ) # Save the optimal functionality value. 
+ obj_3_1_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value( + model.econ_loss + ) # Save the resulting economic loss value. + obj_3_1_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value( + model.dislocation + ) # Save the resulting dislocation value. else: print(results.solver.termination_condition) log_infeasible_constraints(model) @@ -795,7 +1056,7 @@ def solve_epsilon_model_5(self, model, model_solver_setting): # Display the results of optimizing functionality subject to economic loss epsilon constraints: print(obj_3_1_epsilon_results) - filename = 'obj_3_1_epsilon_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + filename = "obj_3_1_epsilon_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" obj_3_1_epsilon_results.to_csv(filename) endtime = time.time() elapsedtime = endtime - starttime @@ -803,7 +1064,9 @@ def solve_epsilon_model_5(self, model, model_solver_setting): def solve_epsilon_model_6(self, model, model_solver_setting): starttime = time.time() - print("****OPTIMIZING BUILDING FUNCTIONALITY SUBJECT TO POPULATION DISLOCATION EPSILON CONSTRAINTS****") + print( + "****OPTIMIZING BUILDING FUNCTIONALITY SUBJECT TO POPULATION DISLOCATION EPSILON CONSTRAINTS****" + ) # Activate objective function 3 (functionality) and deactivate others: model.objective_1.deactivate() model.objective_2.deactivate() @@ -812,36 +1075,58 @@ def solve_epsilon_model_6(self, model, model_solver_setting): # Add objective function 2 as epsilon constraint of objective function 3: # Dataframe to store optimization results: obj_3_2_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 2 (dislocation) epsilon value: model.obj_2_e = Param(mutable=True, within=NonNegativeReals) # For each dislocation epsilon value (starting at min) set as constraint and solve model: # Adding 1 to the maximum value allows np.arange() to include the maximum dislocation. counter = 0 - for e in np.arange(pyo.value(model.dislocation_min), pyo.value(model.dislocation_max) + 0.000001, - pyo.value(model.dislocation_step)): + for e in np.arange( + pyo.value(model.dislocation_min), + pyo.value(model.dislocation_max) + 0.000001, + pyo.value(model.dislocation_step), + ): counter += 1 print("Step ", counter, ": ", e) model.obj_2_e = e - model.add_component("objective_2_constraint", - Constraint(expr=sum_product(model.d_ijk, model.x_ijk) <= pyo.value( - model.obj_2_e))) # Set the epsilon constraint. + model.add_component( + "objective_2_constraint", + Constraint( + expr=sum_product(model.d_ijk, model.x_ijk) + <= pyo.value(model.obj_2_e) + ), + ) # Set the epsilon constraint. # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) # Add objective (functionality), economic loss, and dislocation values to results dataframe: - obj_3_2_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value( - model.functionality) # Save the optimal functionality value. 
- obj_3_2_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) # Save the resulting economic loss value. - obj_3_2_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value( - model.dislocation) # Save the resulting dislocation value. + obj_3_2_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value( + model.functionality + ) # Save the optimal functionality value. + obj_3_2_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value( + model.econ_loss + ) # Save the resulting economic loss value. + obj_3_2_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value( + model.dislocation + ) # Save the resulting dislocation value. else: print(results.solver.termination_condition) log_infeasible_constraints(model) @@ -854,17 +1139,20 @@ def solve_epsilon_model_6(self, model, model_solver_setting): # Display the results of optimizing functionality subject to dislocation epsilon constraints: print(obj_3_2_epsilon_results) - filename = 'obj_3_2_epsilon_results_' + str(time.strftime("%m-%d-%Y")) + '.csv' + filename = "obj_3_2_epsilon_results_" + str(time.strftime("%m-%d-%Y")) + ".csv" obj_3_2_epsilon_results.to_csv(filename) endtime = time.time() elapsedtime = endtime - starttime print("Elapsed time: ", elapsedtime) - def solve_epsilon_model_7(self, model, model_solver_setting, xresults_df, yresults_df): + def solve_epsilon_model_7( + self, model, model_solver_setting, xresults_df, yresults_df + ): starttime = time.time() print( "****OPTIMIZING ECONOMIC LOSS SUBJECT TO POPULATION DISLOCATION " - "AND BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****") + "AND BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****" + ) model.objective_1.activate() model.objective_2.deactivate() model.objective_3.deactivate() @@ -872,7 +1160,12 @@ def solve_epsilon_model_7(self, model, model_solver_setting, xresults_df, yresul # Add objective functions 2 and 3 as epsilon constraints of objective function 1: # Dataframe to store optimization results: obj_1_23_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 2 (dislocation) and objective 3 (functionality) epsilon values: model.obj_2_e = Param(mutable=True, within=NonNegativeReals) @@ -883,47 +1176,75 @@ def solve_epsilon_model_7(self, model, model_solver_setting, xresults_df, yresul epsilon7_yresult_df = pd.DataFrame() counter = 0 - for e in np.arange(pyo.value(model.dislocation_min), pyo.value(model.dislocation_max) + 0.000001, - pyo.value(model.dislocation_step)): + for e in np.arange( + pyo.value(model.dislocation_min), + pyo.value(model.dislocation_max) + 0.000001, + pyo.value(model.dislocation_step), + ): model.obj_2_e = e - model.add_component("objective_2_constraint", - Constraint(expr=sum_product(model.d_ijk, model.x_ijk) <= pyo.value(model.obj_2_e))) - for e2 in np.arange(pyo.value(model.functionality_min), - pyo.value(model.functionality_max) + 0.0000000000001, - pyo.value(model.functionality_step)): + model.add_component( + "objective_2_constraint", + Constraint( + expr=sum_product(model.d_ijk, model.x_ijk) + <= pyo.value(model.obj_2_e) + ), + ) + for e2 in np.arange( + pyo.value(model.functionality_min), + pyo.value(model.functionality_max) + 0.0000000000001, + pyo.value(model.functionality_step), + ): counter += 1 model.obj_3_e = e2 print("Step ", counter, " 
e: ", e, " e2:", e2) # obj_1_23_epsilon_results.loc[counter-1,'Dislocation Epsilon']=e # obj_1_23_epsilon_results.loc[counter-1,'Functionality Epsilon']=e2 - model.add_component("objective_3_constraint", - Constraint( - expr=sum_product(model.Q_t_hat, model.x_ijk) >= pyo.value(model.obj_3_e))) + model.add_component( + "objective_3_constraint", + Constraint( + expr=sum_product(model.Q_t_hat, model.x_ijk) + >= pyo.value(model.obj_3_e) + ), + ) # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) - budget_used = quicksum(pyo.value(model.y_ijkk_prime[i, j, k, k_prime]) * pyo.value( - model.Sc_ijkk_prime[i, j, k, k_prime]) for (i, j, k, k_prime) in model.ZSKK_prime) - percent_budget_used = (budget_used / pyo.value( - model.B)) * 100 # Record percentage of available budget used. + budget_used = quicksum( + pyo.value(model.y_ijkk_prime[i, j, k, k_prime]) + * pyo.value(model.Sc_ijkk_prime[i, j, k, k_prime]) + for (i, j, k, k_prime) in model.ZSKK_prime + ) + _ = ( + budget_used / pyo.value(model.B) + ) * 100 # Record percentage of available budget used. # Add objective (economic loss), dislocation, and functionality values to results dataframe: # TODO: optimize this search - obj_1_23_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value(model.functionality) - obj_1_23_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) - obj_1_23_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value(model.dislocation) + obj_1_23_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value(model.functionality) + obj_1_23_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value(model.econ_loss) + obj_1_23_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value(model.dislocation) # Extract results per each variable and convert to dataframe x_data: dict = model.x_ijk.extract_values() - newx_df = self.assemble_dataframe_from_solution("x_ijk", x_data, counter) + newx_df = self.assemble_dataframe_from_solution( + "x_ijk", x_data, counter + ) y_data: dict = model.y_ijkk_prime.extract_values() - newy_df = self.assemble_dataframe_from_solution("y_ijkk_prime", y_data, counter) + newy_df = self.assemble_dataframe_from_solution( + "y_ijkk_prime", y_data, counter + ) # Append to local analysis result epsilon7_xresult_df = pd.concat([epsilon7_xresult_df, newx_df]) @@ -946,11 +1267,13 @@ def solve_epsilon_model_7(self, model, model_solver_setting, xresults_df, yresul # Drop rows with infeasible results: initial_length = len(obj_1_23_epsilon_results) - obj_1_23_data = obj_1_23_epsilon_results.dropna(axis=0, how='any') - print("Infeasible rows dropped: ", initial_length - len(obj_1_23_data), " rows.") + obj_1_23_data = obj_1_23_epsilon_results.dropna(axis=0, how="any") + print( + "Infeasible rows dropped: ", initial_length - len(obj_1_23_data), " rows." 
+ ) # Save results: - filename = '7_epsilon_results.csv' + filename = "7_epsilon_results.csv" obj_1_23_data.index += 1 obj_1_23_data.to_csv(filename) @@ -958,16 +1281,21 @@ def solve_epsilon_model_7(self, model, model_solver_setting, xresults_df, yresul elapsedtime = endtime - starttime print("Elapsed time: ", elapsedtime) - epsilon7_xresult_df['Epsilon'] = 7 - epsilon7_yresult_df['Epsilon'] = 7 + epsilon7_xresult_df["Epsilon"] = 7 + epsilon7_yresult_df["Epsilon"] = 7 - return pd.concat([xresults_df, epsilon7_xresult_df]), pd.concat([yresults_df, epsilon7_yresult_df]) + return pd.concat([xresults_df, epsilon7_xresult_df]), pd.concat( + [yresults_df, epsilon7_yresult_df] + ) - def solve_epsilon_model_8(self, model, model_solver_setting, xresults_df, yresults_df): + def solve_epsilon_model_8( + self, model, model_solver_setting, xresults_df, yresults_df + ): starttime = time.time() print( "****OPTIMIZING POPULATION DISLOCATION SUBJECT TO " - "ECONOMIC LOSS AND BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****") + "ECONOMIC LOSS AND BUILDING FUNCTIONALITY EPSILON CONSTRAINTS****" + ) model.objective_1.deactivate() model.objective_2.activate() model.objective_3.deactivate() @@ -975,7 +1303,12 @@ def solve_epsilon_model_8(self, model, model_solver_setting, xresults_df, yresul # Add objective functions 1 and 3 as epsilon constraints of objective function 2: # Dataframe to store optimization results: obj_2_13_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 1 (economic loss) and objective 3 (functionality) epsilon values: model.obj_1_e = Param(mutable=True, within=NonNegativeReals) @@ -986,45 +1319,74 @@ def solve_epsilon_model_8(self, model, model_solver_setting, xresults_df, yresul epsilon8_yresult_df = pd.DataFrame() counter = 0 - for e in np.arange(pyo.value(model.econ_loss_min), pyo.value(model.econ_loss_max) + 0.000001, - pyo.value(model.econ_loss_step)): + for e in np.arange( + pyo.value(model.econ_loss_min), + pyo.value(model.econ_loss_max) + 0.000001, + pyo.value(model.econ_loss_step), + ): model.obj_1_e = e - model.add_component("objective_1_constraint", - Constraint(expr=sum_product(model.l_ijk, model.x_ijk) <= pyo.value(model.obj_1_e))) - for e2 in np.arange(pyo.value(model.functionality_min), pyo.value(model.functionality_max) + - 0.0000000000001, pyo.value(model.functionality_step)): + model.add_component( + "objective_1_constraint", + Constraint( + expr=sum_product(model.l_ijk, model.x_ijk) + <= pyo.value(model.obj_1_e) + ), + ) + for e2 in np.arange( + pyo.value(model.functionality_min), + pyo.value(model.functionality_max) + 0.0000000000001, + pyo.value(model.functionality_step), + ): counter += 1 model.obj_3_e = e2 print("Step ", counter, " e: ", e, " e2:", e2) # obj_2_13_epsilon_results.loc[counter-1,'Economic Loss Epsilon']=e # obj_2_13_epsilon_results.loc[counter-1,'Functionality Epsilon']=e2 - model.add_component("objective_3_constraint", - Constraint( - expr=sum_product(model.Q_t_hat, model.x_ijk) >= pyo.value(model.obj_3_e))) + model.add_component( + "objective_3_constraint", + Constraint( + expr=sum_product(model.Q_t_hat, model.x_ijk) + >= pyo.value(model.obj_3_e) + ), + ) # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - 
results.solver.termination_condition == TerminationCondition.optimal): + results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) - budget_used = quicksum(pyo.value(model.y_ijkk_prime[i, j, k, k_prime]) * pyo.value( - model.Sc_ijkk_prime[i, j, k, k_prime]) for (i, j, k, k_prime) in model.ZSKK_prime) - percent_budget_used = (budget_used / pyo.value( - model.B)) * 100 # Record percentage of available budget used. + budget_used = quicksum( + pyo.value(model.y_ijkk_prime[i, j, k, k_prime]) + * pyo.value(model.Sc_ijkk_prime[i, j, k, k_prime]) + for (i, j, k, k_prime) in model.ZSKK_prime + ) + _ = ( + budget_used / pyo.value(model.B) + ) * 100 # Record percentage of available budget used. # Add objective (dislocation), economic loss, and functionality values to results dataframe: # TODO: optimize this search - obj_2_13_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value(model.functionality) - obj_2_13_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) - obj_2_13_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value(model.dislocation) + obj_2_13_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value(model.functionality) + obj_2_13_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value(model.econ_loss) + obj_2_13_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value(model.dislocation) # Extract results per each variable and convert to dataframe x_data: dict = model.x_ijk.extract_values() - newx_df = self.assemble_dataframe_from_solution("x_ijk", x_data, counter) + newx_df = self.assemble_dataframe_from_solution( + "x_ijk", x_data, counter + ) y_data: dict = model.y_ijkk_prime.extract_values() - newy_df = self.assemble_dataframe_from_solution("y_ijkk_prime", y_data, counter) + newy_df = self.assemble_dataframe_from_solution( + "y_ijkk_prime", y_data, counter + ) # Append to local analysis result epsilon8_xresult_df = pd.concat([epsilon8_xresult_df, newx_df]) @@ -1047,11 +1409,13 @@ def solve_epsilon_model_8(self, model, model_solver_setting, xresults_df, yresul # Drop rows with infeasible results: initial_length = len(obj_2_13_epsilon_results) - obj_2_13_data = obj_2_13_epsilon_results.dropna(axis=0, how='any') - print("Infeasible rows dropped: ", initial_length - len(obj_2_13_data), " rows.") + obj_2_13_data = obj_2_13_epsilon_results.dropna(axis=0, how="any") + print( + "Infeasible rows dropped: ", initial_length - len(obj_2_13_data), " rows." 
+ ) # Save results: - filename = '8_epsilon_results.csv' + filename = "8_epsilon_results.csv" obj_2_13_data.index += 1 obj_2_13_data.to_csv(filename) @@ -1059,16 +1423,21 @@ def solve_epsilon_model_8(self, model, model_solver_setting, xresults_df, yresul elapsedtime = endtime - starttime print("Elapsed time: ", elapsedtime) - epsilon8_xresult_df['Epsilon'] = 8 - epsilon8_yresult_df['Epsilon'] = 8 + epsilon8_xresult_df["Epsilon"] = 8 + epsilon8_yresult_df["Epsilon"] = 8 - return pd.concat([xresults_df, epsilon8_xresult_df]), pd.concat([yresults_df, epsilon8_yresult_df]) + return pd.concat([xresults_df, epsilon8_xresult_df]), pd.concat( + [yresults_df, epsilon8_yresult_df] + ) - def solve_epsilon_model_9(self, model, model_solver_setting, xresults_df, yresults_df): + def solve_epsilon_model_9( + self, model, model_solver_setting, xresults_df, yresults_df + ): starttime = time.time() print( "****OPTIMIZING BUILDING FUNCTIONALITY SUBJECT TO " - "ECONOMIC LOSS AND POPULATION DISLOCATION EPSILON CONSTRAINTS****") + "ECONOMIC LOSS AND POPULATION DISLOCATION EPSILON CONSTRAINTS****" + ) model.objective_1.deactivate() model.objective_2.deactivate() model.objective_3.activate() @@ -1076,7 +1445,12 @@ def solve_epsilon_model_9(self, model, model_solver_setting, xresults_df, yresul # Add objective functions 1 and 2 as epsilon constraints of objective function 3: # Dataframe to store optimization results: obj_3_12_epsilon_results = pd.DataFrame( - columns=['Economic Loss(Million Dollars)', 'Dislocation Value', 'Functionality Value']) + columns=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ] + ) # Parameter for objective 2 (dislocation) and objective 1 (economic loss) epsilon values: model.obj_1_e = Param(mutable=True, within=NonNegativeReals) @@ -1087,45 +1461,74 @@ def solve_epsilon_model_9(self, model, model_solver_setting, xresults_df, yresul epsilon9_yresult_df = pd.DataFrame() counter = 0 - for e in np.arange(pyo.value(model.econ_loss_min), pyo.value(model.econ_loss_max), - pyo.value(model.econ_loss_step)): + for e in np.arange( + pyo.value(model.econ_loss_min), + pyo.value(model.econ_loss_max), + pyo.value(model.econ_loss_step), + ): model.obj_1_e = e - model.add_component("objective_1_constraint", - Constraint(expr=sum_product(model.l_ijk, model.x_ijk) <= pyo.value(model.obj_1_e))) - for e2 in np.arange(pyo.value(model.dislocation_min), pyo.value(model.dislocation_max), - pyo.value(model.dislocation_step)): + model.add_component( + "objective_1_constraint", + Constraint( + expr=sum_product(model.l_ijk, model.x_ijk) + <= pyo.value(model.obj_1_e) + ), + ) + for e2 in np.arange( + pyo.value(model.dislocation_min), + pyo.value(model.dislocation_max), + pyo.value(model.dislocation_step), + ): counter += 1 model.obj_2_e = e2 print("Step ", counter, " e: ", e, " e2:", e2) # obj_3_12_epsilon_results.loc[counter-1,'Economic Loss Epsilon']=e # obj_3_12_epsilon_results.loc[counter-1,'Dislocation Epsilon']=e2 - model.add_component("objective_2_constraint", - Constraint( - expr=sum_product(model.d_ijk, model.x_ijk) <= (pyo.value(model.obj_2_e)))) + model.add_component( + "objective_2_constraint", + Constraint( + expr=sum_product(model.d_ijk, model.x_ijk) + <= (pyo.value(model.obj_2_e)) + ), + ) # Solve the model: results = model_solver_setting.solve(model) # Save the results if the solver returns an optimal solution: if (results.solver.status == SolverStatus.ok) and ( - results.solver.termination_condition == TerminationCondition.optimal): + 
results.solver.termination_condition == TerminationCondition.optimal + ): self.extract_optimization_results(model) - budget_used = quicksum(pyo.value(model.y_ijkk_prime[i, j, k, k_prime]) * pyo.value( - model.Sc_ijkk_prime[i, j, k, k_prime]) for (i, j, k, k_prime) in model.ZSKK_prime) - percent_budget_used = (budget_used / pyo.value( - model.B)) * 100 # Record percentage of available budget used. + budget_used = quicksum( + pyo.value(model.y_ijkk_prime[i, j, k, k_prime]) + * pyo.value(model.Sc_ijkk_prime[i, j, k, k_prime]) + for (i, j, k, k_prime) in model.ZSKK_prime + ) + _ = ( + budget_used / pyo.value(model.B) + ) * 100 # Record percentage of available budget used. # Add objective (functionality), economic loss, and dislocation values to results dataframe: # TODO: optimize this search - obj_3_12_epsilon_results.loc[counter - 1, 'Functionality Value'] = pyo.value(model.functionality) - obj_3_12_epsilon_results.loc[counter - 1, 'Economic Loss(Million Dollars)'] = pyo.value( - model.econ_loss) - obj_3_12_epsilon_results.loc[counter - 1, 'Dislocation Value'] = pyo.value(model.dislocation) + obj_3_12_epsilon_results.loc[ + counter - 1, "Functionality Value" + ] = pyo.value(model.functionality) + obj_3_12_epsilon_results.loc[ + counter - 1, "Economic Loss(Million Dollars)" + ] = pyo.value(model.econ_loss) + obj_3_12_epsilon_results.loc[ + counter - 1, "Dislocation Value" + ] = pyo.value(model.dislocation) # Extract results per each variable and convert to dataframe x_data: dict = model.x_ijk.extract_values() - newx_df = self.assemble_dataframe_from_solution("x_ijk", x_data, counter) + newx_df = self.assemble_dataframe_from_solution( + "x_ijk", x_data, counter + ) y_data: dict = model.y_ijkk_prime.extract_values() - newy_df = self.assemble_dataframe_from_solution("y_ijkk_prime", y_data, counter) + newy_df = self.assemble_dataframe_from_solution( + "y_ijkk_prime", y_data, counter + ) # Append to local analysis result epsilon9_xresult_df = pd.concat([epsilon9_xresult_df, newx_df]) @@ -1148,11 +1551,13 @@ def solve_epsilon_model_9(self, model, model_solver_setting, xresults_df, yresul # Drop rows with infeasible results: initial_length = len(obj_3_12_epsilon_results) - obj_3_12_data = obj_3_12_epsilon_results.dropna(axis=0, how='any') - print("Infeasible rows dropped: ", initial_length - len(obj_3_12_data), " rows.") + obj_3_12_data = obj_3_12_epsilon_results.dropna(axis=0, how="any") + print( + "Infeasible rows dropped: ", initial_length - len(obj_3_12_data), " rows." 
+ ) # Save results: - filename = '9_epsilon_results.csv' + filename = "9_epsilon_results.csv" obj_3_12_data.index += 1 obj_3_12_data.to_csv(filename) @@ -1160,10 +1565,12 @@ def solve_epsilon_model_9(self, model, model_solver_setting, xresults_df, yresul elapsedtime = endtime - starttime print("Elapsed time: ", elapsedtime) - epsilon9_xresult_df['Epsilon'] = 9 - epsilon9_yresult_df['Epsilon'] = 9 + epsilon9_xresult_df["Epsilon"] = 9 + epsilon9_yresult_df["Epsilon"] = 9 - return pd.concat([xresults_df, epsilon9_xresult_df]), pd.concat([yresults_df, epsilon9_yresult_df]) + return pd.concat([xresults_df, epsilon9_xresult_df]), pd.concat( + [yresults_df, epsilon9_yresult_df] + ) def compute_optimal_results(self, inactive_submodels, xresults_df, yresults_df): # Fixed for the moment, will be expanded to the full model set in later iterations @@ -1174,24 +1581,42 @@ def compute_optimal_results(self, inactive_submodels, xresults_df, yresults_df): for k in epsilon_models: if k not in inactive_submodels: - results = pd.read_csv(str(k) + '_epsilon_results.csv', usecols=['Economic Loss(Million Dollars)', - 'Dislocation Value', - 'Functionality Value'], - low_memory=False) - list_loss = results['Economic Loss(Million Dollars)'].values.tolist() - list_dislocation = results['Dislocation Value'].values.tolist() - list_func = results['Functionality Value'].values.tolist() - zipped_list = self.optimal_points(list_loss, list_dislocation, list_func) - optimal = pd.DataFrame(zipped_list, columns=['Iteration', 'Economic Loss(Million Dollars)', - 'Dislocation Value', 'Functionality Value']) + results = pd.read_csv( + str(k) + "_epsilon_results.csv", + usecols=[ + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ], + low_memory=False, + ) + list_loss = results["Economic Loss(Million Dollars)"].values.tolist() + list_dislocation = results["Dislocation Value"].values.tolist() + list_func = results["Functionality Value"].values.tolist() + zipped_list = self.optimal_points( + list_loss, list_dislocation, list_func + ) + optimal = pd.DataFrame( + zipped_list, + columns=[ + "Iteration", + "Economic Loss(Million Dollars)", + "Dislocation Value", + "Functionality Value", + ], + ) # Select only results corresponding to current epsilon step - epsilon_xresults_df = xresults_df[xresults_df['Epsilon'] == k] - epsilon_yresults_df = yresults_df[yresults_df['Epsilon'] == k] + epsilon_xresults_df = xresults_df[xresults_df["Epsilon"] == k] + epsilon_yresults_df = yresults_df[yresults_df["Epsilon"] == k] # Filter results depending on optimal epsilon model - opt_xresults_df = epsilon_xresults_df[epsilon_xresults_df['Iteration'].isin(optimal['Iteration'])] - opt_yresults_df = epsilon_yresults_df[epsilon_yresults_df['Iteration'].isin(optimal['Iteration'])] + opt_xresults_df = epsilon_xresults_df[ + epsilon_xresults_df["Iteration"].isin(optimal["Iteration"]) + ] + opt_yresults_df = epsilon_yresults_df[ + epsilon_yresults_df["Iteration"].isin(optimal["Iteration"]) + ] # Append for later construction of final dataset xresults_list.append(opt_xresults_df) @@ -1204,54 +1629,86 @@ def compute_optimal_results(self, inactive_submodels, xresults_df, yresults_df): @staticmethod def obj_economic(model): # return(sum_product(model.l_ijk,model.x_ijk)) - return quicksum(model.l_ijk[i, j, k] * model.x_ijk[i, j, k] for (i, j, k) in model.ZSK) + return quicksum( + model.l_ijk[i, j, k] * model.x_ijk[i, j, k] for (i, j, k) in model.ZSK + ) @staticmethod def obj_dislocation(model): # 
return(sum_product(model.d_ijk,model.x_ijk)) - return quicksum(model.d_ijk[i, j, k] * model.x_ijk[i, j, k] for (i, j, k) in model.ZSK) + return quicksum( + model.d_ijk[i, j, k] * model.x_ijk[i, j, k] for (i, j, k) in model.ZSK + ) @staticmethod def obj_functionality(model): # return(sum_product(model.Q_t_hat,model.x_ijk)) - return quicksum(model.Q_t_hat[i, j, k] * model.x_ijk[i, j, k] for (i, j, k) in model.ZSK) + return quicksum( + model.Q_t_hat[i, j, k] * model.x_ijk[i, j, k] for (i, j, k) in model.ZSK + ) @staticmethod def retrofit_cost_rule(model): - return (None, - quicksum( - model.Sc_ijkk_prime[i, j, k, k_prime] * model.y_ijkk_prime[i, j, k, k_prime] for - (i, j, k, k_prime) - in model.ZSKK_prime), - pyo.value(model.B)) + return ( + None, + quicksum( + model.Sc_ijkk_prime[i, j, k, k_prime] + * model.y_ijkk_prime[i, j, k, k_prime] + for (i, j, k, k_prime) in model.ZSKK_prime + ), + pyo.value(model.B), + ) @staticmethod def number_buildings_ij_rule(model, i, j): - return (quicksum(pyo.value(model.b_ijk[i, j, k]) for k in model.K), - quicksum(model.x_ijk[i, j, k] for k in model.K), - quicksum(pyo.value(model.b_ijk[i, j, k]) for k in model.K)) + return ( + quicksum(pyo.value(model.b_ijk[i, j, k]) for k in model.K), + quicksum(model.x_ijk[i, j, k] for k in model.K), + quicksum(pyo.value(model.b_ijk[i, j, k]) for k in model.K), + ) @staticmethod def building_level_rule(model, i, j, k): - model.a = quicksum(model.y_ijkk_prime[i, j, k_prime, k] for k_prime in model.K_prime if - (i, j, k_prime, k) in model.zskk_prime) - model.c = quicksum(model.y_ijkk_prime[i, j, k, k_prime] for k_prime in model.K_prime if - (i, j, k, k_prime) in model.zskk_prime) - return (pyo.value(model.b_ijk[i, j, k]), - model.x_ijk[i, j, k] + quicksum(model.y_ijkk_prime[i, j, k, k_prime] for k_prime in model.K_prime if - (i, j, k, k_prime) in model.zskk_prime) - - quicksum(model.y_ijkk_prime[i, j, k_prime, k] for k_prime in model.K_prime if - (i, j, k_prime, k) in model.zskk_prime), - pyo.value(model.b_ijk[i, j, k])) + model.a = quicksum( + model.y_ijkk_prime[i, j, k_prime, k] + for k_prime in model.K_prime + if (i, j, k_prime, k) in model.zskk_prime + ) + model.c = quicksum( + model.y_ijkk_prime[i, j, k, k_prime] + for k_prime in model.K_prime + if (i, j, k, k_prime) in model.zskk_prime + ) + return ( + pyo.value(model.b_ijk[i, j, k]), + model.x_ijk[i, j, k] + + quicksum( + model.y_ijkk_prime[i, j, k, k_prime] + for k_prime in model.K_prime + if (i, j, k, k_prime) in model.zskk_prime + ) + - quicksum( + model.y_ijkk_prime[i, j, k_prime, k] + for k_prime in model.K_prime + if (i, j, k_prime, k) in model.zskk_prime + ), + pyo.value(model.b_ijk[i, j, k]), + ) @staticmethod def extract_optimization_results(model): model.econ_loss = quicksum( - pyo.value(model.l_ijk[i, j, k]) * pyo.value(model.x_ijk[i, j, k]) for (i, j, k) in model.ZSK) + pyo.value(model.l_ijk[i, j, k]) * pyo.value(model.x_ijk[i, j, k]) + for (i, j, k) in model.ZSK + ) model.dislocation = quicksum( - pyo.value(model.d_ijk[i, j, k]) * pyo.value(model.x_ijk[i, j, k]) for (i, j, k) in model.ZSK) + pyo.value(model.d_ijk[i, j, k]) * pyo.value(model.x_ijk[i, j, k]) + for (i, j, k) in model.ZSK + ) model.functionality = quicksum( - pyo.value(model.Q_t_hat[i, j, k]) * pyo.value(model.x_ijk[i, j, k]) for (i, j, k) in model.ZSK) + pyo.value(model.Q_t_hat[i, j, k]) * pyo.value(model.x_ijk[i, j, k]) + for (i, j, k) in model.ZSK + ) @staticmethod def assemble_dataframe_from_solution(variable, sol_dict, iteration): @@ -1268,7 +1725,7 @@ def 
assemble_dataframe_from_solution(variable, sol_dict, iteration): x_dict[variable] = sol_dict.values() df = pd.DataFrame(x_dict) - df['Iteration'] = iteration + df["Iteration"] = iteration return df @@ -1289,7 +1746,10 @@ def optimal_points(list_loss, list_dislocation, list_func): f_temp = [] for i in range(len(list_temp)): for j in range(i + 1, len(list_temp)): - if list_temp[i][0] == list_temp[j][0] and list_temp[i][1] == list_temp[j][1]: + if ( + list_temp[i][0] == list_temp[j][0] + and list_temp[i][1] == list_temp[j][1] + ): if list_temp[i][2] > list_temp[j][2]: f_temp.append(list_temp[j]) @@ -1335,8 +1795,7 @@ def optimal_points(list_loss, list_dislocation, list_func): comb2 = [] for i in range(len(loss_optimal)): - comb2.append([loss_optimal[i], dislocation_optimal[i], - func_optimal[i]]) + comb2.append([loss_optimal[i], dislocation_optimal[i], func_optimal[i]]) optimal_index = {} for i in range(len(comb2)): @@ -1345,7 +1804,9 @@ def optimal_points(list_loss, list_dislocation, list_func): else: optimal_index[comb1.index(comb2[i])] = comb2[i] optimal_index_list = list(optimal_index.keys()) - zipped_list = list(zip(optimal_index_list, loss_optimal, dislocation_optimal, func_optimal)) + zipped_list = list( + zip(optimal_index_list, loss_optimal, dislocation_optimal, func_optimal) + ) return zipped_list def get_spec(self): @@ -1356,86 +1817,85 @@ def get_spec(self): """ return { - 'name': 'multiobjective-retrofit-optimization', - 'description': 'Multiobjective retrofit optimization model', - 'input_parameters': [ + "name": "multiobjective-retrofit-optimization", + "description": "Multiobjective retrofit optimization model", + "input_parameters": [ { - 'id': 'result_name', - 'required': False, - 'description': 'Result CSV dataset name', - 'type': str + "id": "result_name", + "required": False, + "description": "Result CSV dataset name", + "type": str, }, { - 'id': 'model_solver', - 'required': False, - 'description': 'Choice of the model solver to use. Gurobi is the default solver.', - 'type': str + "id": "model_solver", + "required": False, + "description": "Choice of the model solver to use. 
Gurobi is the default solver.", + "type": str, }, { - 'id': 'num_epsilon_steps', - 'required': True, - 'description': 'Number of epsilon values to evaluate', - 'type': int + "id": "num_epsilon_steps", + "required": True, + "description": "Number of epsilon values to evaluate", + "type": int, }, { - 'id': 'max_budget', - 'required': True, - 'description': 'Selection of maximum possible budget', - 'type': str + "id": "max_budget", + "required": True, + "description": "Selection of maximum possible budget", + "type": str, }, { - 'id': 'budget_available', - 'required': False, - 'description': 'Custom budget value', - 'type': float + "id": "budget_available", + "required": False, + "description": "Custom budget value", + "type": float, }, { - 'id': 'inactive_submodels', - 'required': False, - 'description': 'Identifier of submodels to inactivate during analysis', - 'type': List[int] + "id": "inactive_submodels", + "required": False, + "description": "Identifier of submodels to inactivate during analysis", + "type": List[int], }, { - 'id': 'scale_data', - 'required': True, - 'description': 'Choice for scaling data', - 'type': bool + "id": "scale_data", + "required": True, + "description": "Choice for scaling data", + "type": bool, }, { - 'id': 'scaling_factor', - 'required': False, - 'description': 'Custom scaling factor', - 'type': float + "id": "scaling_factor", + "required": False, + "description": "Custom scaling factor", + "type": float, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'building_related_data', - 'required': True, - 'description': 'A csv file with building related data required to evaluate retrofit strategies', - 'type': ['incore:multiobjectiveBuildingRelatedData'] + "id": "building_related_data", + "required": True, + "description": "A csv file with building related data required to evaluate retrofit strategies", + "type": ["incore:multiobjectiveBuildingRelatedData"], }, { - 'id': 'strategy_costs_data', - 'required': True, - 'description': 'A csv file with strategy cost data' - 'per building', - 'type': ['incore:multiobjectiveStrategyCosts'] + "id": "strategy_costs_data", + "required": True, + "description": "A csv file with strategy cost data" "per building", + "type": ["incore:multiobjectiveStrategyCosts"], }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'optimal_solution_dv_x', - 'parent_type': '', - 'description': 'Optimal solution for decision variable x', - 'type': 'incore:multiobjectiveOptimalSolutionX' + "id": "optimal_solution_dv_x", + "parent_type": "", + "description": "Optimal solution for decision variable x", + "type": "incore:multiobjectiveOptimalSolutionX", }, { - 'id': 'optimal_solution_dv_y', - 'parent_type': '', - 'description': 'Optimal solution for decision variable y with initial and final retrofitted ' - 'strategies', - 'type': 'incore:multiobjectiveOptimalSolutionY' - } - ] + "id": "optimal_solution_dv_y", + "parent_type": "", + "description": "Optimal solution for decision variable y with initial and final retrofitted " + "strategies", + "type": "incore:multiobjectiveOptimalSolutionY", + }, + ], } diff --git a/pyincore/analyses/ncifunctionality/__init__.py b/pyincore/analyses/ncifunctionality/__init__.py index 781879f7a..7f10697e9 100644 --- a/pyincore/analyses/ncifunctionality/__init__.py +++ b/pyincore/analyses/ncifunctionality/__init__.py @@ -3,4 +3,4 @@ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is 
available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.ncifunctionality.ncifunctionality import NciFunctionality \ No newline at end of file +from pyincore.analyses.ncifunctionality.ncifunctionality import NciFunctionality diff --git a/pyincore/analyses/ncifunctionality/ncifunctionality.py b/pyincore/analyses/ncifunctionality/ncifunctionality.py index 65d6abe1d..6de6a1cef 100644 --- a/pyincore/analyses/ncifunctionality/ncifunctionality.py +++ b/pyincore/analyses/ncifunctionality/ncifunctionality.py @@ -48,52 +48,99 @@ def __init__(self, incore_client): def run(self): # Load parameters - discretized_days = self.get_parameter('discretized_days') + discretized_days = self.get_parameter("discretized_days") # Load all dataset-related entities for EPF - epf_network_dataset = NetworkDataset.from_dataset(self.get_input_dataset('epf_network')) + epf_network_dataset = NetworkDataset.from_dataset( + self.get_input_dataset("epf_network") + ) epf_network_nodes = epf_network_dataset.nodes.get_dataframe_from_shapefile() epf_network_links = epf_network_dataset.links.get_dataframe_from_shapefile() # Load all dataset-related entities for WDS - wds_network_dataset = NetworkDataset.from_dataset(self.get_input_dataset('wds_network')) + wds_network_dataset = NetworkDataset.from_dataset( + self.get_input_dataset("wds_network") + ) wds_network_nodes = wds_network_dataset.nodes.get_dataframe_from_shapefile() wds_network_links = wds_network_dataset.links.get_dataframe_from_shapefile() # Load network interdependencies - epf_wds_intdp_table = self.get_input_dataset('epf_wds_intdp_table').get_dataframe_from_csv() - wds_epf_intdp_table = self.get_input_dataset('wds_epf_intdp_table').get_dataframe_from_csv() + epf_wds_intdp_table = self.get_input_dataset( + "epf_wds_intdp_table" + ).get_dataframe_from_csv() + wds_epf_intdp_table = self.get_input_dataset( + "wds_epf_intdp_table" + ).get_dataframe_from_csv() # Load restoration functionality and time results for EPF - epf_time_results = self.get_input_dataset('epf_time_results').get_dataframe_from_csv() - epf_subst_failure_results = self.get_input_dataset('epf_subst_failure_results').get_dataframe_from_csv() - epf_inventory_rest_map = self.get_input_dataset('epf_inventory_rest_map').get_dataframe_from_csv() + epf_time_results = self.get_input_dataset( + "epf_time_results" + ).get_dataframe_from_csv() + epf_subst_failure_results = self.get_input_dataset( + "epf_subst_failure_results" + ).get_dataframe_from_csv() + epf_inventory_rest_map = self.get_input_dataset( + "epf_inventory_rest_map" + ).get_dataframe_from_csv() # Load restoration functionality and time results for WDS - wds_time_results = self.get_input_dataset('wds_time_results').get_dataframe_from_csv() - wds_dmg_results = self.get_input_dataset('wds_dmg_results').get_dataframe_from_csv() - wds_inventory_rest_map = self.get_input_dataset('wds_inventory_rest_map').get_dataframe_from_csv() + wds_time_results = self.get_input_dataset( + "wds_time_results" + ).get_dataframe_from_csv() + wds_dmg_results = self.get_input_dataset( + "wds_dmg_results" + ).get_dataframe_from_csv() + wds_inventory_rest_map = self.get_input_dataset( + "wds_inventory_rest_map" + ).get_dataframe_from_csv() # Load limit state probabilities and damage states for each electric power facility - epf_damage = self.get_input_dataset('epf_damage').get_dataframe_from_csv() - - epf_cascading_functionality = self.nci_functionality(discretized_days, epf_network_nodes, epf_network_links, - wds_network_nodes, wds_network_links, - 
epf_wds_intdp_table, wds_epf_intdp_table, - epf_subst_failure_results, epf_inventory_rest_map, - epf_time_results, wds_dmg_results, wds_inventory_rest_map, - wds_time_results, epf_damage) + epf_damage = self.get_input_dataset("epf_damage").get_dataframe_from_csv() + + epf_cascading_functionality = self.nci_functionality( + discretized_days, + epf_network_nodes, + epf_network_links, + wds_network_nodes, + wds_network_links, + epf_wds_intdp_table, + wds_epf_intdp_table, + epf_subst_failure_results, + epf_inventory_rest_map, + epf_time_results, + wds_dmg_results, + wds_inventory_rest_map, + wds_time_results, + epf_damage, + ) result_name = self.get_parameter("result_name") - self.set_result_csv_data("epf_cascading_functionality", epf_cascading_functionality, name=result_name, - source="dataframe") + self.set_result_csv_data( + "epf_cascading_functionality", + epf_cascading_functionality, + name=result_name, + source="dataframe", + ) return True - def nci_functionality(self, discretized_days, epf_network_nodes, epf_network_links, wds_network_nodes, - wds_network_links, epf_wds_intdp_table, wds_epf_intdp_table, epf_subst_failure_results, - epf_inventory_rest_map, epf_time_results, wds_dmg_results, wds_inventory_rest_map, - wds_time_results, epf_damage): + def nci_functionality( + self, + discretized_days, + epf_network_nodes, + epf_network_links, + wds_network_nodes, + wds_network_links, + epf_wds_intdp_table, + wds_epf_intdp_table, + epf_subst_failure_results, + epf_inventory_rest_map, + epf_time_results, + wds_dmg_results, + wds_inventory_rest_map, + wds_time_results, + epf_damage, + ): """Compute EPF and WDS cascading functionality outcomes Args: @@ -117,120 +164,229 @@ def nci_functionality(self, discretized_days, epf_network_nodes, epf_network_lin """ # Compute updated EPF and WDS node information - efp_nodes_updated = self.update_epf_discretized_func(epf_network_nodes, epf_subst_failure_results, - epf_inventory_rest_map, epf_time_results, epf_damage) - - wds_nodes_updated = self.update_wds_discretized_func(wds_network_nodes, wds_dmg_results, - wds_inventory_rest_map, wds_time_results) + efp_nodes_updated = self.update_epf_discretized_func( + epf_network_nodes, + epf_subst_failure_results, + epf_inventory_rest_map, + epf_time_results, + epf_damage, + ) + + wds_nodes_updated = self.update_wds_discretized_func( + wds_network_nodes, wds_dmg_results, wds_inventory_rest_map, wds_time_results + ) # Compute updated WDS links wds_links_updated = self.update_wds_network_links(wds_network_links) # Generate the functionality data - df_functionality_nodes = pd.concat([efp_nodes_updated, wds_nodes_updated], ignore_index=True) + df_functionality_nodes = pd.concat( + [efp_nodes_updated, wds_nodes_updated], ignore_index=True + ) # Create each individual graph - g_epf = NetworkUtil.create_network_graph_from_dataframes(epf_network_nodes, epf_network_links) - g_wds = NetworkUtil.create_network_graph_from_dataframes(wds_network_nodes, wds_links_updated) + g_epf = NetworkUtil.create_network_graph_from_dataframes( + epf_network_nodes, epf_network_links + ) + g_wds = NetworkUtil.create_network_graph_from_dataframes( + wds_network_nodes, wds_links_updated + ) # Obtain two graphs for directional interdependencies - g_epf_wds = NetworkUtil.merge_labeled_networks(g_epf, g_wds, epf_wds_intdp_table, directed=True) + g_epf_wds = NetworkUtil.merge_labeled_networks( + g_epf, g_wds, epf_wds_intdp_table, directed=True + ) # To be implemented in a future release # g_wds_epf = NetworkUtil.merge_labeled_networks(g_wds, g_epf, 
wds_epf_intdp_table, directed=True) # Solve the corresponding Leontief problems - df_epn_func_nodes = self.solve_leontief_equation(g_epf_wds, df_functionality_nodes, discretized_days) + df_epn_func_nodes = self.solve_leontief_equation( + g_epf_wds, df_functionality_nodes, discretized_days + ) # To be implemented in a future release # df_wds_func_nodes = self.solve_leontief_equation(g_wds_epf, df_functionality_nodes, discretized_days) - epn_cascading_functionality = epf_network_nodes[['guid', 'geometry']].merge(df_epn_func_nodes, on='guid', - how='left').rename(columns={ - 'guid': 'sguid'}) + epn_cascading_functionality = ( + epf_network_nodes[["guid", "geometry"]] + .merge(df_epn_func_nodes, on="guid", how="left") + .rename(columns={"guid": "sguid"}) + ) # To be implemented in a future release # wds_cascading_functionality = wds_network_nodes[['guid', 'geometry']].merge(df_wds_func_nodes, on='guid', - #how='left').rename(columns={ - #'guid': 'sguid'}) + # how='left').rename(columns={'guid': 'sguid'}) return epn_cascading_functionality @staticmethod - def update_epf_discretized_func(epf_nodes, epf_subst_failure_results, epf_inventory_restoration_map, - epf_time_results, epf_damage): + def update_epf_discretized_func( + epf_nodes, + epf_subst_failure_results, + epf_inventory_restoration_map, + epf_time_results, + epf_damage, + ): epf_time_results = epf_time_results.loc[ - (epf_time_results['time'] == 1) | (epf_time_results['time'] == 3) | (epf_time_results['time'] == 7) | ( - epf_time_results['time'] == 30) | (epf_time_results['time'] == 90)] - epf_time_results.insert(2, 'PF_00', list( - np.ones(len(epf_time_results)))) # PF_00, PF_0, PF_1, PF_2, PF_3 ---> DS_0, DS_1, DS_2, DS_3, DS_4 - - epf_subst_failure_results = pd.merge(epf_damage, epf_subst_failure_results, on='guid', how='outer') - - epf_nodes_updated = pd.merge(epf_nodes[['nodenwid', 'utilfcltyc', 'guid']], epf_subst_failure_results[ - ['guid', 'DS_0', 'DS_1', 'DS_2', 'DS_3', 'DS_4', 'failure_probability']], on='guid', how='outer') - - EPPL_restoration_id = list(epf_inventory_restoration_map.loc[epf_inventory_restoration_map['guid'] == - epf_nodes_updated.loc[epf_nodes_updated[ - 'utilfcltyc'] == 'EPPL'].guid.tolist()[ - 0]]['restoration_id'])[0] - ESS_restoration_id = \ - list(set(epf_inventory_restoration_map.restoration_id.unique()) - set([EPPL_restoration_id]))[0] - df_EPN_node_EPPL = epf_nodes_updated.loc[epf_nodes_updated['utilfcltyc'] == 'EPPL'] - df_EPN_node_ESS = epf_nodes_updated.loc[epf_nodes_updated['utilfcltyc'] != 'EPPL'] - epf_time_results_EPPL = epf_time_results.loc[epf_time_results['restoration_id'] == EPPL_restoration_id][ - ['PF_00', 'PF_0', 'PF_1', 'PF_2', 'PF_3']] + (epf_time_results["time"] == 1) + | (epf_time_results["time"] == 3) + | (epf_time_results["time"] == 7) + | (epf_time_results["time"] == 30) + | (epf_time_results["time"] == 90) + ] + epf_time_results.insert( + 2, "PF_00", list(np.ones(len(epf_time_results))) + ) # PF_00, PF_0, PF_1, PF_2, PF_3 ---> DS_0, DS_1, DS_2, DS_3, DS_4 + + epf_subst_failure_results = pd.merge( + epf_damage, epf_subst_failure_results, on="guid", how="outer" + ) + + epf_nodes_updated = pd.merge( + epf_nodes[["nodenwid", "utilfcltyc", "guid"]], + epf_subst_failure_results[ + ["guid", "DS_0", "DS_1", "DS_2", "DS_3", "DS_4", "failure_probability"] + ], + on="guid", + how="outer", + ) + + EPPL_restoration_id = list( + epf_inventory_restoration_map.loc[ + epf_inventory_restoration_map["guid"] + == epf_nodes_updated.loc[ + epf_nodes_updated["utilfcltyc"] == "EPPL" + 
].guid.tolist()[0] + ]["restoration_id"] + )[0] + ESS_restoration_id = list( + set(epf_inventory_restoration_map.restoration_id.unique()) + - set([EPPL_restoration_id]) + )[0] + df_EPN_node_EPPL = epf_nodes_updated.loc[ + epf_nodes_updated["utilfcltyc"] == "EPPL" + ] + df_EPN_node_ESS = epf_nodes_updated.loc[ + epf_nodes_updated["utilfcltyc"] != "EPPL" + ] + epf_time_results_EPPL = epf_time_results.loc[ + epf_time_results["restoration_id"] == EPPL_restoration_id + ][["PF_00", "PF_0", "PF_1", "PF_2", "PF_3"]] EPPL_func_df = pd.DataFrame( - np.dot(df_EPN_node_EPPL[['DS_0', 'DS_1', 'DS_2', 'DS_3', 'DS_4']], np.array(epf_time_results_EPPL).T), - columns=['functionality1', 'functionality3', 'functionality7', 'functionality30', 'functionality90']) - EPPL_func_df.insert(0, 'guid', list(df_EPN_node_EPPL.guid)) - epf_time_results_ESS = epf_time_results.loc[epf_time_results['restoration_id'] == ESS_restoration_id][ - ['PF_00', 'PF_0', 'PF_1', 'PF_2', 'PF_3']] + np.dot( + df_EPN_node_EPPL[["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]], + np.array(epf_time_results_EPPL).T, + ), + columns=[ + "functionality1", + "functionality3", + "functionality7", + "functionality30", + "functionality90", + ], + ) + EPPL_func_df.insert(0, "guid", list(df_EPN_node_EPPL.guid)) + epf_time_results_ESS = epf_time_results.loc[ + epf_time_results["restoration_id"] == ESS_restoration_id + ][["PF_00", "PF_0", "PF_1", "PF_2", "PF_3"]] ESS_func_df = pd.DataFrame( - np.dot(df_EPN_node_ESS[['DS_0', 'DS_1', 'DS_2', 'DS_3', 'DS_4']], np.array(epf_time_results_ESS).T), - columns=['functionality1', 'functionality3', 'functionality7', 'functionality30', 'functionality90']) - ESS_func_df.insert(0, 'guid', list(df_EPN_node_ESS.guid)) + np.dot( + df_EPN_node_ESS[["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]], + np.array(epf_time_results_ESS).T, + ), + columns=[ + "functionality1", + "functionality3", + "functionality7", + "functionality30", + "functionality90", + ], + ) + ESS_func_df.insert(0, "guid", list(df_EPN_node_ESS.guid)) epf_function_df = pd.concat([ESS_func_df, EPPL_func_df], ignore_index=True) epf_nodes_updated = pd.merge(epf_nodes_updated, epf_function_df, on="guid") return epf_nodes_updated @staticmethod - def update_wds_discretized_func(wds_nodes, wds_dmg_results, wds_inventory_restoration_map, wds_time_results): + def update_wds_discretized_func( + wds_nodes, wds_dmg_results, wds_inventory_restoration_map, wds_time_results + ): wf_time_results = wds_time_results.loc[ - (wds_time_results['time'] == 1) | (wds_time_results['time'] == 3) | (wds_time_results['time'] == 7) | ( - wds_time_results['time'] == 30) | (wds_time_results['time'] == 90)] - wf_time_results.insert(2, 'PF_00', list(np.ones(len(wf_time_results)))) - - - wds_nodes_updated = pd.merge(wds_nodes[['nodenwid', 'utilfcltyc', 'guid']], - wds_dmg_results[['guid', 'DS_0', 'DS_1', 'DS_2', 'DS_3', 'DS_4']], on='guid', - how='outer') - - PPPL_restoration_id = list(wds_inventory_restoration_map.loc[wds_inventory_restoration_map['guid'] == - wds_nodes_updated.loc[wds_nodes_updated[ - 'utilfcltyc'] == 'PPPL'].guid.tolist()[ - 0]]['restoration_id'])[0] - PSTAS_restoration_id = list(wds_inventory_restoration_map.loc[wds_inventory_restoration_map['guid'] == - wds_nodes_updated.loc[wds_nodes_updated[ - 'utilfcltyc'] == 'PSTAS'].guid.tolist()[ - 0]]['restoration_id'])[0] - df_wds_node_PPPL = wds_nodes_updated.loc[wds_nodes_updated['utilfcltyc'] == 'PPPL'] - df_wds_node_PSTAS = wds_nodes_updated.loc[wds_nodes_updated['utilfcltyc'] == 'PSTAS'] - - wf_time_results_PPPL = 
wf_time_results.loc[wf_time_results['restoration_id'] == PPPL_restoration_id][ - ['PF_00', 'PF_0', 'PF_1', 'PF_2', 'PF_3']] + (wds_time_results["time"] == 1) + | (wds_time_results["time"] == 3) + | (wds_time_results["time"] == 7) + | (wds_time_results["time"] == 30) + | (wds_time_results["time"] == 90) + ] + wf_time_results.insert(2, "PF_00", list(np.ones(len(wf_time_results)))) + + wds_nodes_updated = pd.merge( + wds_nodes[["nodenwid", "utilfcltyc", "guid"]], + wds_dmg_results[["guid", "DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]], + on="guid", + how="outer", + ) + + PPPL_restoration_id = list( + wds_inventory_restoration_map.loc[ + wds_inventory_restoration_map["guid"] + == wds_nodes_updated.loc[ + wds_nodes_updated["utilfcltyc"] == "PPPL" + ].guid.tolist()[0] + ]["restoration_id"] + )[0] + PSTAS_restoration_id = list( + wds_inventory_restoration_map.loc[ + wds_inventory_restoration_map["guid"] + == wds_nodes_updated.loc[ + wds_nodes_updated["utilfcltyc"] == "PSTAS" + ].guid.tolist()[0] + ]["restoration_id"] + )[0] + df_wds_node_PPPL = wds_nodes_updated.loc[ + wds_nodes_updated["utilfcltyc"] == "PPPL" + ] + df_wds_node_PSTAS = wds_nodes_updated.loc[ + wds_nodes_updated["utilfcltyc"] == "PSTAS" + ] + + wf_time_results_PPPL = wf_time_results.loc[ + wf_time_results["restoration_id"] == PPPL_restoration_id + ][["PF_00", "PF_0", "PF_1", "PF_2", "PF_3"]] PPPL_func_df = pd.DataFrame( - np.dot(df_wds_node_PPPL[['DS_0', 'DS_1', 'DS_2', 'DS_3', 'DS_4']], np.array(wf_time_results_PPPL).T), - columns=['functionality1', 'functionality3', 'functionality7', 'functionality30', 'functionality90']) - PPPL_func_df.insert(0, 'guid', list(df_wds_node_PPPL.guid)) + np.dot( + df_wds_node_PPPL[["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]], + np.array(wf_time_results_PPPL).T, + ), + columns=[ + "functionality1", + "functionality3", + "functionality7", + "functionality30", + "functionality90", + ], + ) + PPPL_func_df.insert(0, "guid", list(df_wds_node_PPPL.guid)) - wf_time_results_PSTAS = wf_time_results.loc[wf_time_results['restoration_id'] == PSTAS_restoration_id][ - ['PF_00', 'PF_0', 'PF_1', 'PF_2', 'PF_3']] + wf_time_results_PSTAS = wf_time_results.loc[ + wf_time_results["restoration_id"] == PSTAS_restoration_id + ][["PF_00", "PF_0", "PF_1", "PF_2", "PF_3"]] PSTAS_func_df = pd.DataFrame( - np.dot(df_wds_node_PSTAS[['DS_0', 'DS_1', 'DS_2', 'DS_3', 'DS_4']], np.array(wf_time_results_PSTAS).T), - columns=['functionality1', 'functionality3', 'functionality7', 'functionality30', 'functionality90']) - PSTAS_func_df.insert(0, 'guid', list(df_wds_node_PSTAS.guid)) + np.dot( + df_wds_node_PSTAS[["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]], + np.array(wf_time_results_PSTAS).T, + ), + columns=[ + "functionality1", + "functionality3", + "functionality7", + "functionality30", + "functionality90", + ], + ) + PSTAS_func_df.insert(0, "guid", list(df_wds_node_PSTAS.guid)) wf_function_df = pd.concat([PSTAS_func_df, PPPL_func_df], ignore_index=True) wds_nodes_updated = pd.merge(wds_nodes_updated, wf_function_df, on="guid") @@ -257,11 +413,13 @@ def solve_leontief_equation(graph, functionality_nodes, discretized_days): M = nx.adjacency_matrix(graph).todense() for idx in discretized_days: - u = 1 - df_functionality_nodes[f'functionality{idx}'] + u = 1 - df_functionality_nodes[f"functionality{idx}"] u = u.to_numpy() - I = np.identity(len(u)) - q = np.dot(np.linalg.inv(I - M.T), u).tolist() - df_functionality_nodes[f'func_cascading{idx}'] = [0 if i >= 1 else 1 - i for i in q] + i = np.identity(len(u)) + q = np.dot(np.linalg.inv(i - M.T), 
u).tolist() + df_functionality_nodes[f"func_cascading{idx}"] = [ + 0 if i >= 1 else 1 - i for i in q + ] return df_functionality_nodes @@ -279,31 +437,35 @@ def update_wds_network_links(wds_network_links): wds_links = copy.deepcopy(wds_network_links) # Use `numpgvrpr` from pipeline damage - #wds_links = pd.merge(wds_links, pp_dmg_results, on='guid', how='outer') + # wds_links = pd.merge(wds_links, pp_dmg_results, on='guid', how='outer') # Update values with pgv and pgd calculations - for idx in wds_links['linknwid']: + for idx in wds_links["linknwid"]: df = wds_links[wds_links.linknwid.isin([idx])] # standard deviation of normal distribution sigma = 0.85 # mean of normal distribution - mu = np.log(.1) + mu = np.log(0.1) C_pgv = 0.2 # 0.2 C_pgd = 0.8 # 0.8 - im = (C_pgv * df['numpgvrpr'] + C_pgd * df['numpgdrpr']).sum() / df['length'].sum() + im = (C_pgv * df["numpgvrpr"] + C_pgd * df["numpgdrpr"]).sum() / df[ + "length" + ].sum() SI_break = 1 - stats.lognorm(s=sigma, scale=np.exp(mu)).cdf(im) C_pgv = 0.8 # 0.2 C_pgd = 0.2 # 0.8 - im = (C_pgv * df['numpgvrpr'] + C_pgd * df['numpgdrpr']).sum() / df['length'].sum() + im = (C_pgv * df["numpgvrpr"] + C_pgd * df["numpgdrpr"]).sum() / df[ + "length" + ].sum() SI_leak = 1 - stats.lognorm(s=sigma, scale=np.exp(mu)).cdf(im) - m = wds_links['linknwid'] == idx - wds_links.loc[m, ['SI_break_idv']] = SI_break - wds_links.loc[m, ['SI_leak__idv']] = SI_leak + m = wds_links["linknwid"] == idx + wds_links.loc[m, ["SI_break_idv"]] = SI_break + wds_links.loc[m, ["SI_leak__idv"]] = SI_leak return wds_links @@ -313,97 +475,95 @@ def get_spec(self): obj: A JSON object of specifications of the NCI functionality analysis. """ return { - 'name': 'network-cascading-interdepedency-functionality', - 'description': 'Network cascading interdepedency functionality analysis', - 'input_parameters': [ + "name": "network-cascading-interdepedency-functionality", + "description": "Network cascading interdepedency functionality analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'discretized_days', - 'required': False, - 'description': 'Discretized days to compute functionality', - 'type': List[int] - } + "id": "discretized_days", + "required": False, + "description": "Discretized days to compute functionality", + "type": List[int], + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'epf_network', - 'required': True, - 'description': 'EPN network', - 'type': ['incore:epnNetwork'], + "id": "epf_network", + "required": True, + "description": "EPN network", + "type": ["incore:epnNetwork"], }, { - 'id': 'wds_network', - 'required': True, - 'description': 'WDS network', - 'type': ['incore:waterNetwork'], + "id": "wds_network", + "required": True, + "description": "WDS network", + "type": ["incore:waterNetwork"], }, { - 'id': 'epf_wds_intdp_table', - 'required': True, - 'description': 'Table containing interdependency information from EPN to WDS networks', - 'type': ['incore:networkInterdependencyTable'] + "id": "epf_wds_intdp_table", + "required": True, + "description": "Table containing interdependency information from EPN to WDS networks", + "type": ["incore:networkInterdependencyTable"], }, { - 'id': 'wds_epf_intdp_table', - 'required': True, - 'description': 'Table containing interdependency information from WDS to EPF networks', - 'type': ['incore:networkInterdependencyTable'] 
+ "id": "wds_epf_intdp_table", + "required": True, + "description": "Table containing interdependency information from WDS to EPF networks", + "type": ["incore:networkInterdependencyTable"], }, { - 'id': 'epf_subst_failure_results', - 'required': True, - 'description': 'EPF substation failure results', - 'type': ['incore:failureProbability'] + "id": "epf_subst_failure_results", + "required": True, + "description": "EPF substation failure results", + "type": ["incore:failureProbability"], }, { - 'id': 'epf_inventory_rest_map', - 'required': True, - 'description': 'EPF inventory restoration map', - 'type': ['incore:inventoryRestorationMap'] + "id": "epf_inventory_rest_map", + "required": True, + "description": "EPF inventory restoration map", + "type": ["incore:inventoryRestorationMap"], }, { - - 'id': 'epf_time_results', - 'required': True, - 'description': 'A csv file recording repair time for EPF per class and limit state', - 'type': ['incore:epfRestorationTime'] + "id": "epf_time_results", + "required": True, + "description": "A csv file recording repair time for EPF per class and limit state", + "type": ["incore:epfRestorationTime"], }, { - 'id': 'wds_dmg_results', - 'required': True, - 'description': 'WDS damage results', - 'type': ['ergo:waterFacilityDamageVer6'] + "id": "wds_dmg_results", + "required": True, + "description": "WDS damage results", + "type": ["ergo:waterFacilityDamageVer6"], }, { - 'id': 'wds_inventory_rest_map', - 'required': True, - 'description': 'WDS inventory restoration map', - 'type': ['incore:inventoryRestorationMap'] + "id": "wds_inventory_rest_map", + "required": True, + "description": "WDS inventory restoration map", + "type": ["incore:inventoryRestorationMap"], }, { - - 'id': 'wds_time_results', - 'required': True, - 'description': 'A csv file recording repair time for WDS per class and limit state', - 'type': ['incore:waterFacilityRestorationTime'] + "id": "wds_time_results", + "required": True, + "description": "A csv file recording repair time for WDS per class and limit state", + "type": ["incore:waterFacilityRestorationTime"], }, { - 'id': 'epf_damage', - 'required': True, - 'description': 'A csv file with limit state probabilities and damage states for each electric power facility', - 'type': ['incore:epfDamageVer3'] - } + "id": "epf_damage", + "required": True, + "description": "A csv file with limit state probabilities and damage states for each electric power facility", + "type": ["incore:epfDamageVer3"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'epf_cascading_functionality', - 'description': 'CSV file of interdependent cascading network functionality for EPF', - 'type': 'incore:epfDiscretizedCascadingFunc' + "id": "epf_cascading_functionality", + "description": "CSV file of interdependent cascading network functionality for EPF", + "type": "incore:epfDiscretizedCascadingFunc", } - ] + ], } diff --git a/pyincore/analyses/nonstructbuildingdamage/__init__.py b/pyincore/analyses/nonstructbuildingdamage/__init__.py index 28ad09d49..8555e69a6 100644 --- a/pyincore/analyses/nonstructbuildingdamage/__init__.py +++ b/pyincore/analyses/nonstructbuildingdamage/__init__.py @@ -5,5 +5,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingdamage import NonStructBuildingDamage -from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingutil import NonStructBuildingUtil +from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingdamage import ( + 
NonStructBuildingDamage, +) +from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingutil import ( + NonStructBuildingUtil, +) diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py index f2f4518f0..7ecb5b93f 100644 --- a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py @@ -6,12 +6,16 @@ from deprecated.sphinx import deprecated -from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuraldamage import \ - BuildingNonStructDamage +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuraldamage import ( + BuildingNonStructDamage, +) -@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use BuildingNonStructDamage instead.") -class NonStructBuildingDamage(): +@deprecated( + version="1.19.0", + reason="This class will be deprecated soon. Use BuildingNonStructDamage instead.", +) +class NonStructBuildingDamage: def __init__(self, incore_client): self._delegate = BuildingNonStructDamage(incore_client) @@ -19,4 +23,4 @@ def __getattr__(self, name): """ Delegate attribute access to the BuildingNonStructDamage instance. """ - return getattr(self._delegate, name) \ No newline at end of file + return getattr(self._delegate, name) diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py index 21e9f25a5..50d7ef9e6 100644 --- a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py @@ -9,18 +9,21 @@ class NonStructBuildingUtil: """Utility methods for the non-structural building damage analysis.""" + BUILDING_FRAGILITY_KEYS = { "drift-sensitive fragility id code": ["Drift Sensitive", "DS"], "parametric non-retrofit fragility id code": ["Parametric Non-Retrofit", "PNR"], "acceleration-sensitive fragility id code": ["Acceleration Sensitive", "AS"], - "non-retrofit fragility id code": ["as built", "none"] + "non-retrofit fragility id code": ["as built", "none"], } DEFAULT_FRAGILITY_KEY_DS = "Drift-Sensitive Fragility ID Code" DEFAULT_FRAGILITY_KEY_AS = "Acceleration-Sensitive Fragility ID Code" @staticmethod - def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_probabilities): + def adjust_damage_for_liquefaction( + limit_state_probabilities, ground_failure_probabilities + ): """Adjusts building damage probability based on liquefaction ground failure probability with the liq_dmg, we know that it is 3 values, the first two are the same. The 3rd might be different. @@ -44,19 +47,25 @@ def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_pro # second-to-last probability of ground failure instead. 
if i > len(ground_failure_probabilities) - 1: - prob_ground_failure = ground_failure_probabilities[len(ground_failure_probabilities) - 2] + prob_ground_failure = ground_failure_probabilities[ + len(ground_failure_probabilities) - 2 + ] else: prob_ground_failure = ground_failure_probabilities[i] - adjusted_limit_state_probabilities[keys[i]] = \ - limit_state_probabilities[keys[i]] + prob_ground_failure \ + adjusted_limit_state_probabilities[keys[i]] = ( + limit_state_probabilities[keys[i]] + + prob_ground_failure - limit_state_probabilities[keys[i]] * prob_ground_failure + ) # the final one is the last of limitStates should match with the last of ground failures j = len(limit_state_probabilities) - 1 prob_ground_failure = ground_failure_probabilities[-1] - adjusted_limit_state_probabilities[keys[j]] = \ - limit_state_probabilities[keys[j]] \ - + prob_ground_failure - limit_state_probabilities[keys[j]] * prob_ground_failure + adjusted_limit_state_probabilities[keys[j]] = ( + limit_state_probabilities[keys[j]] + + prob_ground_failure + - limit_state_probabilities[keys[j]] * prob_ground_failure + ) return adjusted_limit_state_probabilities diff --git a/pyincore/analyses/pipelinedamage/pipelinedamage.py b/pyincore/analyses/pipelinedamage/pipelinedamage.py index fc23ef003..e5cad1b5a 100644 --- a/pyincore/analyses/pipelinedamage/pipelinedamage.py +++ b/pyincore/analyses/pipelinedamage/pipelinedamage.py @@ -10,8 +10,13 @@ import concurrent.futures from itertools import repeat -from pyincore import BaseAnalysis, HazardService, FragilityService, \ - AnalysisUtil, GeoUtil +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + AnalysisUtil, + GeoUtil, +) from pyincore.models.dfr3curve import DFR3Curve @@ -30,44 +35,57 @@ def __init__(self, incore_client): super(PipelineDamage, self).__init__(incore_client) def run(self): - """Execute pipeline damage analysis """ + """Execute pipeline damage analysis""" pipeline_dataset = self.get_input_dataset("pipeline").get_inventory_reader() # Get hazard input - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") dataset_size = len(pipeline_dataset) - num_workers = AnalysisUtil.determine_parallelism_locally(self, - dataset_size, - user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, dataset_size, user_defined_cpu + ) avg_bulk_input_size = int(dataset_size / num_workers) inventory_args = [] count = 0 inventory_list = list(pipeline_dataset) while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size (results, damage_results) = self.pipeline_damage_concurrent_future( - self.pipeline_damage_analysis_bulk_input, num_workers, - inventory_args, repeat(hazard), repeat(hazard_type), repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + self.pipeline_damage_analysis_bulk_input, + num_workers, + 
inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + self.set_result_csv_data( + "result", results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True - def pipeline_damage_concurrent_future(self, function_name, num_workers, - *args): + def pipeline_damage_concurrent_future(self, function_name, num_workers, *args): """Utilizes concurrent.future module. Args: @@ -83,14 +101,17 @@ def pipeline_damage_concurrent_future(self, function_name, num_workers, output_ds = [] output_dmg = [] with concurrent.futures.ProcessPoolExecutor( - max_workers=num_workers) as executor: + max_workers=num_workers + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, hazard_dataset_id): + def pipeline_damage_analysis_bulk_input( + self, pipelines, hazard, hazard_type, hazard_dataset_id + ): """Run pipeline damage analysis for multiple pipelines. Args: @@ -105,18 +126,25 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha """ # get allowed demand types for the hazard type - allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( - hazard_type)] + allowed_demand_types = [ + item["demand_type"].lower() + for item in self.hazardsvc.get_allowed_demands(hazard_type) + ] # Get Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: - fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" if hazard_type == 'tsunami' else "pgv" + fragility_key = ( + "Non-Retrofit inundationDepth Fragility ID Code" + if hazard_type == "tsunami" + else "pgv" + ) self.set_parameter("fragility_key", fragility_key) # get fragility set fragility_sets = self.fragilitysvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key) + self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key + ) values_payload = [] unmapped_pipelines = [] @@ -127,13 +155,10 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha fragility_set = fragility_sets[pipeline["id"]] location = GeoUtil.get_location(pipeline) loc = str(location.y) + "," + str(location.x) - demands, units, _ = AnalysisUtil.get_hazard_demand_types_units(pipeline, fragility_set, hazard_type, - allowed_demand_types) - value = { - "demands": demands, - "units": units, - "loc": loc - } + demands, units, _ = AnalysisUtil.get_hazard_demand_types_units( + pipeline, fragility_set, hazard_type, allowed_demand_types + ) + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) mapped_pipelines.append(pipeline) @@ -143,10 +168,12 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha # not needed anymore as they are already split into mapped and unmapped del pipelines - if hazard_type == 'earthquake' or "tsunami": + if hazard_type == "earthquake" or "tsunami": hazard_vals = hazard.read_hazard_values(values_payload, self.hazardsvc) else: - raise ValueError("The provided hazard type is not supported yet by this analysis") + raise ValueError( + "The provided hazard type is not supported yet by this analysis" + ) pipeline_results = [] damage_results = [] @@ -159,7 +186,9 @@ def pipeline_damage_analysis_bulk_input(self, 
pipelines, hazard, hazard_type, ha # TODO: Once all fragilities are migrated to new format, we can remove this condition if isinstance(fragility_set.fragility_curves[0], DFR3Curve): # Supports multiple demand types in same fragility - haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) + haz_vals = AnalysisUtil.update_precision_of_lists( + hazard_vals[i]["hazardValues"] + ) demand_types = hazard_vals[i]["demands"] demand_units = hazard_vals[i]["units"] @@ -168,29 +197,38 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha for j, d in enumerate(fragility_set.demand_types): hval_dict[d] = haz_vals[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]): - pipeline_args = fragility_set.construct_expression_args_from_inventory(pipeline) - limit_states = fragility_set.calculate_limit_state(hval_dict, - inventory_type="pipeline", - **pipeline_args) - dmg_intervals = fragility_set.calculate_damage_interval(limit_states, hazard_type=hazard_type, - inventory_type="pipeline") + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_vals[i]["hazardValues"] + ): + pipeline_args = ( + fragility_set.construct_expression_args_from_inventory(pipeline) + ) + limit_states = fragility_set.calculate_limit_state( + hval_dict, inventory_type="pipeline", **pipeline_args + ) + dmg_intervals = fragility_set.calculate_damage_interval( + limit_states, hazard_type=hazard_type, inventory_type="pipeline" + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." 
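For readers unfamiliar with the DFR3 calls in the hunk above: calculate_limit_state returns exceedance probabilities (LS_0..LS_2 for a typical three-limit-state fragility) and calculate_damage_interval converts them into mutually exclusive damage-state probabilities. A simplified sketch of that conversion, assuming three limit states and ignoring the hazard- and inventory-specific handling pyincore applies internally:

    def limit_states_to_damage_states(ls: dict) -> dict:
        """Successive differences of exceedance probabilities (simplified sketch)."""
        return {
            "DS_0": 1.0 - ls["LS_0"],          # no / insignificant damage
            "DS_1": ls["LS_0"] - ls["LS_1"],
            "DS_2": ls["LS_1"] - ls["LS_2"],
            "DS_3": ls["LS_2"],
        }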
+ ) - pipeline_result['guid'] = pipeline['properties']['guid'] + pipeline_result["guid"] = pipeline["properties"]["guid"] pipeline_result.update(limit_states) pipeline_result.update(dmg_intervals) - pipeline_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(haz_vals, hazard_type) + pipeline_result[ + "haz_expose" + ] = AnalysisUtil.get_exposure_from_hazard_values(haz_vals, hazard_type) damage_result = dict() - damage_result['guid'] = pipeline['properties']['guid'] - damage_result['fragility_id'] = fragility_set.id - damage_result['demandtypes'] = demand_types - damage_result['demandunits'] = demand_units - damage_result['hazardtype'] = hazard_type - damage_result['hazardval'] = haz_vals + damage_result["guid"] = pipeline["properties"]["guid"] + damage_result["fragility_id"] = fragility_set.id + damage_result["demandtypes"] = demand_types + damage_result["demandunits"] = demand_units + damage_result["hazardtype"] = hazard_type + damage_result["hazardval"] = haz_vals pipeline_results.append(pipeline_result) damage_results.append(damage_result) @@ -199,13 +237,13 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha for pipeline in unmapped_pipelines: pipeline_result = dict() damage_result = dict() - pipeline_result['guid'] = pipeline['properties']['guid'] - damage_result['guid'] = pipeline['properties']['guid'] - damage_result['fragility_id'] = None - damage_result['demandtypes'] = None - damage_result['demandunits'] = None - damage_result['hazardtype'] = None - damage_result['hazardvals'] = None + pipeline_result["guid"] = pipeline["properties"]["guid"] + damage_result["guid"] = pipeline["properties"]["guid"] + damage_result["fragility_id"] = None + damage_result["demandtypes"] = None + damage_result["demandunits"] = None + damage_result["hazardtype"] = None + damage_result["hazardvals"] = None pipeline_results.append(pipeline_result) damage_results.append(damage_result) @@ -220,80 +258,80 @@ def get_spec(self): """ return { - 'name': 'pipeline-damage', - 'description': 'Buried pipeline damage analysis', - 'input_parameters': [ + "name": "pipeline-damage", + "description": "Buried pipeline damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'Result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "Result dataset name", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard Type", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Geology dataset id', - 'type': str, - } + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Geology dataset id", + "type": str, + }, ], - 
'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'pipeline', - 'required': True, - 'description': 'Pipeline Inventory', - 'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'], + "id": "pipeline", + "required": True, + "description": "Pipeline Inventory", + "type": ["ergo:buriedPipelineTopology", "ergo:pipeline"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'pipeline', - 'description': 'CSV file of damage states for pipeline damage', - 'type': 'incore:pipelineDamageVer3' + "id": "result", + "parent_type": "pipeline", + "description": "CSV file of damage states for pipeline damage", + "type": "incore:pipelineDamageVer3", }, { - 'id': 'metadata', - 'parent_type': 'pipeline', - 'description': 'Json file with information about applied hazard value and fragility', - 'type': 'incore:pipelineDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "pipeline", + "description": "Json file with information about applied hazard value and fragility", + "type": "incore:pipelineDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/pipelinedamagerepairrate/__init__.py b/pyincore/analyses/pipelinedamagerepairrate/__init__.py index f6293fe45..072bc98b7 100644 --- a/pyincore/analyses/pipelinedamagerepairrate/__init__.py +++ b/pyincore/analyses/pipelinedamagerepairrate/__init__.py @@ -5,5 +5,7 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.pipelinedamagerepairrate.pipelinedamagerepairrate import PipelineDamageRepairRate +from pyincore.analyses.pipelinedamagerepairrate.pipelinedamagerepairrate import ( + PipelineDamageRepairRate, +) from pyincore.analyses.pipelinedamagerepairrate.pipelineutil import PipelineUtil diff --git a/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py b/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py index 0eae7ca10..1ca45172d 100644 --- a/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py +++ b/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py @@ -13,10 +13,14 @@ import math from itertools import repeat -from pyincore import BaseAnalysis, HazardService, FragilityService, \ - AnalysisUtil, GeoUtil -from pyincore.analyses.pipelinedamagerepairrate.pipelineutil import \ - PipelineUtil +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + AnalysisUtil, + GeoUtil, +) +from pyincore.analyses.pipelinedamagerepairrate.pipelineutil import PipelineUtil class PipelineDamageRepairRate(BaseAnalysis): @@ -34,47 +38,59 @@ def __init__(self, incore_client): super(PipelineDamageRepairRate, self).__init__(incore_client) def run(self): - """Execute pipeline damage analysis """ + """Execute pipeline damage analysis""" # Pipeline dataset - pipeline_dataset = self.get_input_dataset( - "pipeline").get_inventory_reader() + pipeline_dataset = self.get_input_dataset("pipeline").get_inventory_reader() # get input hazard - hazard, hazard_type, hazard_dataset_id = 
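The get_spec above is what BaseAnalysis uses to validate parameters and datasets before run() executes. A hedged usage sketch for the reformatted PipelineDamage, following the usual pyincore pattern (all IDs below are placeholders; the hazard can alternatively be supplied as a hazard object through the "hazard" input declared in input_hazards):

    from pyincore import IncoreClient, FragilityService, MappingSet
    from pyincore.analyses.pipelinedamage import PipelineDamage

    client = IncoreClient()
    fragility_service = FragilityService(client)

    pipeline_dmg = PipelineDamage(client)
    pipeline_dmg.load_remote_input_dataset("pipeline", "<pipeline dataset id>")
    pipeline_dmg.set_input_dataset(
        "dfr3_mapping_set", MappingSet(fragility_service.get_mapping("<mapping id>"))
    )
    pipeline_dmg.set_parameter("result_name", "pipeline_dmg_result")
    pipeline_dmg.set_parameter("hazard_type", "tsunami")
    pipeline_dmg.set_parameter("hazard_id", "<hazard id>")
    pipeline_dmg.set_parameter("num_cpu", 4)
    pipeline_dmg.run_analysis()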
self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") dataset_size = len(pipeline_dataset) - num_workers = AnalysisUtil.determine_parallelism_locally(self, - dataset_size, - user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, dataset_size, user_defined_cpu + ) avg_bulk_input_size = int(dataset_size / num_workers) inventory_args = [] count = 0 inventory_list = list(pipeline_dataset) while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size (ds_results, damage_results) = self.pipeline_damage_concurrent_future( - self.pipeline_damage_analysis_bulk_input, num_workers, - inventory_args, repeat(hazard), repeat(hazard_type), repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + self.pipeline_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True - def pipeline_damage_concurrent_future(self, function_name, num_workers, - *args): + def pipeline_damage_concurrent_future(self, function_name, num_workers, *args): """Utilizes concurrent.future module. Args: @@ -88,14 +104,18 @@ def pipeline_damage_concurrent_future(self, function_name, num_workers, """ output_ds = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, hazard_dataset_id): + def pipeline_damage_analysis_bulk_input( + self, pipelines, hazard, hazard_type, hazard_dataset_id + ): """Run pipeline damage analysis for multiple pipelines. 
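Both pipeline damage analyses use the same fan-out pattern shown above: slice the inventory into roughly equal chunks, hand the chunks to ProcessPoolExecutor.map together with repeat()-ed shared arguments, and concatenate the per-chunk results. A self-contained sketch of that pattern, independent of pyincore (the worker must be a picklable, module-level callable):

    import concurrent.futures
    from itertools import repeat

    def run_in_chunks(worker, items, num_workers, *shared_args):
        """Process items in num_workers parallel chunks and merge the results."""
        chunk_size = max(1, len(items) // num_workers)
        chunks = [items[i : i + chunk_size] for i in range(0, len(items), chunk_size)]
        results = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            # worker(chunk, *shared_args) is called once per chunk
            for chunk_result in executor.map(worker, chunks, *(repeat(a) for a in shared_args)):
                results.extend(chunk_result)
        return results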
Args: @@ -111,34 +131,40 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha # Get Fragility key fragility_key = self.get_parameter("fragility_key") if fragility_key is None: - fragility_key = PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY if hazard_type == 'tsunami' else \ - PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY + fragility_key = ( + PipelineUtil.DEFAULT_TSU_FRAGILITY_KEY + if hazard_type == "tsunami" + else PipelineUtil.DEFAULT_EQ_FRAGILITY_KEY + ) self.set_parameter("fragility_key", fragility_key) # get fragility set fragility_sets = self.fragilitysvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key) + self.get_input_dataset("dfr3_mapping_set"), pipelines, fragility_key + ) # Get Liquefaction Fragility Key - liquefaction_fragility_key = self.get_parameter( - "liquefaction_fragility_key") + liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key") if hazard_type == "earthquake" and liquefaction_fragility_key is None: liquefaction_fragility_key = PipelineUtil.LIQ_FRAGILITY_KEY # Liquefaction use_liquefaction = False - if hazard_type == "earthquake" and self.get_parameter( - "use_liquefaction") is not None: + if ( + hazard_type == "earthquake" + and self.get_parameter("use_liquefaction") is not None + ): use_liquefaction = self.get_parameter("use_liquefaction") # Get geology dataset id - geology_dataset_id = self.get_parameter( - "liquefaction_geology_dataset_id") + geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id") fragility_sets_liq = None if geology_dataset_id is not None: fragility_sets_liq = self.fragilitysvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), pipelines, - liquefaction_fragility_key) + self.get_input_dataset("dfr3_mapping_set"), + pipelines, + liquefaction_fragility_key, + ) values_payload = [] values_payload_liq = [] # for liquefaction if used @@ -152,44 +178,43 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha loc = str(location.y) + "," + str(location.x) demands = fragility_set.demand_types units = fragility_set.demand_units - value = { - "demands": demands, - "units": units, - "loc": loc - } + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) mapped_pipelines.append(pipeline) # Check if liquefaction is applicable - if use_liquefaction and \ - geology_dataset_id is not None and \ - fragility_sets_liq is not None and \ - pipeline["id"] in fragility_sets_liq: + if ( + use_liquefaction + and geology_dataset_id is not None + and fragility_sets_liq is not None + and pipeline["id"] in fragility_sets_liq + ): fragility_set_liq = fragility_sets_liq[pipeline["id"]] demands_liq = fragility_set_liq.demand_types units_liq = fragility_set_liq.demand_units - value_liq = { - "demands": demands_liq, - "units": units_liq, - "loc": loc - } + value_liq = {"demands": demands_liq, "units": units_liq, "loc": loc} values_payload_liq.append(value_liq) else: unmapped_pipelines.append(pipeline) del pipelines - if hazard_type == 'earthquake' or hazard_type == 'tsunami': + if hazard_type == "earthquake" or hazard_type == "tsunami": hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) else: - raise ValueError("The provided hazard type is not supported yet by this analysis") + raise ValueError( + "The provided hazard type is not supported yet by this analysis" + ) liquefaction_resp = None # Check if liquefaction is applicable - if use_liquefaction is True and \ - fragility_sets_liq is not 
None and \ - geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id, - values_payload_liq) + if ( + use_liquefaction is True + and fragility_sets_liq is not None + and geology_dataset_id is not None + ): + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload_liq + ) # calculate LS and DS ds_results = [] @@ -213,14 +238,16 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha ds_result = dict() damage_result = dict() - ds_result['guid'] = pipeline['properties']['guid'] - damage_result['guid'] = pipeline['properties']['guid'] + ds_result["guid"] = pipeline["properties"]["guid"] + damage_result["guid"] = pipeline["properties"]["guid"] fragility_set = fragility_sets[pipeline["id"]] # TODO assume there is only one curve fragility_curve = fragility_set.fragility_curves[0] - hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) + hazard_vals = AnalysisUtil.update_precision_of_lists( + hazard_resp[i]["hazardValues"] + ) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] @@ -228,13 +255,19 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha for j, d in enumerate(fragility_set.demand_types): hval_dict[d] = hazard_vals[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): - pipeline_args = fragility_set.construct_expression_args_from_inventory(pipeline) - pgv_repairs = \ - fragility_curve.solve_curve_expression( - hval_dict, fragility_set.curve_parameters, **pipeline_args) + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_resp[i]["hazardValues"] + ): + pipeline_args = fragility_set.construct_expression_args_from_inventory( + pipeline + ) + pgv_repairs = fragility_curve.solve_curve_expression( + hval_dict, fragility_set.curve_parameters, **pipeline_args + ) # Convert PGV repairs to SI units - pgv_repairs = PipelineUtil.convert_result_unit(fragility_curve.return_type["unit"], pgv_repairs) + pgv_repairs = PipelineUtil.convert_result_unit( + fragility_curve.return_type["unit"], pgv_repairs + ) length = PipelineUtil.get_pipe_length(pipeline) @@ -242,40 +275,54 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha num_pgv_repairs = pgv_repairs * length # Check if liquefaction is applicable - if use_liquefaction is True \ - and fragility_sets_liq is not None \ - and geology_dataset_id is not None \ - and liquefaction_resp is not None: + if ( + use_liquefaction is True + and fragility_sets_liq is not None + and geology_dataset_id is not None + and liquefaction_resp is not None + ): fragility_set_liq = fragility_sets_liq[pipeline["id"]] # TODO assume there is only one curve liq_fragility_curve = fragility_set_liq.fragility_curves[0] - liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"]) + liq_hazard_vals = AnalysisUtil.update_precision_of_lists( + liquefaction_resp[i]["pgdValues"] + ) liq_demand_types = liquefaction_resp[i]["demands"] liq_demand_units = liquefaction_resp[i]["units"] - liquefaction_prob = liquefaction_resp[i]['liqProbability'] + liquefaction_prob = liquefaction_resp[i]["liqProbability"] liq_hval_dict = dict() for j, d in enumerate(liquefaction_resp[i]["demands"]): liq_hval_dict[d] = liq_hazard_vals[j] # !important! 
removing the liqProbability and passing in the "diameter" # no fragility is actually using liqProbability - pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(pipeline) - pgd_repairs = \ - liq_fragility_curve.solve_curve_expression( - liq_hval_dict, fragility_set_liq.curve_parameters, **pipeline_args) + pipeline_args = ( + fragility_set_liq.construct_expression_args_from_inventory( + pipeline + ) + ) + pgd_repairs = liq_fragility_curve.solve_curve_expression( + liq_hval_dict, + fragility_set_liq.curve_parameters, + **pipeline_args + ) # Convert PGD repairs to SI units - pgd_repairs = PipelineUtil.convert_result_unit(liq_fragility_curve.return_type["unit"], pgd_repairs) + pgd_repairs = PipelineUtil.convert_result_unit( + liq_fragility_curve.return_type["unit"], pgd_repairs + ) num_pgd_repairs = pgd_repairs * length # record results - if 'pipetype' in pipeline['properties']: - damage_result['pipeclass'] = pipeline['properties']['pipetype'] - elif 'pipelinesc' in pipeline['properties']: - damage_result['pipeclass'] = pipeline['properties']['pipelinesc'] + if "pipetype" in pipeline["properties"]: + damage_result["pipeclass"] = pipeline["properties"]["pipetype"] + elif "pipelinesc" in pipeline["properties"]: + damage_result["pipeclass"] = pipeline["properties"][ + "pipelinesc" + ] else: - damage_result['pipeclass'] = "" + damage_result["pipeclass"] = "" break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs @@ -283,38 +330,44 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha failure_probability = 1 - math.exp(-1.0 * break_rate * length) num_repairs = num_pgd_repairs + num_pgv_repairs - ds_result['pgvrepairs'] = pgv_repairs - ds_result['pgdrepairs'] = pgd_repairs - ds_result['repairspkm'] = total_repair_rate - ds_result['breakrate'] = break_rate - ds_result['leakrate'] = leak_rate - ds_result['failprob'] = failure_probability - ds_result['numpgvrpr'] = num_pgv_repairs - ds_result['numpgdrpr'] = num_pgd_repairs - ds_result['numrepairs'] = num_repairs - ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) - - damage_result['fragility_id'] = fragility_set.id - damage_result['demandtypes'] = demand_types - damage_result['demandunits'] = demand_units - damage_result['hazardtype'] = hazard_type - damage_result['hazardval'] = hazard_vals + ds_result["pgvrepairs"] = pgv_repairs + ds_result["pgdrepairs"] = pgd_repairs + ds_result["repairspkm"] = total_repair_rate + ds_result["breakrate"] = break_rate + ds_result["leakrate"] = leak_rate + ds_result["failprob"] = failure_probability + ds_result["numpgvrpr"] = num_pgv_repairs + ds_result["numpgdrpr"] = num_pgd_repairs + ds_result["numrepairs"] = num_repairs + ds_result["haz_expose"] = AnalysisUtil.get_exposure_from_hazard_values( + hazard_vals, hazard_type + ) + + damage_result["fragility_id"] = fragility_set.id + damage_result["demandtypes"] = demand_types + damage_result["demandunits"] = demand_units + damage_result["hazardtype"] = hazard_type + damage_result["hazardval"] = hazard_vals # Check if liquefaction is applicable - if use_liquefaction is True \ - and fragility_sets_liq is not None \ - and geology_dataset_id is not None: - damage_result['liq_fragility_id'] = fragility_sets_liq[pipeline["id"]].id - damage_result['liqdemandtypes'] = liq_demand_types - damage_result['liqdemandunits'] = liq_demand_units - damage_result['liqhazval'] = liq_hazard_vals - damage_result['liqprobability'] = liquefaction_prob + 
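The arithmetic in the hunk above uses the common repair-rate split for buried pipe: PGV-driven repairs are assumed to be 20% breaks / 80% leaks, PGD (liquefaction) repairs 80% breaks / 20% leaks, and the probability of at least one break over the pipe length follows from treating breaks as a Poisson process. A compact restatement (rates in repairs/km, length in km):

    import math

    def pipe_failure_summary(pgv_repairs, pgd_repairs, length_km):
        """Break/leak rates and P(at least one break) for a single pipe."""
        break_rate = 0.2 * pgv_repairs + 0.8 * pgd_repairs
        leak_rate = 0.8 * pgv_repairs + 0.2 * pgd_repairs
        failure_probability = 1 - math.exp(-break_rate * length_km)  # Poisson P(N >= 1)
        return break_rate, leak_rate, failure_probability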
if ( + use_liquefaction is True + and fragility_sets_liq is not None + and geology_dataset_id is not None + ): + damage_result["liq_fragility_id"] = fragility_sets_liq[ + pipeline["id"] + ].id + damage_result["liqdemandtypes"] = liq_demand_types + damage_result["liqdemandunits"] = liq_demand_units + damage_result["liqhazval"] = liq_hazard_vals + damage_result["liqprobability"] = liquefaction_prob else: - damage_result['liq_fragility_id'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazval'] = None - damage_result['liqprobability'] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -322,27 +375,27 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard, hazard_type, ha # pipelines do not have matched mappings for pipeline in unmapped_pipelines: ds_result = dict() - ds_result['guid'] = pipeline['properties']['guid'] + ds_result["guid"] = pipeline["properties"]["guid"] damage_result = dict() - damage_result['guid'] = pipeline['properties']['guid'] - if 'pipetype' in pipeline['properties']: - damage_result['pipeclass'] = pipeline['properties']['pipetype'] - elif 'pipelinesc' in pipeline['properties']: - damage_result['pipeclass'] = pipeline['properties']['pipelinesc'] + damage_result["guid"] = pipeline["properties"]["guid"] + if "pipetype" in pipeline["properties"]: + damage_result["pipeclass"] = pipeline["properties"]["pipetype"] + elif "pipelinesc" in pipeline["properties"]: + damage_result["pipeclass"] = pipeline["properties"]["pipelinesc"] else: - damage_result['pipeclass'] = "" - - damage_result['fragility_id'] = None - damage_result['demandtypes'] = None - damage_result['demandunits'] = None - damage_result['hazardtype'] = None - damage_result['hazardval'] = None - damage_result['liq_fragility_id'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazval'] = None - damage_result['liqhazval'] = None + damage_result["pipeclass"] = "" + + damage_result["fragility_id"] = None + damage_result["demandtypes"] = None + damage_result["demandunits"] = None + damage_result["hazardtype"] = None + damage_result["hazardval"] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqhazval"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -357,92 +410,92 @@ def get_spec(self): """ return { - 'name': 'pipeline-damage', - 'description': 'buried pipeline damage analysis', - 'input_parameters': [ + "name": "pipeline-damage", + "description": "buried pipeline damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. earthquake)', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard Type (e.g. 
earthquake)", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, }, { - 'id': 'liquefaction_fragility_key', - 'required': False, - 'description': 'Fragility key to use in liquefaction mapping dataset', - 'type': str + "id": "liquefaction_fragility_key", + "required": False, + "description": "Fragility key to use in liquefaction mapping dataset", + "type": str, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Geology dataset id', - 'type': str, - } + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Geology dataset id", + "type": str, + }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'pipeline', - 'required': True, - 'description': 'Pipeline Inventory', - 'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'], + "id": "pipeline", + "required": True, + "description": "Pipeline Inventory", + "type": ["ergo:buriedPipelineTopology", "ergo:pipeline"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'pipeline', - 'type': 'ergo:pipelineDamageVer3' + "id": "result", + "parent_type": "pipeline", + "type": "ergo:pipelineDamageVer3", }, { - 'id': 'metadata', - 'parent_type': 'pipeline', - 'description': 'additional metadata in json file about applied hazard value and ' - 'fragility', - 'type': 'incore:pipelineDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "pipeline", + "description": "additional metadata in json file about applied hazard value and " + "fragility", + "type": "incore:pipelineDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/pipelinedamagerepairrate/pipelineutil.py b/pyincore/analyses/pipelinedamagerepairrate/pipelineutil.py index 502f842ab..ba4bb5cbe 100644 --- a/pyincore/analyses/pipelinedamagerepairrate/pipelineutil.py +++ b/pyincore/analyses/pipelinedamagerepairrate/pipelineutil.py @@ -30,8 +30,11 @@ def convert_result_unit(result_unit: str, result: float): elif result_unit.lower() == "repairs/1000ft": return result / 0.3048 - print("Result type was not found so we didn't change it. 
For pipes, all results should convert from their " - "unit type into Repairs per Kilometer for uniform results. We found a result type of " + result_unit) + print( + "Result type was not found so we didn't change it. For pipes, all results should convert from their " + "unit type into Repairs per Kilometer for uniform results. We found a result type of " + + result_unit + ) return result @staticmethod @@ -47,12 +50,12 @@ def get_pipe_length(pipeline): """ pipe_length = 0.0 - if 'pipelength' in pipeline['properties']: - pipe_length = float(pipeline['properties']['pipelength']) - elif 'length_km' in pipeline['properties']: - pipe_length = float(pipeline['properties']['length_km']) - elif 'length' in pipeline['properties']: - pipe_length = float(pipeline['properties']['length']) + if "pipelength" in pipeline["properties"]: + pipe_length = float(pipeline["properties"]["pipelength"]) + elif "length_km" in pipeline["properties"]: + pipe_length = float(pipeline["properties"]["length_km"]) + elif "length" in pipeline["properties"]: + pipe_length = float(pipeline["properties"]["length"]) else: print("Pipeline has no length attribute") @@ -70,7 +73,7 @@ def get_pipe_diameter(pipeline): """ diameter = 0.0 - if 'diameter' in pipeline['properties']: - diameter = float(pipeline['properties']['diameter']) + if "diameter" in pipeline["properties"]: + diameter = float(pipeline["properties"]["diameter"]) return diameter diff --git a/pyincore/analyses/pipelinefunctionality/__init__.py b/pyincore/analyses/pipelinefunctionality/__init__.py index ea34a7b67..75462af03 100644 --- a/pyincore/analyses/pipelinefunctionality/__init__.py +++ b/pyincore/analyses/pipelinefunctionality/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.pipelinefunctionality.pipelinefunctionality import PipelineFunctionality +from pyincore.analyses.pipelinefunctionality.pipelinefunctionality import ( + PipelineFunctionality, +) diff --git a/pyincore/analyses/pipelinefunctionality/pipelinefunctionality.py b/pyincore/analyses/pipelinefunctionality/pipelinefunctionality.py index de9f3b45c..7f2ed9e0a 100644 --- a/pyincore/analyses/pipelinefunctionality/pipelinefunctionality.py +++ b/pyincore/analyses/pipelinefunctionality/pipelinefunctionality.py @@ -43,18 +43,27 @@ def __init__(self, incore_client): super(PipelineFunctionality, self).__init__(incore_client) def run(self): - pipeline_dmg_df = self.get_input_dataset("pipeline_repair_rate_damage").get_dataframe_from_csv() + pipeline_dmg_df = self.get_input_dataset( + "pipeline_repair_rate_damage" + ).get_dataframe_from_csv() num_samples = self.get_parameter("num_samples") - (fs_results, fp_results) = self.pipeline_functionality(pipeline_dmg_df, num_samples) - self.set_result_csv_data("sample_failure_state", - fs_results, name=self.get_parameter("result_name") + "_failure_state", - source="dataframe") - self.set_result_csv_data("failure_probability", - fp_results, - name=self.get_parameter("result_name") + "_failure_probability", - source="dataframe") + (fs_results, fp_results) = self.pipeline_functionality( + pipeline_dmg_df, num_samples + ) + self.set_result_csv_data( + "sample_failure_state", + fs_results, + name=self.get_parameter("result_name") + "_failure_state", + source="dataframe", + ) + self.set_result_csv_data( + "failure_probability", + fp_results, + name=self.get_parameter("result_name") + "_failure_probability", + source="dataframe", + ) return True @@ -70,31 +79,40 @@ def pipeline_functionality(self, pipeline_dmg_df, 
num_samples): """ - pipeline_dmg_df['pgv_pf'] = 1 - poisson.pmf(0, pipeline_dmg_df.loc[:, 'numpgvrpr'].values) + pipeline_dmg_df["pgv_pf"] = 1 - poisson.pmf( + 0, pipeline_dmg_df.loc[:, "numpgvrpr"].values + ) # todo there is more efficient pandas manipulation - sampcols = ['s' + samp for samp in np.arange(num_samples).astype(str)] + sampcols = ["s" + samp for samp in np.arange(num_samples).astype(str)] fs_results = pd.DataFrame( - bernoulli.rvs(1 - pipeline_dmg_df.loc[:, 'pgv_pf'].values, size=(num_samples, pipeline_dmg_df.shape[0])).T, - index=pipeline_dmg_df.guid.values, columns=sampcols) + bernoulli.rvs( + 1 - pipeline_dmg_df.loc[:, "pgv_pf"].values, + size=(num_samples, pipeline_dmg_df.shape[0]), + ).T, + index=pipeline_dmg_df.guid.values, + columns=sampcols, + ) fp_results = fs_results.copy(deep=True) # calculate sample failure # concatenate all columns into one failure column - fs_results['failure'] = fs_results.astype(str).apply(','.join, axis=1) - fs_results = fs_results.filter(['failure']) + fs_results["failure"] = fs_results.astype(str).apply(",".join, axis=1) + fs_results = fs_results.filter(["failure"]) # set guid column fs_results.reset_index(inplace=True) - fs_results = fs_results.rename(columns={'index': 'guid'}) + fs_results = fs_results.rename(columns={"index": "guid"}) # calculate failure probability # count of 0s divided by sample size - fp_results["failure_probability"] = (num_samples - fp_results.sum(axis=1).astype(int)) / num_samples - fp_results = fp_results.filter(['failure_probability']) + fp_results["failure_probability"] = ( + num_samples - fp_results.sum(axis=1).astype(int) + ) / num_samples + fp_results = fp_results.filter(["failure_probability"]) # set guid column fp_results.reset_index(inplace=True) - fp_results = fp_results.rename(columns={'index': 'guid'}) + fp_results = fp_results.rename(columns={"index": "guid"}) return fs_results, fp_results @@ -106,40 +124,40 @@ def get_spec(self): """ return { - 'name': 'pipeline-functionality', - 'description': 'buried pipeline functionality analysis', - 'input_parameters': [ + "name": "pipeline-functionality", + "description": "buried pipeline functionality analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'num_samples', - 'required': True, - 'description': 'Number of MC samples', - 'type': int + "id": "num_samples", + "required": True, + "description": "Number of MC samples", + "type": int, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'pipeline_repair_rate_damage', - 'required': True, - 'description': 'Output of pipeline damage repair rate analysis', - 'type': ['ergo:pipelineDamageVer3'], + "id": "pipeline_repair_rate_damage", + "required": True, + "description": "Output of pipeline damage repair rate analysis", + "type": ["ergo:pipelineDamageVer3"], }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'failure_probability', - 'description': 'CSV file of failure probability', - 'type': 'incore:failureProbability' + "id": "failure_probability", + "description": "CSV file of failure probability", + "type": "incore:failureProbability", }, { - 'id': 'sample_failure_state', - 'description': 'CSV file of failure state for each sample', - 'type': 'incore:sampleFailureState' + "id": "sample_failure_state", + "description": "CSV file of failure state for each sample", + "type": "incore:sampleFailureState", }, 
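In the pipeline_functionality hunk above, numpgvrpr is the expected number of PGV-driven repairs on a pipe, so 1 - poisson.pmf(0, numpgvrpr) = 1 - exp(-numpgvrpr) is the probability that the pipe needs at least one repair; Monte Carlo samples then draw Bernoulli survival states (1 = functional) with probability 1 - pgv_pf, and the failure probability is estimated as the fraction of failed samples. A small standalone sketch of that sampling step (the numbers are illustrative):

    import numpy as np
    from scipy.stats import bernoulli, poisson

    expected_repairs = np.array([0.05, 0.5, 2.0])                  # numpgvrpr per pipe
    p_fail = 1 - poisson.pmf(0, expected_repairs)                  # P(at least one repair)
    samples = bernoulli.rvs(1 - p_fail, size=(1000, p_fail.size))  # 1 = functional
    estimated_failure_probability = 1 - samples.mean(axis=0)       # fraction of zeros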
- ] + ], } diff --git a/pyincore/analyses/pipelinerepaircost/pipelinerepaircost.py b/pyincore/analyses/pipelinerepaircost/pipelinerepaircost.py index 5e0481378..415c88135 100644 --- a/pyincore/analyses/pipelinerepaircost/pipelinerepaircost.py +++ b/pyincore/analyses/pipelinerepaircost/pipelinerepaircost.py @@ -25,8 +25,12 @@ def run(self): """Executes pipline facility repair cost analysis.""" pipeline_df = self.get_input_dataset("pipeline").get_dataframe_from_shapefile() - pipeline_dmg_df = self.get_input_dataset("pipeline_dmg").get_dataframe_from_csv() - replacement_cost = self.get_input_dataset("replacement_cost").get_dataframe_from_csv() + pipeline_dmg_df = self.get_input_dataset( + "pipeline_dmg" + ).get_dataframe_from_csv() + replacement_cost = self.get_input_dataset( + "replacement_cost" + ).get_dataframe_from_csv() # join damage, replacement cost, with original inventory pipeline_df = pipeline_df.merge(pipeline_dmg_df, on="guid") @@ -34,22 +38,32 @@ def run(self): pipeline_set = pipeline_df.to_dict(orient="records") user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(pipeline_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(pipeline_set), user_defined_cpu + ) avg_bulk_input_size = int(len(pipeline_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(pipeline_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - repair_costs = self.pipeline_repair_cost_concurrent_future(self.pipeline_repair_cost_bulk_input, num_workers, - inventory_args) - self.set_result_csv_data("result", repair_costs, name=self.get_parameter("result_name") + "_repair_cost") + repair_costs = self.pipeline_repair_cost_concurrent_future( + self.pipeline_repair_cost_bulk_input, num_workers, inventory_args + ) + self.set_result_csv_data( + "result", + repair_costs, + name=self.get_parameter("result_name") + "_repair_cost", + ) return True @@ -67,7 +81,9 @@ def pipeline_repair_cost_concurrent_future(self, function_name, num_workers, *ar """ output = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1 in executor.map(function_name, *args): output.extend(ret1) @@ -84,8 +100,12 @@ def pipeline_repair_cost_bulk_input(self, pipelines): """ # read in the damage ratio tables - pipeline_dmg_ratios_csv = self.get_input_dataset("pipeline_dmg_ratios").get_csv_reader() - dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows(pipeline_dmg_ratios_csv, ignore_first_row=False) + pipeline_dmg_ratios_csv = self.get_input_dataset( + "pipeline_dmg_ratios" + ).get_csv_reader() + dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows( + pipeline_dmg_ratios_csv, ignore_first_row=False + ) segment_length = self.get_parameter("segment_length") if segment_length is None: @@ -110,19 +130,27 @@ def pipeline_repair_cost_bulk_input(self, pipelines): dr_leak = 0 if pipeline["diameter"] > diameter: for dmg_ratio_row in dmg_ratio_tbl: - if dmg_ratio_row["Inventory Type"] == ">" + str(diameter) + " in" and \ - dmg_ratio_row["Damage State"] 
== "break": + if ( + dmg_ratio_row["Inventory Type"] == ">" + str(diameter) + " in" + and dmg_ratio_row["Damage State"] == "break" + ): dr_break = float(dmg_ratio_row["Best Mean Damage Ratio"]) - if dmg_ratio_row["Inventory Type"] == ">" + str(diameter) + " in" and \ - dmg_ratio_row["Damage State"] == "leak": + if ( + dmg_ratio_row["Inventory Type"] == ">" + str(diameter) + " in" + and dmg_ratio_row["Damage State"] == "leak" + ): dr_leak = float(dmg_ratio_row["Best Mean Damage Ratio"]) else: for dmg_ratio_row in dmg_ratio_tbl: - if dmg_ratio_row["Inventory Type"] == "<" + str(diameter) + " in" and \ - dmg_ratio_row["Damage State"] == "break": + if ( + dmg_ratio_row["Inventory Type"] == "<" + str(diameter) + " in" + and dmg_ratio_row["Damage State"] == "break" + ): dr_break = float(dmg_ratio_row["Best Mean Damage Ratio"]) - if dmg_ratio_row["Inventory Type"] == "<" + str(diameter) + " in" and \ - dmg_ratio_row["Damage State"] == "leak": + if ( + dmg_ratio_row["Inventory Type"] == "<" + str(diameter) + " in" + and dmg_ratio_row["Damage State"] == "leak" + ): dr_leak = float(dmg_ratio_row["Best Mean Damage Ratio"]) num_segment = pipe_length_ft / segment_length @@ -131,13 +159,17 @@ def pipeline_repair_cost_bulk_input(self, pipelines): if num_breaks > num_segment: repair_cost += pipeline["replacement_cost"] * dr_break else: - repair_cost += pipeline["replacement_cost"] / num_segment * num_breaks * dr_break + repair_cost += ( + pipeline["replacement_cost"] / num_segment * num_breaks * dr_break + ) num_leaks = pipeline["leakrate"] * pipe_length if num_leaks > num_segment: repair_cost += pipeline["replacement_cost"] * dr_leak else: - repair_cost += pipeline["replacement_cost"] / num_segment * num_leaks * dr_leak + repair_cost += ( + pipeline["replacement_cost"] / num_segment * num_leaks * dr_leak + ) repair_cost = min(repair_cost, pipeline["replacement_cost"]) rc["budget"] = repair_cost @@ -162,26 +194,26 @@ def get_spec(self): "id": "result_name", "required": True, "description": "A name of the resulting dataset", - "type": str + "type": str, }, { "id": "num_cpu", "required": False, "description": "If using parallel execution, the number of cpus to request.", - "type": int + "type": int, }, { "id": "diameter", "required": False, "description": "Pipeline diameter cutoff assumption for different damage ratios. Default is 20 " - "inches", - "type": int + "inches", + "type": int, }, { "id": "segment_length", "required": False, "description": "Segment length assumption. 
Default is 20 feet", - "type": int + "type": int, }, ], "input_datasets": [ @@ -201,13 +233,13 @@ def get_spec(self): "id": "pipeline_dmg", "required": True, "description": "pipeline damage from PipelineDamageRepairRate Analysis", - "type": ["ergo:pipelineDamageVer3"] + "type": ["ergo:pipelineDamageVer3"], }, { "id": "pipeline_dmg_ratios", "required": True, "description": "Damage Ratios table", - "type": ["incore:pipelineDamageRatios"] + "type": ["incore:pipelineDamageRatios"], }, ], "output_datasets": [ @@ -215,7 +247,7 @@ def get_spec(self): "id": "result", "parent_type": "pipelines", "description": "A csv file with repair cost for each pipeline", - "type": "incore:pipelineRepairCost" + "type": "incore:pipelineRepairCost", } - ] + ], } diff --git a/pyincore/analyses/pipelinerestoration/__init__.py b/pyincore/analyses/pipelinerestoration/__init__.py index d5dcfa830..3141884b2 100644 --- a/pyincore/analyses/pipelinerestoration/__init__.py +++ b/pyincore/analyses/pipelinerestoration/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.pipelinerestoration.pipelinerestoration import PipelineRestoration +from pyincore.analyses.pipelinerestoration.pipelinerestoration import ( + PipelineRestoration, +) diff --git a/pyincore/analyses/pipelinerestoration/pipelinerestoration.py b/pyincore/analyses/pipelinerestoration/pipelinerestoration.py index fd6d8f842..baeed75d4 100644 --- a/pyincore/analyses/pipelinerestoration/pipelinerestoration.py +++ b/pyincore/analyses/pipelinerestoration/pipelinerestoration.py @@ -31,61 +31,61 @@ def get_spec(self): """ return { - 'name': 'pipeline-restoration', - 'description': 'calculate the restoration times for damaged pipelines', - 'input_parameters': [ + "name": "pipeline-restoration", + "description": "calculate the restoration times for damaged pipelines", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'name of the result csv dataset', - 'type': str + "id": "result_name", + "required": True, + "description": "name of the result csv dataset", + "type": str, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'num_available_workers', - 'required': True, - 'description': 'Number of available workers to work on the repairs', - 'type': int + "id": "num_available_workers", + "required": True, + "description": "Number of available workers to work on the repairs", + "type": int, }, { - 'id': 'restoration_key', - 'required': False, - 'description': 'restoration key to use in mapping dataset', - 'type': str + "id": "restoration_key", + "required": False, + "description": "restoration key to use in mapping dataset", + "type": str, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'pipeline', - 'required': True, - 'description': 'Pipeline Inventory', - 'type': ['ergo:buriedPipelineTopology', 'ergo:pipeline'], + "id": "pipeline", + "required": True, + "description": "Pipeline Inventory", + "type": ["ergo:buriedPipelineTopology", "ergo:pipeline"], }, { - 'id': 'pipeline_damage', - 'required': True, - 'description': 'pipeline damage results with repairs', - 'type': ['ergo:pipelineDamageVer2', 'ergo:pipelineDamageVer3'] + "id": "pipeline_damage", + "required": True, + "description": "pipeline damage results with repairs", + "type": 
["ergo:pipelineDamageVer2", "ergo:pipelineDamageVer3"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'pipeline_restoration', - 'description': 'CSV file of pipeline restoration times', - 'type': 'incore:pipelineRestorationVer1' + "id": "pipeline_restoration", + "description": "CSV file of pipeline restoration times", + "type": "incore:pipelineRestorationVer1", } - ] + ], } def run(self): @@ -96,18 +96,19 @@ def run(self): pipeline_dmg = self.get_input_dataset("pipeline_damage").get_csv_reader() pipelines_dmg_df = pd.DataFrame(list(pipeline_dmg)) - damage_result = pipelines_dmg_df.merge(pipelines_df, on='guid') - damage_result = damage_result.to_dict(orient='records') + damage_result = pipelines_dmg_df.merge(pipelines_df, on="guid") + damage_result = damage_result.to_dict(orient="records") user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, - len( - damage_result), - user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(damage_result), user_defined_cpu + ) avg_bulk_input_size = int(len(damage_result) / num_workers) inventory_args = [] @@ -115,18 +116,20 @@ def run(self): inventory_list = damage_result while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size restoration_results = self.pipeline_restoration_concurrent_future( - self.pipeline_restoration_bulk_input, num_workers, - inventory_args) - self.set_result_csv_data("pipeline_restoration", - restoration_results, name=self.get_parameter("result_name")) + self.pipeline_restoration_bulk_input, num_workers, inventory_args + ) + self.set_result_csv_data( + "pipeline_restoration", + restoration_results, + name=self.get_parameter("result_name"), + ) return True - def pipeline_restoration_concurrent_future(self, function_name, - parallelism, *args): + def pipeline_restoration_concurrent_future(self, function_name, parallelism, *args): """Utilizes concurrent.future module. 
Args: @@ -140,7 +143,8 @@ def pipeline_restoration_concurrent_future(self, function_name, """ res_output = [] with concurrent.futures.ProcessPoolExecutor( - max_workers=parallelism) as executor: + max_workers=parallelism + ) as executor: for res_ret in executor.map(function_name, *args): res_output.extend(res_ret) @@ -163,11 +167,14 @@ def pipeline_restoration_bulk_input(self, damage): if restoration_key is None: restoration_key = "Restoration ID Code" - restoration_sets = self.restorationsvc.match_list_of_dicts(self.get_input_dataset("dfr3_mapping_set"), - damage, restoration_key) + restoration_sets = self.restorationsvc.match_list_of_dicts( + self.get_input_dataset("dfr3_mapping_set"), damage, restoration_key + ) for dmg in damage: - res = self.restoration_time(dmg, num_available_workers, restoration_sets[dmg['guid']]) + res = self.restoration_time( + dmg, num_available_workers, restoration_sets[dmg["guid"]] + ) restoration_results.append(res) return restoration_results @@ -186,15 +193,18 @@ def restoration_time(dmg, num_available_workers, restoration_set): """ res_result = collections.OrderedDict() - if 'guid' in dmg.keys(): - res_result['guid'] = dmg['guid'] + if "guid" in dmg.keys(): + res_result["guid"] = dmg["guid"] else: - res_result['guid'] = 'NA' - - res_result['repair_time'] = restoration_set.calculate_restoration_rates(**{ - "break_rate": float(dmg['breakrate']), - "leak_rate": float(dmg['leakrate']), - "pipe_length": dmg['length'], - "num_workers": num_available_workers})['RT'] + res_result["guid"] = "NA" + + res_result["repair_time"] = restoration_set.calculate_restoration_rates( + **{ + "break_rate": float(dmg["breakrate"]), + "leak_rate": float(dmg["leakrate"]), + "pipe_length": dmg["length"], + "num_workers": num_available_workers, + } + )["RT"] return res_result diff --git a/pyincore/analyses/populationdislocation/__init__.py b/pyincore/analyses/populationdislocation/__init__.py index f5bcdc39e..7aa1a913e 100644 --- a/pyincore/analyses/populationdislocation/__init__.py +++ b/pyincore/analyses/populationdislocation/__init__.py @@ -3,5 +3,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.populationdislocation.populationdislocation import PopulationDislocation -from pyincore.analyses.populationdislocation.populationdislocationutil import PopulationDislocationUtil +from pyincore.analyses.populationdislocation.populationdislocation import ( + PopulationDislocation, +) +from pyincore.analyses.populationdislocation.populationdislocationutil import ( + PopulationDislocationUtil, +) diff --git a/pyincore/analyses/populationdislocation/populationdislocation.py b/pyincore/analyses/populationdislocation/populationdislocation.py index a27456c58..b43cf20ea 100644 --- a/pyincore/analyses/populationdislocation/populationdislocation.py +++ b/pyincore/analyses/populationdislocation/populationdislocation.py @@ -6,7 +6,9 @@ import pandas as pd import warnings from pyincore import BaseAnalysis -from pyincore.analyses.populationdislocation.populationdislocationutil import PopulationDislocationUtil +from pyincore.analyses.populationdislocation.populationdislocationutil import ( + PopulationDislocationUtil, +) class PopulationDislocation(BaseAnalysis): @@ -32,98 +34,100 @@ def __init__(self, incore_client): def get_spec(self): return { - 'name': 'population-dislocation', - 'description': 'Population Dislocation Analysis', - 'input_parameters': [ + "name": "population-dislocation", + "description": "Population Dislocation Analysis", + "input_parameters": [ 
{ - 'id': 'result_name', - 'required': True, - 'description': 'Result CSV dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "Result CSV dataset name", + "type": str, }, { - 'id': 'seed', - 'required': True, - 'description': 'Seed to ensure replication if run as part of a probabilistic analysis, ' - 'for example in connection with Housing Unit Allocation analysis.', - 'type': int + "id": "seed", + "required": True, + "description": "Seed to ensure replication if run as part of a probabilistic analysis, " + "for example in connection with Housing Unit Allocation analysis.", + "type": int, }, { - 'id': 'choice_dislocation', - 'required': False, - 'description': 'Flag to calculate choice dislocation', - 'type': bool + "id": "choice_dislocation", + "required": False, + "description": "Flag to calculate choice dislocation", + "type": bool, }, { - 'id': 'choice_dislocation_cutoff', - 'required': False, - 'description': 'Choice dislocation cutoff', - 'type': float + "id": "choice_dislocation_cutoff", + "required": False, + "description": "Choice dislocation cutoff", + "type": float, }, { - 'id': 'choice_dislocation_ds', - 'required': False, - 'description': 'Damage state to use for choice dislocation ', - 'type': str + "id": "choice_dislocation_ds", + "required": False, + "description": "Damage state to use for choice dislocation ", + "type": str, }, { - 'id': 'unsafe_occupancy', - 'required': False, - 'description': 'Flag to calculate unsafe occupancy', - 'type': bool + "id": "unsafe_occupancy", + "required": False, + "description": "Flag to calculate unsafe occupancy", + "type": bool, }, { - 'id': 'unsafe_occupancy_cutoff', - 'required': False, - 'description': 'Unsafe occupancy cutoff', - 'type': float + "id": "unsafe_occupancy_cutoff", + "required": False, + "description": "Unsafe occupancy cutoff", + "type": float, }, { - 'id': 'unsafe_occupancy_ds', - 'required': False, - 'description': 'Damage state to use for unsafe occupancy ', - 'type': str - } + "id": "unsafe_occupancy_ds", + "required": False, + "description": "Damage state to use for unsafe occupancy ", + "type": str, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'building_dmg', - 'required': True, - 'description': 'Building damage results CSV file', - 'type': ['ergo:buildingInventoryVer4', - 'ergo:buildingDamageVer5', - 'ergo:buildingDamageVer6', - 'ergo:buildingInventory'] + "id": "building_dmg", + "required": True, + "description": "Building damage results CSV file", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingDamageVer5", + "ergo:buildingDamageVer6", + "ergo:buildingInventory", + ], }, { - 'id': 'housing_unit_allocation', - 'required': True, - 'description': 'A csv file with the merged dataset of the inputs, aka Probabilistic' - 'House Unit Allocation', - 'type': ['incore:housingUnitAllocation'] + "id": "housing_unit_allocation", + "required": True, + "description": "A csv file with the merged dataset of the inputs, aka Probabilistic" + "House Unit Allocation", + "type": ["incore:housingUnitAllocation"], }, { - 'id': 'block_group_data', - 'required': True, - 'description': 'Block group racial distribution census CSV data', - 'type': ['incore:blockGroupData'] + "id": "block_group_data", + "required": True, + "description": "Block group racial distribution census CSV data", + "type": ["incore:blockGroupData"], }, { - 'id': 'value_loss_param', - 'required': True, - 'description': 'A table with value loss beta distribution parameters based on Bai et al. 
2009', - 'type': ['incore:valuLossParam'] - } + "id": "value_loss_param", + "required": True, + "description": "A table with value loss beta distribution parameters based on Bai et al. 2009", + "type": ["incore:valuLossParam"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'population_block', - 'description': 'A csv file with population dislocation result ' - 'aggregated to the block group level', - 'type': 'incore:popDislocation' + "id": "result", + "parent_type": "population_block", + "description": "A csv file with population dislocation result " + "aggregated to the block group level", + "type": "incore:popDislocation", } - ] + ], } def run(self): @@ -139,18 +143,25 @@ def run(self): result_name = self.get_parameter("result_name") # Building damage dataset - building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv(low_memory=False) + building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv( + low_memory=False + ) # Housing unit allocation dataset - housing_unit_alloc = self.get_input_dataset("housing_unit_allocation"). \ - get_dataframe_from_csv(low_memory=False) + housing_unit_alloc = self.get_input_dataset( + "housing_unit_allocation" + ).get_dataframe_from_csv(low_memory=False) # Block group dataset - bg_data = self.get_input_dataset("block_group_data").get_dataframe_from_csv(low_memory=False) + bg_data = self.get_input_dataset("block_group_data").get_dataframe_from_csv( + low_memory=False + ) # Get value loss parameters - value_loss = self.get_input_dataset("value_loss_param").get_dataframe_from_csv(low_memory=False) - value_loss.set_index('damagestate', inplace=True) + value_loss = self.get_input_dataset("value_loss_param").get_dataframe_from_csv( + low_memory=False + ) + value_loss.set_index("damagestate", inplace=True) # Get choice_dislocation and unsafe_occupancy variables choice_dislocation = self.get_parameter("choice_dislocation") @@ -167,20 +178,32 @@ def run(self): merged_final_inv["choice_dis"] = None merged_final_inv["unsafe_occ"] = None if choice_dislocation: - choice_dislocation_cutoff = self.get_parameter("choice_dislocation_cutoff") or 0.5 - choice_dislocation_ds = self.get_parameter("choice_dislocation_ds") or "DS_0" - PopulationDislocationUtil.get_choice_dislocation(merged_final_inv, choice_dislocation_cutoff, choice_dislocation_ds) + choice_dislocation_cutoff = ( + self.get_parameter("choice_dislocation_cutoff") or 0.5 + ) + choice_dislocation_ds = ( + self.get_parameter("choice_dislocation_ds") or "DS_0" + ) + PopulationDislocationUtil.get_choice_dislocation( + merged_final_inv, choice_dislocation_cutoff, choice_dislocation_ds + ) if unsafe_occupancy: - unsafe_occupancy_cutoff = self.get_parameter("unsafe_occupancy_cutoff") or 0.5 + unsafe_occupancy_cutoff = ( + self.get_parameter("unsafe_occupancy_cutoff") or 0.5 + ) unsafe_occupancy_ds = self.get_parameter("unsafe_occupancy_ds") or "DS_3" - PopulationDislocationUtil.get_unsafe_occupancy(merged_final_inv, unsafe_occupancy_cutoff, unsafe_occupancy_ds) + PopulationDislocationUtil.get_unsafe_occupancy( + merged_final_inv, unsafe_occupancy_cutoff, unsafe_occupancy_ds + ) self.set_result_csv_data("result", merged_final_inv, result_name, "dataframe") return True - def get_dislocation(self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.DataFrame): + def get_dislocation( + self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.DataFrame + ): """Calculates dislocation probability. 
Probability of dislocation, a binary variable based on the logistic probability of dislocation. @@ -205,9 +228,9 @@ def get_dislocation(self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.D if "huestimate" in inventory.columns: inventory["d_sf"] = (inventory["huestimate"] == 1).astype(int) elif "huestimate_x" in inventory.columns: - inventory = PopulationDislocationUtil.compare_columns(inventory, - "huestimate_x", - "huestimate_y", True) + inventory = PopulationDislocationUtil.compare_columns( + inventory, "huestimate_x", "huestimate_y", True + ) if "huestimate_x-huestimate_y" in inventory.columns: exit("Column huestimate is ambiguous, check the input datasets!") else: @@ -219,7 +242,6 @@ def get_dislocation(self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.D if "d_sf_y" in inventory.columns: inventory = inventory.drop(columns=["d_sf_y"]) dsf = inventory["d_sf"].values - hue = inventory["huestimate"].values.astype(int) pbd = inventory["pblackbg"].values phd = inventory["phispbg"].values @@ -229,20 +251,36 @@ def get_dislocation(self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.D prob3 = inventory["DS_3"].values # include random value loss by damage state - rploss0 = PopulationDislocationUtil.get_random_loss(seed_i, value_loss, "DS_0", dsf.size) - rploss1 = PopulationDislocationUtil.get_random_loss(seed_i, value_loss, "DS_1", dsf.size) - rploss2 = PopulationDislocationUtil.get_random_loss(seed_i, value_loss, "DS_2", dsf.size) - rploss3 = PopulationDislocationUtil.get_random_loss(seed_i, value_loss, "DS_3", dsf.size) + rploss0 = PopulationDislocationUtil.get_random_loss( + seed_i, value_loss, "DS_0", dsf.size + ) + rploss1 = PopulationDislocationUtil.get_random_loss( + seed_i, value_loss, "DS_1", dsf.size + ) + rploss2 = PopulationDislocationUtil.get_random_loss( + seed_i, value_loss, "DS_2", dsf.size + ) + rploss3 = PopulationDislocationUtil.get_random_loss( + seed_i, value_loss, "DS_3", dsf.size + ) inventory["rploss_0"] = rploss0 inventory["rploss_1"] = rploss1 inventory["rploss_2"] = rploss2 inventory["rploss_3"] = rploss3 - prob0_disl = PopulationDislocationUtil.get_disl_probability(rploss0, dsf, pbd, phd) - prob1_disl = PopulationDislocationUtil.get_disl_probability(rploss1, dsf, pbd, phd) - prob2_disl = PopulationDislocationUtil.get_disl_probability(rploss2, dsf, pbd, phd) - prob3_disl = PopulationDislocationUtil.get_disl_probability(rploss3, dsf, pbd, phd) + prob0_disl = PopulationDislocationUtil.get_disl_probability( + rploss0, dsf, pbd, phd + ) + prob1_disl = PopulationDislocationUtil.get_disl_probability( + rploss1, dsf, pbd, phd + ) + prob2_disl = PopulationDislocationUtil.get_disl_probability( + rploss2, dsf, pbd, phd + ) + prob3_disl = PopulationDislocationUtil.get_disl_probability( + rploss3, dsf, pbd, phd + ) # dislocation probability is 0 if the damage is set to 100% probability (insignificant, DS_0 = 1). # DS_0 bulding does not distinguish between in and out hazard boundaries. All DS_0 = 1 are set to @@ -251,7 +289,12 @@ def get_dislocation(self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.D # total_prob_disl is the sum of the probability of dislocation at four damage states # times the probability of being in that damage state. 
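For reference, the per-damage-state pipeline above (sample a value loss, turn it into a dislocation probability, then weight by the damage-state probabilities) can be reproduced standalone. A minimal sketch for a single housing unit, assuming illustrative beta-distribution value-loss parameters and reusing the logistic coefficients defined in PopulationDislocationUtil.get_disl_probability; it is not part of the patch.

    import numpy as np

    rng = np.random.RandomState(1111)  # plays the role of seed_i

    # Illustrative (alpha, beta, lb, ub) value-loss parameters per damage state;
    # the real values come from the value_loss_param dataset.
    params = {
        "DS_0": (1.0, 50.0, 0.00, 0.05),
        "DS_1": (2.0, 5.0, 0.00, 0.30),
        "DS_2": (2.0, 3.0, 0.20, 0.60),
        "DS_3": (2.0, 2.0, 0.50, 1.00),
    }

    def sample_value_loss(ds):
        alpha, beta, lb, ub = params[ds]
        return lb + rng.beta(alpha, beta) * (ub - lb)

    def p_dislocation(rploss, d_sf, pblackbg, phispbg):
        # logistic model with the coefficients used in get_disl_probability
        z = (-0.42523 + 0.02480 * (rploss * 100)
             - 0.50166 * d_sf - 0.01826 * pblackbg - 0.01198 * phispbg)
        return 1.0 / (1.0 + np.exp(-z))

    # one housing unit: single-family dummy plus block-group percentages
    d_sf, pblackbg, phispbg = 1, 10.0, 5.0
    p_ds = {"DS_0": 0.1, "DS_1": 0.2, "DS_2": 0.3, "DS_3": 0.4}  # sums to 1

    # prdis = sum over damage states of P(dislocation | DS_k) * P(DS_k)
    total_prob_disl = sum(
        p_dislocation(sample_value_loss(ds), d_sf, pblackbg, phispbg) * p_ds[ds]
        for ds in p_ds
    )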
- total_prob_disl = prob0_disl * prob0 + prob1_disl * prob1 + prob2_disl * prob2 + prob3_disl * prob3 + total_prob_disl = ( + prob0_disl * prob0 + + prob1_disl * prob1 + + prob2_disl * prob2 + + prob3_disl * prob3 + ) inventory["prdis"] = total_prob_disl diff --git a/pyincore/analyses/populationdislocation/populationdislocationutil.py b/pyincore/analyses/populationdislocation/populationdislocationutil.py index aa6b7ddf7..9242429ca 100644 --- a/pyincore/analyses/populationdislocation/populationdislocationutil.py +++ b/pyincore/analyses/populationdislocation/populationdislocationutil.py @@ -7,10 +7,12 @@ class PopulationDislocationUtil: - @staticmethod - def merge_damage_housing_block(building_dmg: pd.DataFrame, hua_inventory: pd.DataFrame, - block_data: pd.DataFrame): + def merge_damage_housing_block( + building_dmg: pd.DataFrame, + hua_inventory: pd.DataFrame, + block_data: pd.DataFrame, + ): """Load CSV files to pandas Dataframes, merge them and drop unused columns. Args: @@ -29,7 +31,9 @@ def merge_damage_housing_block(building_dmg: pd.DataFrame, hua_inventory: pd.Dat # first merge hazard with house unit allocation inventory on "guid" # note guid can be duplicated in housing unit allocation inventory - df = pd.merge(building_dmg, hua_inventory, how="right", on="guid", validate="1:m") + df = pd.merge( + building_dmg, hua_inventory, how="right", on="guid", validate="1:m" + ) # drop columns in building damage that are not used col_drops = ["LS_0", "LS_1", "LS_2", "hazardtype", "meandamage", "mdamagedev"] @@ -44,7 +48,9 @@ def merge_damage_housing_block(building_dmg: pd.DataFrame, hua_inventory: pd.Dat elif "blockid" in df.columns: df["bgid"] = df["blockid"].astype("Int64") elif "blockid_x" in df.columns: - df = PopulationDislocationUtil.compare_columns(df, "blockid_x", "blockid_y", True) + df = PopulationDislocationUtil.compare_columns( + df, "blockid_x", "blockid_y", True + ) if "blockid_x-blockid_y" in df.columns: exit("Column bgid is ambiguous, check the input datasets!") else: @@ -57,8 +63,12 @@ def merge_damage_housing_block(building_dmg: pd.DataFrame, hua_inventory: pd.Dat return final_df @staticmethod - def get_disl_probability(value_loss: np.array, d_sf: np.array, - percent_black_bg: np.array, percent_hisp_bg: np.array): + def get_disl_probability( + value_loss: np.array, + d_sf: np.array, + percent_black_bg: np.array, + percent_hisp_bg: np.array, + ): """ Calculate dislocation, the probability of dislocation for the household and population. 
Probability of dislocation Damage factor, @@ -80,20 +90,30 @@ def get_disl_probability(value_loss: np.array, d_sf: np.array, """ # coefficients for the Logistic regression model - coefficient = {"beta0": -0.42523, - "beta1": 0.02480, - "beta2": -0.50166, # single family coefficient - "beta3": -0.01826, # black block group coefficient - "beta4": -0.01198} # hispanic block group coefficient + coefficient = { + "beta0": -0.42523, + "beta1": 0.02480, + "beta2": -0.50166, # single family coefficient + "beta3": -0.01826, # black block group coefficient + "beta4": -0.01198, + } # hispanic block group coefficient disl_prob = np.zeros_like(d_sf) try: - disl_prob = 1.0 / (1 + np.exp(-1.0 * (coefficient["beta0"] * 1 + - coefficient["beta1"] * (value_loss * 100) + - coefficient["beta2"] * d_sf + - coefficient["beta3"] * percent_black_bg + - coefficient["beta4"] * percent_hisp_bg))) - except Exception as e: + disl_prob = 1.0 / ( + 1 + + np.exp( + -1.0 + * ( + coefficient["beta0"] * 1 + + coefficient["beta1"] * (value_loss * 100) + + coefficient["beta2"] * d_sf + + coefficient["beta3"] * percent_black_bg + + coefficient["beta4"] * percent_hisp_bg + ) + ) + ) + except Exception: print() # raise e @@ -118,10 +138,10 @@ def get_random_loss(seed_i: int, df: pd.DataFrame, damage_state: str, size: int) """ # select upper bound and lower bound from input table - alpha = df.loc[damage_state, 'alpha'] - beta = df.loc[damage_state, 'beta'] - ub = df.loc[damage_state, 'ub'] - lb = df.loc[damage_state, 'lb'] + alpha = df.loc[damage_state, "alpha"] + beta = df.loc[damage_state, "beta"] + ub = df.loc[damage_state, "ub"] + lb = df.loc[damage_state, "lb"] # Generate array of random values that follow beta distribution for damage state random_generator = np.random.RandomState(seed_i) @@ -131,7 +151,7 @@ def get_random_loss(seed_i: int, df: pd.DataFrame, damage_state: str, size: int) @staticmethod def compare_merges(table1_cols, table2_cols, table_merged): - """ Compare two lists of columns and run compare columns on columns in both lists. + """Compare two lists of columns and run compare columns on columns in both lists. It assumes that suffixes are _x and _y Args: @@ -146,10 +166,13 @@ def compare_merges(table1_cols, table2_cols, table_merged): match_column = set(table1_cols).intersection(table2_cols) for col in match_column: # Compare two columns and marked similarity or rename and drop - if col + "_x" in table_merged.columns and col + "_y" in table_merged.columns: - table_merged = PopulationDislocationUtil.compare_columns(table_merged, - col + "_x", - col + "_y", True) + if ( + col + "_x" in table_merged.columns + and col + "_y" in table_merged.columns + ): + table_merged = PopulationDislocationUtil.compare_columns( + table_merged, col + "_x", col + "_y", True + ) return table_merged @staticmethod @@ -181,7 +204,9 @@ def compare_columns(table, col1, col2, drop): return table @staticmethod - def get_choice_dislocation(pop_dislocation, choice_dislocation_cutoff, choice_dislocation_ds): + def get_choice_dislocation( + pop_dislocation, choice_dislocation_cutoff, choice_dislocation_ds + ): """Get choice dislocation based on the dislocation state and the choice dislocation cutoff. Args: @@ -193,16 +218,18 @@ def get_choice_dislocation(pop_dislocation, choice_dislocation_cutoff, choice_di null: None. 
""" - condition1 = pop_dislocation['dislocated'] == 1 + condition1 = pop_dislocation["dislocated"] == 1 condition2 = pop_dislocation[choice_dislocation_ds] > choice_dislocation_cutoff - pop_dislocation['choice_dis'] = np.where(condition1 & condition2, True, False) + pop_dislocation["choice_dis"] = np.where(condition1 & condition2, True, False) # Change dislocated to 0 if choice dislocation is 1 - pop_dislocation['dislocated'] = np.where(condition1 & condition2, False, True) + pop_dislocation["dislocated"] = np.where(condition1 & condition2, False, True) return None @staticmethod - def get_unsafe_occupancy(pop_dislocation, unsafe_occupancy_cutoff, unsafe_occupancy_ds): + def get_unsafe_occupancy( + pop_dislocation, unsafe_occupancy_cutoff, unsafe_occupancy_ds + ): """Get unsafe occupancy based on the dislocation state and the unsafe occupancy cutoff. Args: @@ -214,10 +241,10 @@ def get_unsafe_occupancy(pop_dislocation, unsafe_occupancy_cutoff, unsafe_occupa null: None. """ - condition1 = pop_dislocation['dislocated'] == 0 + condition1 = pop_dislocation["dislocated"] == 0 condition2 = pop_dislocation[unsafe_occupancy_ds] > unsafe_occupancy_cutoff - pop_dislocation['unsafe_occ'] = np.where(condition1 & condition2, True, False) + pop_dislocation["unsafe_occ"] = np.where(condition1 & condition2, True, False) # Change dislocated to 1 if unsafe occupancy is 1 - pop_dislocation['dislocated'] = np.where(condition1 & condition2, True, False) + pop_dislocation["dislocated"] = np.where(condition1 & condition2, True, False) return None diff --git a/pyincore/analyses/residentialbuildingrecovery/__init__.py b/pyincore/analyses/residentialbuildingrecovery/__init__.py index 60fbf986e..bad248bb9 100644 --- a/pyincore/analyses/residentialbuildingrecovery/__init__.py +++ b/pyincore/analyses/residentialbuildingrecovery/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.residentialbuildingrecovery.residentialbuildingrecovery import ResidentialBuildingRecovery +from pyincore.analyses.residentialbuildingrecovery.residentialbuildingrecovery import ( + ResidentialBuildingRecovery, +) diff --git a/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py b/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py index 45165014b..a54d56dac 100644 --- a/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py +++ b/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py @@ -52,7 +52,7 @@ def run(self): """ # TODO: Start using seed - seed = self.get_parameter("seed") + # seed = self.get_parameter("seed") num_samples = self.get_parameter("num_samples") result_name = self.get_parameter("result_name") @@ -64,22 +64,44 @@ def run(self): buildings = self.get_input_dataset("buildings").get_inventory_reader() buildings = list(buildings) - sample_damage_states = self.get_input_dataset("sample_damage_states").get_dataframe_from_csv(low_memory=False) - socio_demographic_data = self.get_input_dataset("socio_demographic_data").get_dataframe_from_csv( - low_memory=False) + sample_damage_states = self.get_input_dataset( + "sample_damage_states" + ).get_dataframe_from_csv(low_memory=False) + socio_demographic_data = self.get_input_dataset( + "socio_demographic_data" + ).get_dataframe_from_csv(low_memory=False) # socio_demographic_data.set_index("guid", inplace=True) - financial_resources = self.get_input_dataset("financial_resources").get_dataframe_from_csv(low_memory=False) - 
redi_delay_factors = self.get_input_dataset("delay_factors").get_dataframe_from_csv(low_memory=False) + financial_resources = self.get_input_dataset( + "financial_resources" + ).get_dataframe_from_csv(low_memory=False) + redi_delay_factors = self.get_input_dataset( + "delay_factors" + ).get_dataframe_from_csv(low_memory=False) # Returns dataframe - recovery_results = self.residential_recovery(buildings, sample_damage_states, socio_demographic_data, - financial_resources, redi_delay_factors, num_samples) - self.set_result_csv_data("residential_building_recovery", recovery_results, result_name, "dataframe") + recovery_results = self.residential_recovery( + buildings, + sample_damage_states, + socio_demographic_data, + financial_resources, + redi_delay_factors, + num_samples, + ) + self.set_result_csv_data( + "residential_building_recovery", recovery_results, result_name, "dataframe" + ) return True - def residential_recovery(self, buildings, sample_damage_states, socio_demographic_data, financial_resources, - redi_delay_factors, num_samples): + def residential_recovery( + self, + buildings, + sample_damage_states, + socio_demographic_data, + financial_resources, + redi_delay_factors, + num_samples, + ): """ Calculates residential building recovery for buildings @@ -97,46 +119,83 @@ def residential_recovery(self, buildings, sample_damage_states, socio_demographi """ start_household_income_prediction = time.process_time() - household_income_prediction = ResidentialBuildingRecovery.household_income_prediction(socio_demographic_data, - num_samples) + household_income_prediction = ( + ResidentialBuildingRecovery.household_income_prediction( + socio_demographic_data, num_samples + ) + ) end_start_household_income_prediction = time.process_time() - print("Finished executing household_income_prediction() in " + - str(end_start_household_income_prediction - start_household_income_prediction) + " secs") - - household_aggregation = ResidentialBuildingRecovery.household_aggregation(household_income_prediction) + print( + "Finished executing household_income_prediction() in " + + str( + end_start_household_income_prediction + - start_household_income_prediction + ) + + " secs" + ) + + household_aggregation = ResidentialBuildingRecovery.household_aggregation( + household_income_prediction + ) end_household_aggregation = time.process_time() - print("Finished executing household_aggregation() in " + - str(end_household_aggregation - end_start_household_income_prediction) + " secs") - - financing_delay = ResidentialBuildingRecovery.financing_delay(household_aggregation, financial_resources) + print( + "Finished executing household_aggregation() in " + + str(end_household_aggregation - end_start_household_income_prediction) + + " secs" + ) + + financing_delay = ResidentialBuildingRecovery.financing_delay( + household_aggregation, financial_resources + ) end_financing_delay = time.process_time() - print("Finished executing financing_delay() in " + - str(end_financing_delay - end_household_aggregation) + " secs") - - total_delay = ResidentialBuildingRecovery.total_delay(sample_damage_states, redi_delay_factors, - financing_delay) + print( + "Finished executing financing_delay() in " + + str(end_financing_delay - end_household_aggregation) + + " secs" + ) + + total_delay = ResidentialBuildingRecovery.total_delay( + sample_damage_states, redi_delay_factors, financing_delay + ) end_total_delay = time.process_time() - print("Finished executing total_delay() in " + str(end_total_delay - end_financing_delay) + " 
secs") + print( + "Finished executing total_delay() in " + + str(end_total_delay - end_financing_delay) + + " secs" + ) recovery = self.recovery_rate(buildings, sample_damage_states, total_delay) end_recovery = time.process_time() - print("Finished executing recovery_rate() in " + str(end_recovery - end_total_delay) + " secs") - - time_stepping_recovery = ResidentialBuildingRecovery.time_stepping_recovery(recovery) + print( + "Finished executing recovery_rate() in " + + str(end_recovery - end_total_delay) + + " secs" + ) + + time_stepping_recovery = ResidentialBuildingRecovery.time_stepping_recovery( + recovery + ) end_time_stepping_recovery = time.process_time() - print("Finished executing time_stepping_recovery() in " + - str(end_time_stepping_recovery - end_recovery) + " secs") + print( + "Finished executing time_stepping_recovery() in " + + str(end_time_stepping_recovery - end_recovery) + + " secs" + ) result = time_stepping_recovery end_time = time.process_time() - print("Analysis completed in " + str(end_time - start_household_income_prediction) + " secs") + print( + "Analysis completed in " + + str(end_time - start_household_income_prediction) + + " secs" + ) return result @staticmethod def household_income_prediction(income_groups, num_samples): - """ Get Income group prediction for each household + """Get Income group prediction for each household Args: income_groups (pd.DataFrame): Socio-demographic data with household income group prediction. @@ -147,13 +206,13 @@ def household_income_prediction(income_groups, num_samples): """ - blockid = income_groups.groupby('blockid') + blockid = income_groups.groupby("blockid") prediction_results = pd.DataFrame() for name, group in blockid: # Prepare data for numpy processing group_size = group.shape[0] - group_hhinc_values = group['hhinc'].values + group_hhinc_values = group["hhinc"].values # Compute normal distribution parameters from group data @@ -188,15 +247,17 @@ def household_income_prediction(income_groups, num_samples): group_samples[i, :] = group_hhinc_values group_samples[i, group_nan_idx[0]] = np.around(sample) # Now reassemble into Pandas DataFrame - group['sample_{}'.format(i)] = group_samples[i, :] + group["sample_{}".format(i)] = group_samples[i, :] - prediction_results = pd.concat([prediction_results, group], ignore_index=True) + prediction_results = pd.concat( + [prediction_results, group], ignore_index=True + ) return prediction_results @staticmethod def household_aggregation(household_income_predictions): - """ Gets household aggregation of income groups at the building level. + """Gets household aggregation of income groups at the building level. 
Args: household_income_predictions (pd.DataFrame): Income group prediction for each household @@ -206,8 +267,10 @@ def household_aggregation(household_income_predictions): """ # Drop all unnecessary columns first - household_income_predictions_dropped = household_income_predictions.drop(columns=['huid', 'blockid', 'hhinc']) - guid_group = household_income_predictions_dropped.groupby('guid') + household_income_predictions_dropped = household_income_predictions.drop( + columns=["huid", "blockid", "hhinc"] + ) + guid_group = household_income_predictions_dropped.groupby("guid") # Obtain sample column names colnames = list(household_income_predictions_dropped.columns[1:]) @@ -217,10 +280,10 @@ def household_aggregation(household_income_predictions): for name, group in guid_group: # Obtain guids - local_guids = group['guid'] + local_guids = group["guid"] # Remove guids - no_guids = group.drop(columns=['guid']).to_numpy() + no_guids = group.drop(columns=["guid"]).to_numpy() # Compute the maxima of all columns maxima = list(no_guids.max(axis=0)) @@ -233,8 +296,10 @@ def household_aggregation(household_income_predictions): for i in range(0, num_cols): no_guids_maxima[:, i] = maxima[i] - group_new = pd.DataFrame(no_guids_maxima, columns=colnames, index=group.index) - group_new.insert(0, 'guid', local_guids) + group_new = pd.DataFrame( + no_guids_maxima, columns=colnames, index=group.index + ) + group_new.insert(0, "guid", local_guids) new_groups.append(group_new.head(1)) # Construct a new DataFrame @@ -244,7 +309,7 @@ def household_aggregation(household_income_predictions): @staticmethod def financing_delay(household_aggregated_income_groups, financial_resources): - """ Gets financing delay, the percentages calculated are the probabilities of housing units financed by + """Gets financing delay, the percentages calculated are the probabilities of housing units financed by different resources. Args: @@ -258,10 +323,12 @@ def financing_delay(household_aggregated_income_groups, financial_resources): colnames = list(household_aggregated_income_groups.columns)[1:] # Save guid's for later - household_guids = household_aggregated_income_groups['guid'] + household_guids = household_aggregated_income_groups["guid"] # Convert household aggregated income to numpy - samples_np = household_aggregated_income_groups.drop(columns=['guid']).to_numpy() + samples_np = household_aggregated_income_groups.drop( + columns=["guid"] + ).to_numpy() # Number of guids num_households = household_guids.shape[0] @@ -284,20 +351,26 @@ def financing_delay(household_aggregated_income_groups, financial_resources): idx = np.where(hhinc == value) # 1. Sample the lognormal distribution vectorially - lognormal_vec = np.random.lognormal(np.log(sources[mean_idx, :]), sources[sigma_idx, :]) + lognormal_vec = np.random.lognormal( + np.log(sources[mean_idx, :]), sources[sigma_idx, :] + ) # 2. 
Compute the delay using the dot product of the prior vector and sources for the current index, # round to one significant figure - samples_np[household, sample] = np.round(np.dot(lognormal_vec, sources[idx, :].flatten()), 1) + samples_np[household, sample] = np.round( + np.dot(lognormal_vec, sources[idx, :].flatten()), 1 + ) - financing_delay = pd.DataFrame(samples_np, columns=colnames, index=household_aggregated_income_groups.index) - financing_delay.insert(0, 'guid', household_guids) + financing_delay = pd.DataFrame( + samples_np, columns=colnames, index=household_aggregated_income_groups.index + ) + financing_delay.insert(0, "guid", household_guids) return financing_delay @staticmethod def total_delay(sample_damage_states, redi_delay_factors, financing_delay): - """ Calculates total delay by combining financial delay and other factors from REDi framework + """Calculates total delay by combining financial delay and other factors from REDi framework Args: sample_damage_states (pd.DataFrame): Building inventory damage states. @@ -314,24 +387,35 @@ def total_delay(sample_damage_states, redi_delay_factors, financing_delay): colnames = list(financing_delay.columns)[1:] # Perform an inner join to ensure only households with damage states are processed - merged_delay = pd.merge(financing_delay, sample_damage_states, on='guid') + merged_delay = pd.merge(financing_delay, sample_damage_states, on="guid") # Obtain the guids - merged_delay_guids = merged_delay['guid'] + merged_delay_guids = merged_delay["guid"] # Obtain the damage states - merged_delay_damage_states = merged_delay['sample_damage_states'] + merged_delay_damage_states = merged_delay["sample_damage_states"] # Convert to numpy - samples_np = merged_delay.drop(columns=['guid', 'sample_damage_states']).to_numpy() + samples_np = merged_delay.drop( + columns=["guid", "sample_damage_states"] + ).to_numpy() num_samples = len(colnames) # First, we decompose redi_delay_factors into two dictionaries that can be used to compute vector operations - redi_idx = dict(zip(redi_delay_factors['Building_specific_conditions'], redi_delay_factors.index)) + redi_idx = dict( + zip( + redi_delay_factors["Building_specific_conditions"], + redi_delay_factors.index, + ) + ) # Next, we produce two intermediate numpy matrices: one for med and one for sdv - redi_med = redi_delay_factors[['Ins_med', 'Enmo_med', 'Como_med', 'Per_med']].to_numpy() - redi_sdv = redi_delay_factors[['Ins_sdv', 'Enmo_sdv', 'Como_sdv', 'Per_sdv']].to_numpy() + redi_med = redi_delay_factors[ + ["Ins_med", "Enmo_med", "Como_med", "Per_med"] + ].to_numpy() + redi_sdv = redi_delay_factors[ + ["Ins_sdv", "Enmo_sdv", "Como_sdv", "Per_sdv"] + ].to_numpy() # Define indices to facilitate interpretation of the code inspection_idx = 0 @@ -355,21 +439,25 @@ def total_delay(sample_damage_states, redi_delay_factors, financing_delay): delay_vec = np.random.lognormal(np.log(mean_vec), sdv_vec) # Compute the delay using that vector and financing delays, already computed in the prior step - samples_np[i, j] = np.round(delay_vec[inspection_idx] + - np.max([ - delay_vec[engineer_idx], - samples_np[i, j], - delay_vec[contractor_idx] - ]) + - delay_vec[permit_idx]) + samples_np[i, j] = np.round( + delay_vec[inspection_idx] + + np.max( + [ + delay_vec[engineer_idx], + samples_np[i, j], + delay_vec[contractor_idx], + ] + ) + + delay_vec[permit_idx] + ) total_delay = pd.DataFrame(samples_np, columns=colnames) - total_delay.insert(0, 'guid', merged_delay_guids) + total_delay.insert(0, "guid", merged_delay_guids) 
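The loop above composes the REDi total delay as inspection + max(engineering mobilization, financing, contractor mobilization) + permitting, with each impeding factor drawn from a lognormal distribution around its median. A minimal standalone sketch of that composition, using hypothetical medians and dispersions (not values from the delay_factors dataset):

    import numpy as np

    rng = np.random.default_rng(0)

    # Hypothetical REDi-style medians and lognormal dispersions, in the same time
    # unit as the financing delay, for one damage state:
    # inspection, engineering mobilization, contractor mobilization, permitting.
    median = np.array([5.0, 15.0, 20.0, 7.0])
    dispersion = np.array([0.5, 0.6, 0.7, 0.3])

    financing_delay = 30.0  # value produced by the financing_delay step for this sample

    inspection, engineer, contractor, permit = rng.lognormal(np.log(median), dispersion)
    total_delay = np.round(inspection + max(engineer, financing_delay, contractor) + permit)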
return total_delay def recovery_rate(self, buildings, sample_damage_states, total_delay): - """ Gets total time required for each building to receive full restoration. Determined by the combination of + """Gets total time required for each building to receive full restoration. Determined by the combination of delay time and repair time Args: @@ -382,37 +470,43 @@ def recovery_rate(self, buildings, sample_damage_states, total_delay): """ repair_key = self.get_parameter("repair_key") - repair_sets = self.repairsvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, repair_key) - repair_sets_by_guid = {} # get repair sets by guid so they can be mapped with output of monte carlo + repair_sets = self.repairsvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), buildings, repair_key + ) + repair_sets_by_guid = ( + {} + ) # get repair sets by guid so they can be mapped with output of monte carlo # This is sort of a workaround until we define Repair Curve models and abstract this out there for i, b in enumerate(buildings): - repair_sets_by_guid[b["properties"]['guid']] = repair_sets[str(i)] + repair_sets_by_guid[b["properties"]["guid"]] = repair_sets[str(i)] # Obtain the column names colnames = list(total_delay.columns)[1:] # Perform an inner join to ensure only households with damage states are processed - merged_delay = pd.merge(total_delay, sample_damage_states, on='guid') + merged_delay = pd.merge(total_delay, sample_damage_states, on="guid") # Obtain the guids - merged_delay_guids = merged_delay['guid'] + merged_delay_guids = merged_delay["guid"] # Obtain the damage states - merged_delay_damage_states = merged_delay['sample_damage_states'] + merged_delay_damage_states = merged_delay["sample_damage_states"] # Convert to numpy - samples_np = merged_delay.drop(columns=['guid', 'sample_damage_states']).to_numpy() + samples_np = merged_delay.drop( + columns=["guid", "sample_damage_states"] + ).to_numpy() num_samples = len(colnames) num_households = samples_np.shape[0] # Generate a long numpy matrix for combined N1, N2 samples - samples_n1_n2 = np.zeros((num_households, num_samples*num_samples)) + samples_n1_n2 = np.zeros((num_households, num_samples * num_samples)) # Now, we define an internal function to take care of the index for the prior case # Now, we define an internal function to take care of the index for the prior case def idx(x, y): - return x*num_samples + y + return x * num_samples + y for household in range(0, num_households): # Obtain the damage states @@ -422,7 +516,7 @@ def idx(x, y): # Use a lambda to obtain the damage state in numeric form. Note that since damage states are single digits, # it suffices to look at the last character and convert into an integer value. Do this computation once # per household only. - extract_ds = lambda x: int(x[-1]) + extract_ds = lambda x: int(x[-1]) # noqa: E731 samples_mcs_ds = list(map(extract_ds, samples_mcs)) # Now, perform the two nested loops, using the indexing function to simplify the syntax. @@ -431,23 +525,34 @@ def idx(x, y): percent_func = np.random.random(num_samples) # NOTE: Even though the kwarg name is "repair_time", it actually takes percent of functionality. DFR3 # system currently doesn't have a way to represent the name correctly when calculating the inverse. 
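Before the repair-time draw below is added in, recovery_rate lays the N1 total-delay samples against the N2 repair-time samples on a flattened grid, using idx(x, y) = x * num_samples + y so that column sample_{x}_{y} holds delay sample x plus repair sample y. A small sketch of that combination for one building, with made-up sample values:

    import numpy as np

    num_samples = 3
    delay = np.array([10.0, 12.5, 9.0])   # N1 total-delay samples for one building (weeks)
    repair = np.array([4.0, 6.5, 5.0])    # N2 repair-time samples (weeks)

    def idx(x, y):
        # same flattening as above: sample_{x}_{y} lands at column x * num_samples + y
        return x * num_samples + y

    combined = np.zeros(num_samples * num_samples)
    for i in range(num_samples):
        for j in range(num_samples):
            combined[idx(i, j)] = round(delay[i] + repair[j], 1)

    # equivalent vectorized form: an outer sum flattened in row-major order
    assert np.allclose(combined, np.round(np.add.outer(delay, repair).ravel(), 1))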
- repair_time = mapped_repair.repair_curves[state].solve_curve_for_inverse( - hazard_values={}, curve_parameters=mapped_repair.curve_parameters, **{"repair_time": percent_func} - ) / 7 + repair_time = ( + mapped_repair.repair_curves[state].solve_curve_for_inverse( + hazard_values={}, + curve_parameters=mapped_repair.curve_parameters, + **{"repair_time": percent_func}, + ) + / 7 + ) for j in range(0, num_samples): - samples_n1_n2[household, idx(i, j)] = round(samples_np[household, i] + repair_time[j], 1) + samples_n1_n2[household, idx(i, j)] = round( + samples_np[household, i] + repair_time[j], 1 + ) # Now, generate all the labels using list comprehension outside the loops - colnames = [f'sample_{i}_{j}' for i in range(0, num_samples) for j in range(0, num_samples)] + colnames = [ + f"sample_{i}_{j}" + for i in range(0, num_samples) + for j in range(0, num_samples) + ] recovery_time = pd.DataFrame(samples_n1_n2, columns=colnames) - recovery_time.insert(0, 'guid', merged_delay_guids) + recovery_time.insert(0, "guid", merged_delay_guids) return recovery_time @staticmethod def time_stepping_recovery(recovery_results): - """ Converts results to a time frame. Currently gives results for 16 quarters over 4 year. + """Converts results to a time frame. Currently gives results for 16 quarters over 4 year. Args: recovery_results (pd.DataFrame): Total recovery time of financial delay and other factors from REDi framework. @@ -462,10 +567,10 @@ def time_stepping_recovery(recovery_results): total_time = time_step * np.linspace(0, 4 * year, num=17, endpoint=True) # Save guid's for later - recovery_results_guids = recovery_results['guid'] + recovery_results_guids = recovery_results["guid"] # Convert household aggregated income to numpy - samples_n1_n2 = recovery_results.drop(columns=['guid']).to_numpy() + samples_n1_n2 = recovery_results.drop(columns=["guid"]).to_numpy() # Number of guids num_households = recovery_results.shape[0] @@ -477,13 +582,16 @@ def time_stepping_recovery(recovery_results): for household in range(0, num_households): for i in range(len(total_time)): - fun_state = np.count_nonzero(samples_n1_n2[household, :] < total_time[i]) / num_samples + fun_state = ( + np.count_nonzero(samples_n1_n2[household, :] < total_time[i]) + / num_samples + ) times_np[household, i] = np.round(fun_state, 2) - colnames = [f'quarter_{i}' for i in range(0, num_times)] + colnames = [f"quarter_{i}" for i in range(0, num_times)] time_stepping_recovery = pd.DataFrame(times_np, columns=colnames) - time_stepping_recovery.insert(0, 'guid', recovery_results_guids) + time_stepping_recovery.insert(0, "guid", recovery_results_guids) return time_stepping_recovery @@ -495,79 +603,83 @@ def get_spec(self): """ return { - 'name': 'residential-building-recovery', - 'description': 'calculate residential building recovery', - 'input_parameters': [ + "name": "residential-building-recovery", + "description": "calculate residential building recovery", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'name of the result', - 'type': str + "id": "result_name", + "required": True, + "description": "name of the result", + "type": str, }, { - 'id': 'num_samples', - 'required': True, - 'description': 'Number of sample scenarios', - 'type': int + "id": "num_samples", + "required": True, + "description": "Number of sample scenarios", + "type": int, }, { - 'id': 'repair_key', - 'required': False, - 'description': 'Repair key to use in mapping dataset', - 'type': str + "id": "repair_key", + "required": 
False, + "description": "Repair key to use in mapping dataset", + "type": str, }, { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the probabilistic model', - 'type': int - } + "id": "seed", + "required": False, + "description": "Initial seed for the probabilistic model", + "type": int, + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', 'ergo:buildingInventoryVer6', - 'ergo:buildingInventoryVer7'] + "id": "buildings", + "required": True, + "description": "Building Inventory", + "type": [ + "ergo:buildingInventoryVer4", + "ergo:buildingInventoryVer5", + "ergo:buildingInventoryVer6", + "ergo:buildingInventoryVer7", + ], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], }, { - 'id': 'sample_damage_states', - 'required': True, - 'description': 'Sample damage states', - 'type': ['incore:sampleDamageState'] + "id": "sample_damage_states", + "required": True, + "description": "Sample damage states", + "type": ["incore:sampleDamageState"], }, { - 'id': 'socio_demographic_data', - 'required': True, - 'description': 'Socio-demographic data with household income group predictions', - 'type': ['incore:socioDemograhicData'] + "id": "socio_demographic_data", + "required": True, + "description": "Socio-demographic data with household income group predictions", + "type": ["incore:socioDemograhicData"], }, { - 'id': 'financial_resources', - 'required': True, - 'description': 'Financial resources by household income groups', - 'type': ['incore:householdFinancialResources'] + "id": "financial_resources", + "required": True, + "description": "Financial resources by household income groups", + "type": ["incore:householdFinancialResources"], }, { - 'id': 'delay_factors', - 'required': True, - 'description': 'Delay impeding factors such as post-disaster inspection, insurance claim, ' - 'and government permit based on building\'s damage state. Provided by REDi framework', - 'type': ['incore:buildingRecoveryFactors'] - } + "id": "delay_factors", + "required": True, + "description": "Delay impeding factors such as post-disaster inspection, insurance claim, " + "and government permit based on building's damage state. 
Provided by REDi framework", + "type": ["incore:buildingRecoveryFactors"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'residential_building_recovery', - 'description': 'CSV file of residential building recovery percent', - 'type': 'incore:buildingRecovery' + "id": "residential_building_recovery", + "description": "CSV file of residential building recovery percent", + "type": "incore:buildingRecovery", } - ] + ], } diff --git a/pyincore/analyses/roaddamage/roaddamage.py b/pyincore/analyses/roaddamage/roaddamage.py index 82fb0ea63..5d8450cf0 100644 --- a/pyincore/analyses/roaddamage/roaddamage.py +++ b/pyincore/analyses/roaddamage/roaddamage.py @@ -7,7 +7,13 @@ import concurrent.futures from itertools import repeat -from pyincore import BaseAnalysis, HazardService, FragilityService, AnalysisUtil, GeoUtil +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + AnalysisUtil, + GeoUtil, +) from pyincore.models.dfr3curve import DFR3Curve @@ -18,6 +24,7 @@ class RoadDamage(BaseAnalysis): incore_client (IncoreClient): Service authentication. """ + DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code" def __init__(self, incore_client): @@ -37,7 +44,11 @@ def run(self): fragility_key = self.DEFAULT_FRAGILITY_KEY # get input hazard - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() # Liquefaction use_liquefaction = False @@ -55,34 +66,45 @@ def run(self): use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") user_defined_cpu = 1 - if self.get_parameter("num_cpu") is not None and self.get_parameter("num_cpu") > 0: + if ( + self.get_parameter("num_cpu") is not None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(road_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(road_set), user_defined_cpu + ) avg_bulk_input_size = int(len(road_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(road_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - (ds_results, damage_results) = self.road_damage_concurrent_future(self.road_damage_analysis_bulk_input, - num_workers, - inventory_args, - repeat(hazard), - repeat(hazard_type), - repeat(hazard_dataset_id), - repeat(use_hazard_uncertainty), - repeat(geology_dataset_id), - repeat(fragility_key), - repeat(use_liquefaction)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + (ds_results, damage_results) = self.road_damage_concurrent_future( + self.road_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + repeat(use_hazard_uncertainty), + repeat(geology_dataset_id), + repeat(fragility_key), + repeat(use_liquefaction), + ) + + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True @@ -102,15 +124,26 @@ def 
road_damage_concurrent_future(self, function_name, num_workers, *args): output_ds = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def road_damage_analysis_bulk_input(self, roads, hazard, hazard_type, hazard_dataset_id, use_hazard_uncertainty, - geology_dataset_id, fragility_key, use_liquefaction): + def road_damage_analysis_bulk_input( + self, + roads, + hazard, + hazard_type, + hazard_dataset_id, + use_hazard_uncertainty, + geology_dataset_id, + fragility_key, + use_liquefaction, + ): """Run analysis for multiple roads. Args: @@ -129,8 +162,9 @@ def road_damage_analysis_bulk_input(self, roads, hazard, hazard_type, hazard_dat list: A list of ordered dictionaries with other road data/metadata. """ - fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), roads, - fragility_key) + fragility_sets = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), roads, fragility_key + ) values_payload = [] mapped_roads = [] @@ -145,14 +179,10 @@ def road_damage_analysis_bulk_input(self, roads, hazard, hazard_type, hazard_dat loc = str(location.y) + "," + str(location.x) demands = fragility_set.demand_types # for liquefaction - if any(demand.lower() != 'pgd' for demand in demands): + if any(demand.lower() != "pgd" for demand in demands): pgd_flag = False units = fragility_set.demand_units - value = { - "demands": demands, - "units": units, - "loc": loc - } + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) mapped_roads.append(road) else: @@ -160,17 +190,20 @@ def road_damage_analysis_bulk_input(self, roads, hazard, hazard_type, hazard_dat del roads # get hazard and liquefaction values - if hazard_type == 'earthquake': + if hazard_type == "earthquake": hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) if pgd_flag and use_liquefaction and geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id, - values_payload) + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload + ) - elif hazard_type == 'tsunami' or 'hurricane': + elif hazard_type == "tsunami" or "hurricane": hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) else: - raise ValueError("The provided hazard type is not supported yet by this analysis") + raise ValueError( + "The provided hazard type is not supported yet by this analysis" + ) # calculate LS and DS ds_results = [] @@ -183,62 +216,76 @@ def road_damage_analysis_bulk_input(self, roads, hazard, hazard_type, hazard_dat liq_hazard_vals = None liquefaction_prob = None selected_fragility_set = fragility_sets[road["id"]] - hazard_std_dev = 0.0 + # hazard_std_dev = 0.0 if use_hazard_uncertainty: raise ValueError("Uncertainty Not Implemented Yet.") if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve): - hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) + hazard_vals = AnalysisUtil.update_precision_of_lists( + hazard_resp[i]["hazardValues"] + ) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] hval_dict = dict() for j, d in 
enumerate(selected_fragility_set.demand_types): hval_dict[d] = hazard_vals[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): - road_args = selected_fragility_set.construct_expression_args_from_inventory(road) + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_resp[i]["hazardValues"] + ): + road_args = ( + selected_fragility_set.construct_expression_args_from_inventory( + road + ) + ) dmg_probability = selected_fragility_set.calculate_limit_state( - hval_dict, inventory_type='road', **road_args) + hval_dict, inventory_type="road", **road_args + ) # if there is liquefaction, overwrite the hazardval with liquefaction value # recalculate dmg_probability and dmg_interval if liquefaction_resp is not None and len(liquefaction_resp) > 0: - liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"]) - demand_types_liq = liquefaction_resp[i]['demands'] - demand_units_liq = liquefaction_resp[i]['units'] - liquefaction_prob = liquefaction_resp[i]['liqProbability'] + liq_hazard_vals = AnalysisUtil.update_precision_of_lists( + liquefaction_resp[i]["pgdValues"] + ) + demand_types_liq = liquefaction_resp[i]["demands"] + demand_units_liq = liquefaction_resp[i]["units"] + liquefaction_prob = liquefaction_resp[i]["liqProbability"] liq_hval_dict = dict() for j, d in enumerate(liquefaction_resp[i]["demands"]): liq_hval_dict[d] = liq_hazard_vals[j] dmg_probability = selected_fragility_set.calculate_limit_state( - liq_hval_dict, - inventory_type='road', - **road_args) + liq_hval_dict, inventory_type="road", **road_args + ) - dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability, - hazard_type=hazard_type, - inventory_type="road") + dmg_interval = selected_fragility_set.calculate_damage_interval( + dmg_probability, hazard_type=hazard_type, inventory_type="road" + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." 
+ ) ds_result = dict() - ds_result['guid'] = road['properties']['guid'] + ds_result["guid"] = road["properties"]["guid"] ds_result.update(dmg_probability) ds_result.update(dmg_interval) - ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) + ds_result["haz_expose"] = AnalysisUtil.get_exposure_from_hazard_values( + hazard_vals, hazard_type + ) damage_result = dict() - damage_result['guid'] = road['properties']['guid'] - damage_result['fragility_id'] = selected_fragility_set.id - damage_result['demandtypes'] = demand_types - damage_result['demandunits'] = demand_units - damage_result['hazardtype'] = hazard_type - damage_result['hazardvals'] = hazard_vals - damage_result['liqdemandtypes'] = demand_types_liq - damage_result['liqdemandunits'] = demand_units_liq - damage_result['liqhazvals'] = liq_hazard_vals - damage_result['liqprobability'] = liquefaction_prob + damage_result["guid"] = road["properties"]["guid"] + damage_result["fragility_id"] = selected_fragility_set.id + damage_result["demandtypes"] = demand_types + damage_result["demandunits"] = demand_units + damage_result["hazardtype"] = hazard_type + damage_result["hazardvals"] = hazard_vals + damage_result["liqdemandtypes"] = demand_types_liq + damage_result["liqdemandunits"] = demand_units_liq + damage_result["liqhazvals"] = liq_hazard_vals + damage_result["liqprobability"] = liquefaction_prob ds_results.append(ds_result) damage_results.append(damage_result) @@ -247,18 +294,18 @@ def road_damage_analysis_bulk_input(self, roads, hazard, hazard_type, hazard_dat ds_result = dict() damage_result = dict() - ds_result['guid'] = road['properties']['guid'] + ds_result["guid"] = road["properties"]["guid"] - damage_result['guid'] = road['properties']['guid'] - damage_result['fragility_id'] = None - damage_result['demandtypes'] = None - damage_result['demandunits'] = None - damage_result['hazardtype'] = None - damage_result['hazardvals'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazvals'] = None - damage_result['liqprobability'] = None + damage_result["guid"] = road["properties"]["guid"] + damage_result["fragility_id"] = None + damage_result["demandtypes"] = None + damage_result["demandunits"] = None + damage_result["hazardtype"] = None + damage_result["hazardvals"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazvals"] = None + damage_result["liqprobability"] = None ds_results.append(ds_result) damage_results.append(damage_result) @@ -274,94 +321,98 @@ def get_spec(self): """ return { - 'name': 'road-damage', - 'description': 'road damage analysis', - 'input_parameters': [ + "name": "road-damage", + "description": "road damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. earthquake)', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard Type (e.g. 
earthquake)", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, }, { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Liquefaction geology/susceptibility dataset id. ' - 'If not provided, liquefaction will be ignored', - 'type': str + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Liquefaction geology/susceptibility dataset id. " + "If not provided, liquefaction will be ignored", + "type": str, }, { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "hurricane", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "hurricane", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'roads', - 'required': True, - 'description': 'Road Inventory', - 'type': ['ergo:roadLinkTopo', 'incore:roads', 'ergo:roadLinkTopoVer2'] + "id": "roads", + "required": True, + "description": "Road Inventory", + "type": [ + "ergo:roadLinkTopo", + "incore:roads", + "ergo:roadLinkTopoVer2", + ], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'roads', - 'description': 'CSV file of road structural damage', - 'type': 'ergo:roadDamageVer3' + "id": "result", + "parent_type": "roads", + "description": "CSV file of road structural damage", + "type": "ergo:roadDamageVer3", }, { - 'id': 'metadata', - 'parent_type': 'roads', - 'description': 'additional metadata in json file about applied hazard value and ' - 'fragility', - 'type': 'incore:roadDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "roads", + "description": "additional metadata in json file about applied hazard value and " + "fragility", + "type": "incore:roadDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/socialvulnerability/__init__.py b/pyincore/analyses/socialvulnerability/__init__.py index 38847d9e4..1153f43d3 100644 --- a/pyincore/analyses/socialvulnerability/__init__.py +++ b/pyincore/analyses/socialvulnerability/__init__.py @@ -1 +1,3 @@ -from pyincore.analyses.socialvulnerability.socialvulnerability import 
SocialVulnerability +from pyincore.analyses.socialvulnerability.socialvulnerability import ( + SocialVulnerability, +) diff --git a/pyincore/analyses/socialvulnerability/socialvulnerability.py b/pyincore/analyses/socialvulnerability/socialvulnerability.py index 86d86d05a..8ca79ea57 100644 --- a/pyincore/analyses/socialvulnerability/socialvulnerability.py +++ b/pyincore/analyses/socialvulnerability/socialvulnerability.py @@ -9,8 +9,11 @@ from pyincore.analyses.socialvulnerabilityscore import SocialVulnerabilityScore -@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use SocialVulnerabilityScore instead.") -class SocialVulnerability(): +@deprecated( + version="1.19.0", + reason="This class will be deprecated soon. Use SocialVulnerabilityScore instead.", +) +class SocialVulnerability: def __init__(self, incore_client): self._delegate = SocialVulnerabilityScore(incore_client) diff --git a/pyincore/analyses/socialvulnerabilityscore/__init__.py b/pyincore/analyses/socialvulnerabilityscore/__init__.py index c4b8e13bb..398760ec6 100644 --- a/pyincore/analyses/socialvulnerabilityscore/__init__.py +++ b/pyincore/analyses/socialvulnerabilityscore/__init__.py @@ -5,4 +5,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.socialvulnerabilityscore.socialvulnerabilityscore import SocialVulnerabilityScore +from pyincore.analyses.socialvulnerabilityscore.socialvulnerabilityscore import ( + SocialVulnerabilityScore, +) diff --git a/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py b/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py index f76e3f7ce..f8c91c19c 100644 --- a/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py +++ b/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py @@ -32,19 +32,32 @@ def __init__(self, incore_client): def run(self): """Execute the social vulnerability score analysis using known parameters.""" - df_navs = pd.DataFrame(self.get_input_dataset('national_vulnerability_feature_averages').get_csv_reader()) - - df_dem = pd.DataFrame(self.get_input_dataset('social_vulnerability_demographic_factors').get_csv_reader()) + df_navs = pd.DataFrame( + self.get_input_dataset( + "national_vulnerability_feature_averages" + ).get_csv_reader() + ) + + df_dem = pd.DataFrame( + self.get_input_dataset( + "social_vulnerability_demographic_factors" + ).get_csv_reader() + ) # Make sure data types match - df_dem["factor_white_nonHispanic"] = df_dem["factor_white_nonHispanic"].astype(float) + df_dem["factor_white_nonHispanic"] = df_dem["factor_white_nonHispanic"].astype( + float + ) df_dem["factor_owner_occupied"] = df_dem["factor_owner_occupied"].astype(float) - df_dem["factor_earning_higher_than_national_poverty_rate"] =\ - df_dem["factor_earning_higher_than_national_poverty_rate"].astype(float) - df_dem["factor_over_25_with_high_school_diploma_or_higher"] =\ - df_dem["factor_over_25_with_high_school_diploma_or_higher"].astype(float) - df_dem["factor_without_disability_age_18_to_65"] =\ - df_dem["factor_without_disability_age_18_to_65"].astype(float) + df_dem["factor_earning_higher_than_national_poverty_rate"] = df_dem[ + "factor_earning_higher_than_national_poverty_rate" + ].astype(float) + df_dem["factor_over_25_with_high_school_diploma_or_higher"] = df_dem[ + "factor_over_25_with_high_school_diploma_or_higher" + ].astype(float) + df_dem["factor_without_disability_age_18_to_65"] = df_dem[ + "factor_without_disability_age_18_to_65" + ].astype(float) 
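The explicit astype(float) casts above are needed because the DataFrame is built straight from the dataset's csv reader, which yields string values, so the factor columns would otherwise carry object dtype into the score computation. A tiny sketch of the same pattern with hypothetical rows (column name taken from the code above):

    import csv
    import io

    import pandas as pd

    # csv.DictReader (the kind of reader a csv-backed dataset typically exposes)
    # yields strings, so numeric columns arrive with object dtype until cast.
    rows = csv.DictReader(io.StringIO("GEO_ID,factor_owner_occupied\n15001,0.63\n"))
    df = pd.DataFrame(list(rows))

    assert df["factor_owner_occupied"].dtype == object
    df["factor_owner_occupied"] = df["factor_owner_occupied"].astype(float)
    assert df["factor_owner_occupied"].dtype == float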
self.social_vulnerability_score_model(df_navs, df_dem) @@ -64,13 +77,13 @@ def social_vulnerability_score_model(self, df_navs, df_dem): # Save into a CSV file result_name = self.get_parameter("result_name") - self.set_result_csv_data("sv_result", df_sv, - name=result_name, - source="dataframe") + self.set_result_csv_data( + "sv_result", df_sv, name=result_name, source="dataframe" + ) @staticmethod def compute_svs(df, df_navs): - """ Computation of the social vulnerability score and corresponding zoning + """Computation of the social vulnerability score and corresponding zoning Args: df (pd.DataFrame): dataframe for the census geographic unit of interest @@ -79,41 +92,44 @@ def compute_svs(df, df_navs): Returns: pd.DataFrame: Social vulnerability score and corresponding zoning data """ - navs = df_navs['average'].astype(float).array - - df['R1'] = df['factor_white_nonHispanic'] / navs[0] - df['R2'] = df['factor_owner_occupied'] / navs[1] - df['R3'] = df['factor_earning_higher_than_national_poverty_rate'] / navs[2] - df['R4'] = df['factor_over_25_with_high_school_diploma_or_higher'] / navs[3] - df['R5'] = df['factor_without_disability_age_18_to_65'] / navs[4] - df['SVS'] = df.apply(lambda row: (row['R1'] + row['R2'] + row['R3'] + row['R4'] + row['R5']) / 5, axis=1) - - maximum_nav = 1/navs + navs = df_navs["average"].astype(float).array + + df["R1"] = df["factor_white_nonHispanic"] / navs[0] + df["R2"] = df["factor_owner_occupied"] / navs[1] + df["R3"] = df["factor_earning_higher_than_national_poverty_rate"] / navs[2] + df["R4"] = df["factor_over_25_with_high_school_diploma_or_higher"] / navs[3] + df["R5"] = df["factor_without_disability_age_18_to_65"] / navs[4] + df["SVS"] = df.apply( + lambda row: (row["R1"] + row["R2"] + row["R3"] + row["R4"] + row["R5"]) / 5, + axis=1, + ) + + maximum_nav = 1 / navs std = abs(1 - (sum(maximum_nav) / len(maximum_nav))) / 3 - lb_2 = 1 - 1.5*std - lb_1 = 1 - 0.5*std - ub_1 = 1 + 0.5*std - ub_2 = 1 + 1.5*std + lb_2 = 1 - 1.5 * std + lb_1 = 1 - 0.5 * std + ub_1 = 1 + 0.5 * std + ub_2 = 1 + 1.5 * std zones = [] - for svs in df['SVS'].tolist(): + for svs in df["SVS"].tolist(): if svs < lb_2: - new_zone = 'High Vulnerable (zone5)' + new_zone = "High Vulnerable (zone5)" elif svs < lb_1: - new_zone = 'Medium to High Vulnerable (zone4)' + new_zone = "Medium to High Vulnerable (zone4)" elif svs < ub_1: - new_zone = 'Medium Vulnerable (zone3)' + new_zone = "Medium Vulnerable (zone3)" elif svs < ub_2: - new_zone = 'Medium to Low Vulnerable (zone2)' + new_zone = "Medium to Low Vulnerable (zone2)" elif svs > ub_2: - new_zone = 'Low Vulnerable (zone1)' + new_zone = "Low Vulnerable (zone1)" else: - new_zone = 'No Data' + new_zone = "No Data" zones.append(new_zone) - df['zone'] = zones + df["zone"] = zones df = df.sort_values(by="GEO_ID") return df @@ -126,38 +142,38 @@ def get_spec(self): """ return { - 'name': 'social-vulnerability-score', - 'description': 'Social vulnerability score model', - 'input_parameters': [ + "name": "social-vulnerability-score", + "description": "Social vulnerability score model", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'Result CSV dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "Result CSV dataset name", + "type": str, }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'national_vulnerability_feature_averages', - 'required': True, - 'description': 'A csv file with national vulnerability feature averages', - 'type': 
['incore:socialVulnerabilityFeatureAverages'] + "id": "national_vulnerability_feature_averages", + "required": True, + "description": "A csv file with national vulnerability feature averages", + "type": ["incore:socialVulnerabilityFeatureAverages"], }, { - 'id': 'social_vulnerability_demographic_factors', - 'required': True, - 'description': 'A csv file with social vulnerability score demographic factors for a given geographic ' - 'type', - 'type': ['incore:socialVulnerabilityDemFactors'] - } + "id": "social_vulnerability_demographic_factors", + "required": True, + "description": "A csv file with social vulnerability score demographic factors for a given geographic " + "type", + "type": ["incore:socialVulnerabilityDemFactors"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'sv_result', - 'parent_type': 'social_vulnerability_score', - 'description': 'A csv file with zones containing demographic factors' - 'qualified by a social vulnerability score', - 'type': 'incore:socialVulnerabilityScore' + "id": "sv_result", + "parent_type": "social_vulnerability_score", + "description": "A csv file with zones containing demographic factors" + "qualified by a social vulnerability score", + "type": "incore:socialVulnerabilityScore", } - ] + ], } diff --git a/pyincore/analyses/tornadoepndamage/tornadoepndamage.py b/pyincore/analyses/tornadoepndamage/tornadoepndamage.py index 57b1aabbe..312216d1b 100644 --- a/pyincore/analyses/tornadoepndamage/tornadoepndamage.py +++ b/pyincore/analyses/tornadoepndamage/tornadoepndamage.py @@ -13,7 +13,13 @@ from pyincore.utils.analysisutil import AnalysisUtil from shapely.geometry import shape, LineString, MultiLineString -from pyincore import BaseAnalysis, HazardService, FragilityService, DataService, FragilityCurveSet +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + DataService, + FragilityCurveSet, +) from pyincore import GeoUtil, NetworkUtil, NetworkDataset from pyincore.models.dfr3curve import DFR3Curve @@ -31,8 +37,8 @@ def __init__(self, incore_client): self.hazardsvc = HazardService(incore_client) self.fragilitysvc = FragilityService(incore_client) self.datasetsvc = DataService(incore_client) - self.fragility_tower_id = '5b201b41b1cf3e336de8fa67' - self.fragility_pole_id = '5b201d91b1cf3e336de8fa68' + self.fragility_tower_id = "5b201b41b1cf3e336de8fa67" + self.fragility_pole_id = "5b201d91b1cf3e336de8fa68" # this is for deciding to use indpnode field. 
Not using this could be safer for general dataset self.use_indpnode = False @@ -44,14 +50,16 @@ def __init__(self, incore_client): self.mcost = 1435 # mean repair cost for single distribution pole self.vcost = (0.1 * self.mcost) ** 2 self.sigmad = math.sqrt( - math.log(self.vcost / (self.mcost ** 2) + 1)) # convert to gaussian Std Deviation to be used in logncdf - self.mud = math.log((self.mcost ** 2) / math.sqrt(self.vcost + self.mcost ** 2)) + math.log(self.vcost / (self.mcost**2) + 1) + ) # convert to gaussian Std Deviation to be used in logncdf + self.mud = math.log((self.mcost**2) / math.sqrt(self.vcost + self.mcost**2)) self.mcost = 400000 # mean repair cost for single transmission pole self.vcost = (0.1 * self.mcost) ** 2 self.sigmat = math.sqrt( - math.log(self.vcost / (self.mcost ** 2) + 1)) # convert to gaussian Std Deviation to be used in logncdf - self.mut = math.log((self.mcost ** 2) / math.sqrt(self.vcost + self.mcost ** 2)) + math.log(self.vcost / (self.mcost**2) + 1) + ) # convert to gaussian Std Deviation to be used in logncdf + self.mut = math.log((self.mcost**2) / math.sqrt(self.vcost + self.mcost**2)) self.tmut = 72 # mean repairtime for transmission tower in hrs self.tsigmat = 36 # std dev @@ -62,8 +70,8 @@ def __init__(self, incore_client): self.totalcost2repairpath = [] self.totalpoles2repair = [] - self.tornado_sim_field_name = 'SIMULATION' - self.tornado_ef_field_name = 'EF_RATING' + self.tornado_sim_field_name = "SIMULATION" + self.tornado_ef_field_name = "EF_RATING" # tornado number of simulation and ef_rate self.nmcs = 0 @@ -74,7 +82,7 @@ def __init__(self, incore_client): # node variables self.nodenwid_fld_name = "NODENWID" self.indpnode_fld_name = "INDPNODE" - self.guid_fldname = 'GUID' + self.guid_fldname = "GUID" # link variables self.tonode_fld_name = "TONODE" @@ -88,22 +96,35 @@ def __init__(self, incore_client): super(TornadoEpnDamage, self).__init__(incore_client) def run(self): - network_dataset = NetworkDataset.from_dataset(self.get_input_dataset("epn_network")) + network_dataset = NetworkDataset.from_dataset( + self.get_input_dataset("epn_network") + ) tornado = self.get_input_hazard("hazard") - tornado_id = self.get_parameter('tornado_id') + tornado_id = self.get_parameter("tornado_id") if tornado is None and tornado_id is None: - raise ValueError("Either tornado hazard object or tornado id must be provided") + raise ValueError( + "Either tornado hazard object or tornado id must be provided" + ) elif tornado_id is None: tornado_id = tornado.id tornado_metadata = self.hazardsvc.get_tornado_hazard_metadata(tornado_id) - self.load_remote_input_dataset("tornado", tornado_metadata["hazardDatasets"][0].get("datasetId")) + self.load_remote_input_dataset( + "tornado", tornado_metadata["hazardDatasets"][0].get("datasetId") + ) tornado_dataset = self.get_input_dataset("tornado").get_inventory_reader() - ds_results, damage_results = self.get_damage(network_dataset, tornado_dataset, tornado_id) + ds_results, damage_results = self.get_damage( + network_dataset, tornado_dataset, tornado_id + ) - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) self.set_result_json_data( - "metadata", damage_results, name=self.get_parameter("result_name") + "_additional_info") + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True @@ -122,14 +143,22 @@ def get_damage(self, network_dataset, 
tornado_dataset, tornado_id): self.set_node_variables(node_dataset) # get fragility curves set - tower for transmission, pole for distribution - fragility_set_tower = FragilityCurveSet(self.fragilitysvc.get_dfr3_set(self.fragility_tower_id)) + fragility_set_tower = FragilityCurveSet( + self.fragilitysvc.get_dfr3_set(self.fragility_tower_id) + ) assert fragility_set_tower.id == self.fragility_tower_id - fragility_set_pole = FragilityCurveSet(self.fragilitysvc.get_dfr3_set(self.fragility_pole_id)) + fragility_set_pole = FragilityCurveSet( + self.fragilitysvc.get_dfr3_set(self.fragility_pole_id) + ) assert fragility_set_pole.id == self.fragility_pole_id # network test node_id_validation = NetworkUtil.validate_network_node_ids( - network_dataset, self.fromnode_fld_name, self.tonode_fld_name, self.nodenwid_fld_name) + network_dataset, + self.fromnode_fld_name, + self.tonode_fld_name, + self.nodenwid_fld_name, + ) if node_id_validation is False: sys.exit("ID in from or to node field doesn't exist in the node dataset") @@ -138,7 +167,11 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): link_filepath = link_dataset.path graph, node_coords = NetworkUtil.create_network_graph_from_link( - link_filepath, self.fromnode_fld_name, self.tonode_fld_name, is_directed_graph) + link_filepath, + self.fromnode_fld_name, + self.tonode_fld_name, + is_directed_graph, + ) # reverse the graph to acculate the damage to next to node graph = nx.DiGraph.reverse(graph, copy=True) @@ -169,27 +202,41 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): nodenwid_list = [] for node_feature in node_dataset: # get guid colum - guid_fld_val = '' - if self.guid_fldname.lower() in node_feature['properties']: - guid_fld_val = node_feature['properties'][self.guid_fldname.lower()] - elif self.guid_fldname in node_feature['properties']: - guid_fld_val = node_feature['properties'][self.guid_fldname] + guid_fld_val = "" + if self.guid_fldname.lower() in node_feature["properties"]: + guid_fld_val = node_feature["properties"][self.guid_fldname.lower()] + elif self.guid_fldname in node_feature["properties"]: + guid_fld_val = node_feature["properties"][self.guid_fldname] guid_list.append(guid_fld_val) # get nodenwid colum - nodenwid_fld_val = '' - if self.nodenwid_fld_name.lower() in node_feature['properties']: - nodenwid_fld_val = int(node_feature['properties'][self.nodenwid_fld_name.lower()]) - elif self.nodenwid_fld_name in node_feature['properties']: - nodenwid_fld_val = int(node_feature['properties'][self.nodenwid_fld_name]) + nodenwid_fld_val = "" + if self.nodenwid_fld_name.lower() in node_feature["properties"]: + nodenwid_fld_val = int( + node_feature["properties"][self.nodenwid_fld_name.lower()] + ) + elif self.nodenwid_fld_name in node_feature["properties"]: + nodenwid_fld_val = int( + node_feature["properties"][self.nodenwid_fld_name] + ) nodenwid_list.append(nodenwid_fld_val) for z in range(self.nmcs): - nodedam = [0] * self.nnode # placeholder for recording number of damaged pole for each node - noderepair = [0] * self.nnode # placeholder for recording repair cost for each node - poles2repair = [0] * self.nnode # placeholder for recording total number of poles to repair - cost2repairpath = [0] * self.nnode # placeholder for recording total repair cost for the network - time2repairpath = [0] * self.nnode # placeholder for recording total repair time for the network + nodedam = [ + 0 + ] * self.nnode # placeholder for recording number of damaged pole for each node + noderepair = [ + 0 + ] * 
self.nnode # placeholder for recording repair cost for each node + poles2repair = [ + 0 + ] * self.nnode # placeholder for recording total number of poles to repair + cost2repairpath = [ + 0 + ] * self.nnode # placeholder for recording total repair cost for the network + time2repairpath = [ + 0 + ] * self.nnode # placeholder for recording total repair time for the network nodetimerep = [0] * self.nnode hazardval = [[0]] * self.nnode # placeholder for recording hazard values demandtypes = [[""]] * self.nnode # placeholder for recording demand types @@ -206,44 +253,68 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): demand_types = [""] demand_units = [""] - if self.tonode_fld_name.lower() in line_feature['properties']: - to_node_val = line_feature['properties'][self.tonode_fld_name.lower()] - elif self.tonode_fld_name in line_feature['properties']: - to_node_val = line_feature['properties'][self.tonode_fld_name] + if self.tonode_fld_name.lower() in line_feature["properties"]: + to_node_val = line_feature["properties"][ + self.tonode_fld_name.lower() + ] + elif self.tonode_fld_name in line_feature["properties"]: + to_node_val = line_feature["properties"][self.tonode_fld_name] - if self.linetype_fld_name in line_feature['properties']: - linetype_val = line_feature['properties'][self.linetype_fld_name] - elif self.linetype_fld_name.lower() in line_feature['properties']: - linetype_val = line_feature['properties'][self.linetype_fld_name.lower()] + if self.linetype_fld_name in line_feature["properties"]: + linetype_val = line_feature["properties"][self.linetype_fld_name] + elif self.linetype_fld_name.lower() in line_feature["properties"]: + linetype_val = line_feature["properties"][ + self.linetype_fld_name.lower() + ] - line = shape(line_feature['geometry']) + line = shape(line_feature["geometry"]) # iterate tornado for tornado_feature in tornado_dataset: - resistivity_probability = 0 # resistivity value at the point of windSpeed + resistivity_probability = ( + 0 # resistivity value at the point of windSpeed + ) random_resistivity = 0 # random resistivity value between 0 and one sim_fld_val = "" ef_fld_val = "" # get EF rating and simulation number column - if self.tornado_sim_field_name.lower() in tornado_feature['properties']: - sim_fld_val = int(tornado_feature['properties'][self.tornado_sim_field_name.lower()]) - elif self.tornado_sim_field_name in tornado_feature['properties']: - sim_fld_val = int(tornado_feature['properties'][self.tornado_sim_field_name]) - - if self.tornado_ef_field_name.lower() in tornado_feature['properties']: - ef_fld_val = tornado_feature['properties'][self.tornado_ef_field_name.lower()] - elif self.tornado_ef_field_name in tornado_feature['properties']: - ef_fld_val = tornado_feature['properties'][self.tornado_ef_field_name] + if ( + self.tornado_sim_field_name.lower() + in tornado_feature["properties"] + ): + sim_fld_val = int( + tornado_feature["properties"][ + self.tornado_sim_field_name.lower() + ] + ) + elif self.tornado_sim_field_name in tornado_feature["properties"]: + sim_fld_val = int( + tornado_feature["properties"][self.tornado_sim_field_name] + ) + + if ( + self.tornado_ef_field_name.lower() + in tornado_feature["properties"] + ): + ef_fld_val = tornado_feature["properties"][ + self.tornado_ef_field_name.lower() + ] + elif self.tornado_ef_field_name in tornado_feature["properties"]: + ef_fld_val = tornado_feature["properties"][ + self.tornado_ef_field_name + ] if sim_fld_val == "" or ef_fld_val == "": - print("unable to convert 
tornado simulation field value to integer") + print( + "unable to convert tornado simulation field value to integer" + ) sys.exit(0) # get Tornado EF polygon # assumes that the polygon is not a multipolygon - poly = shape(tornado_feature['geometry']) + poly = shape(tornado_feature["geometry"]) poly_list.append(poly) # loop for ef ranges @@ -257,17 +328,23 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): # also figure out the length of the line that ovelapped with EF box inter_length_meter = None # compute the intersection between tornado polygon and line - if sim_fld_val == z and ef_fld_val.lower() == ef_content.lower(): + if ( + sim_fld_val == z + and ef_fld_val.lower() == ef_content.lower() + ): if poly is not None and line is not None: if poly.intersects(line): intersection = poly.intersection(line) any_point = None - intersection_length = intersection.length if intersection.length > 0: # print(intersection.__class__.__name__) # calculate the length of intersected line # since this is a geographic, it has to be projected to meters to be calcuated - inter_length_meter = GeoUtil.calc_geog_distance_from_linestring(intersection) + inter_length_meter = ( + GeoUtil.calc_geog_distance_from_linestring( + intersection + ) + ) if isinstance(intersection, MultiLineString): intersection_list.append(intersection) for inter_line in intersection.geoms: @@ -293,16 +370,32 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): else: fragility_set_used = fragility_set_pole - values_payload = [{ - "demands": [x.lower() for x in fragility_set_used.demand_types], - "units": [x.lower() for x in fragility_set_used.demand_units], - "loc": str(any_point.coords[0][1]) + "," + str(any_point.coords[0][0]) - }] + values_payload = [ + { + "demands": [ + x.lower() + for x in fragility_set_used.demand_types + ], + "units": [ + x.lower() + for x in fragility_set_used.demand_units + ], + "loc": str(any_point.coords[0][1]) + + "," + + str(any_point.coords[0][0]), + } + ] h_vals = self.hazardsvc.post_tornado_hazard_values( - tornado_id, values_payload, self.get_parameter('seed')) - tor_hazard_values = AnalysisUtil.update_precision_of_lists( - h_vals[0]["hazardValues"]) + tornado_id, + values_payload, + self.get_parameter("seed"), + ) + tor_hazard_values = ( + AnalysisUtil.update_precision_of_lists( + h_vals[0]["hazardValues"] + ) + ) demand_types = h_vals[0]["demands"] demand_units = h_vals[0]["units"] hval_dict = dict() @@ -310,28 +403,36 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): for d in h_vals[0]["demands"]: hval_dict[d] = tor_hazard_values[j] j += 1 - if isinstance(fragility_set_used.fragility_curves[0], - DFR3Curve): + if isinstance( + fragility_set_used.fragility_curves[0], + DFR3Curve, + ): inventory_args = fragility_set_used.construct_expression_args_from_inventory( - tornado_feature) - resistivity_probability = \ - fragility_set_used.calculate_limit_state( - hval_dict, - inventory_type=fragility_set_used.inventory_type, **inventory_args) + tornado_feature + ) + resistivity_probability = fragility_set_used.calculate_limit_state( + hval_dict, + inventory_type=fragility_set_used.inventory_type, + **inventory_args + ) else: raise ValueError( "One of the fragilities is in deprecated format. This should not happen. " - "If you are seeing this please report the issue.") + "If you are seeing this please report the issue." 
+ ) # randomly generated capacity of each poles ; 1 m/s is 2.23694 mph - poleresist = resistivity_probability.get('LS_0') * 2.23694 - npoles = int(round(inter_length_meter / self.pole_distance)) + poleresist = ( + resistivity_probability.get("LS_0") * 2.23694 + ) + npoles = int( + round(inter_length_meter / self.pole_distance) + ) repairtime_list = [] mu = None sigma = None for k in range(npoles): - repair_time = 0 random_resistivity = random.uniform(0, 1) if random_resistivity <= poleresist: @@ -341,7 +442,10 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): # since the time and cost differs when it is pole or tower, # this could be changed by see if it is tower or pole # if numpy.cross(k, z) <= 3 or numpy.cross(k, z) == 24: - if linetype_val.lower() == self.line_transmission: + if ( + linetype_val.lower() + == self.line_transmission + ): mu = self.mut sigma = self.sigmat tmu = self.tmut @@ -352,7 +456,9 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): tmu = self.tmud tsigma = self.tsigmad - repairtime_list.append(numpy.random.normal(tmu, tsigma)) + repairtime_list.append( + numpy.random.normal(tmu, tsigma) + ) for k in range(ndamage): repaircost += numpy.random.lognormal(mu, sigma) @@ -372,7 +478,11 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): for i in range(len(first_node_list)): for j in range(len(connection_list[i])): # print(connection_list[i][j], first_node_list[i]) - pathij = list(nx.all_simple_paths(graph, connection_list[i][j], first_node_list[i])) + pathij = list( + nx.all_simple_paths( + graph, connection_list[i][j], first_node_list[i] + ) + ) poler = 0 coster = 0 timer = [] @@ -413,22 +523,24 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): ds_result = dict() damage_result = dict() - ds_result['guid'] = guid_list[i] + ds_result["guid"] = guid_list[i] ds_result["meanpoles"] = meanpoles[i] ds_result["stdpoles"] = stdpoles[i] ds_result["meancost"] = meancost[i] ds_result["stdcost"] = stdcost[i] ds_result["meantime"] = meantime[i] ds_result["stdtime"] = stdtime[i] - ds_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazardval[i], "tornado") + ds_result["haz_expose"] = AnalysisUtil.get_exposure_from_hazard_values( + hazardval[i], "tornado" + ) - damage_result['guid'] = guid_list[i] + damage_result["guid"] = guid_list[i] damage_result["fragility_tower_id"] = self.fragility_tower_id damage_result["fragility_pole_id"] = self.fragility_pole_id damage_result["hazardtype"] = "Tornado" - damage_result['hazardvals'] = hazardval[i] - damage_result['demandtypes'] = demandtypes[i] - damage_result['demandunits'] = demandunits[i] + damage_result["hazardvals"] = hazardval[i] + damage_result["demandtypes"] = demandtypes[i] + damage_result["demandunits"] = demandunits[i] ds_results.append(ds_result) damage_results.append(damage_result) @@ -439,31 +551,25 @@ def get_damage(self, network_dataset, tornado_dataset, tornado_id): align coordinate values in a list as a single pair in order """ - def align_list_cooridnate(self, coord_list): - coord_iterator = iter(coord_list) - first = prev = next(coord_iterator) - for coord in coord_iterator: - yield prev, coord - prev = coord - - # if it is polygon the following line is needed to close the polygon geometry - # yield coord, first - def set_tornado_variables(self, tornado_dataset): sim_num_list = [] ef_rate_list = [] for ef_poly in tornado_dataset: - ef_string = '' - if self.tornado_sim_field_name.lower() in ef_poly['properties']: - 
sim_num_list.append(int(ef_poly['properties'][self.tornado_sim_field_name.lower()])) - elif self.tornado_sim_field_name in ef_poly['properties']: - sim_num_list.append(int(ef_poly['properties'][self.tornado_sim_field_name])) - - if self.tornado_ef_field_name.lower() in ef_poly['properties']: - ef_string = ef_poly['properties'][self.tornado_ef_field_name.lower()] - elif self.tornado_ef_field_name in ef_poly['properties']: - ef_string = ef_poly['properties'][self.tornado_ef_field_name] + ef_string = "" + if self.tornado_sim_field_name.lower() in ef_poly["properties"]: + sim_num_list.append( + int(ef_poly["properties"][self.tornado_sim_field_name.lower()]) + ) + elif self.tornado_sim_field_name in ef_poly["properties"]: + sim_num_list.append( + int(ef_poly["properties"][self.tornado_sim_field_name]) + ) + + if self.tornado_ef_field_name.lower() in ef_poly["properties"]: + ef_string = ef_poly["properties"][self.tornado_ef_field_name.lower()] + elif self.tornado_ef_field_name in ef_poly["properties"]: + ef_string = ef_poly["properties"][self.tornado_ef_field_name] # parse the number in EF and the format should be "EF0", "EF1", or something like it ef_rate_list.append(int(ef_string.lower().split("ef", 1)[1])) @@ -480,16 +586,18 @@ def set_node_variables(self, node_dataset): for node_point in node_dataset: node_id = None indpnode_val = None - if self.nodenwid_fld_name.lower() in node_point['properties']: - node_id = int(node_point['properties'][self.nodenwid_fld_name.lower()]) - elif self.nodenwid_fld_name in node_point['properties']: - node_id = int(node_point['properties'][self.nodenwid_fld_name]) + if self.nodenwid_fld_name.lower() in node_point["properties"]: + node_id = int(node_point["properties"][self.nodenwid_fld_name.lower()]) + elif self.nodenwid_fld_name in node_point["properties"]: + node_id = int(node_point["properties"][self.nodenwid_fld_name]) if self.use_indpnode is True: - if self.indpnode_fld_name.lower() in node_point['properties']: - indpnode_val = int(node_point['properties'][self.indpnode_fld_name.lower()]) - elif self.indpnode_fld_name in node_point['properties']: - indpnode_val = int(node_point['properties'][self.indpnode_fld_name]) + if self.indpnode_fld_name.lower() in node_point["properties"]: + indpnode_val = int( + node_point["properties"][self.indpnode_fld_name.lower()] + ) + elif self.indpnode_fld_name in node_point["properties"]: + indpnode_val = int(node_point["properties"][self.indpnode_fld_name]) if node_id is None and indpnode_val is None: print("problem getting the value") @@ -511,62 +619,62 @@ def set_node_variables(self, node_dataset): def get_spec(self): return { - 'name': 'tornado-epn-damage', - 'description': 'tornado epn damage analysis', - 'input_parameters': [ + "name": "tornado-epn-damage", + "description": "tornado epn damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'tornado_id', - 'required': False, - 'description': 'Tornado hazard id', - 'type': str + "id": "tornado_id", + "required": False, + "description": "Tornado hazard id", + "type": str, }, { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the tornado hazard value', - 'type': int - } + "id": "seed", + "required": False, + "description": "Initial seed for the tornado hazard value", + "type": int, + }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 
'required': False, - 'description': 'Hazard object', - 'type': ["tornado"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["tornado"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'epn_network', - 'required': True, - 'description': 'EPN Network Dataset', - 'type': ['incore:epnNetwork'], + "id": "epn_network", + "required": True, + "description": "EPN Network Dataset", + "type": ["incore:epnNetwork"], }, { - 'id': 'tornado', - 'required': False, - 'description': 'Tornado Dataset', - 'type': ['incore:tornadoWindfield'], - } + "id": "tornado", + "required": False, + "description": "Tornado Dataset", + "type": ["incore:tornadoWindfield"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'epn_network', - 'description': 'CSV file of damages for electric power network by tornado', - 'type': 'incore:tornadoEPNDamageVer3' + "id": "result", + "parent_type": "epn_network", + "description": "CSV file of damages for electric power network by tornado", + "type": "incore:tornadoEPNDamageVer3", }, { - 'id': 'metadata', - 'parent_type': 'epn_network', - 'description': 'Json file with information about applied hazard value and fragility', - 'type': 'incore:tornadoEPNDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "epn_network", + "description": "Json file with information about applied hazard value and fragility", + "type": "incore:tornadoEPNDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/trafficflowrecovery/WIPW.py b/pyincore/analyses/trafficflowrecovery/WIPW.py index e89d4f1f9..9c6777a7b 100644 --- a/pyincore/analyses/trafficflowrecovery/WIPW.py +++ b/pyincore/analyses/trafficflowrecovery/WIPW.py @@ -28,7 +28,7 @@ def ipw_search(v, e): # calculate length of each link which is 1 for all edges length = {} - for (i, j) in edgeslist: + for i, j in edgeslist: length[i, j] = 1 length[j, i] = 1 @@ -55,8 +55,7 @@ def ipw_search(v, e): # the length of kth independent path between node pair path_length = {} - for (w, q) in nodespair: - + for w, q in nodespair: # creat a temp list to search path temp_edgelist = copy.deepcopy(edgeslist) @@ -73,15 +72,13 @@ def ipw_search(v, e): g_local.add_nodes_from(nodelist) for headnode, tailnode in temp_edgelist: - g_local.add_edge(headnode, tailnode, length=length[headnode, - tailnode]) + g_local.add_edge(headnode, tailnode, length=length[headnode, tailnode]) try: - temp = copy.deepcopy(nx.shortest_path(g_local, - source=w, - target=q, - weight='length')) - except nx.NetworkXNoPath as e: + temp = copy.deepcopy( + nx.shortest_path(g_local, source=w, target=q, weight="length") + ) + except nx.NetworkXNoPath: # print(w,q) # print("NetworkXNoPath") temp = [] @@ -89,7 +86,6 @@ def ipw_search(v, e): # if there is a path connecting the source and target, # start to calculate IPW if temp: - # find the shortest path ipath[w, q][k] = copy.deepcopy(temp) path_length[w, q][k] = 0 @@ -98,22 +94,20 @@ def ipw_search(v, e): if len(ipath[w, q][k]) == 2: # for the path just has two nodes # (origin and destination) - ipathtuple.append((ipath[w, q][k][0], - ipath[w, q][k][1])) - path_length[w, q][k] = length[ipath[w, q][k][0], - ipath[w, q][k][1]] + ipathtuple.append((ipath[w, q][k][0], ipath[w, q][k][1])) + path_length[w, q][k] = length[ipath[w, q][k][0], ipath[w, q][k][1]] else: # for the path has more than two nodes for p in range(0, len(ipath[w, q][k]) - 1): - ipathtuple.append((ipath[w, q][k][p], - ipath[w, q][k][p + 1])) + ipathtuple.append((ipath[w, q][k][p], ipath[w, 
q][k][p + 1])) - path_length[w, q][k] += length[ipath[w, q][k][p], - ipath[w, q][k][p + 1]] + path_length[w, q][k] += length[ + ipath[w, q][k][p], ipath[w, q][k][p + 1] + ] # delete edges that used in previous shortest paths - for (s, t) in ipathtuple: + for s, t in ipathtuple: if (s, t) in temp_edgelist: temp_edgelist.remove((s, t)) # temp_edgelist.remove((t, s)) @@ -126,11 +120,11 @@ def ipw_search(v, e): return ipath, path_length -def tipw_index(g, l, path_adt): +def tipw_index(g, p, path_adt): """ caculate the TIPW index of the network :param g: graph - :param l: Indpendent pathway + :param p: Independent pathway :param path_adt: Adt of the path :return: TIPW index of the network """ @@ -142,8 +136,9 @@ def tipw_index(g, l, path_adt): for key in path_adt.keys(): normal_path_adt[key] = {} for i, j in path_adt[key].items(): - normal_path_adt[key][i] = len(path_adt[key].values()) * j \ - / sum(path_adt[key].values()) + normal_path_adt[key][i] = ( + len(path_adt[key].values()) * j / sum(path_adt[key].values()) + ) # compute the TIPW of node node_tipw = {} @@ -151,23 +146,22 @@ def tipw_index(g, l, path_adt): node_tipw[node] = 0 for pairnode in gnodes: if pairnode != node: - if (node, pairnode) in l.keys(): - for key, value in l[node, pairnode].items(): - node_tipw[node] \ - += normal_path_adt[node, pairnode][key] \ - * path_service_level_edges(g, value) - elif (pairnode, node) in l.keys(): - for key, value in l[pairnode, node].items(): - node_tipw[node] \ - += normal_path_adt[pairnode, node][key] \ - * path_service_level_edges(g, value) + if (node, pairnode) in p.keys(): + for key, value in p[node, pairnode].items(): + node_tipw[node] += normal_path_adt[node, pairnode][ + key + ] * path_service_level_edges(g, value) + elif (pairnode, node) in p.keys(): + for key, value in p[pairnode, node].items(): + node_tipw[node] += normal_path_adt[pairnode, node][ + key + ] * path_service_level_edges(g, value) # caculate the TIPW index tipw_index_val = 0 for node in gnodes: # network IPW - tipw_index_val \ - += (1 / float(len(gnodes)) * node_tipw[node]) / (len(gnodes) - 1) + tipw_index_val += (1 / float(len(gnodes)) * node_tipw[node]) / (len(gnodes) - 1) return tipw_index_val @@ -182,8 +176,7 @@ def path_service_level_edges(g, path): service_level = 1 for i in range(len(path) - 1): - service_level \ - *= (1 - g.edges[path[i], path[i + 1]]['Damage_Status'] / 4.0) + service_level *= 1 - g.edges[path[i], path[i + 1]]["Damage_Status"] / 4.0 return service_level @@ -195,8 +188,8 @@ def path_adt_from_edges(g, path): :return: reliability """ - adt = max(nx.get_edge_attributes(g, 'adt').values()) + adt = max(nx.get_edge_attributes(g, "adt").values()) for i in range(len(path) - 1): - adt = min(adt, g.edges[path[i], path[i + 1]]['adt']) + adt = min(adt, g.edges[path[i], path[i + 1]]["adt"]) return adt diff --git a/pyincore/analyses/trafficflowrecovery/__init__.py b/pyincore/analyses/trafficflowrecovery/__init__.py index 0387c8dbe..1588ab154 100644 --- a/pyincore/analyses/trafficflowrecovery/__init__.py +++ b/pyincore/analyses/trafficflowrecovery/__init__.py @@ -7,7 +7,13 @@ from pyincore.analyses.trafficflowrecovery.nsga2 import Solution from pyincore.analyses.trafficflowrecovery.nsga2 import NSGAII -from pyincore.analyses.trafficflowrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution +from pyincore.analyses.trafficflowrecovery.post_disaster_long_term_solution import ( + PostDisasterLongTermSolution, +) from pyincore.analyses.trafficflowrecovery import WIPW -from 
pyincore.analyses.trafficflowrecovery.trafficflowrecovery import TrafficFlowRecovery -from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import TrafficFlowRecoveryUtil +from pyincore.analyses.trafficflowrecovery.trafficflowrecovery import ( + TrafficFlowRecovery, +) +from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import ( + TrafficFlowRecoveryUtil, +) diff --git a/pyincore/analyses/trafficflowrecovery/nsga2.py b/pyincore/analyses/trafficflowrecovery/nsga2.py index 04e87e162..98e804f95 100644 --- a/pyincore/analyses/trafficflowrecovery/nsga2.py +++ b/pyincore/analyses/trafficflowrecovery/nsga2.py @@ -3,7 +3,9 @@ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import sys, random + +import sys +import random class Solution: @@ -69,6 +71,7 @@ def crowded_comparison(s1, s2): class NSGAII: """Implementation of NSGA-II algorithm.""" + current_evaluated_objective = 0 def __init__(self, num_objectives, mutation_rate=0.1, crossover_rate=1.0): @@ -103,7 +106,6 @@ def run(self, p, population_size, num_generations): first_front = [] for i in range(num_generations): - r = [] r.extend(p) @@ -284,8 +286,8 @@ def crowding_distance_assignment(self, front): for obj_index in range(self.num_objectives): self.sort_objective(front, obj_index) - front[0].distance = float('inf') - front[len(front) - 1].distance = float('inf') + front[0].distance = float("inf") + front[len(front) - 1].distance = float("inf") for i in range(1, len(front) - 1): - front[i].distance += (front[i + 1].distance - front[i - 1].distance) + front[i].distance += front[i + 1].distance - front[i - 1].distance diff --git a/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py b/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py index 7eef30092..6e337d236 100644 --- a/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py +++ b/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py @@ -9,7 +9,9 @@ import random from pyincore.analyses.trafficflowrecovery.nsga2 import Solution from pyincore.analyses.trafficflowrecovery import WIPW as WIPW -from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import TrafficFlowRecoveryUtil +from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import ( + TrafficFlowRecoveryUtil, +) class PostDisasterLongTermSolution(Solution): @@ -27,8 +29,18 @@ class PostDisasterLongTermSolution(Solution): # complete damage state compRepair = 230 - def __init__(self, candidates, node_df, arc_df, bridge_df, bridge_damage_value, - network, pm, all_ipw, path_adt): + def __init__( + self, + candidates, + node_df, + arc_df, + bridge_df, + bridge_damage_value, + network, + pm, + all_ipw, + path_adt, + ): """ initialize the chromosomes """ @@ -70,41 +82,44 @@ def evaluate_solution(self, final): end = {} schedule_time = [] - l = copy.deepcopy(self.attributes) + attributes_copy = copy.deepcopy(self.attributes) - for i in range(len(l)): + for i in range(len(attributes_copy)): if i <= simax - 1: - # repair start from time 0 start[candidate_schedule[i]] = 0.0 # if damage state of bridge is slight damage, repair time # is slightRepair if temp_bridge_damage_value[candidate_schedule[i]] == 1: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.slightRepair + end[candidate_schedule[i]] = ( + 
start[candidate_schedule[i]] + + PostDisasterLongTermSolution.slightRepair + ) # if damage state of bridge is moderate damage, repair time # is modRepair elif temp_bridge_damage_value[candidate_schedule[i]] == 2: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.modRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.modRepair + ) # if damage state of bridge is extensive damage, repair time # is extRepair elif temp_bridge_damage_value[candidate_schedule[i]] == 3: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.extRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.extRepair + ) # if damage state of bridge is complete damage, repair time # is compRepair else: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.compRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.compRepair + ) # store the ending time schedule_time.append(end[candidate_schedule[i]]) @@ -119,24 +134,28 @@ def evaluate_solution(self, final): start[candidate_schedule[i]] = schedule_time.pop(0) if temp_bridge_damage_value[candidate_schedule[i]] == 1: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.slightRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.slightRepair + ) elif temp_bridge_damage_value[candidate_schedule[i]] == 2: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.modRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.modRepair + ) elif temp_bridge_damage_value[candidate_schedule[i]] == 3: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.extRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.extRepair + ) else: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.compRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.compRepair + ) schedule_time.append(end[candidate_schedule[i]]) schedule_time.sort() @@ -169,9 +188,7 @@ def evaluate_solution(self, final): pl = len(schedule_time) for ii in range(pl % inte - 1 + inte, pl, inte): - if ii > 0: - # update the damage status of bridges for bridge in self_candidates: if fg[bridge] == 0: @@ -181,22 +198,36 @@ def evaluate_solution(self, final): fg[bridge] = 1 for i in range(len(self.arc_df)): - nod1 = self.node_df.loc[self.node_df['ID'] == self.arc_df['fromnode'][i], 'guid'].values[0] - nod2 = self.node_df.loc[self.node_df['ID'] == self.arc_df['tonode'][i], 'guid'].values[0] - self.network.edges[nod1, nod2]['Damage_Status'] = 0 + nod1 = self.node_df.loc[ + self.node_df["ID"] == self.arc_df["fromnode"][i], "guid" + ].values[0] + nod2 = self.node_df.loc[ + self.node_df["ID"] == self.arc_df["tonode"][i], "guid" + ].values[0] + self.network.edges[nod1, nod2]["Damage_Status"] = 0 for key, val in temp_bridge_damage_value.items(): - linknwid = self.bridge_df.loc[self.bridge_df['guid'] == key, 'linkID'].values[0] + linknwid = self.bridge_df.loc[ + self.bridge_df["guid"] == key, "linkID" + ].values[0] - nod_id1 = self.arc_df[self.arc_df['id'] == linknwid]['fromnode'].values[0] - nod1 = 
self.node_df.loc[self.node_df['ID'] == nod_id1, 'guid'].values[0] + nod_id1 = self.arc_df[self.arc_df["id"] == linknwid][ + "fromnode" + ].values[0] + nod1 = self.node_df.loc[ + self.node_df["ID"] == nod_id1, "guid" + ].values[0] - nod_id2 = self.arc_df[self.arc_df['id'] == linknwid]['tonode'].values[0] - nod2 = self.node_df.loc[self.node_df['ID'] == nod_id2, 'guid'].values[0] + nod_id2 = self.arc_df[self.arc_df["id"] == linknwid][ + "tonode" + ].values[0] + nod2 = self.node_df.loc[ + self.node_df["ID"] == nod_id2, "guid" + ].values[0] - self.network.edges[nod1, nod2]['Damage_Status'] = val + self.network.edges[nod1, nod2]["Damage_Status"] = val - nx.get_edge_attributes(self.network, 'Damage_Status') + nx.get_edge_attributes(self.network, "Damage_Status") # calculate the travel efficiency based on different # performance metrics based on travel time @@ -206,15 +237,14 @@ def evaluate_solution(self, final): # based on WIPW elif self.pm == 0: - te = WIPW.tipw_index(self.network, - self.all_ipw, self.path_adt) + te = WIPW.tipw_index(self.network, self.all_ipw, self.path_adt) - numerator += te * schedule_time[ii] * (schedule_time[ii] - - schedule_time[ - ii - inte]) - aa = te - denominator += te * (schedule_time[ii] - - schedule_time[ii - inte]) + numerator += ( + te + * schedule_time[ii] + * (schedule_time[ii] - schedule_time[ii - inte]) + ) + denominator += te * (schedule_time[ii] - schedule_time[ii - inte]) # calculate the skewness of the recovery trajectory try: @@ -234,8 +264,11 @@ def evaluate_solution(self, final): if final == 0: return self.objectives[0], self.objectives[1] else: - return self.objectives[0], self.objectives[1], \ - self.sch[self.objectives[0], self.objectives[1]] + return ( + self.objectives[0], + self.objectives[1], + self.sch[self.objectives[0], self.objectives[1]], + ) def mutate(self): """ diff --git a/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py b/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py index 065de4584..452db04e0 100644 --- a/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py +++ b/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py @@ -9,45 +9,58 @@ import copy import random -from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import TrafficFlowRecoveryUtil +from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import ( + TrafficFlowRecoveryUtil, +) from pyincore.analyses.trafficflowrecovery.nsga2 import NSGAII from pyincore.analyses.trafficflowrecovery import WIPW as WIPW -from pyincore.analyses.trafficflowrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution +from pyincore.analyses.trafficflowrecovery.post_disaster_long_term_solution import ( + PostDisasterLongTermSolution, +) from pyincore import BaseAnalysis class TrafficFlowRecovery(BaseAnalysis): - def run(self): - """ Executes traffic flow recovery analysis""" + """Executes traffic flow recovery analysis""" # read the nodes in traffic flow node_set = self.get_input_dataset("nodes").get_inventory_reader() nodes = list(node_set) - node_df = pd.DataFrame(columns=nodes[0]['properties'].keys()) + node_df = pd.DataFrame(columns=nodes[0]["properties"].keys()) for node in nodes: - node_properties_df = pd.DataFrame.from_dict(node['properties'], orient='index').T + node_properties_df = pd.DataFrame.from_dict( + node["properties"], orient="index" + ).T node_df = pd.concat([node_df, node_properties_df], ignore_index=True) # read the link in traffic flow link_set = 
self.get_input_dataset("links").get_inventory_reader() links = list(link_set) - arc_df = pd.DataFrame(columns=links[0]['properties'].keys()) + arc_df = pd.DataFrame(columns=links[0]["properties"].keys()) for link in links: - link_properties_df = pd.DataFrame.from_dict(link['properties'], orient='index').T + link_properties_df = pd.DataFrame.from_dict( + link["properties"], orient="index" + ).T arc_df = pd.concat([arc_df, link_properties_df], ignore_index=True) # read bridge information bridge_set = self.get_input_dataset("bridges").get_inventory_reader() bridges = list(bridge_set) - bridge_df = pd.DataFrame(columns=bridges[0]['properties'].keys()) + bridge_df = pd.DataFrame(columns=bridges[0]["properties"].keys()) for bridge in bridges: - bridge_properties_df = pd.DataFrame.from_dict(bridge['properties'], orient='index').T + bridge_properties_df = pd.DataFrame.from_dict( + bridge["properties"], orient="index" + ).T bridge_df = pd.concat([bridge_df, bridge_properties_df], ignore_index=True) # read bridge damage information - bridge_damage_value = self.get_input_dataset("bridge_damage_value").get_json_reader() - unrepaired_bridge = self.get_input_dataset("unrepaired_bridge").get_json_reader() + bridge_damage_value = self.get_input_dataset( + "bridge_damage_value" + ).get_json_reader() + unrepaired_bridge = self.get_input_dataset( + "unrepaired_bridge" + ).get_json_reader() seed = 333 random.seed(seed) @@ -84,13 +97,14 @@ def run(self): path_adt[node, pairnode] = {} path_adt[pairnode, node] = {} for key, value in all_ipw[node, pairnode].items(): - path_adt[node, pairnode][key] \ - = WIPW.path_adt_from_edges(network, value) - path_adt[pairnode, node][key] \ - = path_adt[node, pairnode][key] + path_adt[node, pairnode][key] = WIPW.path_adt_from_edges( + network, value + ) + path_adt[pairnode, node][key] = path_adt[node, pairnode][ + key + ] else: all_ipw = None - all_ipw_length = None path_adt = None num_objectives = 2 @@ -100,10 +114,19 @@ def run(self): p = [] for i in range(ini_num_population): - p.append(PostDisasterLongTermSolution(unrepaired_bridge, node_df, - arc_df, bridge_df, - bridge_damage_value, network, - pm, all_ipw, path_adt)) + p.append( + PostDisasterLongTermSolution( + unrepaired_bridge, + node_df, + arc_df, + bridge_df, + bridge_damage_value, + network, + pm, + all_ipw, + path_adt, + ) + ) first_front = nsga2.run(p, population_size, num_generation) @@ -122,10 +145,14 @@ def run(self): # output the optimal solution of bridge repair schedule based on NSGA bridge_recovery = pd.DataFrame( - {"Bridge ID": bridge_set, "Ending Time": ending_time}) + {"Bridge ID": bridge_set, "Ending Time": ending_time} + ) self.set_result_csv_data( - "optimal_solution_of_bridge_repair_schedule", bridge_recovery, - name="optimal_solution_of_bridge_repair_schedule", source="dataframe") + "optimal_solution_of_bridge_repair_schedule", + bridge_recovery, + name="optimal_solution_of_bridge_repair_schedule", + source="dataframe", + ) network = TrafficFlowRecoveryUtil.nw_reconstruct(node_df, arc_df, adt_data) @@ -149,7 +176,6 @@ def run(self): # calculate the traffic flow network efficiency efficiency = [] for ii in range(len(schedule_time)): - # update the damage status of bridge for bridge in bridge_repair: if fg[bridge] == 0: @@ -158,20 +184,24 @@ def run(self): fg[bridge] = 1 for i in range(len(arc_df)): - nod1 = node_df.loc[node_df['ID'] == arc_df['fromnode'][i], 'guid'].values[0] - nod2 = node_df.loc[node_df['ID'] == arc_df['tonode'][i], 'guid'].values[0] - network.edges[nod1, nod2]['Damage_Status'] = 
0 + nod1 = node_df.loc[ + node_df["ID"] == arc_df["fromnode"][i], "guid" + ].values[0] + nod2 = node_df.loc[node_df["ID"] == arc_df["tonode"][i], "guid"].values[ + 0 + ] + network.edges[nod1, nod2]["Damage_Status"] = 0 for key, val in temp_bridge_damage_value.items(): - linknwid = bridge_df.loc[bridge_df['guid'] == key, 'linkID'].values[0] + linknwid = bridge_df.loc[bridge_df["guid"] == key, "linkID"].values[0] - nod_id1 = arc_df[arc_df['id'] == linknwid]['fromnode'].values[0] - nod1 = node_df.loc[node_df['ID'] == nod_id1, 'guid'].values[0] + nod_id1 = arc_df[arc_df["id"] == linknwid]["fromnode"].values[0] + nod1 = node_df.loc[node_df["ID"] == nod_id1, "guid"].values[0] - nod_id2 = arc_df[arc_df['id'] == linknwid]['tonode'].values[0] - nod2 = node_df.loc[node_df['ID'] == nod_id2, 'guid'].values[0] + nod_id2 = arc_df[arc_df["id"] == linknwid]["tonode"].values[0] + nod2 = node_df.loc[node_df["ID"] == nod_id2, "guid"].values[0] - network.edges[nod1, nod2]['Damage_Status'] = val + network.edges[nod1, nod2]["Damage_Status"] = val # calculate different travel efficiency based on different # performance metrics @@ -194,9 +224,14 @@ def run(self): # output the recovery trajectory recovery_trajectory = pd.DataFrame( - {"Ending Time": schedule_time, "Travel Efficiency": efficiency}) - self.set_result_csv_data("overall_traffic_flow_recovery_trajectory", recovery_trajectory, - name="overall_traffic_flow_recovery_trajectory", source="dataframe") + {"Ending Time": schedule_time, "Travel Efficiency": efficiency} + ) + self.set_result_csv_data( + "overall_traffic_flow_recovery_trajectory", + recovery_trajectory, + name="overall_traffic_flow_recovery_trajectory", + source="dataframe", + ) return None @@ -208,103 +243,102 @@ def get_spec(self): """ return { - 'name': 'traffic-flow-recovery', - 'description': 'traffic flow recovery model', - 'input_parameters': [ + "name": "traffic-flow-recovery", + "description": "traffic flow recovery model", + "input_parameters": [ { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, { - 'id': 'pm', - 'required': True, - 'description': 'traffic flow performance metrics 0: WIPW, 1:Free flow travel time', - 'type': int + "id": "pm", + "required": True, + "description": "traffic flow performance metrics 0: WIPW, 1:Free flow travel time", + "type": int, }, { - 'id': 'ini_num_population', - 'required': True, - 'description': 'ini_num_population: 5 or 50', - 'type': int + "id": "ini_num_population", + "required": True, + "description": "ini_num_population: 5 or 50", + "type": int, }, { - 'id': 'population_size', - 'required': True, - 'description': 'population_size: 3 or 30', - 'type': int + "id": "population_size", + "required": True, + "description": "population_size: 3 or 30", + "type": int, }, { - 'id': 'num_generation', - 'required': True, - 'description': 'num_generation: 2 or 250', - 'type': int + "id": "num_generation", + "required": True, + "description": "num_generation: 2 or 250", + "type": int, }, { - 'id': 'mutation_rate', - 'required': True, - 'description': '0.1', - 'type': float + "id": "mutation_rate", + "required": True, + "description": "0.1", + "type": float, }, { - 'id': 'crossover_rate', - 'required': True, - 'description': '1.0', - 'type': float - } + "id": "crossover_rate", + "required": True, + "description": "1.0", + "type": float, + }, 
], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'nodes', - 'required': True, - 'description': 'road nodes', - 'type': ['ergo:roadNetwork'], + "id": "nodes", + "required": True, + "description": "road nodes", + "type": ["ergo:roadNetwork"], }, { - 'id': 'links', - 'required': True, - 'description': 'road links', - 'type': ['ergo:roadNetwork'], + "id": "links", + "required": True, + "description": "road links", + "type": ["ergo:roadNetwork"], }, { - 'id': 'bridges', - 'required': True, - 'description': 'bridges', - 'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'], + "id": "bridges", + "required": True, + "description": "bridges", + "type": ["ergo:bridges", "ergo:bridgesVer2", "ergo:bridgesVer3"], }, { - 'id': 'bridge_damage_value', - 'required': True, - 'description': '', - 'type': ['incore:bridgeDamageValue'] + "id": "bridge_damage_value", + "required": True, + "description": "", + "type": ["incore:bridgeDamageValue"], }, { - 'id': 'unrepaired_bridge', - 'required': True, - 'description': '', - 'type': ['incore:unrepairedBridge'] + "id": "unrepaired_bridge", + "required": True, + "description": "", + "type": ["incore:unrepairedBridge"], }, { - 'id': 'ADT', - 'required': True, - 'description': '', - 'type': ['incore:ADT'] - } - + "id": "ADT", + "required": True, + "description": "", + "type": ["incore:ADT"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'optimal_solution_of_bridge_repair_schedule', - 'description': 'List the Bridge id and its ending repair time.', - 'type': 'incore:transportationRepairSchedule' + "id": "optimal_solution_of_bridge_repair_schedule", + "description": "List the Bridge id and its ending repair time.", + "type": "incore:transportationRepairSchedule", }, { - 'id': 'overall_traffic_flow_recovery_trajectory', - 'description': 'shows the overall recovery trajectory of the ' + - 'traffic flow system. List the ending time and ' + - 'travel efficiency for the whole network.', - 'type': 'incore:trafficFlowRecovery' - } - ] + "id": "overall_traffic_flow_recovery_trajectory", + "description": "shows the overall recovery trajectory of the " + + "traffic flow system. 
List the ending time and " + + "travel efficiency for the whole network.", + "type": "incore:trafficFlowRecovery", + }, + ], } diff --git a/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py b/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py index 453dead6b..92fb5b0eb 100644 --- a/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py +++ b/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py @@ -14,7 +14,6 @@ class TrafficFlowRecoveryUtil: - @staticmethod def NBI_coordinate_mapping(NBI_file): """Coordinate in NBI is in format of xx(degree)xx(minutes)xx.xx(seconds) @@ -28,8 +27,10 @@ def NBI_coordinate_mapping(NBI_file): """ NBI = pd.read_csv(NBI_file) - NBI['LONG_017'] = NBI['LONG_017'].apply(lambda x: -1 * (GeoUtil.degree_to_decimal(x))) - NBI['LAT_016'] = NBI['LAT_016'].apply(lambda x: GeoUtil.degree_to_decimal(x)) + NBI["LONG_017"] = NBI["LONG_017"].apply( + lambda x: -1 * (GeoUtil.degree_to_decimal(x)) + ) + NBI["LAT_016"] = NBI["LAT_016"].apply(lambda x: GeoUtil.degree_to_decimal(x)) return NBI @@ -42,9 +43,11 @@ def get_average_daily_traffic(bridges, NBI_shapefile): for bridge in bridges: # convert lon and lat to the right format bridge_coord = GeoUtil.get_location(bridge) - nearest_feature, distance = GeoUtil.find_nearest_feature(NBI_features, bridge_coord) + nearest_feature, distance = GeoUtil.find_nearest_feature( + NBI_features, bridge_coord + ) - ADT[bridge['properties']['guid']] = nearest_feature['properties']['ADT_029'] + ADT[bridge["properties"]["guid"]] = nearest_feature["properties"]["ADT_029"] return ADT @@ -64,7 +67,7 @@ def convert_dmg_prob2state(dmg_results_filename): bridge_damage_value = {} unrepaired_bridge = [] - with open(dmg_results_filename, 'r') as f: + with open(dmg_results_filename, "r") as f: reader = csv.reader(f) next(reader) for row in reader: @@ -80,7 +83,7 @@ def convert_dmg_prob2state(dmg_results_filename): elif mean_damage >= 0.75 and mean_damage <= 1: bridge_damage_value[state_id] = 4 else: - raise ValueError('mean damage should not larger than 1!') + raise ValueError("mean damage should not larger than 1!") unrepaired_bridge = list(bridge_damage_value.keys()) @@ -103,17 +106,18 @@ def nw_reconstruct(node_df, arc_df, adt_data): network = nx.Graph() # add nodes to the network - network.add_nodes_from(node_df['guid']) + network.add_nodes_from(node_df["guid"]) # add arcs to the network for i in range(len(arc_df)): - fromnode = \ - node_df.loc[node_df['ID'] == arc_df['fromnode'][i], 'guid'].values[ - 0] - tonode = \ - node_df.loc[node_df['ID'] == arc_df['tonode'][i], 'guid'].values[0] - dis = arc_df['len_mile'][i] / arc_df['freeflowsp'][i] - network.add_edge(fromnode, tonode, distance=dis, adt=adt_data[arc_df['guid'][i]]) + fromnode = node_df.loc[ + node_df["ID"] == arc_df["fromnode"][i], "guid" + ].values[0] + tonode = node_df.loc[node_df["ID"] == arc_df["tonode"][i], "guid"].values[0] + dis = arc_df["len_mile"][i] / arc_df["freeflowsp"][i] + network.add_edge( + fromnode, tonode, distance=dis, adt=adt_data[arc_df["guid"][i]] + ) return network @@ -131,20 +135,21 @@ def traveltime_freeflow(temp_network): network = copy.deepcopy(temp_network) for Ed in temp_network.edges(): - if network.edges[Ed[0], Ed[1]]['Damage_Status'] > 2: + if network.edges[Ed[0], Ed[1]]["Damage_Status"] > 2: network.remove_edge(Ed[0], Ed[1]) - elif network.edges[Ed[0], Ed[1]]['Damage_Status'] == 2: - network.edges[Ed[0], Ed[1]]['distance'] \ - = network.edges[Ed[0], Ed[1]]['distance'] / 0.5 - elif network.edges[Ed[0], 
Ed[1]]['Damage_Status'] == 1: - network.edges[Ed[0], Ed[1]]['distance'] \ - = network.edges[Ed[0], Ed[1]]['distance'] / 0.75 + elif network.edges[Ed[0], Ed[1]]["Damage_Status"] == 2: + network.edges[Ed[0], Ed[1]]["distance"] = ( + network.edges[Ed[0], Ed[1]]["distance"] / 0.5 + ) + elif network.edges[Ed[0], Ed[1]]["Damage_Status"] == 1: + network.edges[Ed[0], Ed[1]]["distance"] = ( + network.edges[Ed[0], Ed[1]]["distance"] / 0.75 + ) num_node = len(network.nodes()) distance = [[0 for x in range(num_node)] for y in range(num_node)] - tdistance = dict(nx.all_pairs_dijkstra_path_length(network, - weight='distance')) + tdistance = dict(nx.all_pairs_dijkstra_path_length(network, weight="distance")) i = 0 for key1, value1 in tdistance.items(): j = 0 diff --git a/pyincore/analyses/transportationrecovery/WIPW.py b/pyincore/analyses/transportationrecovery/WIPW.py index e89d4f1f9..05750674f 100644 --- a/pyincore/analyses/transportationrecovery/WIPW.py +++ b/pyincore/analyses/transportationrecovery/WIPW.py @@ -28,7 +28,7 @@ def ipw_search(v, e): # calculate length of each link which is 1 for all edges length = {} - for (i, j) in edgeslist: + for i, j in edgeslist: length[i, j] = 1 length[j, i] = 1 @@ -55,8 +55,7 @@ def ipw_search(v, e): # the length of kth independent path between node pair path_length = {} - for (w, q) in nodespair: - + for w, q in nodespair: # creat a temp list to search path temp_edgelist = copy.deepcopy(edgeslist) @@ -73,15 +72,13 @@ def ipw_search(v, e): g_local.add_nodes_from(nodelist) for headnode, tailnode in temp_edgelist: - g_local.add_edge(headnode, tailnode, length=length[headnode, - tailnode]) + g_local.add_edge(headnode, tailnode, length=length[headnode, tailnode]) try: - temp = copy.deepcopy(nx.shortest_path(g_local, - source=w, - target=q, - weight='length')) - except nx.NetworkXNoPath as e: + temp = copy.deepcopy( + nx.shortest_path(g_local, source=w, target=q, weight="length") + ) + except nx.NetworkXNoPath: # print(w,q) # print("NetworkXNoPath") temp = [] @@ -89,7 +86,6 @@ def ipw_search(v, e): # if there is a path connecting the source and target, # start to calculate IPW if temp: - # find the shortest path ipath[w, q][k] = copy.deepcopy(temp) path_length[w, q][k] = 0 @@ -98,22 +94,20 @@ def ipw_search(v, e): if len(ipath[w, q][k]) == 2: # for the path just has two nodes # (origin and destination) - ipathtuple.append((ipath[w, q][k][0], - ipath[w, q][k][1])) - path_length[w, q][k] = length[ipath[w, q][k][0], - ipath[w, q][k][1]] + ipathtuple.append((ipath[w, q][k][0], ipath[w, q][k][1])) + path_length[w, q][k] = length[ipath[w, q][k][0], ipath[w, q][k][1]] else: # for the path has more than two nodes for p in range(0, len(ipath[w, q][k]) - 1): - ipathtuple.append((ipath[w, q][k][p], - ipath[w, q][k][p + 1])) + ipathtuple.append((ipath[w, q][k][p], ipath[w, q][k][p + 1])) - path_length[w, q][k] += length[ipath[w, q][k][p], - ipath[w, q][k][p + 1]] + path_length[w, q][k] += length[ + ipath[w, q][k][p], ipath[w, q][k][p + 1] + ] # delete edges that used in previous shortest paths - for (s, t) in ipathtuple: + for s, t in ipathtuple: if (s, t) in temp_edgelist: temp_edgelist.remove((s, t)) # temp_edgelist.remove((t, s)) @@ -126,11 +120,11 @@ def ipw_search(v, e): return ipath, path_length -def tipw_index(g, l, path_adt): +def tipw_index(g, p, path_adt): """ caculate the TIPW index of the network :param g: graph - :param l: Indpendent pathway + :param p: Indpendent pathway :param path_adt: Adt of the path :return: TIPW index of the network """ @@ -142,8 +136,9 @@ def 
tipw_index(g, l, path_adt): for key in path_adt.keys(): normal_path_adt[key] = {} for i, j in path_adt[key].items(): - normal_path_adt[key][i] = len(path_adt[key].values()) * j \ - / sum(path_adt[key].values()) + normal_path_adt[key][i] = ( + len(path_adt[key].values()) * j / sum(path_adt[key].values()) + ) # compute the TIPW of node node_tipw = {} @@ -151,23 +146,22 @@ def tipw_index(g, l, path_adt): node_tipw[node] = 0 for pairnode in gnodes: if pairnode != node: - if (node, pairnode) in l.keys(): - for key, value in l[node, pairnode].items(): - node_tipw[node] \ - += normal_path_adt[node, pairnode][key] \ - * path_service_level_edges(g, value) - elif (pairnode, node) in l.keys(): - for key, value in l[pairnode, node].items(): - node_tipw[node] \ - += normal_path_adt[pairnode, node][key] \ - * path_service_level_edges(g, value) + if (node, pairnode) in p.keys(): + for key, value in p[node, pairnode].items(): + node_tipw[node] += normal_path_adt[node, pairnode][ + key + ] * path_service_level_edges(g, value) + elif (pairnode, node) in p.keys(): + for key, value in p[pairnode, node].items(): + node_tipw[node] += normal_path_adt[pairnode, node][ + key + ] * path_service_level_edges(g, value) # caculate the TIPW index tipw_index_val = 0 for node in gnodes: # network IPW - tipw_index_val \ - += (1 / float(len(gnodes)) * node_tipw[node]) / (len(gnodes) - 1) + tipw_index_val += (1 / float(len(gnodes)) * node_tipw[node]) / (len(gnodes) - 1) return tipw_index_val @@ -182,8 +176,7 @@ def path_service_level_edges(g, path): service_level = 1 for i in range(len(path) - 1): - service_level \ - *= (1 - g.edges[path[i], path[i + 1]]['Damage_Status'] / 4.0) + service_level *= 1 - g.edges[path[i], path[i + 1]]["Damage_Status"] / 4.0 return service_level @@ -195,8 +188,8 @@ def path_adt_from_edges(g, path): :return: reliability """ - adt = max(nx.get_edge_attributes(g, 'adt').values()) + adt = max(nx.get_edge_attributes(g, "adt").values()) for i in range(len(path) - 1): - adt = min(adt, g.edges[path[i], path[i + 1]]['adt']) + adt = min(adt, g.edges[path[i], path[i + 1]]["adt"]) return adt diff --git a/pyincore/analyses/transportationrecovery/__init__.py b/pyincore/analyses/transportationrecovery/__init__.py index 944a1706c..cf682a14e 100644 --- a/pyincore/analyses/transportationrecovery/__init__.py +++ b/pyincore/analyses/transportationrecovery/__init__.py @@ -6,7 +6,13 @@ from pyincore.analyses.transportationrecovery.nsga2 import Solution from pyincore.analyses.transportationrecovery.nsga2 import NSGAII -from pyincore.analyses.transportationrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution +from pyincore.analyses.transportationrecovery.post_disaster_long_term_solution import ( + PostDisasterLongTermSolution, +) from pyincore.analyses.transportationrecovery import WIPW -from pyincore.analyses.transportationrecovery.transportationrecovery import TransportationRecovery -from pyincore.analyses.transportationrecovery.transportationrecoveryutil import TransportationRecoveryUtil +from pyincore.analyses.transportationrecovery.transportationrecovery import ( + TransportationRecovery, +) +from pyincore.analyses.transportationrecovery.transportationrecoveryutil import ( + TransportationRecoveryUtil, +) diff --git a/pyincore/analyses/transportationrecovery/nsga2.py b/pyincore/analyses/transportationrecovery/nsga2.py index c041b0427..98e804f95 100644 --- a/pyincore/analyses/transportationrecovery/nsga2.py +++ b/pyincore/analyses/transportationrecovery/nsga2.py @@ -4,7 +4,8 @@ # terms of the 
Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import sys, random +import sys +import random class Solution: @@ -70,6 +71,7 @@ def crowded_comparison(s1, s2): class NSGAII: """Implementation of NSGA-II algorithm.""" + current_evaluated_objective = 0 def __init__(self, num_objectives, mutation_rate=0.1, crossover_rate=1.0): @@ -104,7 +106,6 @@ def run(self, p, population_size, num_generations): first_front = [] for i in range(num_generations): - r = [] r.extend(p) @@ -285,8 +286,8 @@ def crowding_distance_assignment(self, front): for obj_index in range(self.num_objectives): self.sort_objective(front, obj_index) - front[0].distance = float('inf') - front[len(front) - 1].distance = float('inf') + front[0].distance = float("inf") + front[len(front) - 1].distance = float("inf") for i in range(1, len(front) - 1): - front[i].distance += (front[i + 1].distance - front[i - 1].distance) + front[i].distance += front[i + 1].distance - front[i - 1].distance diff --git a/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py b/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py index 5681b5505..7979bd949 100644 --- a/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py +++ b/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py @@ -9,7 +9,9 @@ import random from pyincore.analyses.transportationrecovery.nsga2 import Solution from pyincore.analyses.transportationrecovery import WIPW as WIPW -from pyincore.analyses.transportationrecovery.transportationrecoveryutil import TransportationRecoveryUtil +from pyincore.analyses.transportationrecovery.transportationrecoveryutil import ( + TransportationRecoveryUtil, +) class PostDisasterLongTermSolution(Solution): @@ -27,8 +29,18 @@ class PostDisasterLongTermSolution(Solution): # complete damage state compRepair = 230 - def __init__(self, candidates, node_df, arc_df, bridge_df, bridge_damage_value, - network, pm, all_ipw, path_adt): + def __init__( + self, + candidates, + node_df, + arc_df, + bridge_df, + bridge_damage_value, + network, + pm, + all_ipw, + path_adt, + ): """ initialize the chromosomes """ @@ -70,41 +82,44 @@ def evaluate_solution(self, final): end = {} schedule_time = [] - l = copy.deepcopy(self.attributes) + attributes_copy = copy.deepcopy(self.attributes) - for i in range(len(l)): + for i in range(len(attributes_copy)): if i <= simax - 1: - # repair start from time 0 start[candidate_schedule[i]] = 0.0 # if damage state of bridge is slight damage, repair time # is slightRepair if temp_bridge_damage_value[candidate_schedule[i]] == 1: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.slightRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.slightRepair + ) # if damage state of bridge is moderate damage, repair time # is modRepair elif temp_bridge_damage_value[candidate_schedule[i]] == 2: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.modRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.modRepair + ) # if damage state of bridge is extensive damage, repair time # is extRepair elif temp_bridge_damage_value[candidate_schedule[i]] == 3: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.extRepair + end[candidate_schedule[i]] = ( + 
start[candidate_schedule[i]] + + PostDisasterLongTermSolution.extRepair + ) # if damage state of bridge is complete damage, repair time # is compRepair else: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.compRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.compRepair + ) # store the ending time schedule_time.append(end[candidate_schedule[i]]) @@ -119,24 +134,28 @@ def evaluate_solution(self, final): start[candidate_schedule[i]] = schedule_time.pop(0) if temp_bridge_damage_value[candidate_schedule[i]] == 1: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.slightRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.slightRepair + ) elif temp_bridge_damage_value[candidate_schedule[i]] == 2: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.modRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.modRepair + ) elif temp_bridge_damage_value[candidate_schedule[i]] == 3: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.extRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.extRepair + ) else: - end[candidate_schedule[i]] \ - = start[candidate_schedule[i]] \ - + PostDisasterLongTermSolution.compRepair + end[candidate_schedule[i]] = ( + start[candidate_schedule[i]] + + PostDisasterLongTermSolution.compRepair + ) schedule_time.append(end[candidate_schedule[i]]) schedule_time.sort() @@ -169,9 +188,7 @@ def evaluate_solution(self, final): pl = len(schedule_time) for ii in range(pl % inte - 1 + inte, pl, inte): - if ii > 0: - # update the damage status of bridges for bridge in self_candidates: if fg[bridge] == 0: @@ -181,22 +198,36 @@ def evaluate_solution(self, final): fg[bridge] = 1 for i in range(len(self.arc_df)): - nod1 = self.node_df.loc[self.node_df['ID'] == self.arc_df['fromnode'][i], 'guid'].values[0] - nod2 = self.node_df.loc[self.node_df['ID'] == self.arc_df['tonode'][i], 'guid'].values[0] - self.network.edges[nod1, nod2]['Damage_Status'] = 0 + nod1 = self.node_df.loc[ + self.node_df["ID"] == self.arc_df["fromnode"][i], "guid" + ].values[0] + nod2 = self.node_df.loc[ + self.node_df["ID"] == self.arc_df["tonode"][i], "guid" + ].values[0] + self.network.edges[nod1, nod2]["Damage_Status"] = 0 for key, val in temp_bridge_damage_value.items(): - linknwid = self.bridge_df.loc[self.bridge_df['guid'] == key, 'linkID'].values[0] + linknwid = self.bridge_df.loc[ + self.bridge_df["guid"] == key, "linkID" + ].values[0] - nod_id1 = self.arc_df[self.arc_df['id'] == linknwid]['fromnode'].values[0] - nod1 = self.node_df.loc[self.node_df['ID'] == nod_id1, 'guid'].values[0] + nod_id1 = self.arc_df[self.arc_df["id"] == linknwid][ + "fromnode" + ].values[0] + nod1 = self.node_df.loc[ + self.node_df["ID"] == nod_id1, "guid" + ].values[0] - nod_id2 = self.arc_df[self.arc_df['id'] == linknwid]['tonode'].values[0] - nod2 = self.node_df.loc[self.node_df['ID'] == nod_id2, 'guid'].values[0] + nod_id2 = self.arc_df[self.arc_df["id"] == linknwid][ + "tonode" + ].values[0] + nod2 = self.node_df.loc[ + self.node_df["ID"] == nod_id2, "guid" + ].values[0] - self.network.edges[nod1, nod2]['Damage_Status'] = val + self.network.edges[nod1, nod2]["Damage_Status"] = val - nx.get_edge_attributes(self.network, 'Damage_Status') + 
nx.get_edge_attributes(self.network, "Damage_Status") # calculate the travel efficiency based on different # performance metrics based on travel time @@ -206,15 +237,14 @@ def evaluate_solution(self, final): # based on WIPW elif self.pm == 0: - te = WIPW.tipw_index(self.network, - self.all_ipw, self.path_adt) + te = WIPW.tipw_index(self.network, self.all_ipw, self.path_adt) - numerator += te * schedule_time[ii] * (schedule_time[ii] - - schedule_time[ - ii - inte]) - aa = te - denominator += te * (schedule_time[ii] - - schedule_time[ii - inte]) + numerator += ( + te + * schedule_time[ii] + * (schedule_time[ii] - schedule_time[ii - inte]) + ) + denominator += te * (schedule_time[ii] - schedule_time[ii - inte]) # calculate the skewness of the recovery trajectory try: @@ -234,8 +264,11 @@ def evaluate_solution(self, final): if final == 0: return self.objectives[0], self.objectives[1] else: - return self.objectives[0], self.objectives[1], \ - self.sch[self.objectives[0], self.objectives[1]] + return ( + self.objectives[0], + self.objectives[1], + self.sch[self.objectives[0], self.objectives[1]], + ) def mutate(self): """ diff --git a/pyincore/analyses/transportationrecovery/transportationrecovery.py b/pyincore/analyses/transportationrecovery/transportationrecovery.py index ec63b5202..e114989cf 100644 --- a/pyincore/analyses/transportationrecovery/transportationrecovery.py +++ b/pyincore/analyses/transportationrecovery/transportationrecovery.py @@ -9,8 +9,11 @@ from pyincore.analyses.trafficflowrecovery import TrafficFlowRecovery -@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use TrafficFlowRecovery instead.") -class TransportationRecovery(): +@deprecated( + version="1.19.0", + reason="This class will be deprecated soon. Use TrafficFlowRecovery instead.", +) +class TransportationRecovery: def __init__(self, incore_client): self._delegate = TrafficFlowRecovery(incore_client) diff --git a/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py b/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py index 477dfa134..7b0142dcd 100644 --- a/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py +++ b/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py @@ -14,7 +14,6 @@ class TransportationRecoveryUtil: - @staticmethod def NBI_coordinate_mapping(NBI_file): """Coordinate in NBI is in format of xx(degree)xx(minutes)xx.xx(seconds) @@ -28,8 +27,10 @@ def NBI_coordinate_mapping(NBI_file): """ NBI = pd.read_csv(NBI_file) - NBI['LONG_017'] = NBI['LONG_017'].apply(lambda x: -1 * (GeoUtil.degree_to_decimal(x))) - NBI['LAT_016'] = NBI['LAT_016'].apply(lambda x: GeoUtil.degree_to_decimal(x)) + NBI["LONG_017"] = NBI["LONG_017"].apply( + lambda x: -1 * (GeoUtil.degree_to_decimal(x)) + ) + NBI["LAT_016"] = NBI["LAT_016"].apply(lambda x: GeoUtil.degree_to_decimal(x)) return NBI @@ -42,9 +43,11 @@ def get_average_daily_traffic(bridges, NBI_shapefile): for bridge in bridges: # convert lon and lat to the right format bridge_coord = GeoUtil.get_location(bridge) - nearest_feature, distance = GeoUtil.find_nearest_feature(NBI_features, bridge_coord) + nearest_feature, distance = GeoUtil.find_nearest_feature( + NBI_features, bridge_coord + ) - ADT[bridge['properties']['guid']] = nearest_feature['properties']['ADT_029'] + ADT[bridge["properties"]["guid"]] = nearest_feature["properties"]["ADT_029"] return ADT @@ -64,7 +67,7 @@ def convert_dmg_prob2state(dmg_results_filename): bridge_damage_value = {} unrepaired_bridge = [] - with 
open(dmg_results_filename, 'r') as f: + with open(dmg_results_filename, "r") as f: reader = csv.reader(f) next(reader) for row in reader: @@ -80,7 +83,7 @@ def convert_dmg_prob2state(dmg_results_filename): elif mean_damage >= 0.75 and mean_damage <= 1: bridge_damage_value[state_id] = 4 else: - raise ValueError('mean damage should not larger than 1!') + raise ValueError("mean damage should not larger than 1!") unrepaired_bridge = list(bridge_damage_value.keys()) @@ -103,17 +106,18 @@ def nw_reconstruct(node_df, arc_df, adt_data): network = nx.Graph() # add nodes to the network - network.add_nodes_from(node_df['guid']) + network.add_nodes_from(node_df["guid"]) # add arcs to the network for i in range(len(arc_df)): - fromnode = \ - node_df.loc[node_df['ID'] == arc_df['fromnode'][i], 'guid'].values[ - 0] - tonode = \ - node_df.loc[node_df['ID'] == arc_df['tonode'][i], 'guid'].values[0] - dis = arc_df['len_mile'][i] / arc_df['freeflowsp'][i] - network.add_edge(fromnode, tonode, distance=dis, adt=adt_data[arc_df['guid'][i]]) + fromnode = node_df.loc[ + node_df["ID"] == arc_df["fromnode"][i], "guid" + ].values[0] + tonode = node_df.loc[node_df["ID"] == arc_df["tonode"][i], "guid"].values[0] + dis = arc_df["len_mile"][i] / arc_df["freeflowsp"][i] + network.add_edge( + fromnode, tonode, distance=dis, adt=adt_data[arc_df["guid"][i]] + ) return network @@ -131,20 +135,21 @@ def traveltime_freeflow(temp_network): network = copy.deepcopy(temp_network) for Ed in temp_network.edges(): - if network.edges[Ed[0], Ed[1]]['Damage_Status'] > 2: + if network.edges[Ed[0], Ed[1]]["Damage_Status"] > 2: network.remove_edge(Ed[0], Ed[1]) - elif network.edges[Ed[0], Ed[1]]['Damage_Status'] == 2: - network.edges[Ed[0], Ed[1]]['distance'] \ - = network.edges[Ed[0], Ed[1]]['distance'] / 0.5 - elif network.edges[Ed[0], Ed[1]]['Damage_Status'] == 1: - network.edges[Ed[0], Ed[1]]['distance'] \ - = network.edges[Ed[0], Ed[1]]['distance'] / 0.75 + elif network.edges[Ed[0], Ed[1]]["Damage_Status"] == 2: + network.edges[Ed[0], Ed[1]]["distance"] = ( + network.edges[Ed[0], Ed[1]]["distance"] / 0.5 + ) + elif network.edges[Ed[0], Ed[1]]["Damage_Status"] == 1: + network.edges[Ed[0], Ed[1]]["distance"] = ( + network.edges[Ed[0], Ed[1]]["distance"] / 0.75 + ) num_node = len(network.nodes()) distance = [[0 for x in range(num_node)] for y in range(num_node)] - tdistance = dict(nx.all_pairs_dijkstra_path_length(network, - weight='distance')) + tdistance = dict(nx.all_pairs_dijkstra_path_length(network, weight="distance")) i = 0 for key1, value1 in tdistance.items(): j = 0 diff --git a/pyincore/analyses/waterfacilitydamage/__init__.py b/pyincore/analyses/waterfacilitydamage/__init__.py index 54340e9a9..fc06012d3 100644 --- a/pyincore/analyses/waterfacilitydamage/__init__.py +++ b/pyincore/analyses/waterfacilitydamage/__init__.py @@ -3,4 +3,6 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.waterfacilitydamage.waterfacilitydamage import WaterFacilityDamage +from pyincore.analyses.waterfacilitydamage.waterfacilitydamage import ( + WaterFacilityDamage, +) diff --git a/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py b/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py index d6aa177eb..62d844b66 100644 --- a/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py +++ b/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py @@ -11,15 +11,18 @@ import random from itertools import repeat -from pyincore import BaseAnalysis, HazardService, FragilityService, GeoUtil, \ - 
AnalysisUtil +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + GeoUtil, + AnalysisUtil, +) from pyincore.models.dfr3curve import DFR3Curve class WaterFacilityDamage(BaseAnalysis): - """Computes water facility damage for an earthquake tsunami, tornado, or hurricane exposure. - - """ + """Computes water facility damage for an earthquake tsunami, tornado, or hurricane exposure.""" DEFAULT_EQ_FRAGILITY_KEY = "pga" DEFAULT_TSU_FRAGILITY_KEY = "Non-Retrofit inundationDepth Fragility ID Code" @@ -41,68 +44,87 @@ def run(self): """ # Facility dataset inventory_set = self.get_input_dataset( - "water_facilities").get_inventory_reader() + "water_facilities" + ).get_inventory_reader() # get input hazard - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + ( + hazard, + hazard_type, + hazard_dataset_id, + ) = self.create_hazard_object_from_input_params() user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, - len(inventory_set), - user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(inventory_set), user_defined_cpu + ) avg_bulk_input_size = int(len(inventory_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(inventory_set) while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size (ds_results, damage_results) = self.waterfacility_damage_concurrent_futures( - self.waterfacilityset_damage_analysis_bulk_input, num_workers, - inventory_args, repeat(hazard), repeat(hazard_type), repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") + self.waterfacilityset_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + self.set_result_csv_data( + "result", ds_results, name=self.get_parameter("result_name") + ) + self.set_result_json_data( + "metadata", + damage_results, + name=self.get_parameter("result_name") + "_additional_info", + ) return True - def waterfacility_damage_concurrent_futures(self, function_name, - parallel_processes, - *args): + def waterfacility_damage_concurrent_futures( + self, function_name, parallel_processes, *args + ): """Utilizes concurrent.future module. - Args: - function_name (function): The function to be parallelized. - parallel_processes (int): Number of workers in parallelization. - *args: All the arguments in order to pass into parameter function_name. + Args: + function_name (function): The function to be parallelized. + parallel_processes (int): Number of workers in parallelization. + *args: All the arguments in order to pass into parameter function_name. 
- Returns: - list: A list of ordered dictionaries with water facility damage values - list: A list of ordered dictionaries with other water facility data/metadata + Returns: + list: A list of ordered dictionaries with water facility damage values + list: A list of ordered dictionaries with other water facility data/metadata """ output_ds = [] output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=parallel_processes) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=parallel_processes + ) as executor: for ret1, ret2 in executor.map(function_name, *args): output_ds.extend(ret1) output_dmg.extend(ret2) return output_ds, output_dmg - def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard_type, hazard_dataset_id): + def waterfacilityset_damage_analysis_bulk_input( + self, facilities, hazard, hazard_type, hazard_dataset_id + ): """Gets applicable fragilities and calculates damage Args: @@ -132,23 +154,24 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard fragility_key = self.get_parameter("fragility_key") if fragility_key is None: - if hazard_type == 'tsunami': + if hazard_type == "tsunami": fragility_key = self.DEFAULT_TSU_FRAGILITY_KEY - elif hazard_type == 'earthquake': + elif hazard_type == "earthquake": fragility_key = self.DEFAULT_EQ_FRAGILITY_KEY else: raise ValueError( - "Hazard type other than Earthquake and Tsunami are not currently supported.") + "Hazard type other than Earthquake and Tsunami are not currently supported." + ) self.set_parameter("fragility_key", fragility_key) # Obtain the fragility set fragility_sets = self.fragilitysvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), facilities, fragility_key) + self.get_input_dataset("dfr3_mapping_set"), facilities, fragility_key + ) # Obtain the liquefaction fragility Key - liquefaction_fragility_key = self.get_parameter( - "liquefaction_fragility_key") + liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key") if hazard_type == "earthquake": if self.get_parameter("use_liquefaction") is True: @@ -158,12 +181,16 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard use_liquefaction = self.get_parameter("use_liquefaction") # Obtain the geology dataset - geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id") + geology_dataset_id = self.get_parameter( + "liquefaction_geology_dataset_id" + ) if geology_dataset_id is not None: fragility_sets_liq = self.fragilitysvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), facilities, - liquefaction_fragility_key) + self.get_input_dataset("dfr3_mapping_set"), + facilities, + liquefaction_fragility_key, + ) if fragility_sets_liq is not None: liquefaction_available = True @@ -185,11 +212,7 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard loc = str(location.y) + "," + str(location.x) demands = fragility_set.demand_types units = fragility_set.demand_units - value = { - "demands": demands, - "units": units, - "loc": loc - } + value = {"demands": demands, "units": units, "loc": loc} values_payload.append(value) mapped_waterfacilities.append(facility) @@ -198,27 +221,25 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard fragility_set_liq = fragility_sets_liq[facility["id"]] demands_liq = fragility_set_liq.demand_types units_liq = fragility_set_liq.demand_units - value_liq = { - "demands": demands_liq, - "units": units_liq, - "loc": loc - } 
+ value_liq = {"demands": demands_liq, "units": units_liq, "loc": loc} values_payload_liq.append(value_liq) else: unmapped_waterfacilities.append(facility) del facilities - if hazard_type == 'earthquake' or 'tsunami': + if hazard_type == "earthquake" or hazard_type == "tsunami": hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) else: - raise ValueError("The provided hazard type is not supported yet by this analysis") + raise ValueError( + "The provided hazard type is not supported yet by this analysis" + ) # Check if liquefaction is applicable if liquefaction_available: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, - geology_dataset_id, - values_payload_liq) + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload_liq + ) # Calculate LS and DS facility_results = [] @@ -236,7 +257,9 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard hazard_std_dev = random.random() if isinstance(fragility_set.fragility_curves[0], DFR3Curve): - hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) + hazard_vals = AnalysisUtil.update_precision_of_lists( + hazard_resp[i]["hazardValues"] + ) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] @@ -245,70 +268,97 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard for j, d in enumerate(fragility_set.demand_types): hval_dict[d] = hazard_vals[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): - facility_args = fragility_set.construct_expression_args_from_inventory(facility) - limit_states = \ - fragility_set.calculate_limit_state(hval_dict, - std_dev=hazard_std_dev, - inventory_type='water_facility', - **facility_args) + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_resp[i]["hazardValues"] + ): + facility_args = ( + fragility_set.construct_expression_args_from_inventory(facility) + ) + limit_states = fragility_set.calculate_limit_state( + hval_dict, + std_dev=hazard_std_dev, + inventory_type="water_facility", + **facility_args + ) # Evaluate liquefaction: if it is not none, then liquefaction is available if liquefaction_resp is not None: fragility_set_liq = fragility_sets_liq[facility["id"]] if isinstance(fragility_set_liq.fragility_curves[0], DFR3Curve): - liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"]) + liq_hazard_vals = AnalysisUtil.update_precision_of_lists( + liquefaction_resp[i]["pgdValues"] + ) liq_demand_types = liquefaction_resp[i]["demands"] liq_demand_units = liquefaction_resp[i]["units"] - liquefaction_prob = liquefaction_resp[i]['liqProbability'] + liquefaction_prob = liquefaction_resp[i]["liqProbability"] hval_dict_liq = dict() for j, d in enumerate(fragility_set_liq.demand_types): hval_dict_liq[d] = liq_hazard_vals[j] - facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(facility) - pgd_limit_states = \ - fragility_set_liq.calculate_limit_state( - hval_dict_liq, std_dev=hazard_std_dev, inventory_type="water_facility", - **facility_liq_args) + facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory( + facility + ) + pgd_limit_states = fragility_set_liq.calculate_limit_state( + hval_dict_liq, + std_dev=hazard_std_dev, + inventory_type="water_facility", + **facility_liq_args + ) else: - raise ValueError("One of the fragilities is in deprecated format. 
" - "This should not happen If you are seeing this please report the issue.") - - limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states) - - dmg_intervals = fragility_set.calculate_damage_interval(limit_states, - hazard_type=hazard_type, - inventory_type='water_facility') + raise ValueError( + "One of the fragilities is in deprecated format. " + "This should not happen If you are seeing this please report the issue." + ) + + limit_states = AnalysisUtil.adjust_limit_states_for_pgd( + limit_states, pgd_limit_states + ) + + dmg_intervals = fragility_set.calculate_damage_interval( + limit_states, + hazard_type=hazard_type, + inventory_type="water_facility", + ) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." + ) # TODO: ideally, this goes into a single variable declaration section - facility_result = {'guid': facility['properties']['guid'], **limit_states, **dmg_intervals} - facility_result['haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) + facility_result = { + "guid": facility["properties"]["guid"], + **limit_states, + **dmg_intervals, + } + facility_result[ + "haz_expose" + ] = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) damage_result = dict() - damage_result['guid'] = facility['properties']['guid'] - damage_result['fragility_id'] = fragility_set.id - damage_result['demandtypes'] = demand_types - damage_result['demandunits'] = demand_units - damage_result['hazardtype'] = hazard_type - damage_result['hazardvals'] = hazard_vals + damage_result["guid"] = facility["properties"]["guid"] + damage_result["fragility_id"] = fragility_set.id + damage_result["demandtypes"] = demand_types + damage_result["demandunits"] = demand_units + damage_result["hazardtype"] = hazard_type + damage_result["hazardvals"] = hazard_vals if use_liquefaction and fragility_sets_liq and geology_dataset_id: - damage_result['liq_fragility_id'] = fragility_sets_liq[facility["id"]].id - damage_result['liqdemandtypes'] = liq_demand_types - damage_result['liqdemandunits'] = liq_demand_units - damage_result['liqhazval'] = liq_hazard_vals - damage_result['liqprobability'] = liquefaction_prob + damage_result["liq_fragility_id"] = fragility_sets_liq[ + facility["id"] + ].id + damage_result["liqdemandtypes"] = liq_demand_types + damage_result["liqdemandunits"] = liq_demand_units + damage_result["liqhazval"] = liq_hazard_vals + damage_result["liqprobability"] = liquefaction_prob else: - damage_result['liq_fragility_id'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazval'] = None - damage_result['liqprobability'] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None facility_results.append(facility_result) damage_results.append(damage_result) @@ -316,18 +366,18 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard for facility in unmapped_waterfacilities: facility_result = dict() damage_result = dict() - facility_result['guid'] = facility['properties']['guid'] - damage_result['guid'] = facility['properties']['guid'] - 
damage_result['fragility_id'] = None - damage_result['demandtypes'] = None - damage_result['demandunits'] = None - damage_result['hazardtype'] = None - damage_result['hazardvals'] = None - damage_result['liq_fragility_id'] = None - damage_result['liqdemandtypes'] = None - damage_result['liqdemandunits'] = None - damage_result['liqhazval'] = None - damage_result['liqprobability'] = None + facility_result["guid"] = facility["properties"]["guid"] + damage_result["guid"] = facility["properties"]["guid"] + damage_result["fragility_id"] = None + damage_result["demandtypes"] = None + damage_result["demandunits"] = None + damage_result["hazardtype"] = None + damage_result["hazardvals"] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None facility_results.append(facility_result) damage_results.append(damage_result) @@ -336,102 +386,101 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard, hazard def get_spec(self): return { - 'name': 'water-facility-damage', - 'description': 'water facility damage analysis', - 'input_parameters': [ + "name": "water-facility-damage", + "description": "water facility damage analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': False, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": False, + "description": "result dataset name", + "type": str, }, { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. earthquake)', - 'type': str + "id": "hazard_type", + "required": False, + "description": "Hazard Type (e.g. earthquake)", + "type": str, }, { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str + "id": "hazard_id", + "required": False, + "description": "Hazard ID", + "type": str, }, { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, }, { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, }, - { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Liquefaction geology/susceptibility dataset id. ' - 'If not provided, liquefaction will be ignored', - 'type': str + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Liquefaction geology/susceptibility dataset id. 
" + "If not provided, liquefaction will be ignored", + "type": str, }, { - 'id': 'liquefaction_fragility_key', - 'required': False, - 'description': 'Fragility key to use in liquefaction mapping dataset', - 'type': str + "id": "liquefaction_fragility_key", + "required": False, + "description": "Fragility key to use in liquefaction mapping dataset", + "type": str, }, { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, }, { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, }, ], - 'input_hazards': [ + "input_hazards": [ { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tsunami"] + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake", "tsunami"], }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'water_facilities', - 'required': True, - 'description': 'Water Facility Inventory', - 'type': ['ergo:waterFacilityTopo'], + "id": "water_facilities", + "required": True, + "description": "Water Facility Inventory", + "type": ["ergo:waterFacilityTopo"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - } + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'result', - 'parent_type': 'water_facilities', - 'description': 'A csv file with limit state probabilities and damage states ' - 'for each water facility', - 'type': 'ergo:waterFacilityDamageVer6' + "id": "result", + "parent_type": "water_facilities", + "description": "A csv file with limit state probabilities and damage states " + "for each water facility", + "type": "ergo:waterFacilityDamageVer6", }, { - 'id': 'metadata', - 'parent_type': 'water_facilities', - 'description': 'additional metadata in json file about applied hazard value and ' - 'fragility', - 'type': 'incore:waterFacilityDamageSupplement' - } - ] + "id": "metadata", + "parent_type": "water_facilities", + "description": "additional metadata in json file about applied hazard value and " + "fragility", + "type": "incore:waterFacilityDamageSupplement", + }, + ], } diff --git a/pyincore/analyses/waterfacilityrepaircost/__init__.py b/pyincore/analyses/waterfacilityrepaircost/__init__.py index 8335e745c..9365e95e8 100644 --- a/pyincore/analyses/waterfacilityrepaircost/__init__.py +++ b/pyincore/analyses/waterfacilityrepaircost/__init__.py @@ -1 +1,3 @@ -from pyincore.analyses.waterfacilityrepaircost.waterfacilityrepaircost import WaterFacilityRepairCost +from pyincore.analyses.waterfacilityrepaircost.waterfacilityrepaircost import ( + WaterFacilityRepairCost, +) diff --git a/pyincore/analyses/waterfacilityrepaircost/waterfacilityrepaircost.py b/pyincore/analyses/waterfacilityrepaircost/waterfacilityrepaircost.py index 22f95e7f8..27f481071 100644 --- a/pyincore/analyses/waterfacilityrepaircost/waterfacilityrepaircost.py +++ b/pyincore/analyses/waterfacilityrepaircost/waterfacilityrepaircost.py @@ -24,9 +24,15 @@ def __init__(self, incore_client): def run(self): """Executes water 
facility repair cost analysis.""" - wf_df = self.get_input_dataset("water_facilities").get_dataframe_from_shapefile() - sample_damage_states_df = self.get_input_dataset("sample_damage_states").get_dataframe_from_csv() - replacement_cost = self.get_input_dataset("replacement_cost").get_dataframe_from_csv() + wf_df = self.get_input_dataset( + "water_facilities" + ).get_dataframe_from_shapefile() + sample_damage_states_df = self.get_input_dataset( + "sample_damage_states" + ).get_dataframe_from_csv() + replacement_cost = self.get_input_dataset( + "replacement_cost" + ).get_dataframe_from_csv() # join damage state, replacement cost, with original inventory wf_df = wf_df.merge(sample_damage_states_df, on="guid") @@ -34,22 +40,32 @@ def run(self): wf_set = wf_df.to_dict(orient="records") user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): user_defined_cpu = self.get_parameter("num_cpu") - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(wf_set), user_defined_cpu) + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(wf_set), user_defined_cpu + ) avg_bulk_input_size = int(len(wf_set) / num_workers) inventory_args = [] count = 0 inventory_list = list(wf_set) while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) count += avg_bulk_input_size - repair_costs = self.wf_repair_cost_concurrent_future(self.wf_repair_cost_bulk_input, num_workers, - inventory_args) - self.set_result_csv_data("result", repair_costs, name=self.get_parameter("result_name") + "_repair_cost") + repair_costs = self.wf_repair_cost_concurrent_future( + self.wf_repair_cost_bulk_input, num_workers, inventory_args + ) + self.set_result_csv_data( + "result", + repair_costs, + name=self.get_parameter("result_name") + "_repair_cost", + ) return True @@ -67,7 +83,9 @@ def wf_repair_cost_concurrent_future(self, function_name, num_workers, *args): """ output = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + with concurrent.futures.ProcessPoolExecutor( + max_workers=num_workers + ) as executor: for ret1 in executor.map(function_name, *args): output.extend(ret1) @@ -85,7 +103,9 @@ def wf_repair_cost_bulk_input(self, water_facilities): """ # read in the damage ratio tables wf_dmg_ratios_csv = self.get_input_dataset("wf_dmg_ratios").get_csv_reader() - dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows(wf_dmg_ratios_csv, ignore_first_row=False) + dmg_ratio_tbl = AnalysisUtil.get_csv_table_rows( + wf_dmg_ratios_csv, ignore_first_row=False + ) repair_costs = [] @@ -99,7 +119,10 @@ def wf_repair_cost_bulk_input(self, water_facilities): for n, ds in enumerate(sample_damage_states): for dmg_ratio_row in dmg_ratio_tbl: # use "in" instead of "==" since some inventory has pending number (e.g. 
EDC2) - if dmg_ratio_row["Inventory Type"] in wf_type and dmg_ratio_row["Damage State"] == ds: + if ( + dmg_ratio_row["Inventory Type"] in wf_type + and dmg_ratio_row["Damage State"] == ds + ): dr = float(dmg_ratio_row["Best Mean Damage Ratio"]) repair_cost[n] = str(wf["replacement_cost"] * dr) @@ -125,13 +148,13 @@ def get_spec(self): "id": "result_name", "required": True, "description": "A name of the resulting dataset", - "type": str + "type": str, }, { "id": "num_cpu", "required": False, "description": "If using parallel execution, the number of cpus to request.", - "type": int + "type": int, }, ], "input_datasets": [ @@ -151,13 +174,13 @@ def get_spec(self): "id": "sample_damage_states", "required": True, "description": "sample damage states from Monte Carlo Simulation", - "type": ["incore:sampleDamageState"] + "type": ["incore:sampleDamageState"], }, { "id": "wf_dmg_ratios", "required": True, "description": "Damage Ratios table", - "type": ["incore:waterFacilityDamageRatios"] + "type": ["incore:waterFacilityDamageRatios"], }, ], "output_datasets": [ @@ -165,7 +188,7 @@ def get_spec(self): "id": "result", "parent_type": "water_facilities", "description": "A csv file with repair cost for each water facility", - "type": "incore:repairCost" + "type": "incore:repairCost", } - ] + ], } diff --git a/pyincore/analyses/waterfacilityrestoration/__init__.py b/pyincore/analyses/waterfacilityrestoration/__init__.py index 0b70bb1ec..1ef18ae6c 100644 --- a/pyincore/analyses/waterfacilityrestoration/__init__.py +++ b/pyincore/analyses/waterfacilityrestoration/__init__.py @@ -3,5 +3,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore.analyses.waterfacilityrestoration.waterfacilityrestoration import WaterFacilityRestoration -from pyincore.analyses.waterfacilityrestoration.waterfacilityrestorationutil import WaterFacilityRestorationUtil +from pyincore.analyses.waterfacilityrestoration.waterfacilityrestoration import ( + WaterFacilityRestoration, +) +from pyincore.analyses.waterfacilityrestoration.waterfacilityrestorationutil import ( + WaterFacilityRestorationUtil, +) diff --git a/pyincore/analyses/waterfacilityrestoration/waterfacilityrestoration.py b/pyincore/analyses/waterfacilityrestoration/waterfacilityrestoration.py index f864af626..d48e141bb 100644 --- a/pyincore/analyses/waterfacilityrestoration/waterfacilityrestoration.py +++ b/pyincore/analyses/waterfacilityrestoration/waterfacilityrestoration.py @@ -15,9 +15,7 @@ class WaterFacilityRestoration(BaseAnalysis): - """Computes water facility restoration for an earthquake, tsunami, tornado, or hurricane exposure. 
- - """ + """Computes water facility restoration for an earthquake, tsunami, tornado, or hurricane exposure.""" def __init__(self, incore_client): self.restorationsvc = RestorationService(incore_client) @@ -30,7 +28,9 @@ def run(self): Returns: bool: True if successful, False otherwise """ - inventory_list = list(self.get_input_dataset("water_facilities").get_inventory_reader()) + inventory_list = list( + self.get_input_dataset("water_facilities").get_inventory_reader() + ) mapping_set = self.get_input_dataset("dfr3_mapping_set") restoration_key = self.get_parameter("restoration_key") @@ -55,29 +55,68 @@ def run(self): if self.get_input_dataset("damage") is not None: damage = self.get_input_dataset("damage").get_csv_reader() - damage_result = AnalysisUtil.get_csv_table_rows(damage, ignore_first_row=False) + damage_result = AnalysisUtil.get_csv_table_rows( + damage, ignore_first_row=False + ) else: damage_result = None - (inventory_restoration_map, pf_results, time_results, func_results, repair_times) = \ - self.waterfacility_restoration(inventory_list, damage_result, mapping_set, restoration_key, end_time, - time_interval, pf_interval, discretized_days) - - self.set_result_csv_data("inventory_restoration_map", inventory_restoration_map, - name="inventory_restoration_map_" + self.get_parameter("result_name")) - self.set_result_csv_data("pf_results", time_results, name="percentage_of_functionality_" + - self.get_parameter("result_name")) - self.set_result_csv_data("time_results", pf_results, name="reptime_" + self.get_parameter("result_name")) - self.set_result_csv_data("func_results", func_results, - name=self.get_parameter("result_name") + "_discretized_restoration") - self.set_result_csv_data("repair_times", repair_times, name="full_reptime_" + self.get_parameter( - "result_name")) + ( + inventory_restoration_map, + pf_results, + time_results, + func_results, + repair_times, + ) = self.waterfacility_restoration( + inventory_list, + damage_result, + mapping_set, + restoration_key, + end_time, + time_interval, + pf_interval, + discretized_days, + ) + + self.set_result_csv_data( + "inventory_restoration_map", + inventory_restoration_map, + name="inventory_restoration_map_" + self.get_parameter("result_name"), + ) + self.set_result_csv_data( + "pf_results", + time_results, + name="percentage_of_functionality_" + self.get_parameter("result_name"), + ) + self.set_result_csv_data( + "time_results", + pf_results, + name="reptime_" + self.get_parameter("result_name"), + ) + self.set_result_csv_data( + "func_results", + func_results, + name=self.get_parameter("result_name") + "_discretized_restoration", + ) + self.set_result_csv_data( + "repair_times", + repair_times, + name="full_reptime_" + self.get_parameter("result_name"), + ) return True - def waterfacility_restoration(self, inventory_list, damage_result, mapping_set, restoration_key, end_time, - time_interval, pf_interval, discretized_days): - + def waterfacility_restoration( + self, + inventory_list, + damage_result, + mapping_set, + restoration_key, + end_time, + time_interval, + pf_interval, + discretized_days, + ): """Gets applicable restoration curve set and calculates restoration time and functionality Args: @@ -103,21 +142,30 @@ def waterfacility_restoration(self, inventory_list, damage_result, mapping_set, inventory_class_map = {} restoration_sets = self.restorationsvc.match_inventory( - self.get_input_dataset("dfr3_mapping_set"), inventory_list, restoration_key) + self.get_input_dataset("dfr3_mapping_set"), inventory_list, 
restoration_key + ) for inventory in inventory_list: if inventory["id"] in restoration_sets.keys(): restoration_set_id = restoration_sets[inventory["id"]].id else: restoration_set_id = None - inventory_restoration_map.append({"guid": inventory['properties']['guid'], - "restoration_id": restoration_set_id}) + inventory_restoration_map.append( + { + "guid": inventory["properties"]["guid"], + "restoration_id": restoration_set_id, + } + ) if inventory["id"] in restoration_sets.keys(): restoration_curve_set = restoration_sets[inventory["id"]] # For each facility, get the discretized restoration from the continuous curve - discretized_restoration = AnalysisUtil.get_discretized_restoration(restoration_curve_set, discretized_days) - inventory_class_map[inventory['properties']['guid']] = discretized_restoration + discretized_restoration = AnalysisUtil.get_discretized_restoration( + restoration_curve_set, discretized_days + ) + inventory_class_map[ + inventory["properties"]["guid"] + ] = discretized_restoration time_results = [] pf_results = [] @@ -129,52 +177,72 @@ def waterfacility_restoration(self, inventory_list, damage_result, mapping_set, # if it's string:id; then need to fetch it from remote and cast to restorationcurveset object restoration_curve_set = mapping.entry[restoration_key] if isinstance(restoration_curve_set, str): - restoration_curve_set = RestorationCurveSet(self.restorationsvc.get_dfr3_set(restoration_curve_set)) + restoration_curve_set = RestorationCurveSet( + self.restorationsvc.get_dfr3_set(restoration_curve_set) + ) # given time calculate pf time = np.arange(0, end_time + time_interval, time_interval) for t in time: - pf_results.append({ - "restoration_id": restoration_curve_set.id, - "time": t, - **restoration_curve_set.calculate_restoration_rates(time=t) - }) + pf_results.append( + { + "restoration_id": restoration_curve_set.id, + "time": t, + **restoration_curve_set.calculate_restoration_rates(time=t), + } + ) # given pf calculate time pf = np.arange(0, 1 + pf_interval, pf_interval) for p in pf: new_dict = {} - t_res = restoration_curve_set.calculate_inverse_restoration_rates(time=p) + t_res = restoration_curve_set.calculate_inverse_restoration_rates( + time=p + ) for key, value in t_res.items(): new_dict.update({"time_" + key: value}) - time_results.append({ - "restoration_id": restoration_curve_set.id, - "percentage_of_functionality": p, - **new_dict - }) - - repair_time[restoration_curve_set.id] = \ - restoration_curve_set.calculate_inverse_restoration_rates(time=0.99) + time_results.append( + { + "restoration_id": restoration_curve_set.id, + "percentage_of_functionality": p, + **new_dict, + } + ) + + repair_time[ + restoration_curve_set.id + ] = restoration_curve_set.calculate_inverse_restoration_rates(time=0.99) # Compute discretized restoration func_result = [] if damage_result is not None: for dmg in damage_result: - guid = dmg['guid'] + guid = dmg["guid"] # Dictionary of discretized restoration functionality if guid in inventory_class_map.keys(): rest_dict = inventory_class_map[guid] - ds_0, ds_1, ds_2, ds_3, ds_4 = dmg['DS_0'], dmg['DS_1'], dmg['DS_2'], dmg['DS_3'], dmg['DS_4'] + ds_0, ds_1, ds_2, ds_3, ds_4 = ( + dmg["DS_0"], + dmg["DS_1"], + dmg["DS_2"], + dmg["DS_3"], + dmg["DS_4"], + ) result_dict = {} for time in discretized_days: key = "day" + str(time) # Only compute if we have damage if ds_0: - functionality = (rest_dict[key][0] * float(ds_0) + rest_dict[key][1] * float(ds_1) + rest_dict[ - key][2] * float(ds_2) + rest_dict[key][3] * float(ds_3) + 
rest_dict[key][4] * float(ds_4)) + functionality = ( + rest_dict[key][0] * float(ds_0) + + rest_dict[key][1] * float(ds_1) + + rest_dict[key][2] * float(ds_2) + + rest_dict[key][3] * float(ds_3) + + rest_dict[key][4] * float(ds_4) + ) result_dict.update({str(key): functionality}) func_result.append({"guid": guid, **result_dict}) @@ -182,108 +250,117 @@ def waterfacility_restoration(self, inventory_list, damage_result, mapping_set, repair_times = [] for inventory in inventory_restoration_map: if inventory["restoration_id"] is not None: - repair_times.append({"guid": inventory["guid"], **repair_time[inventory["restoration_id"]]}) - - return inventory_restoration_map, pf_results, time_results, func_result, repair_times + repair_times.append( + { + "guid": inventory["guid"], + **repair_time[inventory["restoration_id"]], + } + ) + + return ( + inventory_restoration_map, + pf_results, + time_results, + func_result, + repair_times, + ) def get_spec(self): return { - 'name': 'water-facility-restoration', - 'description': 'water facility restoration analysis', - 'input_parameters': [ + "name": "water-facility-restoration", + "description": "water facility restoration analysis", + "input_parameters": [ { - 'id': 'restoration_key', - 'required': False, - 'description': 'restoration key to use in mapping dataset', - 'type': str + "id": "restoration_key", + "required": False, + "description": "restoration key to use in mapping dataset", + "type": str, }, { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'end_time', - 'required': False, - 'description': 'end time in days. Default to 365.', - 'type': float + "id": "end_time", + "required": False, + "description": "end time in days. Default to 365.", + "type": float, }, { - 'id': 'time_interval', - 'required': False, - 'description': 'incremental interval for time in days. Default to 1', - 'type': float + "id": "time_interval", + "required": False, + "description": "incremental interval for time in days. Default to 1", + "type": float, }, { - 'id': 'pf_interval', - 'required': False, - 'description': 'incremental interval for percentage of functionality. Default to 0.05', - 'type': float + "id": "pf_interval", + "required": False, + "description": "incremental interval for percentage of functionality. 
Default to 0.05", + "type": float, }, { - 'id': 'discretized_days', - 'required': False, - 'description': 'Discretized days to compute functionality', - 'type': List[int] - } - + "id": "discretized_days", + "required": False, + "description": "Discretized days to compute functionality", + "type": List[int], + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'water_facilities', - 'required': True, - 'description': 'Water Facility Inventory', - 'type': ['ergo:waterFacilityTopo'], + "id": "water_facilities", + "required": True, + "description": "Water Facility Inventory", + "type": ["ergo:waterFacilityTopo"], }, { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], }, { - - 'id': 'damage', - 'required': False, - 'description': 'damage result that has damage intervals in it', - 'type': ['ergo:waterFacilityDamageVer6'] - } + "id": "damage", + "required": False, + "description": "damage result that has damage intervals in it", + "type": ["ergo:waterFacilityDamageVer6"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': "inventory_restoration_map", - 'parent_type': '', - 'description': 'A csv file recording the mapping relationship between GUID and restoration id ' - 'applicable.', - 'type': 'incore:inventoryRestorationMap' + "id": "inventory_restoration_map", + "parent_type": "", + "description": "A csv file recording the mapping relationship between GUID and restoration id " + "applicable.", + "type": "incore:inventoryRestorationMap", }, { - 'id': 'pf_results', - 'parent_type': '', - 'description': 'A csv file recording functionality change with time for each class and limit ' - 'state.', - 'type': 'incore:waterFacilityRestorationFunc' + "id": "pf_results", + "parent_type": "", + "description": "A csv file recording functionality change with time for each class and limit " + "state.", + "type": "incore:waterFacilityRestorationFunc", }, { - 'id': 'time_results', - 'parent_type': '', - 'description': 'A csv file recording repair time at certain functionality recovery for each class ' - 'and limit state.', - 'type': 'incore:waterFacilityRestorationTime' + "id": "time_results", + "parent_type": "", + "description": "A csv file recording repair time at certain functionality recovery for each class " + "and limit state.", + "type": "incore:waterFacilityRestorationTime", }, { - 'id': 'func_results', - 'parent_type': '', - 'description': 'A csv file recording discretized functionality over time', - 'type': 'incore:waterFacilityDiscretizedRestorationFunc' + "id": "func_results", + "parent_type": "", + "description": "A csv file recording discretized functionality over time", + "type": "incore:waterFacilityDiscretizedRestorationFunc", }, { - 'id': 'repair_times', - 'parent_type': '', - 'description': 'A csv file recording repair time at full functionality recovery for each guid ' - 'and limit state.', - 'type': 'incore:waterFacilityRepairTime' - } - ] + "id": "repair_times", + "parent_type": "", + "description": "A csv file recording repair time at full functionality recovery for each guid " + "and limit state.", + "type": "incore:waterFacilityRepairTime", + }, + ], } diff --git a/pyincore/analyses/waterfacilityrestoration/waterfacilityrestorationutil.py b/pyincore/analyses/waterfacilityrestoration/waterfacilityrestorationutil.py index 345e2ec23..aa4feb731 100644 --- 
a/pyincore/analyses/waterfacilityrestoration/waterfacilityrestorationutil.py +++ b/pyincore/analyses/waterfacilityrestoration/waterfacilityrestorationutil.py @@ -1,16 +1,32 @@ -from pyincore.analyses.waterfacilityrestoration import WaterFacilityRestoration +# Copyright (c) 2024 University of Illinois and others. All rights reserved. +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ -class WaterFacilityRestorationUtil: - def __init__(self, inventory_restoration_map, pf_results, time_results, time_interval, pf_interval, end_time): +class WaterFacilityRestorationUtil: + def __init__( + self, + inventory_restoration_map, + pf_results, + time_results, + time_interval, + pf_interval, + end_time, + ): # merge inventory_restoration_map with pf and timetables - inventory_restoration_map_df = inventory_restoration_map.get_dataframe_from_csv() + inventory_restoration_map_df = ( + inventory_restoration_map.get_dataframe_from_csv() + ) pf_results_df = pf_results.get_dataframe_from_csv() time_results_df = time_results.get_dataframe_from_csv() - self.pf_results_df = inventory_restoration_map_df.merge(pf_results_df, on="restoration_id").set_index('guid') - self.time_results_df = inventory_restoration_map_df.merge(time_results_df, on="restoration_id").set_index( - 'guid') + self.pf_results_df = inventory_restoration_map_df.merge( + pf_results_df, on="restoration_id" + ).set_index("guid") + self.time_results_df = inventory_restoration_map_df.merge( + time_results_df, on="restoration_id" + ).set_index("guid") self.time_interval = time_interval self.pf_interval = pf_interval @@ -23,18 +39,25 @@ def get_restoration_time(self, guid, damage_state="DS_0", pf=0.99): state = "time_" + damage_state.replace("DS", "PF") df = self.pf_results_df.loc[guid].reset_index(drop=True) # round up and get the closest - time = df.loc[(df["percentage_of_functionality"] >= pf) & (df["percentage_of_functionality"] < - pf+self.pf_interval), state].values[0] + time = df.loc[ + (df["percentage_of_functionality"] >= pf) + & (df["percentage_of_functionality"] < pf + self.pf_interval), + state, + ].values[0] return time def get_percentage_func(self, guid, damage_state="DS_0", time=1): if time > self.end_time: - raise ValueError("restore time should not be larger than end time for restoration model!") + raise ValueError( + "restore time should not be larger than end time for restoration model!" 
+ ) state = damage_state.replace("DS", "PF") df = self.time_results_df.loc[guid].reset_index(drop=True) # round up and get the closest - pf = df.loc[(df["time"] >= time) & df['time'] < time+self.time_interval, state].values[0] + pf = df.loc[ + (df["time"] >= time) & df["time"] < time + self.time_interval, state + ].values[0] return pf diff --git a/pyincore/analyses/wfnfunctionality/wfnfunctionality.py b/pyincore/analyses/wfnfunctionality/wfnfunctionality.py index 95987e48f..80257bb12 100644 --- a/pyincore/analyses/wfnfunctionality/wfnfunctionality.py +++ b/pyincore/analyses/wfnfunctionality/wfnfunctionality.py @@ -25,79 +25,126 @@ def __init__(self, incore_client): super(WfnFunctionality, self).__init__(incore_client) def run(self): - """Execute water facility network functionality analysis """ + """Execute water facility network functionality analysis""" # Obtain tank nodes - tank_nodes = self.get_parameter('tank_node_list') + tank_nodes = self.get_parameter("tank_node_list") # Obtain pump station nodes - pumpstation_nodes = self.get_parameter('pumpstation_node_list') + pumpstation_nodes = self.get_parameter("pumpstation_node_list") # Get network dataset - network_dataset = NetworkDataset.from_dataset(self.get_input_dataset('wfn_network')) + network_dataset = NetworkDataset.from_dataset( + self.get_input_dataset("wfn_network") + ) edges_wfl_gdf = network_dataset.links.get_dataframe_from_shapefile() nodes_wfn_gdf = network_dataset.nodes.get_dataframe_from_shapefile() - edges_wfl_gdf['weight'] = edges_wfl_gdf.loc[:, 'length'] + edges_wfl_gdf["weight"] = edges_wfl_gdf.loc[:, "length"] G_wfn = network_dataset.get_graph_networkx() # network test - fromnode_fld_name = 'fromnode' - tonode_fld_name = 'tonode' - nodenwid_fld_name = 'nodenwid' + fromnode_fld_name = "fromnode" + tonode_fld_name = "tonode" + nodenwid_fld_name = "nodenwid" node_id_validation = NetworkUtil.validate_network_node_ids( - network_dataset, fromnode_fld_name, tonode_fld_name, nodenwid_fld_name) + network_dataset, fromnode_fld_name, tonode_fld_name, nodenwid_fld_name + ) if node_id_validation is False: print("ID in from or to node field doesn't exist in the node dataset") return False # Get water facility damage states - wf_dmg_fs = self.get_input_dataset('wf_sample_failure_state').get_dataframe_from_csv() + wf_dmg_fs = self.get_input_dataset( + "wf_sample_failure_state" + ).get_dataframe_from_csv() wf_sample_df = pd.DataFrame( - np.array([np.array(wf_dmg_fs.failure.values[i].split(',')).astype('int') - for i in np.arange(wf_dmg_fs.shape[0])]), - index=wf_dmg_fs.guid.values) + np.array( + [ + np.array(wf_dmg_fs.failure.values[i].split(",")).astype("int") + for i in np.arange(wf_dmg_fs.shape[0]) + ] + ), + index=wf_dmg_fs.guid.values, + ) # Get pipeline damage states - pp_dmg_fs = self.get_input_dataset('pp_sample_failure_state').get_dataframe_from_csv() + pp_dmg_fs = self.get_input_dataset( + "pp_sample_failure_state" + ).get_dataframe_from_csv() pp_sample_df = pd.DataFrame( - np.array([np.array(pp_dmg_fs.failure.values[i].split(',')).astype('int') - for i in np.arange(pp_dmg_fs.shape[0])]), - index=pp_dmg_fs.guid.values) + np.array( + [ + np.array(pp_dmg_fs.failure.values[i].split(",")).astype("int") + for i in np.arange(pp_dmg_fs.shape[0]) + ] + ), + index=pp_dmg_fs.guid.values, + ) # Get the sample number num_samples = wf_sample_df.shape[1] - sampcols = ['s' + samp for samp in np.arange(num_samples).astype(str)] + sampcols = ["s" + samp for samp in np.arange(num_samples).astype(str)] # Compose the corresponding dataframes 
based on columns for water facilities and pipelines wf_sample_df.columns = sampcols - wf_sample_df1 = nodes_wfn_gdf.loc[:, ['guid', 'nodenwid']].set_index('guid').join(wf_sample_df) + wf_sample_df1 = ( + nodes_wfn_gdf.loc[:, ["guid", "nodenwid"]] + .set_index("guid") + .join(wf_sample_df) + ) wf_sample_df1 = wf_sample_df1.fillna(1) pp_sample_df.columns = sampcols - pp_sample_df1 = edges_wfl_gdf.loc[:, ['guid', 'fromnode', 'tonode']].set_index('guid').join(pp_sample_df) + pp_sample_df1 = ( + edges_wfl_gdf.loc[:, ["guid", "fromnode", "tonode"]] + .set_index("guid") + .join(pp_sample_df) + ) # Obtain distribution nodes based on user input - distribution_nodes = list(set(list(G_wfn.nodes)) - set(tank_nodes) - set(pumpstation_nodes)) - - (fs_results, fp_results) = self.wfn_functionality(distribution_nodes, pumpstation_nodes, num_samples, - sampcols, wf_sample_df1, pp_sample_df1, G_wfn) - - self.set_result_csv_data("sample_failure_state", - fs_results, name=self.get_parameter("result_name") + "_failure_state", - source="dataframe") - self.set_result_csv_data("failure_probability", - fp_results, - name=self.get_parameter("result_name") + "_failure_probability", - source="dataframe") + distribution_nodes = list( + set(list(G_wfn.nodes)) - set(tank_nodes) - set(pumpstation_nodes) + ) + + (fs_results, fp_results) = self.wfn_functionality( + distribution_nodes, + pumpstation_nodes, + num_samples, + sampcols, + wf_sample_df1, + pp_sample_df1, + G_wfn, + ) + + self.set_result_csv_data( + "sample_failure_state", + fs_results, + name=self.get_parameter("result_name") + "_failure_state", + source="dataframe", + ) + self.set_result_csv_data( + "failure_probability", + fp_results, + name=self.get_parameter("result_name") + "_failure_probability", + source="dataframe", + ) return True - def wfn_functionality(self, distribution_nodes, pumpstation_nodes, num_samples, sampcols, wf_sample_df1, - pp_sample_df1, G_wfn): + def wfn_functionality( + self, + distribution_nodes, + pumpstation_nodes, + num_samples, + sampcols, + wf_sample_df1, + pp_sample_df1, + G_wfn, + ): """ Run Water facility network functionality analysis. 
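# --- Illustrative note (not part of the patch): the run() hunk above expands the
# Monte Carlo "failure" column, a comma-separated string of 0/1 flags per sample,
# into a per-sample DataFrame indexed by guid. A minimal sketch of that expansion,
# using hypothetical guids and sample values (the real inputs come from the
# wf_sample_failure_state / pp_sample_failure_state datasets):
import numpy as np
import pandas as pd

wf_dmg_fs = pd.DataFrame(
    {"guid": ["a-1", "b-2"], "failure": ["1,0,1", "0,0,1"]}
)
wf_sample_df = pd.DataFrame(
    np.array(
        [
            np.array(wf_dmg_fs.failure.values[i].split(",")).astype("int")
            for i in np.arange(wf_dmg_fs.shape[0])
        ]
    ),
    index=wf_dmg_fs.guid.values,
)
# wf_sample_df now has one row per guid and one column per sample (0, 1, 2);
# run() then renames those columns to "s0", "s1", ... via sampcols before joining
# them with the node and link GeoDataFrames.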
@@ -119,38 +166,52 @@ def wfn_functionality(self, distribution_nodes, pumpstation_nodes, num_samples, # a distance of M denotes disconnection M = 9999 - func_wf_df = pd.DataFrame(np.zeros((len(distribution_nodes), num_samples)), index=distribution_nodes, - columns=sampcols) + func_wf_df = pd.DataFrame( + np.zeros((len(distribution_nodes), num_samples)), + index=distribution_nodes, + columns=sampcols, + ) for si, scol in enumerate(sampcols): - nodestate_wfn = wf_sample_df1.loc[:, ['nodenwid', scol]] - linkstate_wfn = pp_sample_df1.loc[:, ['fromnode', 'tonode', scol]] - badlinks_wfn = WfnFunctionalityUtil.get_bad_edges(G_wfn, nodestate_wfn, linkstate_wfn, scol) - badlinkdict_wfn = {k: {'weight': M} for k in badlinks_wfn} + nodestate_wfn = wf_sample_df1.loc[:, ["nodenwid", scol]] + linkstate_wfn = pp_sample_df1.loc[:, ["fromnode", "tonode", scol]] + badlinks_wfn = WfnFunctionalityUtil.get_bad_edges( + G_wfn, nodestate_wfn, linkstate_wfn, scol + ) + badlinkdict_wfn = {k: {"weight": M} for k in badlinks_wfn} G1_wfn = copy.deepcopy(G_wfn) nx.set_edge_attributes(G1_wfn, badlinkdict_wfn) - res_ep = WfnFunctionalityUtil.network_shortest_paths(G1_wfn, pumpstation_nodes, distribution_nodes) + res_ep = WfnFunctionalityUtil.network_shortest_paths( + G1_wfn, pumpstation_nodes, distribution_nodes + ) func_wf_df.loc[distribution_nodes, scol] = (res_ep < M) * 1 # Use nodenwid index to get its guid func_wf_df.index = func_wf_df.index.map(np.int64) - fs_temp = pd.merge(func_wf_df, wf_sample_df1["nodenwid"], left_index=True, right_on="nodenwid", - how='left').drop(columns=["nodenwid"]) + fs_temp = pd.merge( + func_wf_df, + wf_sample_df1["nodenwid"], + left_index=True, + right_on="nodenwid", + how="left", + ).drop(columns=["nodenwid"]) fp_temp = fs_temp.copy(deep=True) # shape the dataframe into failure probability and failure samples - fs_temp['failure'] = fs_temp.astype(str).apply(','.join, axis=1) - fs_results = fs_temp.filter(['failure']) + fs_temp["failure"] = fs_temp.astype(str).apply(",".join, axis=1) + fs_results = fs_temp.filter(["failure"]) fs_results.reset_index(inplace=True) - fs_results = fs_results.rename(columns={'index': 'guid'}) + fs_results = fs_results.rename(columns={"index": "guid"}) # calculate failure probability # count of 0s divided by sample size - fp_temp["failure_probability"] = (num_samples - fp_temp.sum(axis=1).astype(int)) / num_samples - fp_results = fp_temp.filter(['failure_probability']) + fp_temp["failure_probability"] = ( + num_samples - fp_temp.sum(axis=1).astype(int) + ) / num_samples + fp_results = fp_temp.filter(["failure_probability"]) fp_results.reset_index(inplace=True) - fp_results = fp_results.rename(columns={'index': 'guid'}) + fp_results = fp_results.rename(columns={"index": "guid"}) return fs_results, fp_results @@ -160,58 +221,58 @@ def get_spec(self): obj: A JSON object of specifications of the WFN functionality analysis. 
""" return { - 'name': 'wfn-functionality', - 'description': 'water facility network functionality analysis', - 'input_parameters': [ + "name": "wfn-functionality", + "description": "water facility network functionality analysis", + "input_parameters": [ { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, }, { - 'id': 'tank_node_list', - 'required': True, - 'description': "list of tank nodes", - 'type': List[int] + "id": "tank_node_list", + "required": True, + "description": "list of tank nodes", + "type": List[int], }, { - 'id': 'pumpstation_node_list', - 'required': True, - 'description': "list of pump station nodes", - 'type': List[int] - } + "id": "pumpstation_node_list", + "required": True, + "description": "list of pump station nodes", + "type": List[int], + }, ], - 'input_datasets': [ + "input_datasets": [ { - 'id': 'wfn_network', - 'required': True, - 'description': 'Water Facility Network Dataset', - 'type': ['incore:waterNetwork'], + "id": "wfn_network", + "required": True, + "description": "Water Facility Network Dataset", + "type": ["incore:waterNetwork"], }, { - 'id': 'wf_sample_failure_state', - 'required': True, - 'description': 'CSV file of failure state for each sample. Output from MCS analysis', - 'type': ['incore:sampleFailureState'] + "id": "wf_sample_failure_state", + "required": True, + "description": "CSV file of failure state for each sample. Output from MCS analysis", + "type": ["incore:sampleFailureState"], }, { - 'id': 'pp_sample_failure_state', - 'required': True, - 'description': 'CSV file of failure state for each sample. Output from MCS analysis', - 'type': ['incore:sampleFailureState'] - } + "id": "pp_sample_failure_state", + "required": True, + "description": "CSV file of failure state for each sample. 
Output from MCS analysis", + "type": ["incore:sampleFailureState"], + }, ], - 'output_datasets': [ + "output_datasets": [ { - 'id': 'failure_probability', - 'description': 'CSV file of failure probability', - 'type': 'incore:failureProbability' + "id": "failure_probability", + "description": "CSV file of failure probability", + "type": "incore:failureProbability", }, { - 'id': 'sample_failure_state', - 'description': 'CSV file of failure state for each sample', - 'type': 'incore:sampleFailureState' + "id": "sample_failure_state", + "description": "CSV file of failure state for each sample", + "type": "incore:sampleFailureState", }, - ] + ], } diff --git a/pyincore/analyses/wfnfunctionality/wfnfunctionalityutil.py b/pyincore/analyses/wfnfunctionality/wfnfunctionalityutil.py index 3fc846998..b6176c755 100644 --- a/pyincore/analyses/wfnfunctionality/wfnfunctionalityutil.py +++ b/pyincore/analyses/wfnfunctionality/wfnfunctionalityutil.py @@ -10,13 +10,14 @@ class WfnFunctionalityUtil: - @staticmethod - def get_bad_edges(G, nodestate, linkstate=None, scol='s0'): - badnodes = nodestate.loc[nodestate.loc[:, scol] == 0, 'nodenwid'].values + def get_bad_edges(G, nodestate, linkstate=None, scol="s0"): + badnodes = nodestate.loc[nodestate.loc[:, scol] == 0, "nodenwid"].values if linkstate is not None: - badlinks = linkstate.loc[linkstate.loc[:, scol] == 0, ['fromnode', 'tonode']].values + badlinks = linkstate.loc[ + linkstate.loc[:, scol] == 0, ["fromnode", "tonode"] + ].values badlinks = list(zip(badlinks[:, 0], badlinks[:, 1])) else: badlinks = [] @@ -25,5 +26,9 @@ def get_bad_edges(G, nodestate, linkstate=None, scol='s0'): return list(set(badlinks)) @staticmethod - def network_shortest_paths(G, sources, sinks, weightcol='weight'): - return pd.Series(nx.multi_source_dijkstra_path_length(G, sources, cutoff=None, weight=weightcol))[sinks] + def network_shortest_paths(G, sources, sinks, weightcol="weight"): + return pd.Series( + nx.multi_source_dijkstra_path_length( + G, sources, cutoff=None, weight=weightcol + ) + )[sinks] diff --git a/pyincore/baseanalysis.py b/pyincore/baseanalysis.py index 49dba1d9a..b60c7637a 100644 --- a/pyincore/baseanalysis.py +++ b/pyincore/baseanalysis.py @@ -5,7 +5,15 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ # TODO: exception handling for validation and set methods -from pyincore import DataService, AnalysisUtil, Earthquake, Tornado, Tsunami, Hurricane, Flood +from pyincore import ( + DataService, + AnalysisUtil, + Earthquake, + Tornado, + Tsunami, + Hurricane, + Flood, +) from pyincore.dataset import Dataset import typing @@ -27,20 +35,29 @@ def __init__(self, incore_client): # initialize parameters, input_datasets, output_datasets, etc self.parameters = {} - for param in self.spec['input_parameters']: - self.parameters[param['id']] = {'spec': param, 'value': None} + for param in self.spec["input_parameters"]: + self.parameters[param["id"]] = {"spec": param, "value": None} self.input_datasets = {} - for input_dataset in self.spec['input_datasets']: - self.input_datasets[input_dataset['id']] = {'spec': input_dataset, 'value': None} + for input_dataset in self.spec["input_datasets"]: + self.input_datasets[input_dataset["id"]] = { + "spec": input_dataset, + "value": None, + } self.input_hazards = {} - if 'input_hazards' in self.spec: - for input_hazards in self.spec['input_hazards']: - self.input_hazards[input_hazards['id']] = {'spec': input_hazards, 'value': None} + if "input_hazards" in self.spec: + for input_hazards in self.spec["input_hazards"]: + 
self.input_hazards[input_hazards["id"]] = { + "spec": input_hazards, + "value": None, + } self.output_datasets = {} - for output_dataset in self.spec['output_datasets']: - self.output_datasets[output_dataset['id']] = {'spec': output_dataset, 'value': None} + for output_dataset in self.spec["output_datasets"]: + self.output_datasets[output_dataset["id"]] = { + "spec": output_dataset, + "value": None, + } def get_spec(self): """Get basic specifications. @@ -55,14 +72,11 @@ def get_spec(self): """ return { - 'name': 'base-analysis', - 'description': 'this should be replaced by analysis spec', - 'input_parameters': [ - ], - 'input_datasets': [ - ], - 'output_datasets': [ - ] + "name": "base-analysis", + "description": "this should be replaced by analysis spec", + "input_parameters": [], + "input_datasets": [], + "output_datasets": [], } def load_remote_input_dataset(self, analysis_param_id, remote_id): @@ -80,29 +94,29 @@ def load_remote_input_dataset(self, analysis_param_id, remote_id): def get_name(self): """Get the analysis name.""" - return self.spec['name'] + return self.spec["name"] def get_description(self): """Get the description of an analysis.""" - return self.spec['description'] + return self.spec["description"] def get_parameters(self): """Get the dictionary of analysis' parameters.""" param = {} for key in self.parameters.keys(): - param[key] = self.parameters[key]['value'] + param[key] = self.parameters[key]["value"] return param def get_parameter(self, par_id): """Get or set the analysis parameter value. Setting a parameter to a new value will return True or False on error.""" - return self.parameters[par_id]['value'] + return self.parameters[par_id]["value"] def set_parameter(self, par_id, parameter): - result = self.validate_parameter(self.parameters[par_id]['spec'], parameter) + result = self.validate_parameter(self.parameters[par_id]["spec"], parameter) if result[0]: - self.parameters[par_id]['value'] = parameter + self.parameters[par_id]["value"] = parameter return True else: print("Error setting parameter: " + result[1]) @@ -112,18 +126,20 @@ def get_input_datasets(self): """Get the dictionary of the input datasets of an analysis.""" inputs = {} for key in self.input_datasets.keys(): - inputs[key] = self.input_datasets[key]['value'] + inputs[key] = self.input_datasets[key]["value"] return inputs def get_input_dataset(self, ds_id): """Get or set the analysis dataset. Setting the dataset to a new value will return True or False on error.""" - return self.input_datasets[ds_id]['value'] + return self.input_datasets[ds_id]["value"] def set_input_dataset(self, ds_id, dataset): - result = self.validate_input_dataset(self.input_datasets[ds_id]['spec'], dataset) + result = self.validate_input_dataset( + self.input_datasets[ds_id]["spec"], dataset + ) if result[0]: - self.input_datasets[ds_id]['value'] = dataset + self.input_datasets[ds_id]["value"] = dataset return True else: print(result[1]) @@ -133,18 +149,18 @@ def get_input_hazards(self): """Get the dictionary of the input hazards of an analysis.""" inputs = {} for key in self.input_hazards.keys(): - inputs[key] = self.input_hazards[key]['value'] + inputs[key] = self.input_hazards[key]["value"] return inputs def get_input_hazard(self, hz_id): """Get or set the analysis dataset. 
Setting the hazard to a new value will return True or False on error.""" - return self.input_hazards[hz_id]['value'] + return self.input_hazards[hz_id]["value"] def set_input_hazard(self, hz_id, hazard): - result = self.validate_input_hazard(self.input_hazards[hz_id]['spec'], hazard) + result = self.validate_input_hazard(self.input_hazards[hz_id]["spec"], hazard) if result[0]: - self.input_hazards[hz_id]['value'] = hazard + self.input_hazards[hz_id]["value"] = hazard return True else: print(result[1]) @@ -158,11 +174,19 @@ def create_hazard_object_from_input_params(self): # either hazard object or hazard id + hazard type must be provided if hazard_object is None and (hazard_type is None or hazard_dataset_id is None): - raise ValueError("Either hazard object or hazard id + hazard type must be provided") + raise ValueError( + "Either hazard object or hazard id + hazard type must be provided" + ) # create hazard object from remote - elif hazard_object is None and hazard_type is not None and hazard_dataset_id is not None: - hazard_object = BaseAnalysis._create_hazard_object(hazard_type, hazard_dataset_id, self.hazardsvc) + elif ( + hazard_object is None + and hazard_type is not None + and hazard_dataset_id is not None + ): + hazard_object = BaseAnalysis._create_hazard_object( + hazard_type, hazard_dataset_id, self.hazardsvc + ) # use hazard object else: @@ -193,17 +217,19 @@ def get_output_datasets(self): """Get the output dataset of the analysis.""" outputs = {} for key in self.output_datasets.keys(): - outputs[key] = self.output_datasets[key]['value'] + outputs[key] = self.output_datasets[key]["value"] return outputs def get_output_dataset(self, ds_id): """Get or set the output dataset. Setting the output dataset to a new value will return True or False on error.""" - return self.output_datasets[ds_id]['value'] + return self.output_datasets[ds_id]["value"] def set_output_dataset(self, ds_id, dataset): - if self.validate_output_dataset(self.output_datasets[ds_id]['spec'], dataset)[0]: - self.output_datasets[ds_id]['value'] = dataset + if self.validate_output_dataset(self.output_datasets[ds_id]["spec"], dataset)[ + 0 + ]: + self.output_datasets[ds_id]["value"] = dataset return True else: # TODO handle error message @@ -212,19 +238,27 @@ def set_output_dataset(self, ds_id, dataset): @staticmethod def validate_parameter_nested(parameter, parameter_spec): is_valid = True - err_msg = '' + err_msg = "" - if type(parameter_spec['type']) is typing._GenericAlias: - if not (type(parameter) is parameter_spec['type'].__origin__): + if type(parameter_spec["type"]) is typing._GenericAlias: + if not (type(parameter) is parameter_spec["type"].__origin__): is_valid = False - err_msg = 'container parameter type does not match - spec: ' + str(parameter_spec) - elif not (all(isinstance(s, parameter_spec['type'].__args__[0]) for s in parameter)): + err_msg = "container parameter type does not match - spec: " + str( + parameter_spec + ) + elif not ( + all( + isinstance(s, parameter_spec["type"].__args__[0]) for s in parameter + ) + ): is_valid = False - err_msg = 'element parameter type does not match - spec: ' + str(parameter_spec) + err_msg = "element parameter type does not match - spec: " + str( + parameter_spec + ) else: - if not type(parameter) is parameter_spec['type']: + if not type(parameter) is parameter_spec["type"]: is_valid = False - err_msg = 'parameter type does not match - spec: ' + str(parameter_spec) + err_msg = "parameter type does not match - spec: " + str(parameter_spec) return is_valid, 
err_msg @@ -240,17 +274,21 @@ def validate_parameter(self, parameter_spec, parameter): """ is_valid = True - err_msg = '' + err_msg = "" - if parameter_spec['required']: + if parameter_spec["required"]: if parameter is None: is_valid = False - err_msg = 'required parameter is missing - spec: ' + str(parameter_spec) + err_msg = "required parameter is missing - spec: " + str(parameter_spec) else: - is_valid, err_msg = self.validate_parameter_nested(parameter, parameter_spec) + is_valid, err_msg = self.validate_parameter_nested( + parameter, parameter_spec + ) else: if parameter is not None: - is_valid, err_msg = self.validate_parameter_nested(parameter, parameter_spec) + is_valid, err_msg = self.validate_parameter_nested( + parameter, parameter_spec + ) return is_valid, err_msg @@ -267,21 +305,26 @@ def validate_input_dataset(dataset_spec, dataset): """ is_valid = True - err_msg = '' + err_msg = "" if not isinstance(dataset, type(None)): # if dataset is not none, check data type - if not (dataset.data_type in dataset_spec['type']): + if not (dataset.data_type in dataset_spec["type"]): # if dataset type is not equal to spec, then return false is_valid = False - err_msg = 'dataset type does not match - ' + 'given type: ' + \ - dataset.data_type + ' spec types: ' + str(dataset_spec['type']) + err_msg = ( + "dataset type does not match - " + + "given type: " + + dataset.data_type + + " spec types: " + + str(dataset_spec["type"]) + ) else: # if dataset is none, check 'requirement' - if dataset_spec['required']: + if dataset_spec["required"]: # if dataset is 'required', return false is_valid = False - err_msg = 'required dataset is missing - spec: ' + str(dataset_spec) + err_msg = "required dataset is missing - spec: " + str(dataset_spec) return is_valid, err_msg @staticmethod @@ -297,23 +340,28 @@ def validate_input_hazard(hazard_spec, hazard): """ is_valid = True - err_msg = '' + err_msg = "" if not isinstance(hazard, type(None)): # if hazard is not none, check hazard instance type is_valid = False - for hazard_type in hazard_spec['type']: + for hazard_type in hazard_spec["type"]: if hazard.hazard_type == hazard_type: is_valid = True break if not is_valid: - err_msg = 'hazard type does not match - ' + 'given type: ' + \ - hazard.hazard_type + ' spec types: ' + str(hazard_spec['type']) + err_msg = ( + "hazard type does not match - " + + "given type: " + + hazard.hazard_type + + " spec types: " + + str(hazard_spec["type"]) + ) else: # if hazard is none, check 'requirement' - if hazard_spec['required']: + if hazard_spec["required"]: is_valid = False - err_msg = 'required hazard is missing - spec: ' + str(hazard_spec) + err_msg = "required hazard is missing - spec: " + str(hazard_spec) return is_valid, err_msg @staticmethod @@ -329,15 +377,17 @@ def validate_output_dataset(dataset_spec, dataset): """ is_valid = True - err_msg = '' - if not (dataset.data_type is dataset_spec['type']): + err_msg = "" + if not (dataset.data_type is dataset_spec["type"]): is_valid = False - err_msg = 'dataset type does not match' + err_msg = "dataset type does not match" return is_valid, err_msg """ convenience function(s) for setting result data as a csv """ - def set_result_csv_data(self, result_id, result_data, name, source='file', index=False): + def set_result_csv_data( + self, result_id, result_data, name, source="file", index=False + ): if name is None: name = self.spec["name"] + "-result" @@ -347,14 +397,14 @@ def set_result_csv_data(self, result_id, result_data, name, source='file', index dataset_type = 
self.output_datasets[result_id]["spec"]["type"] dataset = None - if source == 'file': + if source == "file": dataset = Dataset.from_csv_data(result_data, name, dataset_type) - elif source == 'dataframe': + elif source == "dataframe": dataset = Dataset.from_dataframe(result_data, name, dataset_type, index) self.set_output_dataset(result_id, dataset) - def set_result_json_data(self, result_id, result_data, name, source='file'): + def set_result_json_data(self, result_id, result_data, name, source="file"): if name is None: name = self.spec["name"] + "-result" @@ -363,32 +413,36 @@ def set_result_json_data(self, result_id, result_data, name, source='file'): dataset_type = self.output_datasets[result_id]["spec"]["type"] dataset = None - if source == 'file': + if source == "file": dataset = Dataset.from_json_data(result_data, name, dataset_type) self.set_output_dataset(result_id, dataset) def run_analysis(self): - """ Validates and runs the analysis.""" - for dataset_spec in self.spec['input_datasets']: + """Validates and runs the analysis.""" + for dataset_spec in self.spec["input_datasets"]: ds_id = dataset_spec["id"] - result = self.validate_input_dataset(dataset_spec, self.input_datasets[ds_id]["value"]) + result = self.validate_input_dataset( + dataset_spec, self.input_datasets[ds_id]["value"] + ) if not result[0]: print("Error reading dataset: " + result[1]) return result # TODO: We will iteratively roll out input hazard; once it's done, we will remove this if block - if 'input_hazards' in self.spec: - for hazard_spec in self.spec['input_hazards']: + if "input_hazards" in self.spec: + for hazard_spec in self.spec["input_hazards"]: hz_id = hazard_spec["id"] - result = self.validate_input_hazard(hazard_spec, self.input_hazards[hz_id]["value"]) + result = self.validate_input_hazard( + hazard_spec, self.input_hazards[hz_id]["value"] + ) if not result[0]: print("Error reading hazard: " + result[1]) return result - for parameter_spec in self.spec['input_parameters']: + for parameter_spec in self.spec["input_parameters"]: par_id = parameter_spec["id"] result = self.validate_parameter(parameter_spec, self.get_parameter(par_id)) diff --git a/pyincore/client.py b/pyincore/client.py index 966f81c14..766837872 100644 --- a/pyincore/client.py +++ b/pyincore/client.py @@ -45,10 +45,10 @@ def update_hash_entry(mode, hashed_url=None, service_url=None): # to add a hash entry if mode == "add" and (service_url is not None and hashed_url is not None): entry = { - "service-name": "", - "service-url": service_url, - "hash": hashed_url, - "description": "" + "service-name": "", + "service-url": service_url, + "hash": hashed_url, + "description": "", } if not os.path.exists(pyglobals.PYINCORE_SERVICE_JSON): with open(pyglobals.PYINCORE_SERVICE_JSON, "w") as f: @@ -164,7 +164,12 @@ def delete(self, url: str, timeout=(30, 600), **kwargs): class IncoreClient(Client): """IN-CORE service client class. 
It contains token and service root url.""" - def __init__(self, service_url: str = None, token_file_name: str = None, offline: bool = False): + def __init__( + self, + service_url: str = None, + token_file_name: str = None, + offline: bool = False, + ): """ Args: @@ -179,7 +184,9 @@ def __init__(self, service_url: str = None, token_file_name: str = None, offline if service_url is None or len(service_url.strip()) == 0: service_url = pyglobals.INCORE_API_PROD_URL self.service_url = service_url - self.token_url = urllib.parse.urljoin(self.service_url, pyglobals.KEYCLOAK_AUTH_PATH) + self.token_url = urllib.parse.urljoin( + self.service_url, pyglobals.KEYCLOAK_AUTH_PATH + ) # hashlib requires bytes array for hash operations byte_url_string = str.encode(self.service_url) @@ -200,24 +207,37 @@ def __init__(self, service_url: str = None, token_file_name: str = None, offline # store the token file in the respective repository's directory if token_file_name is None or len(token_file_name.strip()) == 0: token_file_name = "." + self.hashed_service_url + "_token" - self.token_file = os.path.join(pyglobals.PYINCORE_USER_CACHE, token_file_name) + self.token_file = os.path.join( + pyglobals.PYINCORE_USER_CACHE, token_file_name + ) authorization = self.retrieve_token_from_file() if authorization is not None: self.session.headers["Authorization"] = authorization - print("Connection successful to IN-CORE services.", "pyIncore version detected:", pyglobals.PACKAGE_VERSION) + print( + "Connection successful to IN-CORE services.", + "pyIncore version detected:", + pyglobals.PACKAGE_VERSION, + ) else: if self.login(): - print("Connection successful to IN-CORE services.", "pyIncore version detected:", - pyglobals.PACKAGE_VERSION) + print( + "Connection successful to IN-CORE services.", + "pyIncore version detected:", + pyglobals.PACKAGE_VERSION, + ) else: self.service_url = "" self.token_url = "" self.hashed_service_url = "" self.hashed_svc_data_dir = "" self.token_file = "" - print("You are working with the offline version of IN-CORE.", "pyIncore version detected:", pyglobals.PACKAGE_VERSION) + print( + "You are working with the offline version of IN-CORE.", + "pyIncore version detected:", + pyglobals.PACKAGE_VERSION, + ) def login(self): for attempt in range(pyglobals.MAX_LOGIN_ATTEMPTS): @@ -227,9 +247,15 @@ def login(self): except EOFError as e: logger.warning(e) raise e - r = requests.post(self.token_url, data={'grant_type': 'password', - 'client_id': pyglobals.CLIENT_ID, - 'username': username, 'password': password}) + r = requests.post( + self.token_url, + data={ + "grant_type": "password", + "client_id": pyglobals.CLIENT_ID, + "username": username, + "password": password, + }, + ) try: token = return_http_response(r).json() if token is None or token["access_token"] is None: @@ -237,13 +263,12 @@ def login(self): exit(0) authorization = str("bearer " + token["access_token"]) self.store_authorization_in_file(authorization) - self.session.headers['Authorization'] = authorization + self.session.headers["Authorization"] = authorization return True except Exception as e: logger.warning("Authentication failed, attempting login again.") print(e) - logger.warning("Authentication failed.") exit(0) @@ -255,7 +280,7 @@ def store_authorization_in_file(self, authorization: str): """ try: - with open(self.token_file, 'w') as f: + with open(self.token_file, "w") as f: f.write(authorization) except IOError as e: logger.warning(e) @@ -267,14 +292,16 @@ def is_token_expired(self, token): True if the token has expired, 
False otherwise """ # Split the token to get payload - _, payload_encoded, _ = token.split('.') + _, payload_encoded, _ = token.split(".") # Decode the payload - payload = base64.urlsafe_b64decode(payload_encoded + '==') # Padding just in case + payload = base64.urlsafe_b64decode( + payload_encoded + "==" + ) # Padding just in case payload_json = json.loads(payload) now = datetime.now(timezone.utc) current_time = now.timestamp() # Compare current time with exp claim - return current_time > payload_json['exp'] + return current_time > payload_json["exp"] def retrieve_token_from_file(self): """Attempts to retrieve authorization from a local file, if it exists. @@ -288,7 +315,7 @@ def retrieve_token_from_file(self): return None else: try: - with open(self.token_file, 'r') as f: + with open(self.token_file, "r") as f: auth = f.read().splitlines() # check if token is valid if self.is_token_expired(auth[0]): @@ -384,7 +411,9 @@ def delete(self, url: str, timeout=(30, 600), **kwargs): return return_http_response(r) def create_service_json_entry(self): - update_hash_entry("add", hashed_url=self.hashed_service_url, service_url=self.service_url) + update_hash_entry( + "add", hashed_url=self.hashed_service_url, service_url=self.service_url + ) @staticmethod def clear_root_cache(): @@ -401,9 +430,9 @@ def clear_root_cache(): def clear_cache(self): """ - This function helps clear the data cache for a specific repository or the entire cache + This function helps clear the data cache for a specific repository or the entire cache - Returns: None + Returns: None """ # incase cache_data folder doesn't exist @@ -427,11 +456,11 @@ def clear_cache(self): class InsecureIncoreClient(Client): """IN-CORE service client class that bypasses Ambassador auth. It contains token and service root url. - Args: - service_url (str): Service url. - username (str): Username string. + Args: + service_url (str): Service url. + username (str): Username string. 
- """ + """ def __init__(self, service_url: str = None, username: str = None): super().__init__() @@ -439,7 +468,9 @@ def __init__(self, service_url: str = None, username: str = None): service_url = pyglobals.INCORE_API_PROD_URL self.service_url = service_url if username is None or len(username.strip()) == 0: - self.session.headers["x-auth-userinfo"] = pyglobals.INCORE_LDAP_TEST_USER_INFO + self.session.headers[ + "x-auth-userinfo" + ] = pyglobals.INCORE_LDAP_TEST_USER_INFO else: - user_info = "{\"preferred_username\": \"" + username + "\"}" + user_info = '{"preferred_username": "' + username + '"}' self.session.headers["x-auth-userinfo"] = user_info diff --git a/pyincore/dataservice.py b/pyincore/dataservice.py index 819d96cc1..ac9d55ffd 100644 --- a/pyincore/dataservice.py +++ b/pyincore/dataservice.py @@ -31,10 +31,12 @@ class DataService: def __init__(self, client: IncoreClient): self.client = client - self.base_url = urljoin(client.service_url, 'data/api/datasets/') - self.files_url = urljoin(client.service_url, 'data/api/files/') - self.base_earthquake_url = urljoin(client.service_url, 'hazard/api/earthquakes/') - self.base_tornado_url = urljoin(client.service_url, 'hazard/api/tornadoes/') + self.base_url = urljoin(client.service_url, "data/api/datasets/") + self.files_url = urljoin(client.service_url, "data/api/files/") + self.base_earthquake_url = urljoin( + client.service_url, "hazard/api/earthquakes/" + ) + self.base_tornado_url = urljoin(client.service_url, "hazard/api/tornadoes/") @forbid_offline def get_dataset_metadata(self, dataset_id: str, timeout=(30, 600), **kwargs): @@ -66,12 +68,14 @@ def get_dataset_files_metadata(self, dataset_id: str, timeout=(30, 600), **kwarg obj: HTTP response containing the metadata. """ - url = urljoin(self.base_url, dataset_id + '/files') + url = urljoin(self.base_url, dataset_id + "/files") r = self.client.get(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_dataset_file_metadata(self, dataset_id: str, file_id: str, timeout=(30, 600), **kwargs): + def get_dataset_file_metadata( + self, dataset_id: str, file_id: str, timeout=(30, 600), **kwargs + ): """Retrieve metadata of all files associated with the dataset. Files API endpoint is called. Args: @@ -84,8 +88,7 @@ def get_dataset_file_metadata(self, dataset_id: str, file_id: str, timeout=(30, obj: HTTP response containing the metadata. """ - url = urljoin(self.base_url, - dataset_id + "/files/" + file_id) + url = urljoin(self.base_url, dataset_id + "/files/" + file_id) r = self.client.get(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -114,16 +117,22 @@ def get_dataset_blob(self, dataset_id: str, join=None, timeout=(30, 600), **kwar # for consistency check to ensure the repository hash is recorded in service.json self.client.create_service_json_entry() - local_filename = self.download_dataset_blob(cache_data_dir, dataset_id, timeout=timeout, **kwargs ) + local_filename = self.download_dataset_blob( + cache_data_dir, dataset_id, timeout=timeout, **kwargs + ) # if cache_data_dir exist, check if id folder and zip file exist inside else: for fname in os.listdir(cache_data_dir): - if fname.endswith('.zip'): + if fname.endswith(".zip"): local_filename = os.path.join(cache_data_dir, fname) - print('Dataset already exists locally. Reading from local cached zip.') + print( + "Dataset already exists locally. Reading from local cached zip." 
+ ) if not local_filename: - local_filename = self.download_dataset_blob(cache_data_dir, dataset_id, timeout=timeout, **kwargs) + local_filename = self.download_dataset_blob( + cache_data_dir, dataset_id, timeout=timeout, **kwargs + ) folder = self.unzip_dataset(local_filename) if folder is not None: @@ -132,27 +141,34 @@ def get_dataset_blob(self, dataset_id: str, join=None, timeout=(30, 600), **kwar return local_filename @forbid_offline - def download_dataset_blob(self, cache_data_dir: str, dataset_id: str, join=None, timeout=(30, 600), **kwargs): + def download_dataset_blob( + self, + cache_data_dir: str, + dataset_id: str, + join=None, + timeout=(30, 600), + **kwargs + ): # construct url for file download - url = urljoin(self.base_url, dataset_id + '/blob') + url = urljoin(self.base_url, dataset_id + "/blob") kwargs["stream"] = True if join is None: r = self.client.get(url, timeout=timeout, **kwargs) else: payload = {} if join is True: - payload['join'] = 'true' + payload["join"] = "true" elif join is False: - payload['join'] = 'false' + payload["join"] = "false" r = self.client.get(url, params=payload, timeout=timeout, **kwargs) # extract filename - disposition = r.headers['content-disposition'] + disposition = r.headers["content-disposition"] fname = re.findall("filename=(.+)", disposition) - local_filename = os.path.join(cache_data_dir, fname[0].strip('\"')) + local_filename = os.path.join(cache_data_dir, fname[0].strip('"')) # download - with open(local_filename, 'wb') as f: + with open(local_filename, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) @@ -160,8 +176,17 @@ def download_dataset_blob(self, cache_data_dir: str, dataset_id: str, join=None, return local_filename @forbid_offline - def get_datasets(self, datatype: str = None, title: str = None, creator: str = None, skip: int = None, - limit: int = None, space: str = None, timeout=(30, 600), **kwargs): + def get_datasets( + self, + datatype: str = None, + title: str = None, + creator: str = None, + skip: int = None, + limit: int = None, + space: str = None, + timeout=(30, 600), + **kwargs + ): """Function to get datasets. Blob API endpoint is called. Args: @@ -182,17 +207,17 @@ def get_datasets(self, datatype: str = None, title: str = None, creator: str = N url = self.base_url payload = {} if datatype is not None: - payload['type'] = datatype + payload["type"] = datatype if title is not None: - payload['title'] = title + payload["title"] = title if creator is not None: - payload['creator'] = creator + payload["creator"] = creator if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) # need to handle there is no datasets @@ -210,15 +235,21 @@ def create_dataset(self, properties: dict, timeout=(30, 600), **kwargs): obj: HTTP POST Response. Json of the dataset posted to the server. 
""" - payload = {'dataset': json.dumps(properties)} + payload = {"dataset": json.dumps(properties)} url = self.base_url kwargs["files"] = payload r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def update_dataset(self, dataset_id, property_name: str, - property_value: str, timeout=(30, 600), **kwargs): + def update_dataset( + self, + dataset_id, + property_name: str, + property_value: str, + timeout=(30, 600), + **kwargs + ): """Update dataset. Put API endpoint is called. Args: @@ -233,14 +264,19 @@ def update_dataset(self, dataset_id, property_name: str, """ url = urljoin(self.base_url, dataset_id) - payload = {'update': json.dumps({"property name": property_name, - "property value": property_value})} + payload = { + "update": json.dumps( + {"property name": property_name, "property value": property_value} + ) + } kwargs["files"] = payload r = self.client.put(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def add_files_to_dataset(self, dataset_id: str, filepaths: list, timeout=(30, 600), **kwargs): + def add_files_to_dataset( + self, dataset_id: str, filepaths: list, timeout=(30, 600), **kwargs + ): """Add files to the dataset. Post API endpoint is called. Args: @@ -256,10 +292,10 @@ def add_files_to_dataset(self, dataset_id: str, filepaths: list, timeout=(30, 60 url = urljoin(self.base_url, dataset_id + "/files") listfiles = [] for filepath in filepaths: - file = open(filepath, 'rb') - tuple = ('file', file) + file = open(filepath, "rb") + tuple = ("file", file) listfiles.append(tuple) - kwargs['files'] = listfiles + kwargs["files"] = listfiles r = self.client.post(url, timeout=timeout, **kwargs) # close files @@ -269,8 +305,16 @@ def add_files_to_dataset(self, dataset_id: str, filepaths: list, timeout=(30, 60 return return_http_response(r).json() @forbid_offline - def add_files_to_network_dataset(self, dataset_id: str, filepaths: list, - nodename: str, linkname: str, graphname: str, timeout=(30, 600), **kwargs): + def add_files_to_network_dataset( + self, + dataset_id: str, + filepaths: list, + nodename: str, + linkname: str, + graphname: str, + timeout=(30, 600), + **kwargs + ): """Add files to the network dataset. Post API endpoint is called. 
Args: @@ -293,14 +337,14 @@ def add_files_to_network_dataset(self, dataset_id: str, filepaths: list, graphname = os.path.splitext(graphname)[0] for filepath in filepaths: filename = os.path.splitext(ntpath.basename(filepath))[0] - file = open(filepath, 'rb') - bodyname = '' + file = open(filepath, "rb") + bodyname = "" if filename == linkname: - bodyname = 'link-file' + bodyname = "link-file" if filename == nodename: - bodyname = 'node-file' + bodyname = "node-file" if filename == graphname: - bodyname = 'graph-file' + bodyname = "graph-file" tuple = (bodyname, file) listfiles.append(tuple) kwargs["files"] = listfiles @@ -376,23 +420,23 @@ def get_file_blob(self, file_id: str, timeout=(30, 600), **kwargs): """ # construct url for file download - url = urljoin(self.files_url, file_id + '/blob') + url = urljoin(self.files_url, file_id + "/blob") kwargs["stream"] = True r = self.client.get(url, timeout=timeout, **kwargs) r = return_http_response(r) # extract filename - disposition = r.headers['content-disposition'] + disposition = r.headers["content-disposition"] fname = re.findall("filename=(.+)", disposition) # construct local directory and filename - if not os.path.exists('data'): - os.makedirs('data') - local_filename = os.path.join('data', fname[0].strip('\"')) + if not os.path.exists("data"): + os.makedirs("data") + local_filename = os.path.join("data", fname[0].strip('"')) # download - with open(local_filename, 'wb') as f: + with open(local_filename, "wb") as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) @@ -411,16 +455,16 @@ def unzip_dataset(self, local_filename: str): """ foldername, file_extension = os.path.splitext(local_filename) # if it is not a zip file, no unzip - if not file_extension.lower() == '.zip': - print('It is not a zip file; no unzip') + if not file_extension.lower() == ".zip": + print("It is not a zip file; no unzip") return None # check the folder existance, no unzip if os.path.isdir(foldername): - print('Unzipped folder found in the local cache. Reading from it...') + print("Unzipped folder found in the local cache. Reading from it...") return foldername os.makedirs(foldername) - zip_ref = zipfile.ZipFile(local_filename, 'r') + zip_ref = zipfile.ZipFile(local_filename, "r") zip_ref.extractall(foldername) zip_ref.close() return foldername @@ -440,7 +484,7 @@ def get_shpfile_from_service(self, fileid, dirname, timeout=(30, 600), **kwargs) """ request_str = self.base_url + fileid - request_str_zip = request_str + '/blob' + request_str_zip = request_str + "/blob" # obtain file name r = self.client.get(request_str, timeout=timeout, **kwargs) @@ -477,7 +521,14 @@ def get_tornado_dataset_id_from_service(self, fileid, timeout=(30, 600), **kwarg return return_http_response(r).json()["tornadoDatasetId"] @forbid_offline - def search_datasets(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_datasets( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Function to search datasets. 
Args: @@ -494,9 +545,9 @@ def search_datasets(self, text: str, skip: int = None, limit: int = None, timeou url = urljoin(self.base_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) diff --git a/pyincore/dataset.py b/pyincore/dataset.py index 7f13c65e2..087d2736d 100644 --- a/pyincore/dataset.py +++ b/pyincore/dataset.py @@ -36,7 +36,9 @@ def __init__(self, metadata): # For convenience instead of having to dig through the metadata for these self.title = metadata["title"] if "title" in metadata else None - self.description = metadata["description"] if "description" in metadata else None + self.description = ( + metadata["description"] if "description" in metadata else None + ) self.data_type = metadata["dataType"] self.format = metadata["format"] self.id = metadata["id"] @@ -86,7 +88,9 @@ def from_json_str(cls, json_str, data_service: DataService = None, file_path=Non instance.local_file_path = file_path else: - raise ValueError("You have to either use data services, or given pass local file path.") + raise ValueError( + "You have to either use data services, or given pass local file path." + ) return instance @@ -102,10 +106,12 @@ def from_file(cls, file_path, data_type): obj: Dataset from file. """ - metadata = {"dataType": data_type, - "format": '', - "fileDescriptors": [], - "id": file_path} + metadata = { + "dataType": data_type, + "format": "", + "fileDescriptors": [], + "id": file_path, + } instance = cls(metadata) instance.local_file_path = file_path return instance @@ -141,9 +147,11 @@ def from_csv_data(cls, result_data, name, data_type): """ if len(result_data) > 0: - with open(name, 'w') as csv_file: + with open(name, "w") as csv_file: # Write the parent ID at the top of the result data, if it is given - writer = csv.DictWriter(csv_file, dialect="unix", fieldnames=result_data[0].keys()) + writer = csv.DictWriter( + csv_file, dialect="unix", fieldnames=result_data[0].keys() + ) writer.writeheader() writer.writerows(result_data) return Dataset.from_file(name, data_type) @@ -162,7 +170,7 @@ def from_json_data(cls, result_data, name, data_type): """ if len(result_data) > 0: - with open(name, 'w') as json_file: + with open(name, "w") as json_file: json_dumps_str = json.dumps(result_data, indent=4) json_file.write(json_dumps_str) return Dataset.from_file(name, data_type) @@ -214,7 +222,7 @@ def get_json_reader(self): if len(files) > 0: filename = files[0] - with open(filename, 'r') as f: + with open(filename, "r") as f: return json.load(f) return self.readers["json"] @@ -250,10 +258,10 @@ def get_raster_value(self, x, y): def get_csv_reader(self): """Utility method for reading different standard file formats: csv reader. - Returns: - obj: CSV reader. + Returns: + obj: CSV reader. - """ + """ if "csv" not in self.readers: filename = self.local_file_path if os.path.isdir(filename): @@ -261,7 +269,7 @@ def get_csv_reader(self): if len(files) > 0: filename = files[0] - csvfile = open(filename, 'r') + csvfile = open(filename, "r") return csv.DictReader(csvfile) return self.readers["csv"] @@ -269,10 +277,10 @@ def get_csv_reader(self): def get_csv_reader_std(self): """Utility method for reading different standard file formats: csv reader. - Returns: - obj: CSV reader. + Returns: + obj: CSV reader. 
- """ + """ if "csv" not in self.readers: filename = self.local_file_path if os.path.isdir(filename): @@ -280,12 +288,12 @@ def get_csv_reader_std(self): if len(files) > 0: filename = files[0] - csvfile = open(filename, 'r') + csvfile = open(filename, "r") return csv.reader(csvfile) return self.readers["csv"] - def get_file_path(self, type='csv'): + def get_file_path(self, type="csv"): """Utility method for reading different standard file formats: file path. Args: @@ -314,10 +322,12 @@ def get_dataframe_from_csv(self, low_memory=True, delimiter=None): obj: Panda's DataFrame. """ - filename = self.get_file_path('csv') + filename = self.get_file_path("csv") df = pd.DataFrame() if os.path.isfile(filename): - df = pd.read_csv(filename, header="infer", low_memory=low_memory, delimiter=delimiter) + df = pd.read_csv( + filename, header="infer", low_memory=low_memory, delimiter=delimiter + ) return df def get_dataframe_from_shapefile(self): @@ -334,14 +344,12 @@ def get_dataframe_from_shapefile(self): return gdf def delete_temp_file(self): - """Delete temporary folder. - """ + """Delete temporary folder.""" if os.path.exists(self.local_file_path): os.remove(self.local_file_path) def delete_temp_folder(self): - """Delete temporary folder. - """ + """Delete temporary folder.""" path = Path(self.local_file_path) absolute_path = path.parent.absolute() @@ -369,7 +377,7 @@ class DamageRatioDataset: def __init__(self, filename): self.damage_ratio = None - csvfile = open(filename, 'r') + csvfile = open(filename, "r") reader = csv.DictReader(csvfile) self.damage_ratio = [] for row in reader: diff --git a/pyincore/decorators.py b/pyincore/decorators.py index 212e40373..abd355c3f 100644 --- a/pyincore/decorators.py +++ b/pyincore/decorators.py @@ -3,6 +3,7 @@ def forbid_offline(func): Custom decorator to forbid method interact with remote service in offline mode. Returns: """ + def wrapper(self, *args, **kwargs): if self.client.offline: raise ValueError("Service is not available in offline mode.") diff --git a/pyincore/dfr3service.py b/pyincore/dfr3service.py index a7bc56b41..2916c0e52 100644 --- a/pyincore/dfr3service.py +++ b/pyincore/dfr3service.py @@ -22,12 +22,7 @@ logger = pyglobals.LOGGER # add more types if needed -known_types = { - "java.lang.String": "str", - "double": "float", - "int": "int", - "str": "str" -} +known_types = {"java.lang.String": "str", "double": "float", "int": "int", "str": "str"} # add more operators if needed known_operators = { @@ -39,7 +34,7 @@ "LT": "<", "LE": "<=", "NMATCHES": "", - "MATCHES": "" + "MATCHES": "", } @@ -71,7 +66,7 @@ class Dfr3Service: def __init__(self, client: IncoreClient): self.client = client - self.base_mapping_url = urljoin(client.service_url, 'dfr3/api/mappings/') + self.base_mapping_url = urljoin(client.service_url, "dfr3/api/mappings/") @forbid_offline def get_dfr3_set(self, dfr3_id: str, timeout=(30, 600), **kwargs): @@ -94,13 +89,13 @@ def get_dfr3_set(self, dfr3_id: str, timeout=(30, 600), **kwargs): @forbid_offline def delete_dfr3_set(self, dfr3_id: str, timeout=(30, 600), **kwargs): """Delete specific DFR3 set. - Args: - dfr3_id (str): ID of the DFR3 set. - timeout (tuple): Timeout for the request. - **kwargs: Arbitrary keyword arguments. + Args: + dfr3_id (str): ID of the DFR3 set. + timeout (tuple): Timeout for the request. + **kwargs: Arbitrary keyword arguments. - Returns: - obj: HTTP response with return results. + Returns: + obj: HTTP response with return results. 
""" url = urljoin(self.base_dfr3_url, dfr3_id) r = self.client.delete(url, timeout=timeout, **kwargs) @@ -122,19 +117,28 @@ def batch_get_dfr3_set(self, dfr3_id_lists: list): for id in dfr3_id_lists: dfr3_set = self.get_dfr3_set(id) instance = self.__class__.__name__ - if instance == 'FragilityService': + if instance == "FragilityService": batch_dfr3_sets[id] = FragilityCurveSet(dfr3_set) - elif instance == 'RepairService': + elif instance == "RepairService": batch_dfr3_sets[id] = RepairCurveSet(dfr3_set) - elif instance == 'RestorationService': + elif instance == "RestorationService": batch_dfr3_sets[id] = RestorationCurveSet(dfr3_set) else: - raise ValueError("Only fragility and repair services are currently supported") + raise ValueError( + "Only fragility and repair services are currently supported" + ) return batch_dfr3_sets @forbid_offline - def search_dfr3_sets(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_dfr3_sets( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search DFR3 sets based on a specific text. Args: @@ -151,9 +155,9 @@ def search_dfr3_sets(self, text: str, skip: int = None, limit: int = None, timeo url = urljoin(self.base_dfr3_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -175,7 +179,9 @@ def create_dfr3_set(self, dfr3_set: dict, timeout=(30, 600), **kwargs): r = self.client.post(url, json=dfr3_set, timeout=timeout, **kwargs) return return_http_response(r).json() - def match_inventory(self, mapping: MappingSet, inventories: list, entry_key: Optional[str] = None): + def match_inventory( + self, mapping: MappingSet, inventories: list, entry_key: Optional[str] = None + ): """This method is intended to replace the match_inventory method in the future. The functionality is same as match_inventory but instead of dfr3_sets in plain json, dfr3 curves will be represented in FragilityCurveSet Object. @@ -199,49 +205,70 @@ def match_inventory(self, mapping: MappingSet, inventories: list, entry_key: Opt entry_key = m["name"] break if entry_key is None: - raise ValueError("Entry key not provided and no default entry key found in the mapping!") + raise ValueError( + "Entry key not provided and no default entry key found in the mapping!" 
+ ) # loop through inventory to match the rules matched_curve_ids = [] for inventory in inventories: - if "occ_type" in inventory["properties"] and \ - inventory["properties"]["occ_type"] is None: + if ( + "occ_type" in inventory["properties"] + and inventory["properties"]["occ_type"] is None + ): inventory["properties"]["occ_type"] = "" - if "efacility" in inventory["properties"] and \ - inventory["properties"]["efacility"] is None: + if ( + "efacility" in inventory["properties"] + and inventory["properties"]["efacility"] is None + ): inventory["properties"]["efacility"] = "" # if retrofit key exist, use retrofit key otherwise use default key - retrofit_entry_key = inventory["properties"]["retrofit_k"] if "retrofit_k" in \ - inventory["properties"] else None + retrofit_entry_key = ( + inventory["properties"]["retrofit_k"] + if "retrofit_k" in inventory["properties"] + else None + ) cached_curve = self._check_cache(dfr3_sets_cache, inventory["properties"]) if cached_curve is not None: - dfr3_sets[inventory['id']] = cached_curve + dfr3_sets[inventory["id"]] = cached_curve else: for m in mapping.mappings: # for old format rule matching [[]] # [[ and ] or [ and ]] if isinstance(m.rules, list): - if self._property_match_legacy(rules=m.rules, properties=inventory["properties"]): - if retrofit_entry_key is not None and retrofit_entry_key in m.entry: + if self._property_match_legacy( + rules=m.rules, properties=inventory["properties"] + ): + if ( + retrofit_entry_key is not None + and retrofit_entry_key in m.entry + ): curve = m.entry[retrofit_entry_key] else: curve = m.entry[entry_key] - - dfr3_sets[inventory['id']] = curve - matched_properties_dict = self._convert_properties_to_dict(m.rules, inventory["properties"]) + dfr3_sets[inventory["id"]] = curve + + matched_properties_dict = self._convert_properties_to_dict( + m.rules, inventory["properties"] + ) if retrofit_entry_key is not None: - matched_properties_dict["retrofit_k"] = retrofit_entry_key + matched_properties_dict[ + "retrofit_k" + ] = retrofit_entry_key # Add the matched inventory properties so other matching inventory can avoid rule matching dfr3_sets_cache[curve] = matched_properties_dict # if it's string:id; then need to fetch it from remote and cast to dfr3curve object - if isinstance(curve, str) and curve not in matched_curve_ids: + if ( + isinstance(curve, str) + and curve not in matched_curve_ids + ): matched_curve_ids.append(curve) # use the first match @@ -250,19 +277,29 @@ def match_inventory(self, mapping: MappingSet, inventories: list, entry_key: Opt # for new format rule matching {"AND/OR":[]} # {"AND": [xx, "OR": [yy, yy], "AND": {"OR":["zz", "zz"]]} elif isinstance(m.rules, dict): - if self._property_match(rules=m.rules, properties=inventory["properties"]): - if retrofit_entry_key is not None and retrofit_entry_key in m.entry: + if self._property_match( + rules=m.rules, properties=inventory["properties"] + ): + if ( + retrofit_entry_key is not None + and retrofit_entry_key in m.entry + ): curve = m.entry[retrofit_entry_key] else: curve = m.entry[entry_key] - dfr3_sets[inventory['id']] = curve + dfr3_sets[inventory["id"]] = curve - matched_properties_dict = self._convert_properties_to_dict(m.rules, inventory["properties"]) + matched_properties_dict = self._convert_properties_to_dict( + m.rules, inventory["properties"] + ) # Add the matched inventory properties so other matching inventory can avoid rule matching dfr3_sets_cache[curve] = matched_properties_dict # if it's string:id; then need to fetch it from remote and 
cast to dfr3 curve object - if isinstance(curve, str) and curve not in matched_curve_ids: + if ( + isinstance(curve, str) + and curve not in matched_curve_ids + ): matched_curve_ids.append(curve) # use the first match @@ -272,18 +309,24 @@ def match_inventory(self, mapping: MappingSet, inventories: list, entry_key: Opt # replace the curve id in dfr3_sets to the dfr3 curve for inventory_id, curve_item in dfr3_sets.items(): - if isinstance(curve_item, FragilityCurveSet) or isinstance(curve_item, RepairCurveSet) \ - or isinstance(curve_item, RestorationCurveSet): + if ( + isinstance(curve_item, FragilityCurveSet) + or isinstance(curve_item, RepairCurveSet) + or isinstance(curve_item, RestorationCurveSet) + ): pass elif isinstance(curve_item, str): dfr3_sets[inventory_id] = batch_dfr3_sets[curve_item] else: raise ValueError( - "Cannot realize dfr3_set entry. The entry has to be either remote id string; or dfr3curve object!") + "Cannot realize dfr3_set entry. The entry has to be either remote id string; or dfr3curve object!" + ) return dfr3_sets - def match_list_of_dicts(self, mapping: MappingSet, inventories: list, entry_key: Optional[str] = None): + def match_list_of_dicts( + self, mapping: MappingSet, inventories: list, entry_key: Optional[str] = None + ): """This method is same as match_inventory, except it takes a simple list of dictionaries that contains the items to be mapped in the rules. The match_inventory method takes a list of fiona objects @@ -305,7 +348,9 @@ def match_list_of_dicts(self, mapping: MappingSet, inventories: list, entry_key: entry_key = m["name"] break if entry_key is None: - raise ValueError("Entry key not provided and no default entry key found in the mapping!") + raise ValueError( + "Entry key not provided and no default entry key found in the mapping!" + ) # loop through inventory to match the rules matched_curve_ids = [] @@ -315,7 +360,7 @@ def match_list_of_dicts(self, mapping: MappingSet, inventories: list, entry_key: if isinstance(m.rules, list): if self._property_match_legacy(rules=m.rules, properties=inventory): curve = m.entry[entry_key] - dfr3_sets[inventory['id']] = curve + dfr3_sets[inventory["id"]] = curve # if it's string:id; then need to fetch it from remote and cast to fragility3curve object if isinstance(curve, str) and curve not in matched_curve_ids: @@ -328,7 +373,7 @@ def match_list_of_dicts(self, mapping: MappingSet, inventories: list, entry_key: elif isinstance(m.rules, dict): if self._property_match(rules=m.rules, properties=inventory): curve = m.entry[entry_key] - dfr3_sets[inventory['guid']] = curve + dfr3_sets[inventory["guid"]] = curve # if it's string:id; then need to fetch it from remote and cast to fragility3curve object if isinstance(curve, str) and curve not in matched_curve_ids: @@ -341,14 +386,18 @@ def match_list_of_dicts(self, mapping: MappingSet, inventories: list, entry_key: # replace the curve id in dfr3_sets to the dfr3 curve for inventory_id, curve_item in dfr3_sets.items(): - if isinstance(curve_item, FragilityCurveSet) or isinstance(curve_item, RepairCurveSet) \ - or isinstance(curve_item, RestorationCurveSet): + if ( + isinstance(curve_item, FragilityCurveSet) + or isinstance(curve_item, RepairCurveSet) + or isinstance(curve_item, RestorationCurveSet) + ): pass elif isinstance(curve_item, str): dfr3_sets[inventory_id] = batch_dfr3_sets[curve_item] else: raise ValueError( - "Cannot realize dfr3_set entry. The entry has to be either remote id string; or dfr3curve object!") + "Cannot realize dfr3_set entry. 
The entry has to be either remote id string; or dfr3curve object!" + ) return dfr3_sets @@ -356,18 +405,20 @@ def match_list_of_dicts(self, mapping: MappingSet, inventories: list, entry_key: def _check_cache(dfr3_sets_dict, properties): """A method to see if we already have matched an inventory with the same properties to a fragility curve - Args: - dfr3_sets_dict (dict): {"fragility-curve-id-1": {"struct_typ": "W1", "no_stories": "2"}, etc.} - properties (obj): A fiona Properties object that contains properties of the inventory row. + Args: + dfr3_sets_dict (dict): {"fragility-curve-id-1": {"struct_typ": "W1", "no_stories": "2"}, etc.} + properties (obj): A fiona Properties object that contains properties of the inventory row. - Returns: - Fragility curve id if a match is found + Returns: + Fragility curve id if a match is found """ if not dfr3_sets_dict: return None - retrofit_entry_key = properties["retrofit_k"] if "retrofit_k" in properties else None + retrofit_entry_key = ( + properties["retrofit_k"] if "retrofit_k" in properties else None + ) for entry_key in dfr3_sets_dict: inventory_dict = {} entry_dict = dfr3_sets_dict[entry_key] @@ -400,7 +451,9 @@ def _convert_properties_to_dict(rules, properties): return matched_properties for i, and_rules in enumerate(rules): for j, rule in enumerate(and_rules): - matched_properties.update(Dfr3Service._eval_property_from_inventory(rule, properties)) + matched_properties.update( + Dfr3Service._eval_property_from_inventory(rule, properties) + ) elif isinstance(rules, dict): # If the rules are empty, return the matched properties if not rules or rules == [[]] or rules == [None]: @@ -415,10 +468,15 @@ def _convert_properties_to_dict(rules, properties): if isinstance(criterion, dict): for criteria in criterion: for rule_criteria in criterion[criteria]: - matched_properties.update(Dfr3Service._eval_property_from_inventory(rule_criteria, - properties)) + matched_properties.update( + Dfr3Service._eval_property_from_inventory( + rule_criteria, properties + ) + ) elif isinstance(criterion, str): - matched_properties.update(Dfr3Service._eval_property_from_inventory(criterion, properties)) + matched_properties.update( + Dfr3Service._eval_property_from_inventory(criterion, properties) + ) else: raise ValueError("Cannot evaluate criterion, unsupported format!") @@ -444,7 +502,12 @@ def _property_match_legacy(rules, properties): else: # rules = [[A and B], OR [C and D], OR [E and F]] or_matched = [ - all(map(lambda rule: Dfr3Service._eval_criterion(rule, properties), and_rules)) + all( + map( + lambda rule: Dfr3Service._eval_criterion(rule, properties), + and_rules, + ) + ) for and_rules in rules ] @@ -493,13 +556,13 @@ def _property_match(rules, properties): def _eval_property_from_inventory(rule, properties): """A method to evaluate individual rule and get the property from the inventory properties. - Args: - rule (str): # e.g. "int no_stories EQ 1", - properties (dict): dictionary of properties of an inventory item. e.g. {"guid":xxx, - "num_stories":xxx, ...} + Args: + rule (str): # e.g. "int no_stories EQ 1", + properties (dict): dictionary of properties of an inventory item. e.g. {"guid":xxx, + "num_stories":xxx, ...} - Returns: - dictionary entry with the inventory property value that matched the rule + Returns: + dictionary entry with the inventory property value that matched the rule """ elements = rule.split(" ", 3) @@ -530,36 +593,52 @@ def _eval_criterion(rule, properties): rule_type = elements[0] # e.g. int, str, double, java.lang.String, etc... 
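# A minimal, self-contained sketch of the four-token rule format consumed above
# ("<type> <key> <operator> <value>", e.g. "int no_stories EQ 1"). The reduced
# operator/type tables below are illustrative assumptions; the service method also
# handles regex operators such as MATCHES/NMATCHES via re.search.
import operator

_EXAMPLE_TYPES = {"int": int, "double": float, "str": str, "java.lang.String": str}
_EXAMPLE_OPERATORS = {"EQ": operator.eq, "GE": operator.ge, "LE": operator.le}

def _eval_rule_sketch(rule: str, properties: dict) -> bool:
    rule_type, rule_key, rule_operator, rule_value = rule.split(" ", 3)
    cast = _EXAMPLE_TYPES[rule_type]
    lhs = cast(properties[rule_key])
    rhs = cast(rule_value.strip("'").strip('"'))
    return _EXAMPLE_OPERATORS[rule_operator](lhs, rhs)

# _eval_rule_sketch("int no_stories EQ 1", {"no_stories": 1})        -> True
# _eval_rule_sketch("str struct_typ EQ 'W1'", {"struct_typ": "W1"})  -> True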
if rule_type not in known_types: - raise ValueError(rule_type + " Unknown. Cannot parse the rules of this mapping!") + raise ValueError( + rule_type + " Unknown. Cannot parse the rules of this mapping!" + ) rule_key = elements[1] # e.g. no_storeis, year_built, etc... rule_operator = elements[2] # e.g. EQ, GE, LE, EQUALS if rule_operator not in known_operators: - raise ValueError(rule_operator + " Unknown. Cannot parse the rules of this mapping!") + raise ValueError( + rule_operator + " Unknown. Cannot parse the rules of this mapping!" + ) - rule_value = elements[3].strip('\'').strip('\"') + rule_value = elements[3].strip("'").strip('"') if rule_key in properties: # validate if the rule is written correctly by comparing variable type, e.g. no_stories properties # should be integer if isinstance(properties[rule_key], eval(known_types[rule_type])): # additional steps to strip "'" for string matches - if known_types[rule_type] == 'str': + if known_types[rule_type] == "str": if rule_operator == "MATCHES": matched = bool(re.search(rule_value, properties[rule_key])) elif rule_operator == "NMATCHES": matched = not bool(re.search(rule_value, properties[rule_key])) else: matched = eval( - '"{0}"'.format(properties[rule_key]) + known_operators[rule_operator] + '"{0}"'.format( - rule_value)) + '"{0}"'.format(properties[rule_key]) + + known_operators[rule_operator] + + '"{0}"'.format(rule_value) + ) else: - matched = eval(str(properties[rule_key]) + known_operators[rule_operator] + rule_value) + matched = eval( + str(properties[rule_key]) + + known_operators[rule_operator] + + rule_value + ) else: - raise ValueError("Mismatched datatype found in the mapping rule: " + rule + - ". Datatype found in the dataset for " + rule_key + " : " - + str(type(properties[rule_key])) + ". Please review the mapping being used.") + raise ValueError( + "Mismatched datatype found in the mapping rule: " + + rule + + ". Datatype found in the dataset for " + + rule_key + + " : " + + str(type(properties[rule_key])) + + ". Please review the mapping being used." + ) return matched @@ -586,7 +665,7 @@ def extract_inventory_class_legacy(rules): for j, rule in enumerate(and_rules): if j != 0: inventory_class += "+" - inventory_class += rule.split(" ")[3].strip('\'').strip('\"') + inventory_class += rule.split(" ")[3].strip("'").strip('"') return inventory_class @staticmethod @@ -609,9 +688,13 @@ def extract_inventory_class(rules): criteria = rules[boolean] for criterion in criteria: if isinstance(criterion, dict): - inventory_class.append(Dfr3Service.extract_inventory_class(criterion)) + inventory_class.append( + Dfr3Service.extract_inventory_class(criterion) + ) elif isinstance(criterion, str): - inventory_class.append(criterion.split(" ")[3].strip('\'').strip('\"')) + inventory_class.append( + criterion.split(" ")[3].strip("'").strip('"') + ) else: raise ValueError("Cannot evaluate criterion, unsupported format!") @@ -642,9 +725,18 @@ def create_mapping(self, mapping_set: dict, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def get_mappings(self, hazard_type: str = None, inventory_type: str = None, mapping_type: str = None, - creator: str = None, space: str = None, skip: int = None, limit: int = None, - timeout=(30, 600), **kwargs): + def get_mappings( + self, + hazard_type: str = None, + inventory_type: str = None, + mapping_type: str = None, + creator: str = None, + space: str = None, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Get the set of mappings. 
Mapping is a relationship between inventories (buildings, bridges etc.) and DFR3 sets. @@ -667,19 +759,19 @@ def get_mappings(self, hazard_type: str = None, inventory_type: str = None, mapp payload = {} if hazard_type is not None: - payload['hazard'] = hazard_type + payload["hazard"] = hazard_type if inventory_type is not None: - payload['inventory'] = inventory_type + payload["inventory"] = inventory_type if mapping_type is not None: - payload['mappingType'] = mapping_type + payload["mappingType"] = mapping_type if creator is not None: - payload['creator'] = creator + payload["creator"] = creator if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) diff --git a/pyincore/fragilityservice.py b/pyincore/fragilityservice.py index 4b90ecafa..e2c862c91 100644 --- a/pyincore/fragilityservice.py +++ b/pyincore/fragilityservice.py @@ -23,17 +23,27 @@ class FragilityService(Dfr3Service): def __init__(self, client: IncoreClient): self.client = client - self.base_dfr3_url = urllib.parse.urljoin(client.service_url, 'dfr3/api/fragilities/') + self.base_dfr3_url = urllib.parse.urljoin( + client.service_url, "dfr3/api/fragilities/" + ) super(FragilityService, self).__init__(client) @forbid_offline - def get_dfr3_sets(self, demand_type: str = None, - hazard_type: str = None, inventory_type: str = None, - author: str = None, legacy_id: str = None, - creator: str = None, space: str = None, - skip: int = None, limit: int = None, - timeout=(30, 600), **kwargs): + def get_dfr3_sets( + self, + demand_type: str = None, + hazard_type: str = None, + inventory_type: str = None, + author: str = None, + legacy_id: str = None, + creator: str = None, + space: str = None, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Get the set of fragility data, curves. 
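# Usage sketch for the optional query parameters assembled in get_mappings() above
# and get_dfr3_sets() below, assuming an already-authenticated IncoreClient; all
# filter values are placeholders.
from pyincore import FragilityService, IncoreClient

client = IncoreClient()                     # assumes cached credentials / interactive login
fragility_services = FragilityService(client)

# Omitted keywords are simply left out of the request payload.
building_eq_mappings = fragility_services.get_mappings(
    hazard_type="earthquake", inventory_type="building", limit=10
)
fragility_sets = fragility_services.get_dfr3_sets(
    hazard_type="earthquake", inventory_type="building", limit=10
)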
Args: @@ -57,23 +67,23 @@ def get_dfr3_sets(self, demand_type: str = None, payload = {} if demand_type is not None: - payload['demand'] = demand_type + payload["demand"] = demand_type if hazard_type is not None: - payload['hazard'] = hazard_type + payload["hazard"] = hazard_type if inventory_type is not None: - payload['inventory'] = inventory_type + payload["inventory"] = inventory_type if author is not None: - payload['author'] = author + payload["author"] = author if legacy_id is not None: - payload['legacy_id'] = legacy_id + payload["legacy_id"] = legacy_id if creator is not None: - payload['creator'] = creator + payload["creator"] = creator if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() diff --git a/pyincore/globals.py b/pyincore/globals.py index 4836f757a..a193bbd15 100644 --- a/pyincore/globals.py +++ b/pyincore/globals.py @@ -18,22 +18,26 @@ KEYCLOAK_AUTH_PATH = "/auth/realms/In-core/protocol/openid-connect/token" KEYCLOAK_USERINFO_PATH = "/auth/realms/In-core/protocol/openid-connect/userinfo" CLIENT_ID = "react-auth" -INCORE_LDAP_TEST_USER_INFO = "{\"preferred_username\": \"incrtest\"}" +INCORE_LDAP_TEST_USER_INFO = '{"preferred_username": "incrtest"}' PYINCORE_PACKAGE_HOME = os.path.dirname(__file__) PYINCORE_ROOT_FOLDER = os.path.dirname(os.path.dirname(__file__)) -USER_HOME = os.path.expanduser('~') +USER_HOME = os.path.expanduser("~") USER_CACHE_DIR = ".incore" PYINCORE_USER_CACHE = os.path.join(USER_HOME, USER_CACHE_DIR) DATA_CACHE_FOLDER_NAME = "cache_data" DATA_CACHE_HASH_NAMES_SERVICE_JSON = "service.json" PYINCORE_USER_DATA_CACHE = os.path.join(PYINCORE_USER_CACHE, DATA_CACHE_FOLDER_NAME) -PYINCORE_SERVICE_JSON = os.path.join(PYINCORE_USER_CACHE, DATA_CACHE_HASH_NAMES_SERVICE_JSON) +PYINCORE_SERVICE_JSON = os.path.join( + PYINCORE_USER_CACHE, DATA_CACHE_HASH_NAMES_SERVICE_JSON +) -LOGGING_CONFIG = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.ini')) +LOGGING_CONFIG = os.path.abspath( + os.path.join(os.path.abspath(os.path.dirname(__file__)), "logging.ini") +) logging_config.fileConfig(LOGGING_CONFIG) -LOGGER = logging.getLogger('pyincore') +LOGGER = logging.getLogger("pyincore") TEST_DATA_DIR = os.path.join(PYINCORE_ROOT_FOLDER, "tests/data") diff --git a/pyincore/hazardservice.py b/pyincore/hazardservice.py index 0029821d5..17d50373a 100644 --- a/pyincore/hazardservice.py +++ b/pyincore/hazardservice.py @@ -29,20 +29,26 @@ class HazardService: def __init__(self, client: IncoreClient): self.client = client - self.base_earthquake_url = urljoin(client.service_url, - 'hazard/api/earthquakes/') - self.base_tornado_url = urljoin(client.service_url, - 'hazard/api/tornadoes/') - self.base_tsunami_url = urljoin(client.service_url, - 'hazard/api/tsunamis/') - self.base_hurricane_url = urljoin(client.service_url, 'hazard/api/hurricanes/') - self.base_hurricanewf_url = urljoin(client.service_url, - 'hazard/api/hurricaneWindfields/') - self.base_flood_url = urljoin(client.service_url, 'hazard/api/floods/') + self.base_earthquake_url = urljoin( + client.service_url, "hazard/api/earthquakes/" + ) + self.base_tornado_url = urljoin(client.service_url, "hazard/api/tornadoes/") + self.base_tsunami_url = urljoin(client.service_url, "hazard/api/tsunamis/") + 
self.base_hurricane_url = urljoin(client.service_url, "hazard/api/hurricanes/") + self.base_hurricanewf_url = urljoin( + client.service_url, "hazard/api/hurricaneWindfields/" + ) + self.base_flood_url = urljoin(client.service_url, "hazard/api/floods/") @forbid_offline - def get_earthquake_hazard_metadata_list(self, skip: int = None, limit: int = None, space: str = None, - timeout: tuple = (30, 600), **kwargs): + def get_earthquake_hazard_metadata_list( + self, + skip: int = None, + limit: int = None, + space: str = None, + timeout: tuple = (30, 600), + **kwargs + ): """Retrieve earthquake metadata list from hazard service. Hazard API endpoint is called. Args: @@ -59,18 +65,20 @@ def get_earthquake_hazard_metadata_list(self, skip: int = None, limit: int = Non url = self.base_earthquake_url payload = {} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_earthquake_hazard_metadata(self, hazard_id: str, timeout=(30, 600), **kwargs): + def get_earthquake_hazard_metadata( + self, hazard_id: str, timeout=(30, 600), **kwargs + ): """Retrieve earthquake metadata from hazard service. Hazard API endpoint is called. Args: @@ -88,10 +96,16 @@ def get_earthquake_hazard_metadata(self, hazard_id: str, timeout=(30, 600), **kw return return_http_response(r).json() @forbid_offline - def get_earthquake_hazard_value_set(self, hazard_id: str, demand_type: str, - demand_unit: str, bbox, - grid_spacing: float, - timeout=(30, 600), **kwargs): + def get_earthquake_hazard_value_set( + self, + hazard_id: str, + demand_type: str, + demand_unit: str, + bbox, + grid_spacing: float, + timeout=(30, 600), + **kwargs + ): """Retrieve earthquake hazard value set from the Hazard service. 
Args: @@ -112,12 +126,16 @@ def get_earthquake_hazard_value_set(self, hazard_id: str, demand_type: str, # bbox: [[minx, miny],[maxx, maxy]] # raster?demandType=0.2+SA&demandUnits=g&minX=-90.3099&minY=34.9942&maxX=-89.6231&maxY=35.4129&gridSpacing=0.01696 # bbox - url = urljoin(self.base_earthquake_url, - hazard_id + "/raster") - payload = {'demandType': demand_type, 'demandUnits': demand_unit, - 'minX': bbox[0][0], 'minY': bbox[0][1], - 'maxX': bbox[1][0], 'maxY': bbox[1][1], - 'gridSpacing': grid_spacing} + url = urljoin(self.base_earthquake_url, hazard_id + "/raster") + payload = { + "demandType": demand_type, + "demandUnits": demand_unit, + "minX": bbox[0][0], + "minY": bbox[0][1], + "maxX": bbox[1][0], + "maxY": bbox[1][1], + "gridSpacing": grid_spacing, + } r = self.client.get(url, params=payload, timeout=timeout, **kwargs) response = return_http_response(r).json() @@ -125,10 +143,10 @@ def get_earthquake_hazard_value_set(self, hazard_id: str, demand_type: str, xlist = [] ylist = [] zlist = [] - for entry in response['hazardResults']: - xlist.append(float(entry['longitude'])) - ylist.append(float(entry['latitude'])) - zlist.append(float(entry['hazardValue'])) + for entry in response["hazardResults"]: + xlist.append(float(entry["longitude"])) + ylist.append(float(entry["latitude"])) + zlist.append(float(entry["hazardValue"])) x = numpy.array(xlist) y = numpy.array(ylist) hazard_val = numpy.array(zlist) @@ -136,9 +154,15 @@ def get_earthquake_hazard_value_set(self, hazard_id: str, demand_type: str, return x, y, hazard_val @forbid_offline - def post_earthquake_hazard_values(self, hazard_id: str, payload: list, amplify_hazard=True, timeout=(30, 600), - **kwargs): - """ Retrieve bulk hurricane hazard values from the Hazard service. + def post_earthquake_hazard_values( + self, + hazard_id: str, + payload: list, + amplify_hazard=True, + timeout=(30, 600), + **kwargs + ): + """Retrieve bulk hurricane hazard values from the Hazard service. Args: hazard_id (str): ID of the Earthquake. @@ -161,14 +185,26 @@ def post_earthquake_hazard_values(self, hazard_id: str, payload: list, amplify_h """ url = urljoin(self.base_earthquake_url, hazard_id + "/values") - kwargs = {"files": {('points', json.dumps(payload)), ('amplifyHazard', json.dumps(amplify_hazard))}} + kwargs = { + "files": { + ("points", json.dumps(payload)), + ("amplifyHazard", json.dumps(amplify_hazard)), + } + } r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_liquefaction_values(self, hazard_id: str, geology_dataset_id: str, - demand_unit: str, points: List, timeout=(30, 600), **kwargs): + def get_liquefaction_values( + self, + hazard_id: str, + geology_dataset_id: str, + demand_unit: str, + points: List, + timeout=(30, 600), + **kwargs + ): """Retrieve earthquake liquefaction values. Args: @@ -183,18 +219,26 @@ def get_liquefaction_values(self, hazard_id: str, geology_dataset_id: str, obj: HTTP response. 
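# Call sketch for the raster endpoint above; the earthquake id is a placeholder,
# while the bbox, demand type/unit and grid spacing mirror the example query string
# in the method body (bbox follows the [[minX, minY], [maxX, maxY]] convention).
from pyincore import HazardService, IncoreClient

hazard_services = HazardService(IncoreClient())
x, y, hazard_values = hazard_services.get_earthquake_hazard_value_set(
    "5b902cb273c3371e1236b36b",            # placeholder earthquake id
    demand_type="0.2 SA",
    demand_unit="g",
    bbox=[[-90.3099, 34.9942], [-89.6231, 35.4129]],
    grid_spacing=0.01696,
)
# x, y and hazard_values are returned as parallel numpy arrays of longitude,
# latitude and hazard value, as built from response["hazardResults"] above.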
""" - url = urljoin(self.base_earthquake_url, - hazard_id + "/liquefaction/values") - payload = {'demandUnits': demand_unit, - 'geologyDataset': geology_dataset_id, 'point': points} + url = urljoin(self.base_earthquake_url, hazard_id + "/liquefaction/values") + payload = { + "demandUnits": demand_unit, + "geologyDataset": geology_dataset_id, + "point": points, + } r = self.client.get(url, params=payload, timeout=timeout, **kwargs) response = r.json() return response @forbid_offline - def post_liquefaction_values(self, hazard_id: str, geology_dataset_id: str, payload: list, timeout=(30, 600), - **kwargs): - """ Retrieve bulk earthquake liquefaction hazard values from the Hazard service. + def post_liquefaction_values( + self, + hazard_id: str, + geology_dataset_id: str, + payload: list, + timeout=(30, 600), + **kwargs + ): + """Retrieve bulk earthquake liquefaction hazard values from the Hazard service. Args: hazard_id (str): ID of the Tornado. @@ -206,17 +250,29 @@ def post_liquefaction_values(self, hazard_id: str, geology_dataset_id: str, payl """ url = urljoin(self.base_earthquake_url, hazard_id + "/liquefaction/values") - kwargs = {"files": {('points', json.dumps(payload)), ('geologyDataset', geology_dataset_id)}} + kwargs = { + "files": { + ("points", json.dumps(payload)), + ("geologyDataset", geology_dataset_id), + } + } r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_soil_amplification_value(self, method: str, dataset_id: str, - site_lat: float, site_long: float, - demand_type: str, hazard: float, - default_site_class: str, - timeout=(30, 600), **kwargs): + def get_soil_amplification_value( + self, + method: str, + dataset_id: str, + site_lat: float, + site_long: float, + demand_type: str, + hazard: float, + default_site_class: str, + timeout=(30, 600), + **kwargs + ): """Retrieve earthquake liquefaction values. Args: @@ -234,12 +290,16 @@ def get_soil_amplification_value(self, method: str, dataset_id: str, obj: HTTP response. """ - url = urljoin(self.base_earthquake_url, - 'soil/amplification') - payload = {"method": method, "datasetId": dataset_id, - "siteLat": site_lat, "siteLong": site_long, - "demandType": demand_type, "hazard": hazard, - "defaultSiteClass": default_site_class} + url = urljoin(self.base_earthquake_url, "soil/amplification") + payload = { + "method": method, + "datasetId": dataset_id, + "siteLat": site_lat, + "siteLong": site_long, + "demandType": demand_type, + "hazard": hazard, + "defaultSiteClass": default_site_class, + } r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -258,13 +318,15 @@ def get_supported_earthquake_models(self, timeout=(30, 600), **kwargs): obj: HTTP response. """ - url = urljoin(self.base_earthquake_url, 'models') + url = urljoin(self.base_earthquake_url, "models") r = self.client.get(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def create_earthquake(self, eq_json, file_paths: List = [], timeout=(30, 600), **kwargs): + def create_earthquake( + self, eq_json, file_paths: List = [], timeout=(30, 600), **kwargs + ): """Create earthquake on the server. POST API endpoint is called. 
Args: @@ -279,10 +341,10 @@ def create_earthquake(self, eq_json, file_paths: List = [], timeout=(30, 600), * """ url = self.base_earthquake_url - eq_data = {('earthquake', eq_json)} + eq_data = {("earthquake", eq_json)} for file_path in file_paths: - eq_data.add(('file', open(file_path, 'rb'))) + eq_data.add(("file", open(file_path, "rb"))) kwargs = {"files": eq_data} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -305,7 +367,14 @@ def delete_earthquake(self, hazard_id: str, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def search_earthquakes(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_earthquakes( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search earthquakes. Args: @@ -322,17 +391,19 @@ def search_earthquakes(self, text: str, skip: int = None, limit: int = None, tim url = urljoin(self.base_earthquake_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_earthquake_aleatory_uncertainty(self, hazard_id: str, demand_type: str, timeout=(30, 600), **kwargs): - """ Gets aleatory uncertainty for an earthquake + def get_earthquake_aleatory_uncertainty( + self, hazard_id: str, demand_type: str, timeout=(30, 600), **kwargs + ): + """Gets aleatory uncertainty for an earthquake Args: hazard_id (str): ID of the Earthquake @@ -351,8 +422,16 @@ def get_earthquake_aleatory_uncertainty(self, hazard_id: str, demand_type: str, return return_http_response(r).json() @forbid_offline - def get_earthquake_variance(self, hazard_id: str, variance_type: str, demand_type: str, - demand_unit: str, points: List, timeout=(30, 600), **kwargs): + def get_earthquake_variance( + self, + hazard_id: str, + variance_type: str, + demand_type: str, + demand_unit: str, + points: List, + timeout=(30, 600), + **kwargs + ): """Gets total and epistemic variance for a model based earthquake Args: @@ -368,15 +447,27 @@ def get_earthquake_variance(self, hazard_id: str, variance_type: str, demand_typ obj: HTTP POST Response with variance value. """ - url = urljoin(self.base_earthquake_url, hazard_id + "/variance/" + variance_type) - payload = {"demandType": demand_type, "demandUnits": demand_unit, 'point': points} + url = urljoin( + self.base_earthquake_url, hazard_id + "/variance/" + variance_type + ) + payload = { + "demandType": demand_type, + "demandUnits": demand_unit, + "point": points, + } r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_tornado_hazard_metadata_list(self, skip: int = None, limit: int = None, space: str = None, - timeout=(30, 600), **kwargs): + def get_tornado_hazard_metadata_list( + self, + skip: int = None, + limit: int = None, + space: str = None, + timeout=(30, 600), + **kwargs + ): """Retrieve tornado metadata list from hazard service. Hazard API endpoint is called. 
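# Sketch of the multipart create call above: eq_json is the earthquake definition as a
# JSON string, and each entry in file_paths is attached as a "file" part. The file
# names here are placeholders.
from pyincore import HazardService, IncoreClient

hazard_services = HazardService(IncoreClient())
with open("eq-dataset.json", "r") as definition_file:    # placeholder definition file
    eq_json = definition_file.read()

created = hazard_services.create_earthquake(
    eq_json, file_paths=["eq-dataset-SA.tif"]             # placeholder raster attachment
)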
Args: @@ -393,11 +484,11 @@ def get_tornado_hazard_metadata_list(self, skip: int = None, limit: int = None, url = self.base_tornado_url payload = {} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -422,8 +513,10 @@ def get_tornado_hazard_metadata(self, hazard_id: str, timeout=(30, 600), **kwarg return return_http_response(r).json() @forbid_offline - def post_tornado_hazard_values(self, hazard_id: str, payload: list, seed=None, timeout=(30, 600), **kwargs): - """ Retrieve bulk tornado hazard values from the Hazard service. + def post_tornado_hazard_values( + self, hazard_id: str, payload: list, seed=None, timeout=(30, 600), **kwargs + ): + """Retrieve bulk tornado hazard values from the Hazard service. Args: hazard_id (str): ID of the Tornado. @@ -436,16 +529,21 @@ def post_tornado_hazard_values(self, hazard_id: str, payload: list, seed=None, t url = urljoin(self.base_tornado_url, hazard_id + "/values") if seed is not None: - kwargs["files"] = {('points', json.dumps(payload)), ('seed', json.dumps(seed))} + kwargs["files"] = { + ("points", json.dumps(payload)), + ("seed", json.dumps(seed)), + } else: - kwargs["files"] = {('points', json.dumps(payload))} + kwargs["files"] = {("points", json.dumps(payload))} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def create_tornado_scenario(self, tornado_json, file_paths: List = [], timeout=(30, 600), **kwargs): + def create_tornado_scenario( + self, tornado_json, file_paths: List = [], timeout=(30, 600), **kwargs + ): """Create tornado on the server. POST API endpoint is called. Args: @@ -460,10 +558,10 @@ def create_tornado_scenario(self, tornado_json, file_paths: List = [], timeout=( """ url = self.base_tornado_url - tornado_data = {('tornado', tornado_json)} + tornado_data = {("tornado", tornado_json)} for file_path in file_paths: - tornado_data.add(('file', open(file_path, 'rb'))) + tornado_data.add(("file", open(file_path, "rb"))) kwargs = {"files": tornado_data} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -486,7 +584,14 @@ def delete_tornado(self, hazard_id: str, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def search_tornadoes(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_tornadoes( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search tornadoes. Args: @@ -503,17 +608,23 @@ def search_tornadoes(self, text: str, skip: int = None, limit: int = None, timeo url = urljoin(self.base_tornado_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_tsunami_hazard_metadata_list(self, skip: int = None, limit: int = None, space: str = None, - timeout=(30, 600), **kwargs): + def get_tsunami_hazard_metadata_list( + self, + skip: int = None, + limit: int = None, + space: str = None, + timeout=(30, 600), + **kwargs + ): """Retrieve tsunami metadata list from hazard service. Hazard API endpoint is called. 
Args: @@ -530,11 +641,11 @@ def get_tsunami_hazard_metadata_list(self, skip: int = None, limit: int = None, url = self.base_tsunami_url payload = {} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -559,8 +670,10 @@ def get_tsunami_hazard_metadata(self, hazard_id: str, timeout=(30, 600), **kwarg return return_http_response(r).json() @forbid_offline - def post_tsunami_hazard_values(self, hazard_id: str, payload: list, timeout=(30, 600), **kwargs): - """ Retrieve bulk tsunami hazard values from the Hazard service. + def post_tsunami_hazard_values( + self, hazard_id: str, payload: list, timeout=(30, 600), **kwargs + ): + """Retrieve bulk tsunami hazard values from the Hazard service. Args: hazard_id (str): ID of the Tsunami. @@ -572,13 +685,15 @@ def post_tsunami_hazard_values(self, hazard_id: str, payload: list, timeout=(30, """ url = urljoin(self.base_tsunami_url, hazard_id + "/values") - kwargs = {"files": {('points', json.dumps(payload))}} + kwargs = {"files": {("points", json.dumps(payload))}} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def create_tsunami_hazard(self, tsunami_json, file_paths: List, timeout=(30, 600), **kwargs): + def create_tsunami_hazard( + self, tsunami_json, file_paths: List, timeout=(30, 600), **kwargs + ): """Create tsunami on the server. POST API endpoint is called. Args: @@ -593,10 +708,10 @@ def create_tsunami_hazard(self, tsunami_json, file_paths: List, timeout=(30, 600 """ url = self.base_tsunami_url - tsunami_data = {('tsunami', tsunami_json)} + tsunami_data = {("tsunami", tsunami_json)} for file_path in file_paths: - tsunami_data.add(('file', open(file_path, 'rb'))) + tsunami_data.add(("file", open(file_path, "rb"))) kwargs = {"files": tsunami_data} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -619,7 +734,14 @@ def delete_tsunami(self, hazard_id: str, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def search_tsunamis(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_tsunamis( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search tsunamis. Args: @@ -636,16 +758,18 @@ def search_tsunamis(self, text: str, skip: int = None, limit: int = None, timeou url = urljoin(self.base_tsunami_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def create_hurricane(self, hurricane_json, file_paths: List, timeout=(30, 600), **kwargs): + def create_hurricane( + self, hurricane_json, file_paths: List, timeout=(30, 600), **kwargs + ): """Create hurricanes on the server. POST API endpoint is called. 
Args: @@ -659,18 +783,24 @@ def create_hurricane(self, hurricane_json, file_paths: List, timeout=(30, 600), """ url = self.base_hurricane_url - hurricane_data = {('hurricane', hurricane_json)} + hurricane_data = {("hurricane", hurricane_json)} for file_path in file_paths: - hurricane_data.add(('file', open(file_path, 'rb'))) + hurricane_data.add(("file", open(file_path, "rb"))) kwargs = {"files": hurricane_data} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_hurricane_metadata_list(self, skip: int = None, limit: int = None, space: str = None, - timeout=(30, 600), **kwargs): + def get_hurricane_metadata_list( + self, + skip: int = None, + limit: int = None, + space: str = None, + timeout=(30, 600), + **kwargs + ): """Retrieve hurricane metadata list from hazard service. Hazard API endpoint is called. Args: @@ -688,11 +818,11 @@ def get_hurricane_metadata_list(self, skip: int = None, limit: int = None, space payload = {} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -717,8 +847,10 @@ def get_hurricane_metadata(self, hazard_id, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def post_hurricane_hazard_values(self, hazard_id: str, payload: list, timeout=(30, 600), **kwargs): - """ Retrieve bulk hurricane hazard values from the Hazard service. + def post_hurricane_hazard_values( + self, hazard_id: str, payload: list, timeout=(30, 600), **kwargs + ): + """Retrieve bulk hurricane hazard values from the Hazard service. Args: hazard_id (str): ID of the Hurricane. @@ -728,7 +860,7 @@ def post_hurricane_hazard_values(self, hazard_id: str, payload: list, timeout=(3 """ url = urljoin(self.base_hurricane_url, hazard_id + "/values") - kwargs = {"files": {('points', json.dumps(payload))}} + kwargs = {"files": {("points", json.dumps(payload))}} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -751,7 +883,14 @@ def delete_hurricane(self, hazard_id: str, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def search_hurricanes(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_hurricanes( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search hurricanes. 
Args: @@ -767,9 +906,9 @@ def search_hurricanes(self, text: str, skip: int = None, limit: int = None, time url = urljoin(self.base_hurricane_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -787,18 +926,24 @@ def create_flood(self, flood_json, file_paths: List, timeout=(30, 600), **kwargs """ url = self.base_flood_url - flood_data = {('flood', flood_json)} + flood_data = {("flood", flood_json)} for file_path in file_paths: - flood_data.add(('file', open(file_path, 'rb'))) + flood_data.add(("file", open(file_path, "rb"))) kwargs = {"files": flood_data} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_flood_metadata_list(self, skip: int = None, limit: int = None, space: str = None, timeout=(30, 600), - **kwargs): + def get_flood_metadata_list( + self, + skip: int = None, + limit: int = None, + space: str = None, + timeout=(30, 600), + **kwargs + ): """Retrieve flood metadata list from hazard service. Hazard API endpoint is called. Args: @@ -816,11 +961,11 @@ def get_flood_metadata_list(self, skip: int = None, limit: int = None, space: st payload = {} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -844,8 +989,10 @@ def get_flood_metadata(self, hazard_id, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def post_flood_hazard_values(self, hazard_id: str, payload: list, timeout=(30, 600), **kwargs): - """ Retrieve bulk flood hazard values from the Hazard service. + def post_flood_hazard_values( + self, hazard_id: str, payload: list, timeout=(30, 600), **kwargs + ): + """Retrieve bulk flood hazard values from the Hazard service. Args: hazard_id (str): ID of the Flood. @@ -857,7 +1004,7 @@ def post_flood_hazard_values(self, hazard_id: str, payload: list, timeout=(30, 6 """ url = urljoin(self.base_flood_url, hazard_id + "/values") - kwargs = {"files": {('points', json.dumps(payload))}} + kwargs = {"files": {("points", json.dumps(payload))}} r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -880,7 +1027,14 @@ def delete_flood(self, hazard_id: str, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def search_floods(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_floods( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search floods. 
Args: @@ -897,9 +1051,9 @@ def search_floods(self, text: str, skip: int = None, limit: int = None, timeout= url = urljoin(self.base_flood_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -919,7 +1073,7 @@ def create_hurricane_windfield(self, hurr_wf_inputs, timeout=(30, 10800), **kwar """ url = self.base_hurricanewf_url - headers = {'Content-type': 'application/json'} + headers = {"Content-type": "application/json"} new_headers = {**self.client.session.headers, **headers} kwargs = {"headers": new_headers} r = self.client.post(url, data=hurr_wf_inputs, timeout=timeout, **kwargs) @@ -927,8 +1081,16 @@ def create_hurricane_windfield(self, hurr_wf_inputs, timeout=(30, 10800), **kwar return return_http_response(r).json() @forbid_offline - def get_hurricanewf_metadata_list(self, coast: str = None, category: int = None, skip: int = None, - limit: int = None, space: str = None, timeout=(30, 600), **kwargs): + def get_hurricanewf_metadata_list( + self, + coast: str = None, + category: int = None, + skip: int = None, + limit: int = None, + space: str = None, + timeout=(30, 600), + **kwargs + ): """Retrieve hurricane metadata list from hazard service. Hazard API endpoint is called. Args: @@ -948,15 +1110,15 @@ def get_hurricanewf_metadata_list(self, coast: str = None, category: int = None, payload = {} if coast is not None: - payload['coast'] = coast + payload["coast"] = coast if category is not None: - payload['category'] = category + payload["category"] = category if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -981,10 +1143,16 @@ def get_hurricanewf_metadata(self, hazard_id, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def post_hurricanewf_hazard_values(self, hazard_id: str, payload: list, elevation: int, roughness: float, - timeout=(30, 600), **kwargs): - - """ Retrieve bulk hurricane windfield hazard values from the Hazard service. + def post_hurricanewf_hazard_values( + self, + hazard_id: str, + payload: list, + elevation: int, + roughness: float, + timeout=(30, 600), + **kwargs + ): + """Retrieve bulk hurricane windfield hazard values from the Hazard service. Args: hazard_id (str): ID of the hurricanewf. 
@@ -998,17 +1166,30 @@ def post_hurricanewf_hazard_values(self, hazard_id: str, payload: list, elevatio """ url = urljoin(self.base_hurricanewf_url, hazard_id + "/values") - kwargs["files"] = {('points', json.dumps(payload)), - ('elevation', json.dumps(elevation)), - ('roughness', json.dumps(roughness))} + kwargs["files"] = { + ("points", json.dumps(payload)), + ("elevation", json.dumps(elevation)), + ("roughness", json.dumps(roughness)), + } r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @forbid_offline - def get_hurricanewf_json(self, coast: str, category: int, trans_d: float, land_fall_loc: int, demand_type: str, - demand_unit: str, resolution: int = 6, grid_points: int = 80, - rf_method: str = "circular", timeout=(30, 600), **kwargs): + def get_hurricanewf_json( + self, + coast: str, + category: int, + trans_d: float, + land_fall_loc: int, + demand_type: str, + demand_unit: str, + resolution: int = 6, + grid_points: int = 80, + rf_method: str = "circular", + timeout=(30, 600), + **kwargs + ): """Retrieve hurricane wind field values from the Hazard service. Args: @@ -1030,11 +1211,16 @@ def get_hurricanewf_json(self, coast: str, category: int, trans_d: float, land_f """ # land_fall_loc: IncorePoint e.g.'28.01, -83.85' url = urljoin(self.base_hurricanewf_url, "json/" + coast) - payload = {"category": category, "TransD": trans_d, - "LandfallLoc": land_fall_loc, - "demandType": demand_type, "demandUnits": demand_unit, - "resolution": resolution, "gridPoints": grid_points, - "reductionType": rf_method} + payload = { + "category": category, + "TransD": trans_d, + "LandfallLoc": land_fall_loc, + "demandType": demand_type, + "demandUnits": demand_unit, + "resolution": resolution, + "gridPoints": grid_points, + "reductionType": rf_method, + } r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -1057,7 +1243,14 @@ def delete_hurricanewf(self, hazard_id: str, timeout=(30, 600), **kwargs): return return_http_response(r).json() @forbid_offline - def search_hurricanewf(self, text: str, skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def search_hurricanewf( + self, + text: str, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Search hurricanes. 
Args: @@ -1074,9 +1267,9 @@ def search_hurricanewf(self, text: str, skip: int = None, limit: int = None, tim url = urljoin(self.base_hurricanewf_url, "search") payload = {"text": text} if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -1089,17 +1282,17 @@ def get_allowed_demands(self, hazard_type, timeout=(30, 600), **kwargs): else: raise ValueError("Unknown hazard type!") else: - if hazard_type == 'earthquake': + if hazard_type == "earthquake": url = urljoin(self.base_earthquake_url, "demands") - elif hazard_type == 'tornado': + elif hazard_type == "tornado": url = urljoin(self.base_tornado_url, "demands") - elif hazard_type == 'tsunami': + elif hazard_type == "tsunami": url = urljoin(self.base_tsunami_url, "demands") - elif hazard_type == 'hurricane': + elif hazard_type == "hurricane": url = urljoin(self.base_hurricane_url, "demands") - elif hazard_type == 'hurricaneWindfield': + elif hazard_type == "hurricaneWindfield": url = urljoin(self.base_hurricanewf_url, "demands") - elif hazard_type == 'flood': + elif hazard_type == "flood": url = urljoin(self.base_flood_url, "demands") else: raise ValueError("Unknown hazard type!") @@ -1111,297 +1304,178 @@ def get_allowed_demands(self, hazard_type, timeout=(30, 600), **kwargs): class HazardConstant: """HazardConstant class to hold all the constants related to hazard.""" + DEFAULT_ALLOWED_DEMANDS = { "earthquake": [ { "demand_type": "pga", - "demand_unit": [ - "g", - "in/sec^2", - "m/sec^2" - ], - "description": "Peak ground acceleration" + "demand_unit": ["g", "in/sec^2", "m/sec^2"], + "description": "Peak ground acceleration", }, { "demand_type": "pgv", - "demand_unit": [ - "in/s", - "cm/s" - ], - "description": "Peak ground velocity" + "demand_unit": ["in/s", "cm/s"], + "description": "Peak ground velocity", }, { "demand_type": "pgd", - "demand_unit": [ - "in", - "ft", - "m" - ], - "description": "Peak ground displacement" + "demand_unit": ["in", "ft", "m"], + "description": "Peak ground displacement", }, { "demand_type": "sa", - "demand_unit": [ - "g", - "in/sec^2", - "m/sec^2" - ], - "description": "Spectral acceleration" + "demand_unit": ["g", "in/sec^2", "m/sec^2"], + "description": "Spectral acceleration", }, { "demand_type": "sd", - "demand_unit": [ - "in", - "ft", - "m", - "cm" - ], - "description": "Spectral displacement" + "demand_unit": ["in", "ft", "m", "cm"], + "description": "Spectral displacement", }, { "demand_type": "sv", - "demand_unit": [ - "cm/s", - "in/s" - ], - "description": "Spectral Velocity" - } + "demand_unit": ["cm/s", "in/s"], + "description": "Spectral Velocity", + }, ], "tsunami": [ { "demand_type": "Hmax", - "demand_unit": [ - "ft", - "m" - ], + "demand_unit": ["ft", "m"], "description": "Onshore: maximum tsunami height above local ground level overland. 
Offshore: " - "maximum tsunami height taken crest to trough" + "maximum tsunami height taken crest to trough", }, { "demand_type": "Vmax", - "demand_unit": [ - "mph", - "kph", - "ft/sec", - "m/sec" - ], - "description": "Maximum near-coast or overland water velocity due to tsunami" + "demand_unit": ["mph", "kph", "ft/sec", "m/sec"], + "description": "Maximum near-coast or overland water velocity due to tsunami", }, { "demand_type": "Mmax", - "demand_unit": [ - "m^3/s^2", - "ft^3/s^2" - ], - "description": "" - } + "demand_unit": ["m^3/s^2", "ft^3/s^2"], + "description": "", + }, ], "flood": [ { "demand_type": "inundationDepth", - "demand_unit": [ - "ft", - "m" - ], - "description": "Depth of the water surface relative to local ground level" + "demand_unit": ["ft", "m"], + "description": "Depth of the water surface relative to local ground level", }, { "demand_type": "waterSurfaceElevation", - "demand_unit": [ - "ft", - "m" - ], - "description": "Elevation of the water surface above reference datum (e.g. NAVD88, mean sea level)" - } + "demand_unit": ["ft", "m"], + "description": "Elevation of the water surface above reference datum (e.g. NAVD88, mean sea level)", + }, ], "tornado": [ { "demand_type": "wind", - "demand_unit": [ - "mps", - "mph" - ], - "description": "Defined as a wind velocity below" + "demand_unit": ["mps", "mph"], + "description": "Defined as a wind velocity below", } ], "hurricaneWindfield": [ { "demand_type": "3s", - "demand_unit": [ - "kph", - "mph", - "kt" - ], - "description": "Typically, reported at 10 m above local ground or sea level" + "demand_unit": ["kph", "mph", "kt"], + "description": "Typically, reported at 10 m above local ground or sea level", }, { "demand_type": "60s", - "demand_unit": [ - "kph", - "mph", - "kt" - ], - "description": "Typically, reported at 10 m above local ground or sea level" - } + "demand_unit": ["kph", "mph", "kt"], + "description": "Typically, reported at 10 m above local ground or sea level", + }, ], "hurricane": [ { "demand_type": "waveHeight", - "demand_unit": [ - "ft", - "m", - "in", - "cm" - ], + "demand_unit": ["ft", "m", "in", "cm"], "description": " Height of wave measured crest to trough. Characteristic wave height is typically the " - "average of one third highest waves for a random sea." + "average of one third highest waves for a random sea.", }, { "demand_type": "surgeLevel", - "demand_unit": [ - "ft", - "m", - "in", - "cm" - ], - "description": "Elevation of the water surface above reference datum (e.g. NAVD88, mean sea level)" + "demand_unit": ["ft", "m", "in", "cm"], + "description": "Elevation of the water surface above reference datum (e.g. NAVD88, mean sea level)", }, { "demand_type": "inundationDuration", - "demand_unit": [ - "hr", - "min", - "s" - ], - "description": "Time that inundation depth exceeds a critical threshold for a given storm" + "demand_unit": ["hr", "min", "s"], + "description": "Time that inundation depth exceeds a critical threshold for a given storm", }, { "demand_type": "inundationDepth", - "demand_unit": [ - "ft", - "m", - "in", - "cm" - ], - "description": "Depth of the water surface relative to local ground level" + "demand_unit": ["ft", "m", "in", "cm"], + "description": "Depth of the water surface relative to local ground level", }, { "demand_type": "wavePeriod", - "demand_unit": [ - "s", - "hr", - "min" - ], + "demand_unit": ["s", "hr", "min"], "description": "Time between wave crests. 
Characteristic wave period is typically the inverse of the " - "spectral peak frequency for a random sea" + "spectral peak frequency for a random sea", }, { "demand_type": "waveDirection", - "demand_unit": [ - "deg", - "rad" - ], - "description": "Principle wave direction associated with the characteristic wave height and period" + "demand_unit": ["deg", "rad"], + "description": "Principle wave direction associated with the characteristic wave height and period", }, { "demand_type": "waterVelocity", - "demand_unit": [ - "ft/s", - "m/s", - "in/s" - ], - "description": "" + "demand_unit": ["ft/s", "m/s", "in/s"], + "description": "", }, { "demand_type": "windVelocity", - "demand_unit": [ - "ft/s", - "m/s", - "m/sec", - "in/s" - ], - "description": "" - } + "demand_unit": ["ft/s", "m/s", "m/sec", "in/s"], + "description": "", + }, ], "earthquake+tsunami": [ { "demand_type": "pga", - "demand_unit": [ - "g", - "in/sec^2", - "m/sec^2" - ], - "description": "Peak ground acceleration" + "demand_unit": ["g", "in/sec^2", "m/sec^2"], + "description": "Peak ground acceleration", }, { "demand_type": "pgv", - "demand_unit": [ - "in/s", - "cm/s" - ], - "description": "Peak ground velocity" + "demand_unit": ["in/s", "cm/s"], + "description": "Peak ground velocity", }, { "demand_type": "pgd", - "demand_unit": [ - "in", - "ft", - "m" - ], - "description": "Peak ground displacement" + "demand_unit": ["in", "ft", "m"], + "description": "Peak ground displacement", }, { "demand_type": "sa", - "demand_unit": [ - "g", - "in/sec^2", - "m/sec^2" - ], - "description": "Spectral acceleration" + "demand_unit": ["g", "in/sec^2", "m/sec^2"], + "description": "Spectral acceleration", }, { "demand_type": "sd", - "demand_unit": [ - "in", - "ft", - "m", - "cm" - ], - "description": "Spectral displacement" + "demand_unit": ["in", "ft", "m", "cm"], + "description": "Spectral displacement", }, { "demand_type": "sv", - "demand_unit": [ - "cm/s", - "in/s" - ], - "description": "Spectral Velocity" + "demand_unit": ["cm/s", "in/s"], + "description": "Spectral Velocity", }, { "demand_type": "Hmax", - "demand_unit": [ - "ft", - "m" - ], - "description": "Onshore: maximum tsunami height above local ground level overland. Offshore: maximum tsunami height taken crest to trough" + "demand_unit": ["ft", "m"], + "description": "Onshore: maximum tsunami height above local ground level overland. 
Offshore: maximum tsunami height taken crest to trough", }, { "demand_type": "Vmax", - "demand_unit": [ - "mph", - "kph", - "ft/sec", - "m/sec" - ], - "description": "Maximum near-coast or overland water velocity due to tsunami" + "demand_unit": ["mph", "kph", "ft/sec", "m/sec"], + "description": "Maximum near-coast or overland water velocity due to tsunami", }, { "demand_type": "Mmax", - "demand_unit": [ - "m^3/s^2", - "ft^3/s^2" - ], - "description": "" - } - ] + "demand_unit": ["m^3/s^2", "ft^3/s^2"], + "description": "", + }, + ], } diff --git a/pyincore/models/dfr3curve.py b/pyincore/models/dfr3curve.py index a42b61637..a10557b03 100644 --- a/pyincore/models/dfr3curve.py +++ b/pyincore/models/dfr3curve.py @@ -18,14 +18,16 @@ class DFR3Curve: """A class to represent a DFR3 curve.""" def __init__(self, curve_parameters): - self.rules = curve_parameters['rules'] - self.return_type = curve_parameters['returnType'] + self.rules = curve_parameters["rules"] + self.return_type = curve_parameters["returnType"] for rule in self.rules: rule["expression"] = rule["expression"].replace("^", "**") - self.description = curve_parameters['description'] + self.description = curve_parameters["description"] - def solve_curve_expression(self, hazard_values: dict, curve_parameters: dict, **kwargs): + def solve_curve_expression( + self, hazard_values: dict, curve_parameters: dict, **kwargs + ): """Evaluates expression of the curve. Args: @@ -46,7 +48,9 @@ def solve_curve_expression(self, hazard_values: dict, curve_parameters: dict, ** for parameter in curve_parameters: # if default exists, use default if "expression" in parameter and parameter["expression"] is not None: - parameters[parameter["name"]] = evaluateexpression.evaluate(parameter["expression"], parameters) + parameters[parameter["name"]] = evaluateexpression.evaluate( + parameter["expression"], parameters + ) else: parameters[parameter["name"]] = None @@ -78,7 +82,9 @@ def solve_curve_expression(self, hazard_values: dict, curve_parameters: dict, ** eval_result = None for rule in self.rules: if "condition" not in rule or rule["condition"] is None: - eval_result = evaluateexpression.evaluate(rule["expression"], parameters) + eval_result = evaluateexpression.evaluate( + rule["expression"], parameters + ) else: conditions_met = [] for condition in rule["condition"]: @@ -89,10 +95,14 @@ def solve_curve_expression(self, hazard_values: dict, curve_parameters: dict, ** conditions_met.append(False) break if all(conditions_met): - eval_result = evaluateexpression.evaluate(rule["expression"], parameters) + eval_result = evaluateexpression.evaluate( + rule["expression"], parameters + ) break - if isinstance(eval_result, numpy.ndarray) or isinstance(eval_result, list): # for repair curves etc. + if isinstance(eval_result, numpy.ndarray) or isinstance( + eval_result, list + ): # for repair curves etc. return eval_result else: # for fragility curves the return is a float if eval_result is None: @@ -100,14 +110,25 @@ def solve_curve_expression(self, hazard_values: dict, curve_parameters: dict, ** if math.isnan(eval_result): error_msg = "Unable to calculate limit state." 
if self.rules: - error_msg += " Evaluation failed for expression: \n" + json.dumps(self.rules) + "\n" - error_msg += "Provided Inputs: \n" + json.dumps(hazard_values) + "\n" + json.dumps(kwargs) + error_msg += ( + " Evaluation failed for expression: \n" + + json.dumps(self.rules) + + "\n" + ) + error_msg += ( + "Provided Inputs: \n" + + json.dumps(hazard_values) + + "\n" + + json.dumps(kwargs) + ) raise ValueError(error_msg) return eval_result - def solve_curve_for_inverse(self, hazard_values: dict, curve_parameters: dict, **kwargs): + def solve_curve_for_inverse( + self, hazard_values: dict, curve_parameters: dict, **kwargs + ): """Evaluates expression of the curve by calculating its inverse. Example, ppf for cdf. Only supports cdf() for now. More inverse methods may be added in the future. @@ -123,14 +144,20 @@ def solve_curve_for_inverse(self, hazard_values: dict, curve_parameters: dict, * inverse_rules = [] actual_rules = self.rules for rule in self.rules: - if ".cdf(" in rule['expression']: - new_exp = rule['expression'].replace(".cdf(", ".ppf(") - inverse_rules.append({'condition': rule['condition'], 'expression': new_exp}) + if ".cdf(" in rule["expression"]: + new_exp = rule["expression"].replace(".cdf(", ".ppf(") + inverse_rules.append( + {"condition": rule["condition"], "expression": new_exp} + ) else: - raise KeyError("Inverse does not exist for the provided expression. exiting..") + raise KeyError( + "Inverse does not exist for the provided expression. exiting.." + ) self.rules = inverse_rules - inverse = self.solve_curve_expression(hazard_values=hazard_values, curve_parameters=curve_parameters, **kwargs) + inverse = self.solve_curve_expression( + hazard_values=hazard_values, curve_parameters=curve_parameters, **kwargs + ) self.rules = actual_rules # swap the original rules back so further calculations are not affected return inverse @@ -151,16 +178,30 @@ def get_building_period(self, curve_parameters, **kwargs): num_stories = 1.0 for parameter in curve_parameters: # if default exists, use default - if parameter["name"] == "num_stories" and "expression" in parameter and parameter["expression"] is not None: + if ( + parameter["name"] == "num_stories" + and "expression" in parameter + and parameter["expression"] is not None + ): num_stories = evaluateexpression.evaluate(parameter["expression"]) # if exist in building inventory for kwargs_key, kwargs_value in kwargs.items(): - if kwargs_key.lower() == "num_stories" and kwargs_value is not None and kwargs_value > 0: + if ( + kwargs_key.lower() == "num_stories" + and kwargs_value is not None + and kwargs_value > 0 + ): num_stories = kwargs_value # calculate period - if parameter["name"] == "period" and "expression" in parameter and parameter["expression"] is not None: - period = evaluateexpression.evaluate(parameter["expression"], {"num_stories": num_stories}) + if ( + parameter["name"] == "period" + and "expression" in parameter + and parameter["expression"] is not None + ): + period = evaluateexpression.evaluate( + parameter["expression"], {"num_stories": num_stories} + ) return period diff --git a/pyincore/models/fragilitycurveset.py b/pyincore/models/fragilitycurveset.py index 511b03660..38726a864 100644 --- a/pyincore/models/fragilitycurveset.py +++ b/pyincore/models/fragilitycurveset.py @@ -24,27 +24,31 @@ class FragilityCurveSet: def __init__(self, metadata): self.id = metadata["id"] if "id" in metadata else "" - self.description = metadata['description'] if "description" in metadata else "" - self.authors = ", 
".join(metadata['authors']) if "authors" in metadata else "" - self.paper_reference = str(metadata["paperReference"]) if "paperReference" in metadata else "" + self.description = metadata["description"] if "description" in metadata else "" + self.authors = ", ".join(metadata["authors"]) if "authors" in metadata else "" + self.paper_reference = ( + str(metadata["paperReference"]) if "paperReference" in metadata else "" + ) self.creator = metadata["creator"] if "creator" in metadata else "" self.demand_types = metadata["demandTypes"] self.demand_units = metadata["demandUnits"] self.result_type = metadata["resultType"] self.result_unit = metadata["resultUnit"] if "resultUnit" in metadata else "" - self.hazard_type = metadata['hazardType'] - self.inventory_type = metadata['inventoryType'] + self.hazard_type = metadata["hazardType"] + self.inventory_type = metadata["inventoryType"] self.curve_parameters = {} self.fragility_curves = [] - if 'curveParameters' in metadata.keys(): + if "curveParameters" in metadata.keys(): self.curve_parameters = metadata["curveParameters"] - if 'fragilityCurves' in metadata.keys(): + if "fragilityCurves" in metadata.keys(): for fragility_curve in metadata["fragilityCurves"]: self.fragility_curves.append(DFR3Curve(fragility_curve)) else: - raise ValueError("Cannot create dfr3 curve object. Missing key field: fragilityCurves.") + raise ValueError( + "Cannot create dfr3 curve object. Missing key field: fragilityCurves." + ) @classmethod def from_json_str(cls, json_str): @@ -75,9 +79,9 @@ def from_json_file(cls, file_path): return instance - def calculate_limit_state(self, hazard_values: dict = {}, - inventory_type: str = "building", - **kwargs): + def calculate_limit_state( + self, hazard_values: dict = {}, inventory_type: str = "building", **kwargs + ): """WIP computation of limit state probabilities accounting for custom expressions. Args: @@ -95,17 +99,23 @@ def calculate_limit_state(self, hazard_values: dict = {}, if len(self.fragility_curves) <= 4: for fragility_curve in self.fragility_curves: - probability = fragility_curve.solve_curve_expression(hazard_values, - self.curve_parameters, - **kwargs) - output[limit_state[index]] = AnalysisUtil.update_precision(probability) # round to default digits + probability = fragility_curve.solve_curve_expression( + hazard_values, self.curve_parameters, **kwargs + ) + output[limit_state[index]] = AnalysisUtil.update_precision( + probability + ) # round to default digits index += 1 else: - raise ValueError("We can only handle fragility curves with less than 4 limit states.") + raise ValueError( + "We can only handle fragility curves with less than 4 limit states." 
+ ) return output - def calculate_damage_interval(self, damage, hazard_type="earthquake", inventory_type: str = "building"): + def calculate_damage_interval( + self, damage, hazard_type="earthquake", inventory_type: str = "building" + ): """ Args: @@ -152,11 +162,22 @@ def calculate_damage_interval(self, damage, hazard_type="earthquake", inventory_ ("hurricane", "bridge", 4): FragilityCurveSet._4ls_to_5ds, } - if not (hazard_type, inventory_type, len(self.fragility_curves)) in ls_ds_dspatcher.keys(): - raise ValueError(inventory_type + " " + hazard_type + " damage analysis do not support " + - str(len(self.fragility_curves)) + " limit state") - - return ls_ds_dspatcher[(hazard_type, inventory_type, len(self.fragility_curves))](damage) + if ( + not (hazard_type, inventory_type, len(self.fragility_curves)) + in ls_ds_dspatcher.keys() + ): + raise ValueError( + inventory_type + + " " + + hazard_type + + " damage analysis do not support " + + str(len(self.fragility_curves)) + + " limit state" + ) + + return ls_ds_dspatcher[ + (hazard_type, inventory_type, len(self.fragility_curves)) + ](damage) def construct_expression_args_from_inventory(self, inventory_unit: dict): """ @@ -170,13 +191,16 @@ def construct_expression_args_from_inventory(self, inventory_unit: dict): """ kwargs_dict = {} for parameters in self.curve_parameters: - - if parameters['name'] == "age_group" and ('age_group' not in inventory_unit['properties'] or - inventory_unit['properties']['age_group'] == ""): - if 'year_built' in inventory_unit['properties'].keys() and inventory_unit['properties']['year_built'] \ - is not None: + if parameters["name"] == "age_group" and ( + "age_group" not in inventory_unit["properties"] + or inventory_unit["properties"]["age_group"] == "" + ): + if ( + "year_built" in inventory_unit["properties"].keys() + and inventory_unit["properties"]["year_built"] is not None + ): try: - yr_built = int(inventory_unit['properties']['year_built']) + yr_built = int(inventory_unit["properties"]["year_built"]) except ValueError: print("Non integer value found in year_built") raise @@ -190,12 +214,16 @@ def construct_expression_args_from_inventory(self, inventory_unit: dict): elif 1995 <= yr_built < 2008: age_group = 4 - kwargs_dict['age_group'] = age_group + kwargs_dict["age_group"] = age_group - if parameters['name'] in inventory_unit['properties'] and \ - inventory_unit['properties'][parameters['name']] is not None and \ - inventory_unit['properties'][parameters['name']] != "": - kwargs_dict[parameters['name']] = inventory_unit['properties'][parameters['name']] + if ( + parameters["name"] in inventory_unit["properties"] + and inventory_unit["properties"][parameters["name"]] is not None + and inventory_unit["properties"][parameters["name"]] != "" + ): + kwargs_dict[parameters["name"]] = inventory_unit["properties"][ + parameters["name"] + ] return kwargs_dict @staticmethod @@ -210,23 +238,27 @@ def _3ls_to_4ds(limit_states): """ limit_states = AnalysisUtil.float_dict_to_decimal(limit_states) - damage_states = AnalysisUtil.float_dict_to_decimal({"DS_0": 0.0, "DS_1": 0.0, "DS_2": 0.0, "DS_3": 0.0}) + damage_states = AnalysisUtil.float_dict_to_decimal( + {"DS_0": 0.0, "DS_1": 0.0, "DS_2": 0.0, "DS_3": 0.0} + ) small_overlap = FragilityCurveSet.is_there_small_overlap(limit_states) if small_overlap: - ds_overlap = FragilityCurveSet.adjust_for_small_overlap(small_overlap, limit_states, damage_states) + ds_overlap = FragilityCurveSet.adjust_for_small_overlap( + small_overlap, limit_states, damage_states + ) - 
damage_states['DS_0'] = ds_overlap[0] - damage_states['DS_1'] = ds_overlap[1] - damage_states['DS_2'] = ds_overlap[2] - damage_states['DS_3'] = ds_overlap[3] + damage_states["DS_0"] = ds_overlap[0] + damage_states["DS_1"] = ds_overlap[1] + damage_states["DS_2"] = ds_overlap[2] + damage_states["DS_3"] = ds_overlap[3] else: - damage_states['DS_0'] = 1 - limit_states["LS_0"] - damage_states['DS_1'] = limit_states["LS_0"] - limit_states["LS_1"] - damage_states['DS_2'] = limit_states["LS_1"] - limit_states["LS_2"] - damage_states['DS_3'] = limit_states["LS_2"] + damage_states["DS_0"] = 1 - limit_states["LS_0"] + damage_states["DS_1"] = limit_states["LS_0"] - limit_states["LS_1"] + damage_states["DS_2"] = limit_states["LS_1"] - limit_states["LS_2"] + damage_states["DS_3"] = limit_states["LS_2"] return damage_states @@ -242,26 +274,29 @@ def _4ls_to_5ds(limit_states): """ limit_states = AnalysisUtil.float_dict_to_decimal(limit_states) - damage_states = AnalysisUtil.float_dict_to_decimal({"DS_0": 0.0, "DS_1": 0.0, "DS_2": 0.0, "DS_3": 0.0, - "DS_4": 0.0}) + damage_states = AnalysisUtil.float_dict_to_decimal( + {"DS_0": 0.0, "DS_1": 0.0, "DS_2": 0.0, "DS_3": 0.0, "DS_4": 0.0} + ) small_overlap = FragilityCurveSet.is_there_small_overlap(limit_states) if small_overlap: - ds_overlap = FragilityCurveSet.adjust_for_small_overlap(small_overlap, limit_states, damage_states) + ds_overlap = FragilityCurveSet.adjust_for_small_overlap( + small_overlap, limit_states, damage_states + ) - damage_states['DS_0'] = ds_overlap[0] - damage_states['DS_1'] = ds_overlap[1] - damage_states['DS_2'] = ds_overlap[2] - damage_states['DS_3'] = ds_overlap[3] - damage_states['DS_4'] = ds_overlap[4] + damage_states["DS_0"] = ds_overlap[0] + damage_states["DS_1"] = ds_overlap[1] + damage_states["DS_2"] = ds_overlap[2] + damage_states["DS_3"] = ds_overlap[3] + damage_states["DS_4"] = ds_overlap[4] else: - damage_states['DS_0'] = 1 - limit_states["LS_0"] - damage_states['DS_1'] = limit_states["LS_0"] - limit_states["LS_1"] - damage_states['DS_2'] = limit_states["LS_1"] - limit_states["LS_2"] - damage_states['DS_3'] = limit_states["LS_2"] - limit_states["LS_3"] - damage_states['DS_4'] = limit_states["LS_3"] + damage_states["DS_0"] = 1 - limit_states["LS_0"] + damage_states["DS_1"] = limit_states["LS_0"] - limit_states["LS_1"] + damage_states["DS_2"] = limit_states["LS_1"] - limit_states["LS_2"] + damage_states["DS_3"] = limit_states["LS_2"] - limit_states["LS_3"] + damage_states["DS_4"] = limit_states["LS_3"] return damage_states @@ -278,10 +313,10 @@ def _1ls_to_4ds(limit_states): """ limit_states = AnalysisUtil.float_dict_to_decimal(limit_states) damage_states = dict() - damage_states['DS_0'] = 1 - limit_states["LS_0"] - damage_states['DS_1'] = 0 - damage_states['DS_2'] = 0 - damage_states['DS_3'] = limit_states["LS_0"] + damage_states["DS_0"] = 1 - limit_states["LS_0"] + damage_states["DS_1"] = 0 + damage_states["DS_2"] = 0 + damage_states["DS_3"] = limit_states["LS_0"] return damage_states @@ -298,11 +333,11 @@ def _1ls_to_5ds(limit_states): """ limit_states = AnalysisUtil.float_dict_to_decimal(limit_states) damage_states = dict() - damage_states['DS_0'] = 1 - limit_states["LS_0"] - damage_states['DS_1'] = 0 - damage_states['DS_2'] = 0 - damage_states['DS_3'] = 0 - damage_states['DS_4'] = limit_states["LS_0"] + damage_states["DS_0"] = 1 - limit_states["LS_0"] + damage_states["DS_1"] = 0 + damage_states["DS_2"] = 0 + damage_states["DS_3"] = 0 + damage_states["DS_4"] = limit_states["LS_0"] return damage_states diff --git 
a/pyincore/models/hazard/earthquake.py b/pyincore/models/hazard/earthquake.py index 1e9affd61..213df9d22 100644 --- a/pyincore/models/hazard/earthquake.py +++ b/pyincore/models/hazard/earthquake.py @@ -9,7 +9,6 @@ class Earthquake(Hazard): - def __init__(self, metadata): super().__init__(metadata) self.hazardDatasets = [] @@ -35,7 +34,7 @@ def from_hazard_service(cls, id: str, hazard_service: HazardService): return instance def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): - """ Retrieve bulk earthquake hazard values either from the Hazard service or read it from local Dataset + """Retrieve bulk earthquake hazard values either from the Hazard service or read it from local Dataset Args: payload (list): @@ -46,6 +45,8 @@ def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): """ if self.id and self.id != "" and hazard_service is not None: - return hazard_service.post_earthquake_hazard_values(self.id, payload, **kwargs) + return hazard_service.post_earthquake_hazard_values( + self.id, payload, **kwargs + ) else: return self.read_local_raster_hazard_values(payload) diff --git a/pyincore/models/hazard/flood.py b/pyincore/models/hazard/flood.py index 456e7a092..01a08e99e 100644 --- a/pyincore/models/hazard/flood.py +++ b/pyincore/models/hazard/flood.py @@ -3,13 +3,12 @@ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore import HazardService, Dataset +from pyincore import HazardService from pyincore.models.hazard.hazard import Hazard from pyincore.models.hazard.hazarddataset import FloodDataset class Flood(Hazard): - def __init__(self, metadata): super().__init__(metadata) self.hazardDatasets = [] @@ -35,7 +34,7 @@ def from_hazard_service(cls, id: str, hazard_service: HazardService): return instance def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): - """ Retrieve bulk flood hazard values either from the Hazard service or read it from local Dataset + """Retrieve bulk flood hazard values either from the Hazard service or read it from local Dataset Args: payload (list): diff --git a/pyincore/models/hazard/hazard.py b/pyincore/models/hazard/hazard.py index b22616ee5..9ea031b76 100644 --- a/pyincore/models/hazard/hazard.py +++ b/pyincore/models/hazard/hazard.py @@ -23,11 +23,10 @@ class Hazard: """ def __init__(self, metadata): - self.id = metadata["id"] if "id" in metadata else "" - self.name = metadata['name'] if "name" in metadata else "" - self.description = metadata['description'] if "description" in metadata else "" - self.date = metadata['date'] if "date" in metadata else "" + self.name = metadata["name"] if "name" in metadata else "" + self.description = metadata["description"] if "description" in metadata else "" + self.date = metadata["date"] if "date" in metadata else "" self.creator = metadata["creator"] if "creator" in metadata else "" self.spaces = metadata["spaces"] if "spaces" in metadata else [] self.hazard_type = metadata["hazard_type"] if "hazard_type" in metadata else "" @@ -63,7 +62,7 @@ def from_json_file(cls, file_path): return instance def read_local_raster_hazard_values(self, payload: list): - """ Read local hazard values from raster dataset + """Read local hazard values from raster dataset Args: payload (list): @@ -80,8 +79,12 @@ def read_local_raster_hazard_values(self, payload: list): for index, req_demand_type in 
enumerate(req["demands"]): match = False for hazard_dataset in self.hazardDatasets: - if hazard_dataset.dataset is None or not isinstance(hazard_dataset.dataset, Dataset): - raise Exception("Hazard dataset is not properly attached to the hazard object.") + if hazard_dataset.dataset is None or not isinstance( + hazard_dataset.dataset, Dataset + ): + raise Exception( + "Hazard dataset is not properly attached to the hazard object." + ) # TODO Consider how to get the closest period # TODO consider pga, pgv, sd conversions @@ -91,27 +94,37 @@ def read_local_raster_hazard_values(self, payload: list): period = None # find matching raster file (Dataset) to read value from - if req_demand_type.lower() == hazard_dataset.demand_type.lower() or \ - (hasattr(hazard_dataset, 'period') and period == hazard_dataset.period): + if ( + req_demand_type.lower() == hazard_dataset.demand_type.lower() + or ( + hasattr(hazard_dataset, "period") + and period == hazard_dataset.period + ) + ): raw_raster_value = hazard_dataset.dataset.get_raster_value( x=float(req["loc"].split(",")[1]), - y=float(req["loc"].split(",")[0])) + y=float(req["loc"].split(",")[0]), + ) if raw_raster_value is None: converted_raster_value = raw_raster_value else: # some basic unit conversion - converted_raster_value = Units.convert_hazard(raw_raster_value, - original_demand_units=hazard_dataset.demand_units, - requested_demand_units=req["units"][index]) + converted_raster_value = Units.convert_hazard( + raw_raster_value, + original_demand_units=hazard_dataset.demand_units, + requested_demand_units=req["units"][index], + ) # compare with threshold (optional) threshold_value = hazard_dataset.threshold_value threshold_unit = hazard_dataset.threshold_unit if threshold_value is not None: - converted_threshold_value = Units.convert_hazard(threshold_value, - original_demand_units=threshold_unit, - requested_demand_units=req["units"][index]) + converted_threshold_value = Units.convert_hazard( + threshold_value, + original_demand_units=threshold_unit, + requested_demand_units=req["units"][index], + ) if converted_raster_value < converted_threshold_value: converted_raster_value = None diff --git a/pyincore/models/hazard/hazarddataset.py b/pyincore/models/hazard/hazarddataset.py index 6b8bce1a9..570db5cdd 100644 --- a/pyincore/models/hazard/hazarddataset.py +++ b/pyincore/models/hazard/hazarddataset.py @@ -8,21 +8,38 @@ class HazardDataset: def __init__(self, hazard_datasets_metadata): - self.hazard_type = hazard_datasets_metadata["hazardType"] if "hazardType" in hazard_datasets_metadata else "" - self.demand_type = hazard_datasets_metadata["demandType"] if "demandType" in hazard_datasets_metadata else "" - self.demand_units = hazard_datasets_metadata["demandUnits"] \ - if "demandUnits" in hazard_datasets_metadata else "" - self.dataset_id = hazard_datasets_metadata["datasetId"] if "datasetId" in hazard_datasets_metadata else "" + self.hazard_type = ( + hazard_datasets_metadata["hazardType"] + if "hazardType" in hazard_datasets_metadata + else "" + ) + self.demand_type = ( + hazard_datasets_metadata["demandType"] + if "demandType" in hazard_datasets_metadata + else "" + ) + self.demand_units = ( + hazard_datasets_metadata["demandUnits"] + if "demandUnits" in hazard_datasets_metadata + else "" + ) + self.dataset_id = ( + hazard_datasets_metadata["datasetId"] + if "datasetId" in hazard_datasets_metadata + else "" + ) self.dataset = None # default threshold value and unit if exist - self.threshold_value = hazard_datasets_metadata["threshold"] if 
"threshold" in hazard_datasets_metadata \ + self.threshold_value = ( + hazard_datasets_metadata["threshold"] + if "threshold" in hazard_datasets_metadata else None + ) self.threshold_unit = self.demand_units def from_file(self, file_path, data_type): - """Get hurricane dataset from the file. - """ + """Get hurricane dataset from the file.""" self.dataset = Dataset.from_file(file_path, data_type) def set_threshold(self, threshold_value, threshold_unit): @@ -45,29 +62,51 @@ def from_data_service(self, data_service): class HurricaneDataset(HazardDataset): def __init__(self, hazard_datasets_metadata): super().__init__(hazard_datasets_metadata) - self.hurricane_parameters = hazard_datasets_metadata["hurricaneParameters"] \ - if "hurricaneParameters" in hazard_datasets_metadata else {} + self.hurricane_parameters = ( + hazard_datasets_metadata["hurricaneParameters"] + if "hurricaneParameters" in hazard_datasets_metadata + else {} + ) class EarthquakeDataset(HazardDataset): def __init__(self, hazard_datasets_metadata): super().__init__(hazard_datasets_metadata) - self.period = hazard_datasets_metadata["period"] if "period" in hazard_datasets_metadata else 0 - self.recurrence_interval = hazard_datasets_metadata["recurrenceInterval"] if "recurrenceInterval" in \ - hazard_datasets_metadata else 10000 - self.recurrence_unit = hazard_datasets_metadata["recurrenceUnit"] if "recurrenceUnit" in \ - hazard_datasets_metadata else "years" - self.eq_parameters = hazard_datasets_metadata["eqParameters"] if "eqParameters" in hazard_datasets_metadata \ + self.period = ( + hazard_datasets_metadata["period"] + if "period" in hazard_datasets_metadata + else 0 + ) + self.recurrence_interval = ( + hazard_datasets_metadata["recurrenceInterval"] + if "recurrenceInterval" in hazard_datasets_metadata + else 10000 + ) + self.recurrence_unit = ( + hazard_datasets_metadata["recurrenceUnit"] + if "recurrenceUnit" in hazard_datasets_metadata + else "years" + ) + self.eq_parameters = ( + hazard_datasets_metadata["eqParameters"] + if "eqParameters" in hazard_datasets_metadata else {} + ) class TsunamiDataset(HazardDataset): def __init__(self, hazard_datasets_metadata): super().__init__(hazard_datasets_metadata) - self.recurrence_interval = hazard_datasets_metadata["recurrenceInterval"] \ - if "recurrenceInterval" in hazard_datasets_metadata else 100 - self.recurrence_unit = hazard_datasets_metadata["recurrenceUnit"] if "recurrenceUnit" in \ - hazard_datasets_metadata else "years" + self.recurrence_interval = ( + hazard_datasets_metadata["recurrenceInterval"] + if "recurrenceInterval" in hazard_datasets_metadata + else 100 + ) + self.recurrence_unit = ( + hazard_datasets_metadata["recurrenceUnit"] + if "recurrenceUnit" in hazard_datasets_metadata + else "years" + ) # TODO: Tornado dataset has very different shape @@ -79,6 +118,8 @@ def __init__(self, hazard_datasets_metadata): class FloodDataset(HazardDataset): def __init__(self, hazard_datasets_metadata): super().__init__(hazard_datasets_metadata) - self.flood_parameters = hazard_datasets_metadata["floodParameters"] \ - if "floodParameters" in hazard_datasets_metadata else {} - + self.flood_parameters = ( + hazard_datasets_metadata["floodParameters"] + if "floodParameters" in hazard_datasets_metadata + else {} + ) diff --git a/pyincore/models/hazard/hurricane.py b/pyincore/models/hazard/hurricane.py index 2f312d04e..ec441487c 100644 --- a/pyincore/models/hazard/hurricane.py +++ b/pyincore/models/hazard/hurricane.py @@ -9,7 +9,6 @@ class Hurricane(Hazard): - def __init__(self, 
metadata): super().__init__(metadata) self.hazardDatasets = [] @@ -35,7 +34,7 @@ def from_hazard_service(cls, id: str, hazard_service: HazardService): return instance def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): - """ Retrieve bulk hurricane hazard values either from the Hazard service or read it from local Dataset + """Retrieve bulk hurricane hazard values either from the Hazard service or read it from local Dataset Args: payload (list): @@ -47,6 +46,8 @@ def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): """ if self.id and self.id != "" and hazard_service is not None: - return hazard_service.post_hurricane_hazard_values(self.id, payload, **kwargs) + return hazard_service.post_hurricane_hazard_values( + self.id, payload, **kwargs + ) else: return self.read_local_raster_hazard_values(payload) diff --git a/pyincore/models/hazard/tornado.py b/pyincore/models/hazard/tornado.py index b82fc4d7c..8d13f555a 100644 --- a/pyincore/models/hazard/tornado.py +++ b/pyincore/models/hazard/tornado.py @@ -11,7 +11,6 @@ from shapely.geometry import Point import random import time -import math class Tornado(Hazard): @@ -30,8 +29,13 @@ class Tornado(Hazard): "hazardDatasets":[] """ - def __init__(self, metadata, ef_rating_field="ef_rating", ef_wind_speed=(65, 86, 111, 136, 166, 200), - max_wind_speed=250.0): + def __init__( + self, + metadata, + ef_rating_field="ef_rating", + ef_wind_speed=(65, 86, 111, 136, 166, 200), + max_wind_speed=250.0, + ): super().__init__(metadata) self.tornado_type = metadata["tornadoType"] if "tornadoType" in metadata else "" self.hazardDatasets = [] @@ -42,7 +46,9 @@ def __init__(self, metadata, ef_rating_field="ef_rating", ef_wind_speed=(65, 86, self.EF_RATING_FIELD = ef_rating_field self.EF_WIND_SPEED = ef_wind_speed self.MAX_WIND_SPEED = max_wind_speed - self.tornado_parameters = metadata["TornadoParameters"] if "TornadoParameters" in metadata else {} + self.tornado_parameters = ( + metadata["TornadoParameters"] if "TornadoParameters" in metadata else {} + ) @classmethod def from_hazard_service(cls, id: str, hazard_service: HazardService): @@ -60,8 +66,10 @@ def from_hazard_service(cls, id: str, hazard_service: HazardService): instance = cls(metadata) return instance - def read_hazard_values(self, payload: list, hazard_service=None, seed=None, **kwargs): - """ Retrieve bulk earthquake hazard values either from the Hazard service or read it from local Dataset + def read_hazard_values( + self, payload: list, hazard_service=None, seed=None, **kwargs + ): + """Retrieve bulk earthquake hazard values either from the Hazard service or read it from local Dataset Args: payload (list): @@ -73,23 +81,29 @@ def read_hazard_values(self, payload: list, hazard_service=None, seed=None, **kw """ if self.id and self.id != "" and hazard_service is not None: - return hazard_service.post_tornado_hazard_values(self.id, payload, seed=seed, **kwargs) + return hazard_service.post_tornado_hazard_values( + self.id, payload, seed=seed, **kwargs + ) else: if self.tornado_type == "dataset": return self.calculate_wind_speed_uniform_random_dist(payload, seed) else: - raise ValueError("Local Tornado type \"" + self.tornado_type + "\" is not supported yet.") + raise ValueError( + 'Local Tornado type "' + + self.tornado_type + + '" is not supported yet.' 
+ ) def calculate_wind_speed_uniform_random_dist(self, payload, seed=-1): - """ Read local hazard values from shapefile dataset + """Read local hazard values from shapefile dataset - Args: - payload (list): - seed: (None or int): Seed value for random values. - Returns: - obj: Hazard values. + Args: + payload (list): + seed: (None or int): Seed value for random values. + Returns: + obj: Hazard values. - """ + """ response = [] @@ -99,19 +113,25 @@ def calculate_wind_speed_uniform_random_dist(self, payload, seed=-1): for index, req_demand_type in enumerate(req["demands"]): match = False for hazard_dataset in self.hazardDatasets: - if hazard_dataset.dataset is None or not isinstance(hazard_dataset.dataset, Dataset): - raise Exception("Hazard dataset is not properly attached to the hazard object.") + if hazard_dataset.dataset is None or not isinstance( + hazard_dataset.dataset, Dataset + ): + raise Exception( + "Hazard dataset is not properly attached to the hazard object." + ) # find matching raster file (Dataset) to read value from if req_demand_type.lower() == hazard_dataset.demand_type.lower(): - hazard_df = hazard_dataset.dataset.get_dataframe_from_shapefile() + hazard_df = ( + hazard_dataset.dataset.get_dataframe_from_shapefile() + ) ef_box = -1 x = float(req["loc"].split(",")[1]) y = float(req["loc"].split(",")[0]) location = Point(x, y) for _, feature in hazard_df.iterrows(): - polygon = feature['geometry'] + polygon = feature["geometry"] if location.within(polygon): ef_rating = feature[self.EF_RATING_FIELD] ef_box = Tornado.get_ef_rating(ef_rating) @@ -134,18 +154,21 @@ def calculate_wind_speed_uniform_random_dist(self, payload, seed=-1): converted_wind_speed = raw_wind_speed else: # some basic unit conversion - converted_wind_speed = Units.convert_hazard(raw_wind_speed, - original_demand_units=hazard_dataset.demand_units, - requested_demand_units=req["units"][index]) + converted_wind_speed = Units.convert_hazard( + raw_wind_speed, + original_demand_units=hazard_dataset.demand_units, + requested_demand_units=req["units"][index], + ) # compare with threshold (optional) threshold_value = hazard_dataset.threshold_value threshold_unit = hazard_dataset.threshold_unit if threshold_value is not None: - converted_threshold_value = Units.convert_hazard(threshold_value, - original_demand_units=threshold_unit, - requested_demand_units=req["units"][ - index]) + converted_threshold_value = Units.convert_hazard( + threshold_value, + original_demand_units=threshold_unit, + requested_demand_units=req["units"][index], + ) if converted_wind_speed < converted_threshold_value: converted_wind_speed = None @@ -162,9 +185,12 @@ def calculate_wind_speed_uniform_random_dist(self, payload, seed=-1): return response def get_random_seed(self, location, seed=-1): - # Get seed from the model and override if no value specified - if (seed is None or seed == -1) and self.tornado_parameters is not {} and "randomSeed" in self.tornado_parameters: + if ( + (seed is None or seed == -1) + and self.tornado_parameters is not {} + and "randomSeed" in self.tornado_parameters + ): seed = self.tornado_parameters["randomSeed"] # If no seed value provided OR model seed value was never set by the user, use current system time @@ -175,8 +201,10 @@ def get_random_seed(self, location, seed=-1): try: seed = seed + int(abs((location.x + location.y) * 10000)) except OverflowError: - print("Seed + abs((location.x + location.y) * 10000) exceeds max value, capping at Maximum value") - seed = float('inf') # Cap at positive infinity for 
maximum value + print( + "Seed + abs((location.x + location.y) * 10000) exceeds max value, capping at Maximum value" + ) + seed = float("inf") # Cap at positive infinity for maximum value return seed diff --git a/pyincore/models/hazard/tsunami.py b/pyincore/models/hazard/tsunami.py index 4d3f1f72f..5e89bffbc 100644 --- a/pyincore/models/hazard/tsunami.py +++ b/pyincore/models/hazard/tsunami.py @@ -9,7 +9,6 @@ class Tsunami(Hazard): - def __init__(self, metadata): super().__init__(metadata) self.hazardDatasets = [] @@ -35,7 +34,7 @@ def from_hazard_service(cls, id: str, hazard_service: HazardService): return instance def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): - """ Retrieve bulk tsunami hazard values either from the Hazard service or read it from local Dataset + """Retrieve bulk tsunami hazard values either from the Hazard service or read it from local Dataset Args: payload (list): @@ -49,4 +48,3 @@ def read_hazard_values(self, payload: list, hazard_service=None, **kwargs): return hazard_service.post_tsunami_hazard_values(self.id, payload, **kwargs) else: return self.read_local_raster_hazard_values(payload) - diff --git a/pyincore/models/mapping.py b/pyincore/models/mapping.py index 25121c0b3..6e5e480bd 100644 --- a/pyincore/models/mapping.py +++ b/pyincore/models/mapping.py @@ -6,7 +6,7 @@ class Mapping: - """ mapping class that contains the rules and keys of dfr3 curves. + """mapping class that contains the rules and keys of dfr3 curves. Args: entry (dict): mapping entry. diff --git a/pyincore/models/mappingset.py b/pyincore/models/mappingset.py index 52c410bb1..d4e2d66af 100644 --- a/pyincore/models/mappingset.py +++ b/pyincore/models/mappingset.py @@ -18,28 +18,31 @@ class MappingSet: """ def __init__(self, metadata): - self.id = metadata["id"] if "id" in metadata else "" self.name = metadata["name"] if "name" in metadata else "" self.hazard_type = metadata["hazardType"] if "hazardType" in metadata else "" - self.inventory_type = metadata['inventoryType'] if "inventoryType" in metadata else "" + self.inventory_type = ( + metadata["inventoryType"] if "inventoryType" in metadata else "" + ) if "mappingEntryKeys" in metadata and metadata["mappingEntryKeys"] is not None: self.mappingEntryKeys = metadata["mappingEntryKeys"] else: self.mappingEntryKeys = [] - self.data_type = metadata["dataType"] if "dataType" in metadata else "incore:dfr3MappingSet" + self.data_type = ( + metadata["dataType"] if "dataType" in metadata else "incore:dfr3MappingSet" + ) mappings = [] - for m in metadata['mappings']: + for m in metadata["mappings"]: if isinstance(m, Mapping): mappings.append(m) else: # enforce convert dictionary to mapping entry object - mappings.append(Mapping(m['entry'], m['rules'])) + mappings.append(Mapping(m["entry"], m["rules"])) self.mappings = mappings - self.mapping_type = metadata['mappingType'] + self.mapping_type = metadata["mappingType"] @classmethod def from_json_str(cls, json_str): diff --git a/pyincore/models/networkdataset.py b/pyincore/models/networkdataset.py index c40b68a7c..9351c386a 100644 --- a/pyincore/models/networkdataset.py +++ b/pyincore/models/networkdataset.py @@ -14,7 +14,7 @@ class NetworkDataset: - """ This class wraps around the Dataset class. + """This class wraps around the Dataset class. Args: dataset (obj): The dataset object we want to extract the network data from. 
@@ -22,12 +22,17 @@ class NetworkDataset: """ def __init__(self, dataset: Dataset): - if dataset.format == 'shp-network' and dataset.metadata['networkDataset'] is not None: + if ( + dataset.format == "shp-network" + and dataset.metadata["networkDataset"] is not None + ): self.metadata = dataset.metadata self.data_type = dataset.metadata["dataType"] self.nodes = NetworkDataset._network_component_from_dataset(dataset, "node") self.links = NetworkDataset._network_component_from_dataset(dataset, "link") - self.graph = NetworkDataset._network_component_from_dataset(dataset, "graph") + self.graph = NetworkDataset._network_component_from_dataset( + dataset, "graph" + ) else: # TODO why do we need those self._metadata = None @@ -69,7 +74,9 @@ def from_dataset(cls, dataset: Dataset): return cls(dataset) @classmethod - def from_json_str(cls, json_str, data_service: DataService = None, folder_path=None): + def from_json_str( + cls, json_str, data_service: DataService = None, folder_path=None + ): """Get Dataset from json string. Args: @@ -90,14 +97,23 @@ def from_json_str(cls, json_str, data_service: DataService = None, folder_path=N elif folder_path is not None: dataset.local_file_path = folder_path else: - raise ValueError("You have to either use data services, or given pass local file path.") + raise ValueError( + "You have to either use data services, or given pass local file path." + ) return cls(dataset) @classmethod - def from_files(cls, node_file_path, link_file_path, graph_file_path, network_data_type, link_data_type, - node_data_type, - graph_data_type): + def from_files( + cls, + node_file_path, + link_file_path, + graph_file_path, + network_data_type, + link_data_type, + node_data_type, + graph_data_type, + ): """Create Dataset from the file. Args: @@ -117,20 +133,11 @@ def from_files(cls, node_file_path, link_file_path, graph_file_path, network_dat "dataType": network_data_type, "fileDescriptors": [], "networkDataset": { - "link": { - "dataType": link_data_type, - "fileName": link_file_path - }, - "node": { - "dataType": node_data_type, - "fileName": node_file_path - }, - "graph": { - "dataType": graph_data_type, - "fileName": graph_file_path - } + "link": {"dataType": link_data_type, "fileName": link_file_path}, + "node": {"dataType": node_data_type, "fileName": node_file_path}, + "graph": {"dataType": graph_data_type, "fileName": graph_file_path}, }, - "format": "shp-network" + "format": "shp-network", } dataset = Dataset(metadata) dataset.local_file_path = "" @@ -147,17 +154,24 @@ def _network_component_from_dataset(dataset: Dataset, network_type="link"): Returns: network component in dataset object """ - network_component_filename = dataset.metadata['networkDataset'][network_type]["fileName"] + network_component_filename = dataset.metadata["networkDataset"][network_type][ + "fileName" + ] network_component_metadata = { - "dataType": dataset.metadata['networkDataset'][network_type]["dataType"], + "dataType": dataset.metadata["networkDataset"][network_type]["dataType"], "format": f"shp-{network_type}", "id": f"{dataset.id}-{network_type}", - "fileDescriptors": [fd for fd in dataset.file_descriptors if fd["filename"].find( - network_component_filename.split(".")[0]) != -1] + "fileDescriptors": [ + fd + for fd in dataset.file_descriptors + if fd["filename"].find(network_component_filename.split(".")[0]) != -1 + ], } network_component = Dataset(network_component_metadata) try: - file_path = os.path.join(dataset.local_file_path, network_component_filename) + file_path = os.path.join( + 
dataset.local_file_path, network_component_filename + ) except FileNotFoundError: raise FileNotFoundError("Invalid local file path.") network_component.local_file_path = file_path @@ -173,7 +187,13 @@ def get_links(self): def get_graph(self): return self.graph.get_csv_reader() - def get_graph_networkx(self, from_node_fld="fromnode", to_node_fld="tonode", directed=False, numeric=True): + def get_graph_networkx( + self, + from_node_fld="fromnode", + to_node_fld="tonode", + directed=False, + numeric=True, + ): if directed: G = nx.DiGraph() else: diff --git a/pyincore/models/repaircurveset.py b/pyincore/models/repaircurveset.py index 4aec0197e..8911dc529 100644 --- a/pyincore/models/repaircurveset.py +++ b/pyincore/models/repaircurveset.py @@ -21,25 +21,29 @@ class RepairCurveSet: def __init__(self, metadata): self.id = metadata["id"] if "id" in metadata else "" - self.description = metadata['description'] if "description" in metadata else "" - self.authors = ", ".join(metadata['authors']) if "authors" in metadata else "" - self.paper_reference = str(metadata["paperReference"]) if "paperReference" in metadata else "" + self.description = metadata["description"] if "description" in metadata else "" + self.authors = ", ".join(metadata["authors"]) if "authors" in metadata else "" + self.paper_reference = ( + str(metadata["paperReference"]) if "paperReference" in metadata else "" + ) self.creator = metadata["creator"] if "creator" in metadata else "" self.time_units = metadata["timeUnits"] self.result_type = metadata["resultType"] self.result_unit = metadata["resultUnit"] if "resultUnit" in metadata else "" - self.hazard_type = metadata['hazardType'] - self.inventory_type = metadata['inventoryType'] + self.hazard_type = metadata["hazardType"] + self.inventory_type = metadata["inventoryType"] self.repair_curves = [] - if 'curveParameters' in metadata.keys(): + if "curveParameters" in metadata.keys(): self.curve_parameters = metadata["curveParameters"] - if 'repairCurves' in metadata.keys(): + if "repairCurves" in metadata.keys(): for repair_curve in metadata["repairCurves"]: self.repair_curves.append(DFR3Curve(repair_curve)) else: - raise ValueError("Cannot create dfr3 curve object. Missing key field: repairCurves.") + raise ValueError( + "Cannot create dfr3 curve object. Missing key field: repairCurves." + ) @classmethod def from_json_str(cls, json_str): @@ -84,11 +88,14 @@ def calculate_repair_rates(self, **kwargs): output = {} if len(self.repair_curves) <= 5: for repair_curve in self.repair_curves: - eval_value = repair_curve.solve_curve_expression(hazard_values={}, - curve_parameters=self.curve_parameters, **kwargs) - output[repair_curve.return_type['description']] = eval_value + eval_value = repair_curve.solve_curve_expression( + hazard_values={}, curve_parameters=self.curve_parameters, **kwargs + ) + output[repair_curve.return_type["description"]] = eval_value else: - raise ValueError("We can only handle repair curves with less than 5 damage states.") + raise ValueError( + "We can only handle repair curves with less than 5 damage states." 
+ ) return output @@ -107,10 +114,13 @@ def calculate_inverse_repair_rates(self, **kwargs): if len(self.repair_curves) <= 5: for repair_curve in self.repair_curves: - eval_value = repair_curve.solve_curve_for_inverse(hazard_values={}, - curve_parameters=self.curve_parameters, **kwargs) - output[repair_curve.return_type['description']] = eval_value + eval_value = repair_curve.solve_curve_for_inverse( + hazard_values={}, curve_parameters=self.curve_parameters, **kwargs + ) + output[repair_curve.return_type["description"]] = eval_value else: - raise ValueError("We can only handle repair curves with less than 5 damage states.") + raise ValueError( + "We can only handle repair curves with less than 5 damage states." + ) return output diff --git a/pyincore/models/restorationcurveset.py b/pyincore/models/restorationcurveset.py index 0e2c0835b..5bff8b62b 100644 --- a/pyincore/models/restorationcurveset.py +++ b/pyincore/models/restorationcurveset.py @@ -21,25 +21,29 @@ class RestorationCurveSet: def __init__(self, metadata): self.id = metadata["id"] if "id" in metadata else "" - self.description = metadata['description'] if "description" in metadata else "" - self.authors = ", ".join(metadata['authors']) if "authors" in metadata else "" - self.paper_reference = str(metadata["paperReference"]) if "paperReference" in metadata else "" + self.description = metadata["description"] if "description" in metadata else "" + self.authors = ", ".join(metadata["authors"]) if "authors" in metadata else "" + self.paper_reference = ( + str(metadata["paperReference"]) if "paperReference" in metadata else "" + ) self.creator = metadata["creator"] if "creator" in metadata else "" self.time_units = metadata["timeUnits"] self.result_type = metadata["resultType"] self.result_unit = metadata["resultUnit"] if "resultUnit" in metadata else "" - self.hazard_type = metadata['hazardType'] - self.inventory_type = metadata['inventoryType'] + self.hazard_type = metadata["hazardType"] + self.inventory_type = metadata["inventoryType"] self.restoration_curves = [] - if 'curveParameters' in metadata.keys(): + if "curveParameters" in metadata.keys(): self.curve_parameters = metadata["curveParameters"] - if 'restorationCurves' in metadata.keys(): + if "restorationCurves" in metadata.keys(): for restoration_curve in metadata["restorationCurves"]: self.restoration_curves.append(DFR3Curve(restoration_curve)) else: - raise ValueError("Cannot create dfr3 curve object. Missing key field: restorationCurves.") + raise ValueError( + "Cannot create dfr3 curve object. Missing key field: restorationCurves." + ) @classmethod def from_json_str(cls, json_str): @@ -85,11 +89,14 @@ def calculate_restoration_rates(self, **kwargs): if len(self.restoration_curves) <= 5: for restoration_curve in self.restoration_curves: - eval_value = restoration_curve.solve_curve_expression(hazard_values={}, - curve_parameters=self.curve_parameters, **kwargs) - output[restoration_curve.return_type['description']] = eval_value + eval_value = restoration_curve.solve_curve_expression( + hazard_values={}, curve_parameters=self.curve_parameters, **kwargs + ) + output[restoration_curve.return_type["description"]] = eval_value else: - raise ValueError("We can only handle restoration curves with less than 5 damage states.") + raise ValueError( + "We can only handle restoration curves with less than 5 damage states." 
+ ) return output @@ -107,10 +114,13 @@ def calculate_inverse_restoration_rates(self, **kwargs): output = {} if len(self.restoration_curves) <= 5: for restoration_curve in self.restoration_curves: - eval_value = restoration_curve.solve_curve_for_inverse(hazard_values={}, - curve_parameters=self.curve_parameters, **kwargs) - output[restoration_curve.return_type['description']] = eval_value + eval_value = restoration_curve.solve_curve_for_inverse( + hazard_values={}, curve_parameters=self.curve_parameters, **kwargs + ) + output[restoration_curve.return_type["description"]] = eval_value else: - raise ValueError("We can only handle restoration curves with less than 5 damage states.") + raise ValueError( + "We can only handle restoration curves with less than 5 damage states." + ) return output diff --git a/pyincore/models/units.py b/pyincore/models/units.py index 3de2a8103..8ecd4bc23 100644 --- a/pyincore/models/units.py +++ b/pyincore/models/units.py @@ -6,7 +6,6 @@ class Units: - deg_to_rad = 0.0174533 rad_to_deg = 57.2958 @@ -63,14 +62,18 @@ class Units: def convert_hazard(hazard_value, original_demand_units, requested_demand_units): converted_hazard_value = hazard_value if original_demand_units.lower() != requested_demand_units.lower(): - conversion = f"{original_demand_units.lower().replace('/', '')}_to_" \ - f"{requested_demand_units.lower().replace('/', '')}" + conversion = ( + f"{original_demand_units.lower().replace('/', '')}_to_" + f"{requested_demand_units.lower().replace('/', '')}" + ) try: conversion_value = getattr(Units, conversion) converted_hazard_value = conversion_value * hazard_value except AttributeError: - raise ValueError(f"We don't support the conversion from {original_demand_units} " - f"to {requested_demand_units}") + raise ValueError( + f"We don't support the conversion from {original_demand_units} " + f"to {requested_demand_units}" + ) else: return converted_hazard_value diff --git a/pyincore/networkdata.py b/pyincore/networkdata.py index fb030680c..e471410ab 100644 --- a/pyincore/networkdata.py +++ b/pyincore/networkdata.py @@ -9,7 +9,7 @@ class NetworkData: - """ Network data from Fiona package. Fiona can read and write data using GIS formats. + """Network data from Fiona package. Fiona can read and write data using GIS formats. Args: network_type (str): Network type. 
@@ -22,11 +22,10 @@ def __init__(self, network_type: str, file_path: str): if os.path.exists(file_path): self.file_path = file_path else: - raise FileNotFoundError( - errno.ENOENT, os.strerror(errno.ENOENT), file_path) + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), file_path) def get_inventory_reader(self): - """ getter """ + """getter""" filename = self.file_path if os.path.isdir(filename): layers = fiona.listlayers(filename) diff --git a/pyincore/repairservice.py b/pyincore/repairservice.py index da7780408..375dd75fe 100644 --- a/pyincore/repairservice.py +++ b/pyincore/repairservice.py @@ -22,15 +22,25 @@ class RepairService(Dfr3Service): def __init__(self, client: IncoreClient): self.client = client - self.base_dfr3_url = urllib.parse.urljoin(client.service_url, - 'dfr3/api/repairs/') + self.base_dfr3_url = urllib.parse.urljoin( + client.service_url, "dfr3/api/repairs/" + ) super(RepairService, self).__init__(client) @forbid_offline - def get_dfr3_sets(self, hazard_type: str = None, inventory_type: str = None, - author: str = None, creator: str = None, space: str = None, - skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def get_dfr3_sets( + self, + hazard_type: str = None, + inventory_type: str = None, + author: str = None, + creator: str = None, + space: str = None, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Get the set of repair data, curves. Args: @@ -52,19 +62,19 @@ def get_dfr3_sets(self, hazard_type: str = None, inventory_type: str = None, payload = {} if hazard_type is not None: - payload['hazard'] = hazard_type + payload["hazard"] = hazard_type if inventory_type is not None: - payload['inventory'] = inventory_type + payload["inventory"] = inventory_type if author is not None: - payload['author'] = author + payload["author"] = author if creator is not None: - payload['creator'] = creator + payload["creator"] = creator if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return r.json() diff --git a/pyincore/restorationservice.py b/pyincore/restorationservice.py index 279592872..7fdca9ff9 100644 --- a/pyincore/restorationservice.py +++ b/pyincore/restorationservice.py @@ -23,14 +23,23 @@ class RestorationService(Dfr3Service): def __init__(self, client: IncoreClient): self.client = client - self.base_dfr3_url = urljoin(client.service_url, 'dfr3/api/restorations/') + self.base_dfr3_url = urljoin(client.service_url, "dfr3/api/restorations/") super(RestorationService, self).__init__(client) @forbid_offline - def get_dfr3_sets(self, hazard_type: str = None, inventory_type: str = None, - author: str = None, creator: str = None, space: str = None, - skip: int = None, limit: int = None, timeout=(30, 600), **kwargs): + def get_dfr3_sets( + self, + hazard_type: str = None, + inventory_type: str = None, + author: str = None, + creator: str = None, + space: str = None, + skip: int = None, + limit: int = None, + timeout=(30, 600), + **kwargs + ): """Get the set of restoration data, curves. 
Args: @@ -50,19 +59,19 @@ def get_dfr3_sets(self, hazard_type: str = None, inventory_type: str = None, payload = {} if hazard_type is not None: - payload['hazard'] = hazard_type + payload["hazard"] = hazard_type if inventory_type is not None: - payload['inventory'] = inventory_type + payload["inventory"] = inventory_type if author is not None: - payload['author'] = author + payload["author"] = author if creator is not None: - payload['creator'] = creator + payload["creator"] = creator if skip is not None: - payload['skip'] = skip + payload["skip"] = skip if limit is not None: - payload['limit'] = limit + payload["limit"] = limit if space is not None: - payload['space'] = space + payload["space"] = space r = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(r).json() diff --git a/pyincore/semanticservice.py b/pyincore/semanticservice.py index 372758778..bcd72b0f5 100644 --- a/pyincore/semanticservice.py +++ b/pyincore/semanticservice.py @@ -31,7 +31,6 @@ def __init__(self, client: IncoreClient): self.client = client self.base_url = urljoin(client.service_url, "semantics/api/types") - @forbid_offline def get_all_semantic_types( self, @@ -72,9 +71,7 @@ def get_all_semantic_types( "limit": limit, "detail": detail, } - response = self.client.get( - url, params=payload, timeout=timeout, **kwargs - ) + response = self.client.get(url, params=payload, timeout=timeout, **kwargs) data = return_http_response(response).json() if save_json: @@ -136,7 +133,5 @@ def search_semantic_type( url = f"{self.base_url}/search" payload = {"text": query} - response = self.client.get( - url, params=payload, timeout=timeout, **kwargs - ) + response = self.client.get(url, params=payload, timeout=timeout, **kwargs) return return_http_response(response).json() diff --git a/pyincore/spaceservice.py b/pyincore/spaceservice.py index 38d5b6a81..64c055249 100644 --- a/pyincore/spaceservice.py +++ b/pyincore/spaceservice.py @@ -39,7 +39,7 @@ def create_space(self, space_json, timeout=(30, 600), **kwargs): """ url = self.base_space_url - space_data = {('space', space_json)} + space_data = {("space", space_json)} kwargs["files"] = space_data r = self.client.post(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -60,7 +60,7 @@ def get_spaces(self, dataset_id: str = None, timeout=(30, 600), **kwargs): url = self.base_space_url payload = {} if dataset_id is not None: - payload['dataset'] = dataset_id + payload["dataset"] = dataset_id r = self.client.get(url, params=payload, timeout=timeout, **kwargs) @@ -95,7 +95,9 @@ def get_space_by_name(self, space_name: str, timeout=(30, 600), **kwargs): obj: HTTP response with the returned space information. 
""" - r = self.client.get(self.base_space_url, params={"name": space_name}, timeout=timeout, **kwargs) + r = self.client.get( + self.base_space_url, params={"name": space_name}, timeout=timeout, **kwargs + ) return return_http_response(r).json() @forbid_offline @@ -113,7 +115,7 @@ def update_space(self, space_id: str, space_json, timeout=(30, 600), **kwargs): """ url = urljoin(self.base_space_url, space_id) - space_data = {('space', space_json)} + space_data = {("space", space_json)} kwargs["files"] = space_data r = self.client.put(url, timeout=timeout, **kwargs) return return_http_response(r).json() @@ -155,7 +157,9 @@ def remove_from_space_by_name(self, space_name: str, dataset_id: str): return response @forbid_offline - def remove_dataset_from_space(self, space_id: str, dataset_id: str, timeout=(30, 600), **kwargs): + def remove_dataset_from_space( + self, space_id: str, dataset_id: str, timeout=(30, 600), **kwargs + ): """Remove dataset from the space using dataset id and space id Args: @@ -174,7 +178,9 @@ def remove_dataset_from_space(self, space_id: str, dataset_id: str, timeout=(30, return return_http_response(r).json() @forbid_offline - def add_dataset_to_space(self, space_id: str, dataset_id: str, timeout=(30, 600), **kwargs): + def add_dataset_to_space( + self, space_id: str, dataset_id: str, timeout=(30, 600), **kwargs + ): """Add member to a Space. Args: @@ -193,7 +199,9 @@ def add_dataset_to_space(self, space_id: str, dataset_id: str, timeout=(30, 600) return return_http_response(r).json() @forbid_offline - def grant_privileges_to_space(self, space_id: str, privileges_json, timeout=(30, 600), **kwargs): + def grant_privileges_to_space( + self, space_id: str, privileges_json, timeout=(30, 600), **kwargs + ): """Updates a Space. Args: @@ -207,7 +215,7 @@ def grant_privileges_to_space(self, space_id: str, privileges_json, timeout=(30, """ url = urljoin(self.base_space_url, space_id + "/grant") - space_privileges = {('grant', privileges_json)} + space_privileges = {("grant", privileges_json)} kwargs["files"] = space_privileges r = self.client.post(url, timeout=timeout, **kwargs) diff --git a/pyincore/utils/__init__.py b/pyincore/utils/__init__.py index 93eb09efa..4a5718040 100644 --- a/pyincore/utils/__init__.py +++ b/pyincore/utils/__init__.py @@ -1 +1 @@ -from pyincore.utils.http_util import return_http_response \ No newline at end of file +from pyincore.utils.http_util import return_http_response diff --git a/pyincore/utils/analysisutil.py b/pyincore/utils/analysisutil.py index fabf97ce9..e154659b2 100644 --- a/pyincore/utils/analysisutil.py +++ b/pyincore/utils/analysisutil.py @@ -21,12 +21,8 @@ class AnalysisUtil: """Utility methods for analysis""" - DOCSTR_FORMAT = "$DESC$ \n\n" \ - "Args: \n\t" \ - "$ARGS$ " \ - "\n" \ - "Returns: \n\t" \ - "$RETS$ " + + DOCSTR_FORMAT = "$DESC$ \n\n" "Args: \n\t" "$ARGS$ " "\n" "Returns: \n\t" "$RETS$ " getcontext().prec = DAMAGE_PRECISION @@ -41,7 +37,9 @@ def update_precision(num, precision: int = DAMAGE_PRECISION): @staticmethod def update_precision_of_dicts(states: dict) -> dict: - updated_states = {key: AnalysisUtil.update_precision(states[key]) for key in states} + updated_states = { + key: AnalysisUtil.update_precision(states[key]) for key in states + } return updated_states @staticmethod @@ -49,7 +47,9 @@ def update_precision_of_lists(hazard_vals: List) -> List: updated_hazard_vals = [] for val in hazard_vals: if val is not None: - if math.ceil(val) == -9999: # if it's an error code(-9999.x) do not update precision + if ( + 
math.ceil(val) == -9999 + ): # if it's an error code(-9999.x) do not update precision updated_hazard_vals.append(val) else: updated_hazard_vals.append(AnalysisUtil.update_precision(val)) @@ -59,7 +59,6 @@ def update_precision_of_lists(hazard_vals: List) -> List: @staticmethod def float_to_decimal(num: float): - # Helper function to check if a string is a float def is_float(string): try: @@ -85,83 +84,103 @@ def float_dict_to_decimal(num_dict: dict): def dmg_string_dict_to_dmg_float_dict(dmg_dict: dict): float_dmg_dict = {} for key in dmg_dict: - if key != 'guid' and key != 'haz_expose': - if dmg_dict[key] == '': + if key != "guid" and key != "haz_expose": + if dmg_dict[key] == "": float_dmg_dict[key] = np.nan else: float_dmg_dict[key] = float(dmg_dict[key]) else: - if dmg_dict[key] != '': + if dmg_dict[key] != "": float_dmg_dict[key] = dmg_dict[key] else: float_dmg_dict[key] = np.nan return float_dmg_dict @staticmethod - def calculate_mean_damage(dmg_ratio_tbl, dmg_intervals, - damage_interval_keys, is_bridge=False, - bridge_spans=1): + def calculate_mean_damage( + dmg_ratio_tbl, + dmg_intervals, + damage_interval_keys, + is_bridge=False, + bridge_spans=1, + ): if len(damage_interval_keys) < 4: raise ValueError("we only accept 4 damage or more than 4 interval keys!") - float_dmg_intervals = AnalysisUtil.dmg_string_dict_to_dmg_float_dict(dmg_intervals) + float_dmg_intervals = AnalysisUtil.dmg_string_dict_to_dmg_float_dict( + dmg_intervals + ) output = collections.OrderedDict() if len(dmg_ratio_tbl) == 5: - output['meandamage'] = float( - float(dmg_ratio_tbl[1]["Best Mean Damage Ratio"]) * float_dmg_intervals[damage_interval_keys[0]] + \ - float(dmg_ratio_tbl[2]["Best Mean Damage Ratio"]) * float_dmg_intervals[damage_interval_keys[1]] + \ - float(dmg_ratio_tbl[3]["Best Mean Damage Ratio"]) * float_dmg_intervals[damage_interval_keys[2]] +\ - float(dmg_ratio_tbl[4]["Best Mean Damage Ratio"]) * float_dmg_intervals[damage_interval_keys[3]]) + output["meandamage"] = float( + float(dmg_ratio_tbl[1]["Best Mean Damage Ratio"]) + * float_dmg_intervals[damage_interval_keys[0]] + + float(dmg_ratio_tbl[2]["Best Mean Damage Ratio"]) + * float_dmg_intervals[damage_interval_keys[1]] + + float(dmg_ratio_tbl[3]["Best Mean Damage Ratio"]) + * float_dmg_intervals[damage_interval_keys[2]] + + float(dmg_ratio_tbl[4]["Best Mean Damage Ratio"]) + * float_dmg_intervals[damage_interval_keys[3]] + ) elif len(dmg_ratio_tbl) == 4: - output['meandamage'] = float( - float(dmg_ratio_tbl[0]["Mean Damage Factor"]) * float_dmg_intervals[damage_interval_keys[0]] + \ - float(dmg_ratio_tbl[1]["Mean Damage Factor"]) * float_dmg_intervals[damage_interval_keys[1]] + \ - float(dmg_ratio_tbl[2]["Mean Damage Factor"]) * float_dmg_intervals[damage_interval_keys[2]] + \ - float(dmg_ratio_tbl[3]["Mean Damage Factor"]) * float_dmg_intervals[damage_interval_keys[3]]) + output["meandamage"] = float( + float(dmg_ratio_tbl[0]["Mean Damage Factor"]) + * float_dmg_intervals[damage_interval_keys[0]] + + float(dmg_ratio_tbl[1]["Mean Damage Factor"]) + * float_dmg_intervals[damage_interval_keys[1]] + + float(dmg_ratio_tbl[2]["Mean Damage Factor"]) + * float_dmg_intervals[damage_interval_keys[2]] + + float(dmg_ratio_tbl[3]["Mean Damage Factor"]) + * float_dmg_intervals[damage_interval_keys[3]] + ) elif len(dmg_ratio_tbl) == 6 and is_bridge: # this is for bridge - weight_slight = float(dmg_ratio_tbl[1]['Best Mean Damage Ratio']) - weight_moderate = float(dmg_ratio_tbl[2]['Best Mean Damage Ratio']) - weight_extensive = float( - 
dmg_ratio_tbl[3]['Best Mean Damage Ratio']) - weight_collapse0 = float( - dmg_ratio_tbl[4]['Best Mean Damage Ratio']) - weight_collapse1 = float( - dmg_ratio_tbl[5]['Best Mean Damage Ratio']) - - output['meandamage'] = \ - weight_slight * float_dmg_intervals[damage_interval_keys[1]] + \ - weight_moderate * float_dmg_intervals[damage_interval_keys[2]] + \ - weight_extensive * float_dmg_intervals[damage_interval_keys[3]] + weight_slight = float(dmg_ratio_tbl[1]["Best Mean Damage Ratio"]) + weight_moderate = float(dmg_ratio_tbl[2]["Best Mean Damage Ratio"]) + weight_extensive = float(dmg_ratio_tbl[3]["Best Mean Damage Ratio"]) + weight_collapse0 = float(dmg_ratio_tbl[4]["Best Mean Damage Ratio"]) + weight_collapse1 = float(dmg_ratio_tbl[5]["Best Mean Damage Ratio"]) + + output["meandamage"] = ( + weight_slight * float_dmg_intervals[damage_interval_keys[1]] + + weight_moderate * float_dmg_intervals[damage_interval_keys[2]] + + weight_extensive * float_dmg_intervals[damage_interval_keys[3]] + ) if bridge_spans >= 3: - output[ - 'meandamage'] += weight_collapse1 / bridge_spans *float_dmg_intervals[damage_interval_keys[4]] + output["meandamage"] += ( + weight_collapse1 + / bridge_spans + * float_dmg_intervals[damage_interval_keys[4]] + ) else: - output['meandamage'] += weight_collapse0 * float_dmg_intervals[damage_interval_keys[4]] + output["meandamage"] += ( + weight_collapse0 * float_dmg_intervals[damage_interval_keys[4]] + ) else: - raise ValueError('We cannot handle this damage ratio format.') + raise ValueError("We cannot handle this damage ratio format.") return output @staticmethod - def calculate_mean_damage_std_deviation(dmg_ratio_tbl, dmg, - mean_damage, damage_interval_keys): - + def calculate_mean_damage_std_deviation( + dmg_ratio_tbl, dmg, mean_damage, damage_interval_keys + ): float_dmg = AnalysisUtil.dmg_string_dict_to_dmg_float_dict(dmg) output = collections.OrderedDict() result = 0.0 idx = 0 for key in damage_interval_keys: - result += float_dmg[key] * (math.pow( - float(dmg_ratio_tbl[idx]["Mean Damage Factor"]), 2) + math.pow( - float(dmg_ratio_tbl[idx]["Deviation Damage Factor"]), 2)) + result += float_dmg[key] * ( + math.pow(float(dmg_ratio_tbl[idx]["Mean Damage Factor"]), 2) + + math.pow(float(dmg_ratio_tbl[idx]["Deviation Damage Factor"]), 2) + ) idx += 1 - output['mdamagedev'] = math.sqrt(result - math.pow(mean_damage, 2)) + output["mdamagedev"] = math.sqrt(result - math.pow(mean_damage, 2)) return output @staticmethod @@ -176,16 +195,26 @@ def get_expected_damage(mean_damage, dmg_ratios): float: A value of the damage state. 
""" - no_dmg_bound = [float(dmg_ratios[1]["Lower Bound"]), - float(dmg_ratios[1]["Upper Bound"])] - slight_bound = [float(dmg_ratios[2]["Lower Bound"]), - float(dmg_ratios[2]["Upper Bound"])] - moderate_bound = [float(dmg_ratios[3]["Lower Bound"]), - float(dmg_ratios[3]["Upper Bound"])] - extensive_bound = [float(dmg_ratios[4]["Lower Bound"]), - float(dmg_ratios[4]["Upper Bound"])] - collapse_bound = [float(dmg_ratios[5]["Lower Bound"]), - float(dmg_ratios[5]["Upper Bound"])] + no_dmg_bound = [ + float(dmg_ratios[1]["Lower Bound"]), + float(dmg_ratios[1]["Upper Bound"]), + ] + slight_bound = [ + float(dmg_ratios[2]["Lower Bound"]), + float(dmg_ratios[2]["Upper Bound"]), + ] + moderate_bound = [ + float(dmg_ratios[3]["Lower Bound"]), + float(dmg_ratios[3]["Upper Bound"]), + ] + extensive_bound = [ + float(dmg_ratios[4]["Lower Bound"]), + float(dmg_ratios[4]["Upper Bound"]), + ] + collapse_bound = [ + float(dmg_ratios[5]["Lower Bound"]), + float(dmg_ratios[5]["Upper Bound"]), + ] if no_dmg_bound[0] <= mean_damage <= no_dmg_bound[1]: idx = 1 elif slight_bound[0] <= mean_damage <= slight_bound[1]: @@ -201,8 +230,9 @@ def get_expected_damage(mean_damage, dmg_ratios): return dmg_ratios[idx]["Damage State"] @staticmethod - def determine_parallelism_locally(self, number_of_loops, - user_defined_parallelism=0): + def determine_parallelism_locally( + self, number_of_loops, user_defined_parallelism=0 + ): """Determine the parallelism on the current compute node. Args: @@ -218,17 +248,20 @@ def determine_parallelism_locally(self, number_of_loops, number_of_cpu = os.cpu_count() if number_of_loops > 0: if user_defined_parallelism > 0: - return min(number_of_cpu, number_of_loops, - user_defined_parallelism) + return min(number_of_cpu, number_of_loops, user_defined_parallelism) else: return min(number_of_cpu, number_of_loops) else: return number_of_cpu @staticmethod - def create_result_dataset(datasvc: DataService, parentid: str, - result_files: List[str], title: str, - output_metadata: Dict[str, str]): + def create_result_dataset( + datasvc: DataService, + parentid: str, + result_files: List[str], + title: str, + output_metadata: Dict[str, str], + ): # Result metadata properties = output_metadata properties["title"] = title @@ -244,7 +277,9 @@ def create_result_dataset(datasvc: DataService, parentid: str, return result_dataset_id @staticmethod - def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_probabilities): + def adjust_damage_for_liquefaction( + limit_state_probabilities, ground_failure_probabilities + ): """Adjusts building damage probability based on liquefaction ground failure probability with the liq_dmg, we know that it is 3 values, the first two are the same. The 3rd might be different. @@ -271,20 +306,26 @@ def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_pro # second-to-last probability of ground failure instead. 
if i > ground_failure_probabilities_len - 1: - prob_ground_failure = ground_failure_probabilities[ground_failure_probabilities_len - 2] + prob_ground_failure = ground_failure_probabilities[ + ground_failure_probabilities_len - 2 + ] else: prob_ground_failure = ground_failure_probabilities[i] - adjusted_limit_state_probabilities[keys[i]] = \ - limit_state_probabilities[keys[i]] + prob_ground_failure \ + adjusted_limit_state_probabilities[keys[i]] = ( + limit_state_probabilities[keys[i]] + + prob_ground_failure - limit_state_probabilities[keys[i]] * prob_ground_failure + ) # the final one is the last of limitStates should match with the last of ground failures j = len(limit_state_probabilities) - 1 prob_ground_failure = ground_failure_probabilities[-1] - adjusted_limit_state_probabilities[keys[j]] = \ - limit_state_probabilities[keys[j]] \ - + prob_ground_failure - limit_state_probabilities[keys[j]] * prob_ground_failure + adjusted_limit_state_probabilities[keys[j]] = ( + limit_state_probabilities[keys[j]] + + prob_ground_failure + - limit_state_probabilities[keys[j]] * prob_ground_failure + ) return adjusted_limit_state_probabilities @@ -294,13 +335,16 @@ def adjust_limit_states_for_pgd(limit_states, pgd_limit_states): adj_limit_states = collections.OrderedDict() for key, value in limit_states.items(): - adj_limit_states[key] = limit_states[key] + pgd_limit_states[key] - \ - (limit_states[key] * pgd_limit_states[key]) + adj_limit_states[key] = ( + limit_states[key] + + pgd_limit_states[key] + - (limit_states[key] * pgd_limit_states[key]) + ) return AnalysisUtil.update_precision_of_dicts(adj_limit_states) except KeyError as e: - print('Mismatched keys encountered in the limit states') + print("Mismatched keys encountered in the limit states") print(str(e)) @staticmethod @@ -331,38 +375,59 @@ def create_gdocstr_from_spec(specs): str: Google format docstrings to copy for the run() method of any analysis """ - desc = specs['description'] + desc = specs["description"] args = "" rets = "" - for dataset in specs['input_datasets']: + for dataset in specs["input_datasets"]: is_opt = "" - if not dataset['required']: + if not dataset["required"]: is_opt = ", " + "optional" - args = \ - args + dataset['id'] + "(str" + is_opt + ") : " \ - + dataset['description'] + ". " \ - + AnalysisUtil.get_custom_types_str(dataset['type']) + "\n\t" - - for param in specs['input_parameters']: + args = ( + args + + dataset["id"] + + "(str" + + is_opt + + ") : " + + dataset["description"] + + ". " + + AnalysisUtil.get_custom_types_str(dataset["type"]) + + "\n\t" + ) + + for param in specs["input_parameters"]: is_opt = "" - if not param['required']: + if not param["required"]: is_opt = ", " + "optional" - args = \ - args + param['id'] + "(" + AnalysisUtil.get_type_str(param['type']) + is_opt + ") : " \ - + param['description'] + "\n\t" - - for dataset in specs['output_datasets']: - rets = rets + dataset['id'] + ": " \ - + dataset[ - 'description'] + ". " + AnalysisUtil.get_custom_types_str( - dataset['type']) + "\n\t" - - docstr = AnalysisUtil.DOCSTR_FORMAT.replace("$DESC$", desc).replace( - "$ARGS$", - args).replace("$RETS$", rets) + args = ( + args + + param["id"] + + "(" + + AnalysisUtil.get_type_str(param["type"]) + + is_opt + + ") : " + + param["description"] + + "\n\t" + ) + + for dataset in specs["output_datasets"]: + rets = ( + rets + + dataset["id"] + + ": " + + dataset["description"] + + ". 
" + + AnalysisUtil.get_custom_types_str(dataset["type"]) + + "\n\t" + ) + + docstr = ( + AnalysisUtil.DOCSTR_FORMAT.replace("$DESC$", desc) + .replace("$ARGS$", args) + .replace("$RETS$", rets) + ) print(docstr) @@ -379,7 +444,7 @@ def get_type_str(class_type): """ t = str(class_type) - match = re.search('\'([^"]*)\'', t) + match = re.search("'([^\"]*)'", t) if match is not None: return match.group(1) return None @@ -395,11 +460,11 @@ def get_custom_types_str(types): str: Formatted string with applicable datatypes used to generate docstrigns from specs """ - custom_types_str = 'Applicable dataset type(s): ' - if (isinstance(types, str)): + custom_types_str = "Applicable dataset type(s): " + if isinstance(types, str): return custom_types_str + types - if (isinstance(types, list)): - if (len(types) > 1): + if isinstance(types, list): + if len(types) > 1: idx = 0 for type in types: if idx < len(types) - 1: @@ -415,7 +480,7 @@ def get_custom_types_str(types): def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): - yield lst[i:i + n] + yield lst[i : i + n] @staticmethod def get_hazard_demand_type(building, fragility_set, hazard_type): @@ -432,7 +497,6 @@ def get_hazard_demand_type(building, fragility_set, hazard_type): str: A hazard demand type. """ - BLDG_STORIES = "no_stories" PROPERTIES = "properties" BLDG_PERIOD = "period" @@ -440,14 +504,16 @@ def get_hazard_demand_type(building, fragility_set, hazard_type): hazard_demand_type = fragility_hazard_type if hazard_type.lower() == "earthquake": - num_stories = building[PROPERTIES][BLDG_STORIES] # Get building period from the fragility if possible - building_args = fragility_set.construct_expression_args_from_inventory(building) + building_args = fragility_set.construct_expression_args_from_inventory( + building + ) building_period = fragility_set.fragility_curves[0].get_building_period( - fragility_set.curve_parameters, **building_args) + fragility_set.curve_parameters, **building_args + ) - if fragility_hazard_type.endswith('sa') and fragility_hazard_type != 'sa': + if fragility_hazard_type.endswith("sa") and fragility_hazard_type != "sa": # This fixes a bug where demand type is in a format similar to 1.0 Sec Sa if len(fragility_hazard_type.split()) > 2: building_period = fragility_hazard_type.split()[0] @@ -456,7 +522,7 @@ def get_hazard_demand_type(building, fragility_set, hazard_type): hazard_demand_type = fragility_hazard_type # This handles the case where some fragilities only specify Sa, others a specific period of Sa - if not hazard_demand_type.endswith('pga'): + if not hazard_demand_type.endswith("pga"): # If the fragility does not contain the period calculation, check if the dataset has it if building_period == 0.0 and BLDG_PERIOD in building[PROPERTIES]: if building[PROPERTIES][BLDG_PERIOD] > 0.0: @@ -472,7 +538,9 @@ def get_hazard_demand_type(building, fragility_set, hazard_type): return hazard_demand_type @staticmethod - def get_hazard_demand_types_units(building, fragility_set, hazard_type, allowed_demand_types): + def get_hazard_demand_types_units( + building, fragility_set, hazard_type, allowed_demand_types + ): """ Get hazard demand type. This method is intended to replace get_hazard_demand_type. Fragility_set is not a json but a fragilityCurveSet object now. 
@@ -523,21 +591,28 @@ def get_hazard_demand_types_units(building, fragility_set, hazard_type, allowed_ if allowed: num_stories = building[PROPERTIES][BLDG_STORIES] # Get building period from the fragility if possible - building_args = fragility_set.construct_expression_args_from_inventory(building) - building_period = fragility_set.fragility_curves[0].get_building_period( - fragility_set.curve_parameters, **building_args) + building_args = ( + fragility_set.construct_expression_args_from_inventory(building) + ) + building_period = fragility_set.fragility_curves[ + 0 + ].get_building_period( + fragility_set.curve_parameters, **building_args + ) # TODO: There might be a bug here as this is not handling SD - if demand_type.endswith('sa'): + if demand_type.endswith("sa"): # This fixes a bug where demand type is in a format similar to 1.0 Sec Sa - if demand_type != 'sa': + if demand_type != "sa": if len(demand_type.split()) > 2: building_period = demand_type.split()[0] adjusted_demand_type = str(building_period) + " " + "SA" else: if building_period == 0.0: - if BLDG_PERIOD in building[PROPERTIES] and building[PROPERTIES][BLDG_PERIOD] > 0.0: - + if ( + BLDG_PERIOD in building[PROPERTIES] + and building[PROPERTIES][BLDG_PERIOD] > 0.0 + ): building_period = building[PROPERTIES][BLDG_PERIOD] else: # try to calculate the period from the expression @@ -546,8 +621,12 @@ def get_hazard_demand_types_units(building, fragility_set, hazard_type, allowed_ # TODO: This is a hack and expects a parameter with name "period" present. # This can potentially cause naming conflicts in some fragilities - building_period = evaluateexpression.evaluate(param["expression"], - {"num_stories": num_stories}) + building_period = ( + evaluateexpression.evaluate( + param["expression"], + {"num_stories": num_stories}, + ) + ) # TODO: num_stories logic is not tested. should find a fragility with # periodEqnType = 2 or 3 to test. periodEqnType = 1 doesn't need # num_stories. @@ -564,7 +643,9 @@ def get_hazard_demand_types_units(building, fragility_set, hazard_type, allowed_ return adjusted_demand_types, adjusted_demand_units, adjusted_to_original @staticmethod - def group_by_demand_type(inventories, fragility_sets, hazard_type="earthquake", is_building=False): + def group_by_demand_type( + inventories, fragility_sets, hazard_type="earthquake", is_building=False + ): """ This method should replace group_by_demand_type in the future. 
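As an illustration of the grouping pattern group_by_demand_type builds, here is a self-contained sketch using plain dictionaries in place of FragilityCurveSet objects (the ids and demand values are placeholders):

    # map (demand_type, demand_unit) -> list of fragility ids
    fragility_sets = {
        "f1": {"demand_types": ["PGA"], "demand_units": ["g"]},
        "f2": {"demand_types": ["0.2 SA"], "demand_units": ["g"]},
        "f3": {"demand_types": ["PGA"], "demand_units": ["g"]},
    }

    grouped = {}
    for fragility_id, frag in fragility_sets.items():
        key = (frag["demand_types"][0], frag["demand_units"][0])
        grouped.setdefault(key, []).append(fragility_id)

    # grouped == {("PGA", "g"): ["f1", "f3"], ("0.2 SA", "g"): ["f2"]}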
Fragility_sets is not list of dictionary ( json) anymore but a list of FragilityCurveSet objects @@ -581,14 +662,15 @@ def group_by_demand_type(inventories, fragility_sets, hazard_type="earthquake", """ grouped_inventory = dict() for fragility_id, frag in fragility_sets.items(): - # TODO this method will be deprecated so this is temporary fix demand_type = frag.demand_types[0] demand_unit = frag.demand_units[0] if is_building: inventory = inventories[fragility_id] - demand_type = AnalysisUtil.get_hazard_demand_type(inventory, frag, hazard_type) + demand_type = AnalysisUtil.get_hazard_demand_type( + inventory, frag, hazard_type + ) tpl = (demand_type, demand_unit) grouped_inventory.setdefault(tpl, []).append(fragility_id) @@ -634,7 +716,7 @@ def get_exposure_from_hazard_values(hazard_vals, hazard_type): @staticmethod def do_hazard_values_have_errors(hazard_vals): - """ Checks if any of the hazard values have errors + """Checks if any of the hazard values have errors Args: hazard_vals(list): List of hazard values returned by the service for a particular point @@ -658,12 +740,19 @@ def get_discretized_restoration(restoration_curve_set, discretized_days): class_restoration = {} for time in discretized_days: - restoration_times = restoration_curve_set.calculate_restoration_rates(time=time) + restoration_times = restoration_curve_set.calculate_restoration_rates( + time=time + ) # Key (e.g. day1, day3) - time_key = "day"+str(time) - - restoration = [1, restoration_times['PF_0'], restoration_times['PF_1'], restoration_times['PF_2'], - restoration_times['PF_3']] + time_key = "day" + str(time) + + restoration = [ + 1, + restoration_times["PF_0"], + restoration_times["PF_1"], + restoration_times["PF_2"], + restoration_times["PF_3"], + ] class_restoration[time_key] = restoration diff --git a/pyincore/utils/cge_ml_file_util.py b/pyincore/utils/cge_ml_file_util.py index e055511b0..78605343d 100644 --- a/pyincore/utils/cge_ml_file_util.py +++ b/pyincore/utils/cge_ml_file_util.py @@ -103,12 +103,16 @@ def parse_base_vals( """ base_cap_factors: List[np.ndarray] = [] - base_cap: np.ndarray = CGEMLFileUtil.parse_csv(filenames[-1], ds_sectors).reshape( + base_cap: np.ndarray = CGEMLFileUtil.parse_csv( + filenames[-1], ds_sectors + ).reshape( 1, -1 ) # 1 x K array K = number of sectors in the model # logger.info(f"base_cap shape: {base_cap.shape}") - for filename, sector_order in zip(filenames[:-1], base_cap_sector_order.values()): + for filename, sector_order in zip( + filenames[:-1], base_cap_sector_order.values() + ): base_cap_factors.append( CGEMLFileUtil.parse_csv(filename, sector_order).reshape(-1, 1) ) # k_i x 1 array k_i = number of sectors k for a factor i @@ -146,7 +150,9 @@ def parse_files( """ logger.info("Parsing input files...") - model_coeffs, sectors, base_cap_sector_ordering = CGEMLFileUtil.parse_coeff(model_filenames) + model_coeffs, sectors, base_cap_sector_ordering = CGEMLFileUtil.parse_coeff( + model_filenames + ) base_cap_factors, base_cap = CGEMLFileUtil.parse_base_vals( filenames, sectors["ds"], base_cap_sector_ordering diff --git a/pyincore/utils/cgeoutputprocess.py b/pyincore/utils/cgeoutputprocess.py index 4a90dfd19..965f9fe99 100644 --- a/pyincore/utils/cgeoutputprocess.py +++ b/pyincore/utils/cgeoutputprocess.py @@ -12,8 +12,12 @@ class CGEOutputProcess: """This class converts csv results outputs of Joplin CGE analysis to json format.""" @staticmethod - def get_cge_household_count(household_count, household_count_path=None, filename_json=None, - income_categories=("HH1", "HH2", 
"HH3", "HH4", "HH5")): + def get_cge_household_count( + household_count, + household_count_path=None, + filename_json=None, + income_categories=("HH1", "HH2", "HH3", "HH4", "HH5"), + ): """Calculate income results from the output files of the Joplin CGE analysis and convert the results to json format. { @@ -43,18 +47,26 @@ def get_cge_household_count(household_count, household_count_path=None, filename after_event = {} pct_change = {} for income_category in income_categories: - before_event[income_category] = household_group_count[household_group_count["Household Group"] == - income_category]["HH0"].values[0] - after_event[income_category] = household_group_count[household_group_count["Household Group"] == - income_category]["HHL"].values[0] + before_event[income_category] = household_group_count[ + household_group_count["Household Group"] == income_category + ]["HH0"].values[0] + after_event[income_category] = household_group_count[ + household_group_count["Household Group"] == income_category + ]["HHL"].values[0] if before_event[income_category]: - pct_change[income_category] = 100 * ((after_event[income_category] - before_event[income_category]) / - abs(before_event[income_category])) + pct_change[income_category] = 100 * ( + (after_event[income_category] - before_event[income_category]) + / abs(before_event[income_category]) + ) else: pct_change[income_category] = None - cge_total_household_count = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} + cge_total_household_count = { + "beforeEvent": before_event, + "afterEvent": after_event, + "%_change": pct_change, + } if filename_json: with open(filename_json, "w") as outfile: @@ -63,8 +75,12 @@ def get_cge_household_count(household_count, household_count_path=None, filename return json.dumps(cge_total_household_count) @staticmethod - def get_cge_gross_income(gross_income, gross_income_path=None, filename_json=None, - income_categories=("HH1", "HH2", "HH3", "HH4", "HH5")): + def get_cge_gross_income( + gross_income, + gross_income_path=None, + filename_json=None, + income_categories=("HH1", "HH2", "HH3", "HH4", "HH5"), + ): """Calculate household gross income results from the output files of the Joplin CGE analysis and convert the results to json format. 
{ @@ -94,18 +110,26 @@ def get_cge_gross_income(gross_income, gross_income_path=None, filename_json=Non after_event = {} pct_change = {} for income_category in income_categories: - before_event[income_category] = household_income[household_income["Household Group"] == income_category][ - "Y0"].values[0] - after_event[income_category] = household_income[household_income["Household Group"] == income_category][ - "YL"].values[0] + before_event[income_category] = household_income[ + household_income["Household Group"] == income_category + ]["Y0"].values[0] + after_event[income_category] = household_income[ + household_income["Household Group"] == income_category + ]["YL"].values[0] if before_event[income_category]: - pct_change[income_category] = 100 * ((after_event[income_category] - before_event[income_category]) / - abs(before_event[income_category])) + pct_change[income_category] = 100 * ( + (after_event[income_category] - before_event[income_category]) + / abs(before_event[income_category]) + ) else: pct_change[income_category] = None - cge_total_household_income = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} + cge_total_household_income = { + "beforeEvent": before_event, + "afterEvent": after_event, + "%_change": pct_change, + } if filename_json: with open(filename_json, "w") as outfile: @@ -114,8 +138,14 @@ def get_cge_gross_income(gross_income, gross_income_path=None, filename_json=Non return json.dumps(cge_total_household_income) @staticmethod - def get_cge_employment(pre_demand, post_demand, pre_demand_path=None, post_demand_path=None, - filename_json=None, demand_categories=("GOODS", "TRADE", "OTHER")): + def get_cge_employment( + pre_demand, + post_demand, + pre_demand_path=None, + post_demand_path=None, + filename_json=None, + demand_categories=("GOODS", "TRADE", "OTHER"), + ): """Calculate employment results from the output files of the Joplin CGE analysis and convert the results to json format. The value is a sum of L1, L2 and L3 Labor groups numbers. { @@ -160,12 +190,18 @@ def get_cge_employment(pre_demand, post_demand, pre_demand_path=None, post_deman after_event[demand_category] = post_disaster_demand[demand_category].sum() if before_event[demand_category]: - pct_change[demand_category] = 100 * ((after_event[demand_category] - before_event[demand_category]) / - abs(before_event[demand_category])) + pct_change[demand_category] = 100 * ( + (after_event[demand_category] - before_event[demand_category]) + / abs(before_event[demand_category]) + ) else: pct_change[demand_category] = None - cge_employment = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} + cge_employment = { + "beforeEvent": before_event, + "afterEvent": after_event, + "%_change": pct_change, + } if filename_json: with open(filename_json, "w") as outfile: @@ -174,8 +210,12 @@ def get_cge_employment(pre_demand, post_demand, pre_demand_path=None, post_deman return json.dumps(cge_employment) @staticmethod - def get_cge_domestic_supply(domestic_supply, domestic_supply_path=None, filename_json=None, - supply_categories=("Goods", "Trade", "Other", "HS1", "HS2", "HS3")): + def get_cge_domestic_supply( + domestic_supply, + domestic_supply_path=None, + filename_json=None, + supply_categories=("Goods", "Trade", "Other", "HS1", "HS2", "HS3"), + ): """Calculate domestic supply results from the output files of the Joplin CGE analysis and convert the results to json format. 
{ @@ -208,15 +248,25 @@ def get_cge_domestic_supply(domestic_supply, domestic_supply_path=None, filename after_event = {} pct_change = {} for supply_category in supply_categories: - before_event[supply_category] = sector_supply[sector_supply["Sectors"] == supply_category]["DS0"].values[0] - after_event[supply_category] = sector_supply[sector_supply["Sectors"] == supply_category]["DSL"].values[0] + before_event[supply_category] = sector_supply[ + sector_supply["Sectors"] == supply_category + ]["DS0"].values[0] + after_event[supply_category] = sector_supply[ + sector_supply["Sectors"] == supply_category + ]["DSL"].values[0] if before_event[supply_category]: - pct_change[supply_category] = 100 * ((after_event[supply_category] - before_event[supply_category]) / - abs(before_event[supply_category])) + pct_change[supply_category] = 100 * ( + (after_event[supply_category] - before_event[supply_category]) + / abs(before_event[supply_category]) + ) else: pct_change[supply_category] = None - cge_domestic_supply = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} + cge_domestic_supply = { + "beforeEvent": before_event, + "afterEvent": after_event, + "%_change": pct_change, + } if filename_json: with open(filename_json, "w") as outfile: diff --git a/pyincore/utils/dataprocessutil.py b/pyincore/utils/dataprocessutil.py index d0aac6675..766e1253d 100644 --- a/pyincore/utils/dataprocessutil.py +++ b/pyincore/utils/dataprocessutil.py @@ -9,12 +9,11 @@ import pandas as pd import numpy as np -from pyincore import Dataset, DataService, IncoreClient +from pyincore import Dataset, DataService from functools import reduce class DataProcessUtil: - @staticmethod def get_mapped_result_from_analysis( client, @@ -442,7 +441,7 @@ def get_max_damage_state(dmg_result): guids = dmg_result[["guid"]] max_val = dmg_result[dmg_states].max(axis=1) - max_key = dmg_result[dmg_states].dropna(how='all').idxmax(axis=1) + max_key = dmg_result[dmg_states].dropna(how="all").idxmax(axis=1) dmg_concat = pd.concat([guids, max_val, max_key], axis=1) dmg_concat.rename(columns={0: "max_prob", 1: "max_state"}, inplace=True) @@ -479,12 +478,12 @@ def create_mapped_dmg_result_gal( Then rename them to their respective categories. 
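A compact sketch of the max-damage-state selection done in get_max_damage_state, using a toy DataFrame with assumed damage-state columns:

    import pandas as pd

    dmg_result = pd.DataFrame(
        {
            "guid": ["a", "b"],
            "DS_0": [0.7, 0.1],
            "DS_1": [0.2, 0.2],
            "DS_2": [0.1, 0.3],
            "DS_3": [0.0, 0.4],
        }
    )
    dmg_states = ["DS_0", "DS_1", "DS_2", "DS_3"]

    max_val = dmg_result[dmg_states].max(axis=1)
    max_key = dmg_result[dmg_states].dropna(how="all").idxmax(axis=1)

    result = pd.concat([dmg_result[["guid"]], max_val, max_key], axis=1)
    result.columns = ["guid", "max_prob", "max_state"]
    # row "a" -> DS_0 (0.7), row "b" -> DS_3 (0.4)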
""" - max_dmg_result.loc[max_dmg_result["max_state"] == "DS_0", "max_state"] = ( - "Affected" - ) - max_dmg_result.loc[max_dmg_result["max_state"] == "DS_1", "max_state"] = ( - "Affected" - ) + max_dmg_result.loc[ + max_dmg_result["max_state"] == "DS_0", "max_state" + ] = "Affected" + max_dmg_result.loc[ + max_dmg_result["max_state"] == "DS_1", "max_state" + ] = "Affected" max_dmg_result.loc[max_dmg_result["max_state"] == "DS_2", "max_state"] = "Minor" max_dmg_result.loc[ ( @@ -493,9 +492,9 @@ def create_mapped_dmg_result_gal( ), "max_state", ] = "Major" - max_dmg_result.loc[max_dmg_result["sw_max_ds"] == "DS_3", "max_state"] = ( - "Destroyed" - ) + max_dmg_result.loc[ + max_dmg_result["sw_max_ds"] == "DS_3", "max_state" + ] = "Destroyed" result_by_cluster, result_by_category = DataProcessUtil.create_mapped_dmg( inventory, max_dmg_result, arch_mapping, groupby_col_name, arch_col diff --git a/pyincore/utils/datasetutil.py b/pyincore/utils/datasetutil.py index 33a177b8f..5e7b57446 100644 --- a/pyincore/utils/datasetutil.py +++ b/pyincore/utils/datasetutil.py @@ -11,8 +11,8 @@ from pyincore import Dataset, DataService, MappingSet # for evaluation of retrofit expression -import math -import scipy +import math # noqa: F401 +import scipy # noqa: F401 class DatasetUtil: @@ -34,7 +34,7 @@ def join_datasets(geodataset, tabledataset, clean_attributes=False): # remove the tables except guid if clean_attributes: - gdf = gdf[['geometry','guid']] + gdf = gdf[["geometry", "guid"]] # joining and indexing join_gdf = gdf.set_index("guid").join(df.set_index("guid")) @@ -45,26 +45,29 @@ def join_datasets(geodataset, tabledataset, clean_attributes=False): def join_table_dataset_with_source_dataset(dataset, client, clean_attributes=False): """Creates geopandas geodataframe by joining table dataset and its source dataset - Args: - dataset (Dataset): pyincore dataset object - client (Client): pyincore service client object - clean_attributes (boolean): flag for deleting the fields except guid and the fields in csv table + Args: + dataset (Dataset): pyincore dataset object + client (Client): pyincore service client object + clean_attributes (boolean): flag for deleting the fields except guid and the fields in csv table - Returns: - gpd.Dataset: Geopandas geodataframe object. + Returns: + gpd.Dataset: Geopandas geodataframe object. 
""" is_source_dataset = False source_dataset = None # check if the given dataset is table dataset - if dataset.metadata['format'] != 'table' and dataset.metadata['format'] != 'csv': + if ( + dataset.metadata["format"] != "table" + and dataset.metadata["format"] != "csv" + ): print("The given dataset is not a table dataset") return None # check if source dataset exists try: - source_dataset = dataset.metadata['sourceDataset'] + source_dataset = dataset.metadata["sourceDataset"] is_source_dataset = True except Exception: print("There is no source dataset for the give table dataset") @@ -72,14 +75,18 @@ def join_table_dataset_with_source_dataset(dataset, client, clean_attributes=Fal if is_source_dataset: # merge dataset and source dataset geodataset = Dataset.from_data_service(source_dataset, DataService(client)) - joined_gdf = DatasetUtil.join_datasets(geodataset, dataset, clean_attributes) + joined_gdf = DatasetUtil.join_datasets( + geodataset, dataset, clean_attributes + ) else: return None return joined_gdf @staticmethod - def construct_updated_inventories(inventory_dataset: Dataset, add_info_dataset: Dataset, mapping: MappingSet): + def construct_updated_inventories( + inventory_dataset: Dataset, add_info_dataset: Dataset, mapping: MappingSet + ): """ This method update the given inventory with retrofit information based on the mapping and additional information @@ -94,61 +101,90 @@ def construct_updated_inventories(inventory_dataset: Dataset, add_info_dataset: """ if add_info_dataset is not None: inventory_df = inventory_dataset.get_dataframe_from_shapefile() - inventory_df.set_index('guid', inplace=True) + inventory_df.set_index("guid", inplace=True) add_info_df = add_info_dataset.get_dataframe_from_csv() - add_info_df.set_index('guid', inplace=True) + add_info_df.set_index("guid", inplace=True) # if additional information e.g. Retrofit presented, merge inventory properties with that additional # information - inventory_df = pd.merge(inventory_df, add_info_df, left_index=True, right_index=True, how='left') + inventory_df = pd.merge( + inventory_df, add_info_df, left_index=True, right_index=True, how="left" + ) # prepare retrofit definition into pandas dataframe; need to work with retrofit if len(mapping.mappingEntryKeys) > 0: mapping_entry_keys_df = pd.DataFrame(mapping.mappingEntryKeys) # add suffix to avoid conflict - mapping_entry_keys_df.columns = [col + '_mappingEntryKey' for col in mapping_entry_keys_df.columns] - mapping_entry_keys_df.set_index('name_mappingEntryKey', inplace=True) - inventory_df = pd.merge(inventory_df, mapping_entry_keys_df, left_on='retrofit_key', right_index=True, - how='left') - inventory_df.drop(columns=['defaultKey_mappingEntryKey'], inplace=True) + mapping_entry_keys_df.columns = [ + col + "_mappingEntryKey" for col in mapping_entry_keys_df.columns + ] + mapping_entry_keys_df.set_index("name_mappingEntryKey", inplace=True) + inventory_df = pd.merge( + inventory_df, + mapping_entry_keys_df, + left_on="retrofit_key", + right_index=True, + how="left", + ) + inventory_df.drop(columns=["defaultKey_mappingEntryKey"], inplace=True) else: - raise ValueError("Missing proper definition for mappingEntryKeys in the mapping!") + raise ValueError( + "Missing proper definition for mappingEntryKeys in the mapping!" 
+ ) def _apply_retrofit_value(row): - target_column = row["config_mappingEntryKey"]["targetColumn"] \ - if ("config_mappingEntryKey" in row.index and - isinstance(row["config_mappingEntryKey"], dict) and - "targetColumn" in row["config_mappingEntryKey"].keys()) else None - expression = row["config_mappingEntryKey"]["expression"] \ - if ("config_mappingEntryKey" in row.index and - isinstance(row["config_mappingEntryKey"], dict) and - "expression" in row["config_mappingEntryKey"].keys()) else None - type = row["config_mappingEntryKey"]["type"] \ - if ("config_mappingEntryKey" in row.index and - isinstance(row["config_mappingEntryKey"], dict) and - "type" in row["config_mappingEntryKey"].keys()) else None + target_column = ( + row["config_mappingEntryKey"]["targetColumn"] + if ( + "config_mappingEntryKey" in row.index + and isinstance(row["config_mappingEntryKey"], dict) + and "targetColumn" in row["config_mappingEntryKey"].keys() + ) + else None + ) + expression = ( + row["config_mappingEntryKey"]["expression"] + if ( + "config_mappingEntryKey" in row.index + and isinstance(row["config_mappingEntryKey"], dict) + and "expression" in row["config_mappingEntryKey"].keys() + ) + else None + ) + _ = ( + row["config_mappingEntryKey"]["type"] + if ( + "config_mappingEntryKey" in row.index + and isinstance(row["config_mappingEntryKey"], dict) + and "type" in row["config_mappingEntryKey"].keys() + ) + else None + ) if target_column and expression: if target_column in row.index: - retrofit_value = float(row["retrofit_value"]) if type == "number" else row["retrofit_value"] - # Dangerous! Be careful with the expression row[target_column] = eval(f"row[target_column]{expression}") else: - raise ValueError(f"targetColumn: {target_column} not found in inventory properties!") + raise ValueError( + f"targetColumn: {target_column} not found in inventory properties!" 
+ ) return row inventory_df = inventory_df.apply(_apply_retrofit_value, axis=1) # rename columns to fit the character limit of shapefile - inventory_df.rename(columns={ - 'retrofit_key': 'retrofit_k', - 'retrofit_value': 'retrofit_v', - 'description_mappingEntryKey': 'descr_map', - 'config_mappingEntryKey': 'config_map' - }, inplace=True) + inventory_df.rename( + columns={ + "retrofit_key": "retrofit_k", + "retrofit_value": "retrofit_v", + "description_mappingEntryKey": "descr_map", + "config_mappingEntryKey": "config_map", + }, + inplace=True, + ) # save the updated inventory to a new shapefile tmpdirname = tempfile.mkdtemp() @@ -156,7 +192,11 @@ def _apply_retrofit_value(row): inventory_df.to_file(file_path) # return the updated inventory dataset in geoDataframe for future consumption - return Dataset.from_file(file_path, inventory_dataset.data_type), tmpdirname, inventory_df + return ( + Dataset.from_file(file_path, inventory_dataset.data_type), + tmpdirname, + inventory_df, + ) else: # return original dataset return inventory_dataset, None, None diff --git a/pyincore/utils/evaluateexpression.py b/pyincore/utils/evaluateexpression.py index 7ead79cb3..0f79b6dc0 100644 --- a/pyincore/utils/evaluateexpression.py +++ b/pyincore/utils/evaluateexpression.py @@ -1,23 +1,33 @@ import math -import scipy -import numpy -import decimal - -INVALID_NAMES = ["exec", "func", "eval", "type", "isinstance", "getattr", "setattr", "repr", - "compile", "open"] +import scipy # noqa: F401 +import numpy # noqa: F401 +import decimal # noqa: F401 + +INVALID_NAMES = [ + "exec", + "func", + "eval", + "type", + "isinstance", + "getattr", + "setattr", + "repr", + "compile", + "open", +] def evaluate(expression: str, parameters: dict = {}): """Evaluate a math expression. - Args: - expression (str): Math expression. - parameters (dict): Expression parameters. + Args: + expression (str): Math expression. + parameters (dict): Expression parameters. - Returns: - float: A result of expression evaluation. + Returns: + float: A result of expression evaluation. - """ + """ # Compile the expression code = compile(expression, "", "eval") @@ -26,23 +36,26 @@ def evaluate(expression: str, parameters: dict = {}): if "__" in name or name in INVALID_NAMES: raise NameError(f"The use of '{name}' is not allowed.") for parameter in parameters: - if type(parameter) == "str" and ("__" in parameter or parameter in INVALID_NAMES): + if type(parameter) is str and ("__" in parameter or parameter in INVALID_NAMES): raise NameError(f"Using '{parameter}' is not allowed.") # TODO figure out a better way of doing this. Can we import the packages here directly? 
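For context, the restricted evaluate() helper whose globals are reformatted below is called with a small parameter dict, as the building-period hunks above do; the import path and expression here are assumptions for illustration:

    from pyincore.utils import evaluateexpression

    # evaluate a period expression with only whitelisted names available
    period = evaluateexpression.evaluate(
        "math.sqrt(num_stories) * 0.1", {"num_stories": 4}
    )
    # -> 0.2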
- safe_globals = {"__builtins__": {"min": min, - "max": max, - "round": round, - "sum": sum, - "abs": abs, - "pow": pow}, - "scipy": globals()["scipy"], - "numpy": globals()["numpy"], - "math": globals()["math"], - "decimal": globals()["decimal"]} + safe_globals = { + "__builtins__": { + "min": min, + "max": max, + "round": round, + "sum": sum, + "abs": abs, + "pow": pow, + }, + "scipy": globals()["scipy"], + "numpy": globals()["numpy"], + "math": globals()["math"], + "decimal": globals()["decimal"], + } try: return eval(code, safe_globals, parameters) except Exception as e: print(f"An unexpected error occurred: {e}") return math.nan - diff --git a/pyincore/utils/expressioneval/__init__.py b/pyincore/utils/expressioneval/__init__.py index 197d02aea..6cf65ae8f 100644 --- a/pyincore/utils/expressioneval/__init__.py +++ b/pyincore/utils/expressioneval/__init__.py @@ -37,8 +37,7 @@ TFUNCALL = 4 -class Token(): - +class Token: def __init__(self, type_, index_, prio_, number_): self.type_ = type_ self.index_ = index_ or 0 @@ -51,13 +50,12 @@ def toString(self): if self.type_ == TOP1 or self.type_ == TOP2 or self.type_ == TVAR: return self.index_ elif self.type_ == TFUNCALL: - return 'CALL' + return "CALL" else: - return 'Invalid Token' - + return "Invalid Token" -class Expression(): +class Expression: def __init__(self, tokens, ops1, ops2, functions): self.tokens = tokens self.ops1 = ops1 @@ -140,7 +138,7 @@ def evaluate(self, values): elif item.index_ in self.functions: nstack.append(self.functions[item.index_]) else: - raise Exception('undefined variable: ' + item.index_) + raise Exception("undefined variable: " + item.index_) elif type_ == TOP1: n1 = nstack.pop() f = self.ops1[item.index_] @@ -154,11 +152,11 @@ def evaluate(self, values): else: nstack.append(f(n1)) else: - raise Exception(f + ' is not a function') + raise Exception(f + " is not a function") else: - raise Exception('invalid Expression') + raise Exception("invalid Expression") if len(nstack) > 1: - raise Exception('invalid Expression (parity)') + raise Exception("invalid Expression (parity)") return nstack[0] def toString(self, toJS=False): @@ -176,37 +174,38 @@ def toString(self, toJS=False): n2 = nstack.pop() n1 = nstack.pop() f = item.index_ - if toJS and f == '^': - nstack.append('math.pow(' + n1 + ',' + n2 + ')') + if toJS and f == "^": + nstack.append("math.pow(" + n1 + "," + n2 + ")") else: - frm = '({n1}{f}{n2})' - if f == ',': - frm = '{n1}{f}{n2}' - - nstack.append(frm.format( - n1=n1, - n2=n2, - f=f, - )) - + frm = "({n1}{f}{n2})" + if f == ",": + frm = "{n1}{f}{n2}" + + nstack.append( + frm.format( + n1=n1, + n2=n2, + f=f, + ) + ) elif type_ == TVAR: nstack.append(item.index_) elif type_ == TOP1: n1 = nstack.pop() f = item.index_ - if f == '-': - nstack.append('(' + f + str(n1) + ')') + if f == "-": + nstack.append("(" + f + str(n1) + ")") else: - nstack.append(f + '(' + n1 + ')') + nstack.append(f + "(" + n1 + ")") elif type_ == TFUNCALL: n1 = nstack.pop() f = nstack.pop() - nstack.append(f + '(' + n1 + ')') + nstack.append(f + "(" + n1 + ")") else: - raise Exception('invalid Expression') + raise Exception("invalid Expression") if len(nstack) > 1: - raise Exception('invalid Expression (parity)') + raise Exception("invalid Expression (parity)") return nstack[0] def __str__(self): @@ -221,9 +220,7 @@ def symbols(self): return vars def variables(self): - return [ - sym for sym in self.symbols() - if sym not in self.functions] + return [sym for sym in self.symbols() if sym not in self.functions] class Parser: @@ -256,9 
+253,9 @@ def mod(self, a, b): return a % b def concat(self, a, b, *args): - result = u'{0}{1}'.format(a, b) + result = "{0}{1}".format(a, b) for arg in args: - result = u'{0}{1}'.format(result, arg) + result = "{0}{1}".format(result, arg) return result def equal(self, a, b): @@ -280,10 +277,10 @@ def lessThanEqual(self, a, b): return a <= b def andOperator(self, a, b): - return (a and b) + return a and b def orOperator(self, a, b): - return (a or b) + return a or b def neg(self, a): return -a @@ -308,8 +305,8 @@ def append(self, a, b): def __init__(self): self.success = False - self.errormsg = '' - self.expression = '' + self.errormsg = "" + self.expression = "" self.pos = 0 @@ -319,31 +316,31 @@ def __init__(self): self.tmpprio = 0 self.ops1 = { - 'sin': math.sin, - 'cos': math.cos, - 'tan': math.tan, - 'asin': math.asin, - 'acos': math.acos, - 'atan': math.atan, - 'sqrt': math.sqrt, - 'abs': abs, - 'ceil': math.ceil, - 'floor': math.floor, - 'round': round, - '-': self.neg, - 'exp': math.exp, + "sin": math.sin, + "cos": math.cos, + "tan": math.tan, + "asin": math.asin, + "acos": math.acos, + "atan": math.atan, + "sqrt": math.sqrt, + "abs": abs, + "ceil": math.ceil, + "floor": math.floor, + "round": round, + "-": self.neg, + "exp": math.exp, } self.ops2 = { - '+': self.add, - '-': self.sub, - '*': self.mul, - '/': self.div, - '%': self.mod, - '^': math.pow, - '**': math.pow, - ',': self.append, - '||': self.concat, + "+": self.add, + "-": self.sub, + "*": self.mul, + "/": self.div, + "%": self.mod, + "^": math.pow, + "**": math.pow, + ",": self.append, + "||": self.concat, "==": self.equal, "!=": self.notEqual, ">": self.greaterThan, @@ -351,54 +348,54 @@ def __init__(self): ">=": self.greaterThanEqual, "<=": self.lessThanEqual, "and": self.andOperator, - "or": self.orOperator + "or": self.orOperator, } self.functions = { - 'random': random, - 'fac': self.fac, - 'log': math.log, - 'min': min, - 'max': max, - 'pyt': self.pyt, - 'pow': math.pow, - 'atan2': math.atan2, - 'concat': self.concat, - 'if': self.ifFunction + "random": random, + "fac": self.fac, + "log": math.log, + "min": min, + "max": max, + "pyt": self.pyt, + "pow": math.pow, + "atan2": math.atan2, + "concat": self.concat, + "if": self.ifFunction, } self.consts = { - 'E': math.e, - 'PI': math.pi, + "E": math.e, + "PI": math.pi, } self.values = { - 'sin': math.sin, - 'cos': math.cos, - 'tan': math.tan, - 'asin': math.asin, - 'acos': math.acos, - 'atan': math.atan, - 'sqrt': math.sqrt, - 'log': math.log, - 'abs': abs, - 'ceil': math.ceil, - 'floor': math.floor, - 'round': round, - 'random': self.random, - 'fac': self.fac, - 'exp': math.exp, - 'min': min, - 'max': max, - 'pyt': self.pyt, - 'pow': math.pow, - 'atan2': math.atan2, - 'E': math.e, - 'PI': math.pi + "sin": math.sin, + "cos": math.cos, + "tan": math.tan, + "asin": math.asin, + "acos": math.acos, + "atan": math.atan, + "sqrt": math.sqrt, + "log": math.log, + "abs": abs, + "ceil": math.ceil, + "floor": math.floor, + "round": round, + "random": self.random, + "fac": self.fac, + "exp": math.exp, + "min": min, + "max": max, + "pyt": self.pyt, + "pow": math.pow, + "atan2": math.atan2, + "E": math.e, + "PI": math.pi, } def parse(self, expr): - self.errormsg = '' + self.errormsg = "" self.success = True operstack = [] tokenstack = [] @@ -413,99 +410,100 @@ def parse(self, expr): if self.isSign() and expected & self.SIGN: if self.isNegativeSign(): self.tokenprio = 5 - self.tokenindex = '-' + self.tokenindex = "-" noperators += 1 self.addfunc(tokenstack, operstack, TOP1) - 
expected = \ - self.PRIMARY | self.LPAREN | self.FUNCTION | self.SIGN + expected = self.PRIMARY | self.LPAREN | self.FUNCTION | self.SIGN elif self.isComment(): pass else: if expected and self.OPERATOR == 0: - self.error_parsing(self.pos, 'unexpected operator') + self.error_parsing(self.pos, "unexpected operator") noperators += 2 self.addfunc(tokenstack, operstack, TOP2) - expected = \ - self.PRIMARY | self.LPAREN | self.FUNCTION | self.SIGN + expected = self.PRIMARY | self.LPAREN | self.FUNCTION | self.SIGN elif self.isNumber(): if expected and self.PRIMARY == 0: - self.error_parsing(self.pos, 'unexpected number') + self.error_parsing(self.pos, "unexpected number") token = Token(TNUMBER, 0, 0, self.tokennumber) tokenstack.append(token) expected = self.OPERATOR | self.RPAREN | self.COMMA elif self.isString(): if (expected & self.PRIMARY) == 0: - self.error_parsing(self.pos, 'unexpected string') + self.error_parsing(self.pos, "unexpected string") token = Token(TNUMBER, 0, 0, self.tokennumber) tokenstack.append(token) expected = self.OPERATOR | self.RPAREN | self.COMMA elif self.isLeftParenth(): if (expected & self.LPAREN) == 0: - self.error_parsing(self.pos, 'unexpected \"(\"') + self.error_parsing(self.pos, 'unexpected "("') if expected & self.CALL: noperators += 2 self.tokenprio = -2 self.tokenindex = -1 self.addfunc(tokenstack, operstack, TFUNCALL) - expected = \ - self.PRIMARY | self.LPAREN | self.FUNCTION | \ - self.SIGN | self.NULLARY_CALL + expected = ( + self.PRIMARY + | self.LPAREN + | self.FUNCTION + | self.SIGN + | self.NULLARY_CALL + ) elif self.isRightParenth(): if expected & self.NULLARY_CALL: token = Token(TNUMBER, 0, 0, []) tokenstack.append(token) elif (expected & self.RPAREN) == 0: - self.error_parsing(self.pos, 'unexpected \")\"') - expected = \ - self.OPERATOR | self.RPAREN | self.COMMA | \ - self.LPAREN | self.CALL + self.error_parsing(self.pos, 'unexpected ")"') + expected = ( + self.OPERATOR | self.RPAREN | self.COMMA | self.LPAREN | self.CALL + ) elif self.isComma(): if (expected & self.COMMA) == 0: - self.error_parsing(self.pos, 'unexpected \",\"') + self.error_parsing(self.pos, 'unexpected ","') self.addfunc(tokenstack, operstack, TOP2) noperators += 2 - expected = \ - self.PRIMARY | self.LPAREN | self.FUNCTION | self.SIGN + expected = self.PRIMARY | self.LPAREN | self.FUNCTION | self.SIGN elif self.isConst(): if (expected & self.PRIMARY) == 0: - self.error_parsing(self.pos, 'unexpected constant') + self.error_parsing(self.pos, "unexpected constant") consttoken = Token(TNUMBER, 0, 0, self.tokennumber) tokenstack.append(consttoken) expected = self.OPERATOR | self.RPAREN | self.COMMA elif self.isOp2(): if (expected & self.FUNCTION) == 0: - self.error_parsing(self.pos, 'unexpected function') + self.error_parsing(self.pos, "unexpected function") self.addfunc(tokenstack, operstack, TOP2) noperators += 2 expected = self.LPAREN elif self.isOp1(): if (expected & self.FUNCTION) == 0: - self.error_parsing(self.pos, 'unexpected function') + self.error_parsing(self.pos, "unexpected function") self.addfunc(tokenstack, operstack, TOP1) noperators += 1 expected = self.LPAREN elif self.isVar(): if (expected & self.PRIMARY) == 0: - self.error_parsing(self.pos, 'unexpected variable') + self.error_parsing(self.pos, "unexpected variable") vartoken = Token(TVAR, self.tokenindex, 0, 0) tokenstack.append(vartoken) - expected = \ - self.OPERATOR | self.RPAREN | \ - self.COMMA | self.LPAREN | self.CALL + expected = ( + self.OPERATOR | self.RPAREN | self.COMMA | self.LPAREN | self.CALL + ) 
elif self.isWhite(): pass else: - if self.errormsg == '': - self.error_parsing(self.pos, 'unknown character') + if self.errormsg == "": + self.error_parsing(self.pos, "unknown character") else: self.error_parsing(self.pos, self.errormsg) if self.tmpprio < 0 or self.tmpprio >= 10: - self.error_parsing(self.pos, 'unmatched \"()\"') + self.error_parsing(self.pos, 'unmatched "()"') while len(operstack) > 0: tmp = operstack.pop() tokenstack.append(tmp) if (noperators + 1) != len(tokenstack): - self.error_parsing(self.pos, 'parity') + self.error_parsing(self.pos, "parity") return Expression(tokenstack, self.ops1, self.ops2, self.functions) @@ -514,7 +512,7 @@ def evaluate(self, expr, variables): def error_parsing(self, column, msg): self.success = False - self.errormsg = 'parse error [column ' + str(column) + ']: ' + msg + self.errormsg = "parse error [column " + str(column) + "]: " + msg raise Exception(self.errormsg) def addfunc(self, tokenstack, operstack, type_): @@ -534,24 +532,24 @@ def addfunc(self, tokenstack, operstack, type_): def isNumber(self): r = False - if self.expression[self.pos] == 'E': + if self.expression[self.pos] == "E": return False # number in scientific notation - pattern = r'([-+]?([0-9]*\.?[0-9]*)[eE][-+]?[0-9]+).*' - match = re.match(pattern, self.expression[self.pos:]) + pattern = r"([-+]?([0-9]*\.?[0-9]*)[eE][-+]?[0-9]+).*" + match = re.match(pattern, self.expression[self.pos :]) if match: self.pos += len(match.group(1)) self.tokennumber = float(match.group(1)) return True # number in decimal - str = '' + str = "" while self.pos < len(self.expression): code = self.expression[self.pos] - if (code >= '0' and code <= '9') or code == '.': - if (len(str) == 0 and code == '.'): - str = '0' + if (code >= "0" and code <= "9") or code == ".": + if len(str) == 0 and code == ".": + str = "0" str += code self.pos += 1 try: @@ -574,28 +572,28 @@ def unescape(self, v, pos): if c == "'": buffer.append("'") break - elif c == '\\': - buffer.append('\\') + elif c == "\\": + buffer.append("\\") break - elif c == '/': - buffer.append('/') + elif c == "/": + buffer.append("/") break - elif c == 'b': - buffer.append('\b') + elif c == "b": + buffer.append("\b") break - elif c == 'f': - buffer.append('\f') + elif c == "f": + buffer.append("\f") break - elif c == 'n': - buffer.append('\n') + elif c == "n": + buffer.append("\n") break - elif c == 'r': - buffer.append('\r') + elif c == "r": + buffer.append("\r") break - elif c == 't': - buffer.append('\t') + elif c == "t": + buffer.append("\t") break - elif c == 'u': + elif c == "u": # interpret the following 4 characters # as the hex of the unicode code point codePoint = int(v[i + 1, i + 5], 16) @@ -605,26 +603,26 @@ def unescape(self, v, pos): else: raise self.error_parsing( pos + i, - 'Illegal escape sequence: \'\\' + c + '\'', + "Illegal escape sequence: '\\" + c + "'", ) escaping = False else: - if c == '\\': + if c == "\\": escaping = True else: buffer.append(c) - return ''.join(buffer) + return "".join(buffer) def isString(self): r = False - str = '' + str = "" startpos = self.pos if self.pos < len(self.expression) and self.expression[self.pos] == "'": self.pos += 1 while self.pos < len(self.expression): code = self.expression[self.pos] - if code != '\'' or (str != '' and str[-1] == '\\'): + if code != "'" or (str != "" and str[-1] == "\\"): str += self.expression[self.pos] self.pos += 1 else: @@ -637,13 +635,16 @@ def isString(self): def isConst(self): for i in self.consts: L = len(i) - str = self.expression[self.pos:self.pos + L] + 
str = self.expression[self.pos : self.pos + L] if i == str: if len(self.expression) <= self.pos + L: self.tokennumber = self.consts[i] self.pos += L return True - if not self.expression[self.pos + L].isalnum() and self.expression[self.pos + L] != "_": + if ( + not self.expression[self.pos + L].isalnum() + and self.expression[self.pos + L] != "_" + ): self.tokennumber = self.consts[i] self.pos += L return True @@ -651,24 +652,24 @@ def isConst(self): def isOperator(self): ops = ( - ('+', 2, '+'), - ('-', 2, '-'), - ('**', 6, '**'), - ('*', 3, '*'), - (u'\u2219', 3, '*'), # bullet operator - (u'\u2022', 3, '*'), # black small circle - ('/', 4, '/'), - ('%', 4, '%'), - ('^', 6, '^'), - ('||', 1, '||'), - ('==', 1, '=='), - ('!=', 1, '!='), - ('<=', 1, '<='), - ('>=', 1, '>='), - ('<', 1, '<'), - ('>', 1, '>'), - ('and ', 0, 'and'), - ('or ', 0, 'or'), + ("+", 2, "+"), + ("-", 2, "-"), + ("**", 6, "**"), + ("*", 3, "*"), + ("\u2219", 3, "*"), # bullet operator + ("\u2022", 3, "*"), # black small circle + ("/", 4, "/"), + ("%", 4, "%"), + ("^", 6, "^"), + ("||", 1, "||"), + ("==", 1, "=="), + ("!=", 1, "!="), + ("<=", 1, "<="), + (">=", 1, ">="), + ("<", 1, "<"), + (">", 1, ">"), + ("and ", 0, "and"), + ("or ", 0, "or"), ) for token, priority, index in ops: if self.expression.startswith(token, self.pos): @@ -680,19 +681,19 @@ def isOperator(self): def isSign(self): code = self.expression[self.pos - 1] - return (code == '+') or (code == '-') + return (code == "+") or (code == "-") def isPositiveSign(self): code = self.expression[self.pos - 1] - return code == '+' + return code == "+" def isNegativeSign(self): code = self.expression[self.pos - 1] - return code == '-' + return code == "-" def isLeftParenth(self): code = self.expression[self.pos] - if code == '(': + if code == "(": self.pos += 1 self.tmpprio += 10 return True @@ -700,7 +701,7 @@ def isLeftParenth(self): def isRightParenth(self): code = self.expression[self.pos] - if code == ')': + if code == ")": self.pos += 1 self.tmpprio -= 10 return True @@ -708,7 +709,7 @@ def isRightParenth(self): def isComma(self): code = self.expression[self.pos] - if code == ',': + if code == ",": self.pos += 1 self.tokenprio = -1 self.tokenindex = "," @@ -723,11 +724,11 @@ def isWhite(self): return False def isOp1(self): - str = '' + str = "" for i in range(self.pos, len(self.expression)): c = self.expression[i] if c.upper() == c.lower(): - if i == self.pos or (c != '_' and (c < '0' or c > '9')): + if i == self.pos or (c != "_" and (c < "0" or c > "9")): break str += c if len(str) > 0 and str in self.ops1: @@ -738,11 +739,11 @@ def isOp1(self): return False def isOp2(self): - str = '' + str = "" for i in range(self.pos, len(self.expression)): c = self.expression[i] if c.upper() == c.lower(): - if i == self.pos or (c != '_' and (c < '0' or c > '9')): + if i == self.pos or (c != "_" and (c < "0" or c > "9")): break str += c if len(str) > 0 and (str in self.ops2): @@ -753,12 +754,15 @@ def isOp2(self): return False def isVar(self): - str = '' + str = "" inQuotes = False for i in range(self.pos, len(self.expression)): c = self.expression[i] if c.lower() == c.upper(): - if ((i == self.pos and c != '"') or (not (c in '_."') and (c < '0' or c > '9'))) and not inQuotes: + if ( + (i == self.pos and c != '"') + or (not (c in '_."') and (c < "0" or c > "9")) + ) and not inQuotes: break if c == '"': inQuotes = not inQuotes @@ -772,8 +776,8 @@ def isVar(self): def isComment(self): code = self.expression[self.pos - 1] - if code == '/' and self.expression[self.pos] == 
'*': - self.pos = self.expression.index('*/', self.pos) + 2 + if code == "/" and self.expression[self.pos] == "*": + self.pos = self.expression.index("*/", self.pos) + 2 if self.pos == 1: self.pos = len(self.expression) return True diff --git a/pyincore/utils/geoutil.py b/pyincore/utils/geoutil.py index 71489dea3..5cb446c1b 100644 --- a/pyincore/utils/geoutil.py +++ b/pyincore/utils/geoutil.py @@ -28,18 +28,18 @@ class GeoUtil: def get_location(feature): """Location of the object. - Args: - feature (obj): A JSON mapping of a geometric object from the inventory. + Args: + feature (obj): A JSON mapping of a geometric object from the inventory. - Note: - From the Shapely documentation: The centroid of an object might be one of its points, - but this is not guaranteed. + Note: + From the Shapely documentation: The centroid of an object might be one of its points, + but this is not guaranteed. - Returns: - point: A representation of the object’s geometric centroid. + Returns: + point: A representation of the object’s geometric centroid. - """ - geom = shape(feature['geometry']) + """ + geom = shape(feature["geometry"]) return geom.centroid @staticmethod @@ -55,8 +55,10 @@ def find_nearest_feature(features, query_point): obj: A nearest feature. obj: Nearest distances. - """ - points = np.asarray([feature['geometry']['coordinates'] for feature in features]) + """ + points = np.asarray( + [feature["geometry"]["coordinates"] for feature in features] + ) tree = KDTree(points) query_point = np.asarray([[query_point.x, query_point.y]]) @@ -85,28 +87,28 @@ def create_output(filename, source, results, types): new_schema = source.schema.copy() col_names = results[list(results.keys())[0]].keys() for col in col_names: - new_schema['properties'][col] = types[col] + new_schema["properties"][col] = types[col] empty_data = {} for col in col_names: empty_data[col] = None with fiona.open( - filename, 'w', - crs=source.crs, - driver=source.driver, - schema=new_schema, + filename, + "w", + crs=source.crs, + driver=source.driver, + schema=new_schema, ) as sink: for f in source: try: new_feature = f.copy() - if new_feature['id'] in results.keys(): - new_feature['properties'].update( - results[new_feature['id']]) + if new_feature["id"] in results.keys(): + new_feature["properties"].update(results[new_feature["id"]]) else: - new_feature['properties'].update(empty_data) + new_feature["properties"].update(empty_data) sink.write(new_feature) - except Exception as e: - logging.exception("Error processing feature %s:", f['id']) + except Exception: + logging.exception("Error processing feature %s:", f["id"]) @staticmethod def decimal_to_degree(decimal: float): @@ -125,9 +127,12 @@ def decimal_to_degree(decimal: float): degree = int(decimal) minutes = int((decimal - degree) * 60) seconds = (decimal - degree - minutes / 60) * 3600 - overall_degree = format(degree, '02d') + format(minutes, '02d') \ - + format(int(seconds), '02d') + format( - int(seconds % 1 * 100), '02d') + overall_degree = ( + format(degree, "02d") + + format(minutes, "02d") + + format(int(seconds), "02d") + + format(int(seconds % 1 * 100), "02d") + ) return int(overall_degree) @@ -144,11 +149,15 @@ def degree_to_decimal(degree: int): int: A decimal value. 
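A worked example of the packed DDMMSS.ss representation these two helpers convert between; the slicing mirrors degree_to_decimal and the value is illustrative:

    # 35150000 packs 35 degrees, 15 minutes, 00.00 seconds (DDMMSSss)
    packed = "35150000"
    degrees = int(packed[:-6])                              # 35
    minutes = int(packed[-6:-4])                            # 15
    seconds = int(packed[-4:-2]) + int(packed[-2:]) / 100   # 0.0
    decimal = degrees + minutes / 60 + seconds / 3600
    # decimal == 35.25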
""" - if degree == 0.0 or degree == None or degree == '': - decimal = 'NA' + if degree == 0.0 or degree is None or degree == "": + decimal = "NA" else: degree = str(int(degree)) - decimal = int(degree[:-6]) + int(degree[-6:-4]) / 60 + (int(degree[-4:-2]) + int(degree[-2:]) / 100) / 3600 + decimal = ( + int(degree[:-6]) + + int(degree[-6:-4]) / 60 + + (int(degree[-4:-2]) + int(degree[-2:]) / 100) / 3600 + ) return decimal @@ -168,11 +177,16 @@ def calc_geog_distance_from_linestring(line_segment, unit=1): if isinstance(line_segment, MultiLineString): for line in line_segment.geoms: dist = dist + float( - GeoUtil.calc_geog_distance_between_points(Point(line.coords[0]), Point(line.coords[1]), unit)) + GeoUtil.calc_geog_distance_between_points( + Point(line.coords[0]), Point(line.coords[1]), unit + ) + ) elif isinstance(line_segment, LineString): dist = float( - GeoUtil.calc_geog_distance_between_points(Point(line_segment.coords[0]), Point(line_segment.coords[1]), - unit)) + GeoUtil.calc_geog_distance_between_points( + Point(line_segment.coords[0]), Point(line_segment.coords[1]), unit + ) + ) return dist @@ -189,8 +203,7 @@ def calc_geog_distance_between_points(point1, point2, unit=1): str: Distance between points. """ - dist = 0 - geod = pyproj.Geod(ellps='WGS84') + geod = pyproj.Geod(ellps="WGS84") angle1, angle2, distance = geod.inv(point1.x, point1.y, point2.x, point2.y) # print(point1.x, point1.y, point2.x, point2.y) km = "{0:8.4f}".format(distance / 1000) @@ -220,7 +233,7 @@ def create_rtree_index(inshp): print("creating node index.....") feature_list = [] for feature in inshp: - line = shape(feature['geometry']) + line = shape(feature["geometry"]) feature_list.append(line) idx = index.Index() for i in range(len(feature_list)): @@ -250,9 +263,9 @@ def add_guid(infile, outfile): is_shapefile = False is_geopackage = False - if infile.lower().endswith('.shp'): + if infile.lower().endswith(".shp"): is_shapefile = True - elif infile.lower().endswith('.gpkg'): + elif infile.lower().endswith(".gpkg"): is_geopackage = True else: logging.error("Error: Input file format is not supported.") @@ -264,18 +277,22 @@ def add_guid(infile, outfile): if is_shapefile: gdf = gpd.read_file(infile) - gdf['guid'] = gdf.apply(lambda x: str(uuid.uuid4()), axis=1) - gdf.to_file(f"{outfile}", driver='ESRI Shapefile') + gdf["guid"] = gdf.apply(lambda x: str(uuid.uuid4()), axis=1) + gdf.to_file(f"{outfile}", driver="ESRI Shapefile") is_success = True elif is_geopackage: if GeoUtil.is_vector_gpkg(infile): gdf = gpd.read_file(infile) - gdf['guid'] = gdf.apply(lambda x: str(uuid.uuid4()), axis=1) - gdf.to_file(outfile, layer=outfile_name, driver='GPKG') + gdf["guid"] = gdf.apply(lambda x: str(uuid.uuid4()), axis=1) + gdf.to_file(outfile, layer=outfile_name, driver="GPKG") is_success = True else: - logging.error("Error: The GeoPackage contains raster data, which is not supported.") - print("Error: The GeoPackage contains raster data, which is not supported.") + logging.error( + "Error: The GeoPackage contains raster data, which is not supported." + ) + print( + "Error: The GeoPackage contains raster data, which is not supported." 
+ ) return False return is_success @@ -284,6 +301,6 @@ def add_guid(infile, outfile): def is_vector_gpkg(filepath): try: with fiona.open(filepath) as src: - return src.schema['geometry'] is not None + return src.schema["geometry"] is not None except fiona.errors.DriverError: return False diff --git a/pyincore/utils/hhrsoutputprocess.py b/pyincore/utils/hhrsoutputprocess.py index 593e1959b..c1e1653f9 100644 --- a/pyincore/utils/hhrsoutputprocess.py +++ b/pyincore/utils/hhrsoutputprocess.py @@ -25,12 +25,16 @@ def get_hhrs_stage_count(timesteps, hhrs_df, filename_json="hhrs_stage_count.jso hhrs_stage_count = {} for t in timesteps: stage = hhrs_df[t] - hhrs_stage_count[t] = [int((stage == 1.0).sum()), int((stage == 2.0).sum()), int((stage == 3.0).sum()), - int((stage == 4.0).sum()), int((stage == 5.0).sum())] + hhrs_stage_count[t] = [ + int((stage == 1.0).sum()), + int((stage == 2.0).sum()), + int((stage == 3.0).sum()), + int((stage == 4.0).sum()), + int((stage == 5.0).sum()), + ] if filename_json: with open(filename_json, "w") as outfile: json.dump(hhrs_stage_count, outfile, indent=2) return hhrs_stage_count - diff --git a/pyincore/utils/http_util.py b/pyincore/utils/http_util.py index 389588e31..7cd87b520 100644 --- a/pyincore/utils/http_util.py +++ b/pyincore/utils/http_util.py @@ -8,22 +8,31 @@ logger = pyglobals.LOGGER + def return_http_response(http_response): try: http_response.raise_for_status() return http_response except requests.exceptions.HTTPError: - logger.error('A HTTPError has occurred \n' + - 'HTTP Status code: ' + str(http_response.status_code) + '\n' + - 'Error Message: ' + http_response.content.decode() - ) + logger.error( + "A HTTPError has occurred \n" + + "HTTP Status code: " + + str(http_response.status_code) + + "\n" + + "Error Message: " + + http_response.content.decode() + ) raise except requests.exceptions.ConnectionError: - logger.error("ConnectionError: Failed to establish a connection with the server. " - "This might be due to a refused connection. " - "Please check that you are using the right URLs.") + logger.error( + "ConnectionError: Failed to establish a connection with the server. " + "This might be due to a refused connection. " + "Please check that you are using the right URLs." + ) raise except requests.exceptions.RequestException: - logger.error("RequestException: There was an exception while trying to handle your request. " - "Please go to the end of this message for more specific information about the exception.") + logger.error( + "RequestException: There was an exception while trying to handle your request. " + "Please go to the end of this message for more specific information about the exception." 
+ ) raise diff --git a/pyincore/utils/networkutil.py b/pyincore/utils/networkutil.py index 69f49fdd6..d660ecabd 100644 --- a/pyincore/utils/networkutil.py +++ b/pyincore/utils/networkutil.py @@ -31,7 +31,7 @@ def build_link_by_node(node_filename, graph_filename, id_field, out_filename): """ # read graph with open(graph_filename) as f: - reader = csv.reader(f, delimiter=',') + reader = csv.reader(f, delimiter=",") graph_list = list(reader) # remove the first element, which is a header, from a list graph_list.pop(0) @@ -46,20 +46,20 @@ def build_link_by_node(node_filename, graph_filename, id_field, out_filename): # create a schema for output line file schema = { - 'geometry': 'LineString', - 'properties': { - 'linkid': 'str:10', - 'guid': 'str:30', - 'from_node': 'str:10', - 'to_node': 'str:10' - } + "geometry": "LineString", + "properties": { + "linkid": "str:10", + "guid": "str:30", + "from_node": "str:10", + "to_node": "str:10", + }, } for in_feature in innode: # build shape feature tmp_feature = copy.deepcopy(in_feature) - tmp_feature['properties']['guid'] = str(uuid.uuid4()) - node_id = str(tmp_feature['properties'][id_field]) + tmp_feature["properties"]["guid"] = str(uuid.uuid4()) + node_id = str(tmp_feature["properties"][id_field]) node_list.append(tmp_feature) node_id_list.append(node_id) @@ -85,28 +85,42 @@ def build_link_by_node(node_filename, graph_filename, id_field, out_filename): if str(node_id_list[i]) == to_id: to_location_in_node_list = i - from_geo = node_list[from_location_in_node_list]['geometry'] - to_geo = node_list[to_location_in_node_list]['geometry'] - line_geom = LineString([Point(shape(from_geo).coords), Point(shape(to_geo).coords)]) + from_geo = node_list[from_location_in_node_list]["geometry"] + to_geo = node_list[to_location_in_node_list]["geometry"] + line_geom = LineString( + [Point(shape(from_geo).coords), Point(shape(to_geo).coords)] + ) line_geom_list.append(line_geom) # create line feature - with fiona.open(out_filename, 'w', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: + with fiona.open( + out_filename, + "w", + crs=from_epsg(4326), + driver="ESRI Shapefile", + schema=schema, + ) as layer: for i in range(len(line_geom_list)): # filling schema - schema['geometry'] = mapping(line_geom_list[i]) - schema['properties']['linkid'] = line_id_list[i] - schema['properties']['guid'] = str(uuid.uuid4()) - schema['properties']['from_node'] = line_from_list[i] - schema['properties']['to_node'] = line_to_list[i] + schema["geometry"] = mapping(line_geom_list[i]) + schema["properties"]["linkid"] = line_id_list[i] + schema["properties"]["guid"] = str(uuid.uuid4()) + schema["properties"]["from_node"] = line_from_list[i] + schema["properties"]["to_node"] = line_to_list[i] layer.write(schema) return True @staticmethod - def build_node_by_link(link_filename, link_id_field, fromnode_field, tonode_field, out_node_filename, - out_graph_filename): + def build_node_by_link( + link_filename, + link_id_field, + fromnode_field, + tonode_field, + out_node_filename, + out_graph_filename, + ): """Create node dataset based on line shapefile and graph file graph should be in csv format Args: @@ -129,17 +143,19 @@ def build_node_by_link(link_filename, link_id_field, fromnode_field, tonode_fiel graph_list = [] for line in linefile: - line_geom = shape(line['geometry']) + line_geom = shape(line["geometry"]) seg_coord_list = list(line_geom.coords) # to check if this is a multiline string if len(seg_coord_list) > 2: - print("The line shapefile is a multiline string. 
The process will be aborted") + print( + "The line shapefile is a multiline string. The process will be aborted" + ) return False - line_id = str(line['properties'][link_id_field]) - fromnode_id = str(line['properties'][fromnode_field]) - tonode_id = str(line['properties'][tonode_field]) + line_id = str(line["properties"][link_id_field]) + fromnode_id = str(line["properties"][fromnode_field]) + tonode_id = str(line["properties"][tonode_field]) fromnode_coord = seg_coord_list[0] tonode_coord = seg_coord_list[1] graph_line_list = [] @@ -170,24 +186,27 @@ def build_node_by_link(link_filename, link_id_field, fromnode_field, tonode_fiel # create a schema for output line file schema = { - 'geometry': 'Point', - 'properties': { - 'nodeid': 'str:10', - 'guid': 'str:30' - } + "geometry": "Point", + "properties": {"nodeid": "str:10", "guid": "str:30"}, } # create output node feature - with fiona.open(out_node_filename, 'w', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: + with fiona.open( + out_node_filename, + "w", + crs=from_epsg(4326), + driver="ESRI Shapefile", + schema=schema, + ) as layer: for i in range(len(node_id_list)): # filling schema - schema['geometry'] = mapping(node_list[i]) - schema['properties']['nodeid'] = node_id_list[i] - schema['properties']['guid'] = str(uuid.uuid4()) + schema["geometry"] = mapping(node_list[i]) + schema["properties"]["nodeid"] = node_id_list[i] + schema["properties"]["guid"] = str(uuid.uuid4()) layer.write(schema) - with open(out_graph_filename, "w", newline='') as f: + with open(out_graph_filename, "w", newline="") as f: writer = csv.writer(f) writer.writerows([[link_id_field, fromnode_field, tonode_field]]) writer.writerows(graph_list) @@ -195,7 +214,9 @@ def build_node_by_link(link_filename, link_id_field, fromnode_field, tonode_fiel return True @staticmethod - def create_network_graph_from_link(link_file, fromnode_fldname, tonode_fldname, is_directed=False): + def create_network_graph_from_link( + link_file, fromnode_fldname, tonode_fldname, is_directed=False + ): """Create network graph from field. 
Args: @@ -214,21 +235,20 @@ def create_network_graph_from_link(link_file, fromnode_fldname, tonode_fldname, fromnode_list = [] tonode_list = [] node_list = [] - size = 0 indataset = fiona.open(link_file) for line_feature in indataset: from_node_val = None if fromnode_fldname in line_feature["properties"]: - from_node_val = line_feature['properties'][fromnode_fldname] + from_node_val = line_feature["properties"][fromnode_fldname] elif fromnode_fldname.lower() in line_feature["properties"]: - from_node_val = line_feature['properties'][fromnode_fldname.lower()] + from_node_val = line_feature["properties"][fromnode_fldname.lower()] to_node_val = None if tonode_fldname in line_feature["properties"]: - to_node_val = line_feature['properties'][tonode_fldname] + to_node_val = line_feature["properties"][tonode_fldname] elif tonode_fldname.lower() in line_feature["properties"]: - to_node_val = line_feature['properties'][tonode_fldname.lower()] + to_node_val = line_feature["properties"][tonode_fldname.lower()] fromnode_list.append(from_node_val - 1) tonode_list.append(to_node_val - 1) node_list.append(from_node_val - 1) @@ -252,20 +272,19 @@ def create_network_graph_from_link(link_file, fromnode_fldname, tonode_fldname, coords = dict((i, None) for i in range(len(node_list) - 1)) # create coordinates - node_coords_list = [None] * (len(node_list)) for line_feature in indataset: from_node_val = None if fromnode_fldname in line_feature["properties"]: - from_node_val = line_feature['properties'][fromnode_fldname] + from_node_val = line_feature["properties"][fromnode_fldname] elif fromnode_fldname.lower() in line_feature["properties"]: - from_node_val = line_feature['properties'][fromnode_fldname.lower()] + from_node_val = line_feature["properties"][fromnode_fldname.lower()] to_node_val = None if tonode_fldname in line_feature["properties"]: - to_node_val = line_feature['properties'][tonode_fldname] + to_node_val = line_feature["properties"][tonode_fldname] elif tonode_fldname.lower() in line_feature["properties"]: - to_node_val = line_feature['properties'][tonode_fldname.lower()] - line_geom = (line_feature['geometry']) - coords_list = line_geom.get('coordinates') + to_node_val = line_feature["properties"][tonode_fldname.lower()] + line_geom = line_feature["geometry"] + coords_list = line_geom.get("coordinates") from_coord = coords_list[0] to_coord = coords_list[1] coords[int(from_node_val) - 1] = from_coord @@ -285,9 +304,11 @@ def plot_network_graph(graph, coords): # nx.draw(graph, coords, with_lables=True, font_weithg='bold') # other ways to draw - nx.draw_networkx_nodes(graph, coords, cmap=plt.get_cmap('jet'), node_size=100, node_color='g') + nx.draw_networkx_nodes( + graph, coords, cmap=plt.get_cmap("jet"), node_size=100, node_color="g" + ) nx.draw_networkx_labels(graph, coords) - nx.draw_networkx_edges(graph, coords, edge_color='r', arrows=True) + nx.draw_networkx_edges(graph, coords, edge_color="r", arrows=True) plt.show() @staticmethod @@ -314,15 +335,17 @@ def read_network_graph_from_file(filename, is_directed=False): graph = nx.Graph() graph.add_nodes_from(node_coords.keys()) - l = [set(x) for x in geom.edges()] - edg = [tuple(k for k, v in node_coords.items() if v in sl) for sl in l] + m = [set(x) for x in geom.edges()] + edg = [tuple(k for k, v in node_coords.items() if v in sl) for sl in m] graph.add_edges_from(edg) return graph, node_coords @staticmethod - def validate_network_node_ids(network_dataset, fromnode_fldname, tonode_fldname, nodeid_fldname): + def validate_network_node_ids( + 
network_dataset, fromnode_fldname, tonode_fldname, nodeid_fldname + ): """Check if the node id in from or to node exist in the real node id. Args: @@ -344,14 +367,14 @@ def validate_network_node_ids(network_dataset, fromnode_fldname, tonode_fldname, for line_feature in link_dataset: from_node_val = None if fromnode_fldname in line_feature["properties"]: - from_node_val = line_feature['properties'][fromnode_fldname] + from_node_val = line_feature["properties"][fromnode_fldname] elif fromnode_fldname.lower() in line_feature["properties"]: - from_node_val = line_feature['properties'][fromnode_fldname.lower()] + from_node_val = line_feature["properties"][fromnode_fldname.lower()] to_node_val = None if tonode_fldname in line_feature["properties"]: - to_node_val = line_feature['properties'][tonode_fldname] + to_node_val = line_feature["properties"][tonode_fldname] elif tonode_fldname.lower() in line_feature["properties"]: - to_node_val = line_feature['properties'][tonode_fldname.lower()] + to_node_val = line_feature["properties"][tonode_fldname.lower()] link_node_list.append(from_node_val) link_node_list.append(to_node_val) @@ -360,9 +383,9 @@ def validate_network_node_ids(network_dataset, fromnode_fldname, tonode_fldname, for node_feature in node_dataset: node_val = None if nodeid_fldname in node_feature["properties"]: - node_val = node_feature['properties'][nodeid_fldname] + node_val = node_feature["properties"][nodeid_fldname] elif nodeid_fldname.lower() in node_feature["properties"]: - node_val = node_feature['properties'][nodeid_fldname.lower()] + node_val = node_feature["properties"][nodeid_fldname.lower()] node_list.append(node_val) link_node_list.sort() @@ -371,7 +394,7 @@ def validate_network_node_ids(network_dataset, fromnode_fldname, tonode_fldname, node_list = list(set(node_list)) for node in link_node_list: - if node in node_list == False: + if node in node_list is False: validate = False return validate @@ -394,7 +417,6 @@ def merge_labeled_networks(graph_a, graph_b, edges_ab, directed=False): # Define directionality when needed __left_to_right = 1 - __right_to_left = -1 __no_direction = 0 # Extract labels @@ -404,14 +426,14 @@ def merge_labeled_networks(graph_a, graph_b, edges_ab, directed=False): prefix_b = labels[1] # Ensure data types are correct - edges_ab[prefix_a] = edges_ab[prefix_a].astype('int64') - edges_ab[prefix_b] = edges_ab[prefix_a].astype('int64') + edges_ab[prefix_a] = edges_ab[prefix_a].astype("int64") + edges_ab[prefix_b] = edges_ab[prefix_a].astype("int64") direction = None if directed: direction = labels[2] - edges_ab[direction] = edges_ab[direction].astype('int64') + edges_ab[direction] = edges_ab[direction].astype("int64") # Merge the networks merged_graph = nx.union(graph_a, graph_b, rename=(prefix_a, prefix_b)) @@ -420,14 +442,24 @@ def merge_labeled_networks(graph_a, graph_b, edges_ab, directed=False): for idx, row in edges_ab.iterrows(): if directed: if row[direction] == __left_to_right: - merged_graph.add_edge(f"{prefix_a}{row[prefix_a]}", f"{prefix_b}{row[prefix_b]}") + merged_graph.add_edge( + f"{prefix_a}{row[prefix_a]}", f"{prefix_b}{row[prefix_b]}" + ) elif row[direction] == __no_direction: - merged_graph.add_edge(f"{prefix_a}{row[prefix_a]}", f"{prefix_b}{row[prefix_b]}") - merged_graph.add_edge(f"{prefix_b}{row[prefix_b]}", f"{prefix_a}{row[prefix_a]}") + merged_graph.add_edge( + f"{prefix_a}{row[prefix_a]}", f"{prefix_b}{row[prefix_b]}" + ) + merged_graph.add_edge( + f"{prefix_b}{row[prefix_b]}", f"{prefix_a}{row[prefix_a]}" + ) else: - 
merged_graph.add_edge(f"{prefix_b}{row[prefix_b]}", f"{prefix_a}{row[prefix_a]}") + merged_graph.add_edge( + f"{prefix_b}{row[prefix_b]}", f"{prefix_a}{row[prefix_a]}" + ) else: - merged_graph.add_edge(f"{prefix_a}{row[prefix_a]}", f"{prefix_b}{row[prefix_b]}") + merged_graph.add_edge( + f"{prefix_a}{row[prefix_a]}", f"{prefix_b}{row[prefix_b]}" + ) return merged_graph @@ -445,7 +477,9 @@ def extract_network_by_label(labeled_graph, prefix): """ # Filter the list of nodes based on prefix - prefix_nodes = filter(lambda node_id: node_id.startswith(prefix), list(labeled_graph.nodes)) + prefix_nodes = filter( + lambda node_id: node_id.startswith(prefix), list(labeled_graph.nodes) + ) # Extract the corresponding subgraph subgraph = labeled_graph.subgraph(prefix_nodes) @@ -456,7 +490,7 @@ def extract_network_by_label(labeled_graph, prefix): return nx.relabel_nodes(subgraph, de_mapping, copy=True) @staticmethod - def create_network_graph_from_dataframes(df_nodes, df_links, sort='unsorted'): + def create_network_graph_from_dataframes(df_nodes, df_links, sort="unsorted"): """Given a dataframe of nodes and a dataframe of links, assemble a network object. Args: @@ -469,27 +503,27 @@ def create_network_graph_from_dataframes(df_nodes, df_links, sort='unsorted'): """ graph = nx.DiGraph() # Empty graph - pos_x = df_nodes['geometry'].apply(lambda p: p.x).head() - pos_y = df_nodes['geometry'].apply(lambda p: p.y).head() - node_id = df_nodes['nodenwid'] + pos_x = df_nodes["geometry"].apply(lambda p: p.x).head() + pos_y = df_nodes["geometry"].apply(lambda p: p.y).head() + node_id = df_nodes["nodenwid"] pos = {} - pos_x = df_nodes['geometry'].apply(lambda p: p.x) - - pos_y = df_nodes['geometry'].apply(lambda p: p.y) + pos_x = df_nodes["geometry"].apply(lambda p: p.x) + + pos_y = df_nodes["geometry"].apply(lambda p: p.y) for i, val in enumerate(df_nodes["nodenwid"]): pos[val] = (pos_x[i], pos_y[i]) - edges = [(x, y) for x, y in zip(df_links["fromnode"], df_links["tonode"])] + _ = [(x, y) for x, y in zip(df_links["fromnode"], df_links["tonode"])] edge = [] - if sort == 'sorted': + if sort == "sorted": for i, val in enumerate(df_links["linknwid"]): if df_links["direction"][i] == 1: edge.append((df_links["fromnode"][i], df_links["tonode"][i])) else: edge.append((df_links["tonode"][i], df_links["fromnode"][i])) - elif sort == 'unsorted': + elif sort == "unsorted": for i, val in enumerate(df_links["linknwid"]): edge.append((df_links["fromnode"][i], df_links["tonode"][i])) @@ -497,9 +531,9 @@ def create_network_graph_from_dataframes(df_nodes, df_links, sort='unsorted'): graph.add_edges_from(edge) for x, y, id in zip(pos_x, pos_y, node_id): - graph.nodes[id]['pos'] = (x, y) + graph.nodes[id]["pos"] = (x, y) for ii, node_id in enumerate(graph.nodes()): graph.nodes[node_id]["classification"] = df_nodes["utilfcltyc"][ii] - return graph \ No newline at end of file + return graph diff --git a/pyincore/utils/popdisloutputprocess.py b/pyincore/utils/popdisloutputprocess.py index 50ff6f0a9..e7f828d2b 100644 --- a/pyincore/utils/popdisloutputprocess.py +++ b/pyincore/utils/popdisloutputprocess.py @@ -23,17 +23,25 @@ class PopDislOutputProcess: vacant_disl (bool): A flag to include vacant (Vacant for tenure) dislocation """ - HUPD_CATEGORIES = ["household_characteristics", - "household_dislocated", - "total_households", - "percent_household_dislocated", - "population_dislocated", - "total_population", - "percent_population_dislocated" - ] - - def __init__(self, pop_disl_result, pop_disl_result_path=None, - filter_name=None, 
filter_guid=True, vacant_disl=True): + + HUPD_CATEGORIES = [ + "household_characteristics", + "household_dislocated", + "total_households", + "percent_household_dislocated", + "population_dislocated", + "total_population", + "percent_population_dislocated", + ] + + def __init__( + self, + pop_disl_result, + pop_disl_result_path=None, + filter_name=None, + filter_guid=True, + vacant_disl=True, + ): if pop_disl_result_path: pd_result = pd.read_csv(pop_disl_result_path, low_memory=False) else: @@ -43,26 +51,36 @@ def __init__(self, pop_disl_result, pop_disl_result_path=None, # keep only inventory with guid; filter for Joplin since only Joplin inventory has guids if filter_guid: if filter_name: - pd_result_flag = pd_result[(pd_result["guid"].notnull()) & - (pd_result["numprec"].notnull()) & - (pd_result["plcname10"] == filter_name)] + pd_result_flag = pd_result[ + (pd_result["guid"].notnull()) + & (pd_result["numprec"].notnull()) + & (pd_result["plcname10"] == filter_name) + ] # only keep guid and place - pd_result_shp = pd_result[(pd_result["guid"].notnull()) & - (pd_result["numprec"].notnull()) & - (pd_result["plcname10"] == filter_name)] + pd_result_shp = pd_result[ + (pd_result["guid"].notnull()) + & (pd_result["numprec"].notnull()) + & (pd_result["plcname10"] == filter_name) + ] else: - pd_result_flag = pd_result[(pd_result["guid"].notnull()) & - (pd_result["numprec"].notnull())] + pd_result_flag = pd_result[ + (pd_result["guid"].notnull()) & (pd_result["numprec"].notnull()) + ] # only keep guid - pd_result_shp = pd_result[(pd_result["guid"].notnull()) & - (pd_result["numprec"].notnull())] + pd_result_shp = pd_result[ + (pd_result["guid"].notnull()) & (pd_result["numprec"].notnull()) + ] else: if filter_name: - pd_result_flag = pd_result[(pd_result["numprec"].notnull()) & - (pd_result["plcname10"] == filter_name)] + pd_result_flag = pd_result[ + (pd_result["numprec"].notnull()) + & (pd_result["plcname10"] == filter_name) + ] # only keep guid and place - pd_result_shp = pd_result[(pd_result["numprec"].notnull()) & - (pd_result["plcname10"] == filter_name)] + pd_result_shp = pd_result[ + (pd_result["numprec"].notnull()) + & (pd_result["plcname10"] == filter_name) + ] else: pd_result_flag = pd_result[(pd_result["numprec"].notnull())] # only keep guid @@ -73,19 +91,19 @@ def __init__(self, pop_disl_result, pop_disl_result_path=None, self.pop_disl_result_shp = pd_result_shp def get_heatmap_shp(self, filename="pop-disl-numprec.shp"): - """ Convert and filter population dislocation output to shapefile that contains only guid and numprec columns + """Convert and filter population dislocation output to shapefile that contains only guid and numprec columns - Args: - filename (str): Path and name to save shapefile output file in. E.g "heatmap.shp" + Args: + filename (str): Path and name to save shapefile output file in. 
E.g "heatmap.shp" - Returns: - str: full path and filename of the shapefile + Returns: + str: full path and filename of the shapefile """ df = self.pop_disl_result_shp # save as shapefile - gdf = gpd.GeoDataFrame(df, crs='epsg:4326') + gdf = gpd.GeoDataFrame(df, crs="epsg:4326") gdf = gdf[["guid", "numprec", "geometry", "dislocated"]] # keep original dislocated results @@ -102,7 +120,7 @@ def get_heatmap_shp(self, filename="pop-disl-numprec.shp"): return filename def pd_by_race(self, filename_json=None): - """ Calculate race results from the output files of the Joplin Population Dislocation analysis + """Calculate race results from the output files of the Joplin Population Dislocation analysis and convert the results to json format. [ {"household_characteristics": "Not Hispanic/White", @@ -127,20 +145,24 @@ def pd_by_race(self, filename_json=None): # The numbering follows the Community description notebook # 0 - Vacant HU No Race Ethnicity Data, 1 - Not Hispanic/White, 2 - Not Hispanic/Black # 3 - Not Hispanic/Other race, 4 - Hispanic, 5 - No Race or Ethnicity Data - race_categories = ["Vacant HU No Race or Ethnicity Data", - "Not Hispanic/White", - "Not Hispanic/Black", - "Not Hispanic/Other Race", - "Hispanic", - "No Race or Ethnicity Data", - "Total"] + race_categories = [ + "Vacant HU No Race or Ethnicity Data", + "Not Hispanic/White", + "Not Hispanic/Black", + "Not Hispanic/Other Race", + "Hispanic", + "No Race or Ethnicity Data", + "Total", + ] huapd = self.pop_disl_result # Allocated by race and ethnicity huapd["hua_re"] = "0" huapd.loc[(huapd["race"] == 1) & (huapd["hispan"] == 0), "hua_re"] = "1" huapd.loc[(huapd["race"] == 2) & (huapd["hispan"] == 0), "hua_re"] = "2" - huapd.loc[(huapd["race"].isin([3, 4, 5, 6, 7])) & (huapd["hispan"] == 0), "hua_re"] = "3" + huapd.loc[ + (huapd["race"].isin([3, 4, 5, 6, 7])) & (huapd["hispan"] == 0), "hua_re" + ] = "3" huapd.loc[(huapd["hispan"] == 1), "hua_re"] = "4" huapd.loc[(huapd["gqtype"] >= 1), "hua_re"] = "5" hua_vals = huapd["hua_re"].value_counts() @@ -161,9 +183,20 @@ def pd_by_race(self, filename_json=None): # Dislocated by race and ethnicity huapd["hud_re"] = "" huapd.loc[huapd["dislocated"], "hud_re"] = "0" - huapd.loc[(huapd["race"] == 1) & (huapd["hispan"] == 0) & huapd["dislocated"], "hud_re"] = "1" - huapd.loc[(huapd["race"] == 2) & (huapd["hispan"] == 0) & huapd["dislocated"], "hud_re"] = "2" - huapd.loc[(huapd["race"].isin([3, 4, 5, 6, 7])) & (huapd["hispan"] == 0) & huapd["dislocated"], "hud_re"] = "3" + huapd.loc[ + (huapd["race"] == 1) & (huapd["hispan"] == 0) & huapd["dislocated"], + "hud_re", + ] = "1" + huapd.loc[ + (huapd["race"] == 2) & (huapd["hispan"] == 0) & huapd["dislocated"], + "hud_re", + ] = "2" + huapd.loc[ + (huapd["race"].isin([3, 4, 5, 6, 7])) + & (huapd["hispan"] == 0) + & huapd["dislocated"], + "hud_re", + ] = "3" huapd.loc[(huapd["hispan"] == 1) & huapd["dislocated"], "hud_re"] = "4" huapd.loc[(huapd["gqtype"] >= 1) & huapd["dislocated"], "hud_re"] = "5" hud_vals = huapd["hud_re"].value_counts() @@ -207,7 +240,7 @@ def pd_by_race(self, filename_json=None): return json.dumps(pd_by_race_json) def pd_by_income(self, filename_json=None): - """ Calculate income results from the output files of the Joplin Population Dislocation analysis + """Calculate income results from the output files of the Joplin Population Dislocation analysis and convert the results to json format. 
[ {"household_characteristics": "HH1 (less than $15,000)", @@ -229,13 +262,15 @@ def pd_by_income(self, filename_json=None): obj: PD total count by income. A JSON of the hua and population dislocation income results by category. """ - income_categories = ["HH1 (less than $15,000)", - "HH2 ($15,000 to $35,000)", - "HH3 ($35,000 to $70,000)", - "HH4 ($70,000 to $120,000)", - "HH5 (More than $120,000)", - "Unknown", - "Total"] + income_categories = [ + "HH1 (less than $15,000)", + "HH2 ($15,000 to $35,000)", + "HH3 ($35,000 to $70,000)", + "HH4 ($70,000 to $120,000)", + "HH5 (More than $120,000)", + "Unknown", + "Total", + ] huapd = self.pop_disl_result # Allocated by income @@ -256,17 +291,25 @@ def pd_by_income(self, filename_json=None): # Dislocated by income hua_disl = [] for i in range(1, 6): - disl = huapd.loc[(huapd["hhinc"] == i) & huapd["dislocated"], ["dislocated"]].sum() + disl = huapd.loc[ + (huapd["hhinc"] == i) & huapd["dislocated"], ["dislocated"] + ].sum() hua_disl.append(int(disl)) - disl_unknown = huapd.loc[pd.isna(huapd["hhinc"]) & huapd["dislocated"], ["dislocated"]].sum() + disl_unknown = huapd.loc[ + pd.isna(huapd["hhinc"]) & huapd["dislocated"], ["dislocated"] + ].sum() hua_disl.append(int(disl_unknown)) hua_disl.append(int(sum(hua_disl))) pd_disl = [] for i in range(1, 6): - disl = huapd.loc[(huapd["hhinc"] == i) & huapd["dislocated"], ["numprec"]].sum() + disl = huapd.loc[ + (huapd["hhinc"] == i) & huapd["dislocated"], ["numprec"] + ].sum() pd_disl.append(int(disl)) - disl_unknown = huapd.loc[pd.isna(huapd["hhinc"]) & huapd["dislocated"], ["numprec"]].sum() + disl_unknown = huapd.loc[ + pd.isna(huapd["hhinc"]) & huapd["dislocated"], ["numprec"] + ].sum() pd_disl.append(int(disl_unknown)) pd_disl.append(int(sum(pd_disl))) @@ -296,7 +339,7 @@ def pd_by_income(self, filename_json=None): return json.dumps(pd_by_income_json) def pd_by_tenure(self, filename_json=None): - """ Calculate tenure results from the output files of the Joplin Population Dislocation analysis + """Calculate tenure results from the output files of the Joplin Population Dislocation analysis and convert the results to json format. 
[ {"household_characteristics": "Owner occupied", @@ -323,14 +366,16 @@ def pd_by_tenure(self, filename_json=None): # 0 - Vacant HU No Tenure Data, 1 - Owner occupied, 2 - Renter occupied, # 3 - Nursing facilities, 4 - Other group quarters, 5 - Vacant for rent # 6 - Vacant for sale, 7 - Vacant other - tenure_categories = ["Owner occupied", - "Renter occupied", - "Nursing facilities", - "Other group quarters", - "Vacant for rent", - "Vacant for sale", - "Vacant other", - "Total"] + tenure_categories = [ + "Owner occupied", + "Renter occupied", + "Nursing facilities", + "Other group quarters", + "Vacant for rent", + "Vacant for sale", + "Vacant other", + "Total", + ] huapd = self.pop_disl_result # Allocated by tenure @@ -353,7 +398,9 @@ def pd_by_tenure(self, filename_json=None): pop_tot = [] for i in range(len(tenure_categories)): - pop_tot.append(int(huapd["numprec"].where(huapd["hua_tnr"] == str(i)).sum())) + pop_tot.append( + int(huapd["numprec"].where(huapd["hua_tnr"] == str(i)).sum()) + ) pop_tot.append(int(sum(pop_tot[1:]))) # Dislocated by tenure @@ -362,10 +409,14 @@ def pd_by_tenure(self, filename_json=None): huapd.loc[(huapd["ownershp"] == 1.0) & huapd["dislocated"], "hud_tnr"] = "1" huapd.loc[(huapd["ownershp"] == 2.0) & huapd["dislocated"], "hud_tnr"] = "2" huapd.loc[(huapd["gqtype"] == 3) & huapd["dislocated"], "hud_tnr"] = "3" - huapd.loc[huapd["gqtype"].isin([1, 2, 4, 5, 6, 7, 8]) & huapd["dislocated"], "hud_tnr"] = "4" + huapd.loc[ + huapd["gqtype"].isin([1, 2, 4, 5, 6, 7, 8]) & huapd["dislocated"], "hud_tnr" + ] = "4" huapd.loc[huapd["vacancy"].isin([1, 2]) & huapd["dislocated"], "hud_tnr"] = "5" huapd.loc[huapd["vacancy"].isin([3, 4]) & huapd["dislocated"], "hud_tnr"] = "6" - huapd.loc[huapd["vacancy"].isin([5, 6, 7]) & huapd["dislocated"], "hud_tnr"] = "7" + huapd.loc[ + huapd["vacancy"].isin([5, 6, 7]) & huapd["dislocated"], "hud_tnr" + ] = "7" hud_vals = huapd["hud_tnr"].value_counts() hua_disl = [] for i in range(len(tenure_categories)): @@ -382,7 +433,9 @@ def pd_by_tenure(self, filename_json=None): pd_disl = [] for i in range(len(tenure_categories)): - pd_disl.append(int(huapd["numprec"].where(huapd["hud_tnr"] == str(i)).sum())) + pd_disl.append( + int(huapd["numprec"].where(huapd["hud_tnr"] == str(i)).sum()) + ) pd_disl.append(int(sum(pd_disl[1:]))) pd_by_tenure_json = [] @@ -392,13 +445,17 @@ def pd_by_tenure(self, filename_json=None): huapd_tenure[self.HUPD_CATEGORIES[1]] = hua_disl[i + 1] huapd_tenure[self.HUPD_CATEGORIES[2]] = hua_tot[i + 1] if hua_tot[i + 1]: - huapd_tenure[self.HUPD_CATEGORIES[3]] = 100 * (hua_disl[i + 1] / hua_tot[i + 1]) + huapd_tenure[self.HUPD_CATEGORIES[3]] = 100 * ( + hua_disl[i + 1] / hua_tot[i + 1] + ) else: huapd_tenure[self.HUPD_CATEGORIES[3]] = None huapd_tenure[self.HUPD_CATEGORIES[4]] = pd_disl[i + 1] huapd_tenure[self.HUPD_CATEGORIES[5]] = pop_tot[i + 1] if pop_tot[i + 1]: - huapd_tenure[self.HUPD_CATEGORIES[6]] = 100 * (pd_disl[i + 1] / pop_tot[i + 1]) + huapd_tenure[self.HUPD_CATEGORIES[6]] = 100 * ( + pd_disl[i + 1] / pop_tot[i + 1] + ) else: huapd_tenure[self.HUPD_CATEGORIES[6]] = None pd_by_tenure_json.append(huapd_tenure) @@ -411,7 +468,7 @@ def pd_by_tenure(self, filename_json=None): return json.dumps(pd_by_tenure_json) def pd_by_housing(self, filename_json=None): - """ Calculate housing results from the output files of the Joplin Population Dislocation analysis + """Calculate housing results from the output files of the Joplin Population Dislocation analysis using huestimate column (huestimate = 1 is single family, 
huestimate > 1 means multi family house) and convert the results to json format. [ @@ -434,9 +491,7 @@ def pd_by_housing(self, filename_json=None): """ # Household categories # 0 - Vacant HU No Tenure Data, 1 - Single Family, 2 - Multi Family - household_categories = ["Single Family", - "Multi Family", - "Total"] + household_categories = ["Single Family", "Multi Family", "Total"] huapd = self.pop_disl_result # Allocated by housing @@ -454,7 +509,9 @@ def pd_by_housing(self, filename_json=None): pop_tot = [] for i in range(len(household_categories)): - pop_tot.append(int(huapd["numprec"].where(huapd["hua_house"] == str(i)).sum())) + pop_tot.append( + int(huapd["numprec"].where(huapd["hua_house"] == str(i)).sum()) + ) pop_tot.append(int(sum(pop_tot[1:]))) # Dislocated by household @@ -473,7 +530,9 @@ def pd_by_housing(self, filename_json=None): pd_disl = [] for i in range(len(household_categories)): - pd_disl.append(int(huapd["numprec"].where(huapd["hud_house"] == str(i)).sum())) + pd_disl.append( + int(huapd["numprec"].where(huapd["hud_house"] == str(i)).sum()) + ) pd_disl.append(int(sum(pd_disl[1:]))) pd_by_housing_json = [] @@ -483,13 +542,17 @@ def pd_by_housing(self, filename_json=None): huapd_household[self.HUPD_CATEGORIES[1]] = hua_disl[i + 1] huapd_household[self.HUPD_CATEGORIES[2]] = hua_tot[i + 1] if hua_tot[i + 1]: - huapd_household[self.HUPD_CATEGORIES[3]] = 100 * (hua_disl[i + 1] / hua_tot[i + 1]) + huapd_household[self.HUPD_CATEGORIES[3]] = 100 * ( + hua_disl[i + 1] / hua_tot[i + 1] + ) else: huapd_household[self.HUPD_CATEGORIES[3]] = None huapd_household[self.HUPD_CATEGORIES[4]] = pd_disl[i + 1] huapd_household[self.HUPD_CATEGORIES[5]] = pop_tot[i + 1] if pop_tot[i + 1]: - huapd_household[self.HUPD_CATEGORIES[6]] = 100 * (pd_disl[i + 1] / pop_tot[i + 1]) + huapd_household[self.HUPD_CATEGORIES[6]] = 100 * ( + pd_disl[i + 1] / pop_tot[i + 1] + ) else: huapd_household[self.HUPD_CATEGORIES[6]] = None pd_by_housing_json.append(huapd_household) @@ -501,7 +564,7 @@ def pd_by_housing(self, filename_json=None): return json.dumps(pd_by_housing_json) def pd_total(self, filename_json=None): - """ Calculate total results from the output files of the Joplin Population Dislocation analysis + """Calculate total results from the output files of the Joplin Population Dislocation analysis and convert the results to json format. 
{ "household_dislocated": { "dislocated": { @@ -535,8 +598,10 @@ def pd_total(self, filename_json=None): hua_disl = [hud_vals_false, hud_vals_true] - pd_disl = [int(hud["numprec"].where(hud["dislocated"] == 0).sum()), - int(hud["numprec"].where(hud["dislocated"] == 1).sum())] + pd_disl = [ + int(hud["numprec"].where(hud["dislocated"] == 0).sum()), + int(hud["numprec"].where(hud["dislocated"] == 1).sum()), + ] hua_tot = sum(hua_disl) pop_tot = sum(pd_disl) @@ -547,11 +612,18 @@ def pd_total(self, filename_json=None): hua_disl_tot["not_dislocated"] = no_hua_tot hua_disl_tot["total"] = no_hua_tot if hua_tot: - hua_disl_tot["dislocated"] = {"households": hua_disl[1], - "percent_of_households": 100 * (hua_disl[1]/hua_tot)} - hua_disl_tot["not_dislocated"] = {"households": hua_tot - hua_disl[1], - "percent_of_households": 100 * ((hua_tot - hua_disl[1])/hua_tot)} - hua_disl_tot["total"] = {"households": hua_tot, "percent_of_households": 100} + hua_disl_tot["dislocated"] = { + "households": hua_disl[1], + "percent_of_households": 100 * (hua_disl[1] / hua_tot), + } + hua_disl_tot["not_dislocated"] = { + "households": hua_tot - hua_disl[1], + "percent_of_households": 100 * ((hua_tot - hua_disl[1]) / hua_tot), + } + hua_disl_tot["total"] = { + "households": hua_tot, + "percent_of_households": 100, + } no_pop_tot = {"population": None, "percent_of_population": None} pop_disl_tot = {} @@ -559,14 +631,23 @@ def pd_total(self, filename_json=None): pop_disl_tot["not_dislocated"] = no_pop_tot pop_disl_tot["total"] = no_pop_tot if pop_tot: - pop_disl_tot["dislocated"] = {"population": pd_disl[1], - "percent_of_population": 100 * (pd_disl[1]/pop_tot)} - pop_disl_tot["not_dislocated"] = {"population": pop_tot - pd_disl[1], - "percent_of_population": 100 * ((pop_tot - pd_disl[1])/pop_tot)} - pop_disl_tot["total"] = {"population": pop_tot, "percent_of_population": 100} - - pd_total_json = {"household_dislocation_in_total": hua_disl_tot, - "population_dislocation_in_total": pop_disl_tot} + pop_disl_tot["dislocated"] = { + "population": pd_disl[1], + "percent_of_population": 100 * (pd_disl[1] / pop_tot), + } + pop_disl_tot["not_dislocated"] = { + "population": pop_tot - pd_disl[1], + "percent_of_population": 100 * ((pop_tot - pd_disl[1]) / pop_tot), + } + pop_disl_tot["total"] = { + "population": pop_tot, + "percent_of_population": 100, + } + + pd_total_json = { + "household_dislocation_in_total": hua_disl_tot, + "population_dislocation_in_total": pop_disl_tot, + } # print(pd_total_json) if filename_json: diff --git a/scripts/build-release.py b/scripts/build-release.py index cbeec506c..29a551319 100644 --- a/scripts/build-release.py +++ b/scripts/build-release.py @@ -4,39 +4,41 @@ script_path = os.path.dirname(os.path.realpath(__file__)) -dest_path = os.path.abspath(os.path.join(script_path, '..', '..')) +dest_path = os.path.abspath(os.path.join(script_path, "..", "..")) -with open(os.path.join(script_path, 'release-packages.yml')) as f: +with open(os.path.join(script_path, "release-packages.yml")) as f: config = yaml.safe_load(f) -internalExcludes = ['__pycache__', 'build', 'cache_data', 'dist', 'pyincore.egg-info'] +internalExcludes = ["__pycache__", "build", "cache_data", "dist", "pyincore.egg-info"] excludeList = None -if config['exclude'] is not None: - excludeList = config['exclude'] + internalExcludes +if config["exclude"] is not None: + excludeList = config["exclude"] + internalExcludes else: excludeList = internalExcludes -version = config['version'] +version = config["version"] -zipName = 'pyincore_' + 
version + '.zip' +zipName = "pyincore_" + version + ".zip" zf = zipfile.ZipFile(os.path.join(dest_path, zipName), "w") -for dirname, subdirs, files in os.walk(os.path.relpath(os.path.join(script_path, '..'))): +for dirname, subdirs, files in os.walk( + os.path.relpath(os.path.join(script_path, "..")) +): for exclude in excludeList: if exclude in subdirs: subdirs.remove(exclude) for subdir in subdirs: - if subdir[0] == '.': # hidden sub directories + if subdir[0] == ".": # hidden sub directories subdirs.remove(subdir) dirbasename = os.path.basename(os.path.abspath(dirname)) - if dirbasename[0] != '.': # hidden directories + if dirbasename[0] != ".": # hidden directories zf.write(dirname) for filename in files: - if filename[0] != '.': # hidden files + if filename[0] != ".": # hidden files zf.write(os.path.join(dirname, filename)) zf.close() diff --git a/setup.py b/setup.py index 9d490aa50..dcc23de63 100644 --- a/setup.py +++ b/setup.py @@ -7,22 +7,19 @@ from setuptools import setup, find_packages # version number of pyincore -version = '1.19.0' +version = "1.19.0" with open("README.rst", encoding="utf-8") as f: readme = f.read() setup( - name='pyincore', + name="pyincore", version=version, - description='IN-CORE analysis tool python package', + description="IN-CORE analysis tool python package", long_description=readme, - long_description_content_type='text/x-rst', - - url='https://incore.ncsa.illinois.edu', - + long_description_content_type="text/x-rst", + url="https://incore.ncsa.illinois.edu", license="Mozilla Public License v2.0", - classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", @@ -30,9 +27,8 @@ "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Topic :: Scientific/Engineering" + "Topic :: Scientific/Engineering", ], - keywords=[ "infrastructure", "resilience", @@ -43,43 +39,38 @@ "tsunami", "tornado", "hurricane", - "dislocation" + "dislocation", ], - - packages=find_packages(where=".", exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), + packages=find_packages( + where=".", exclude=["*.tests", "*.tests.*", "tests.*", "tests"] + ), include_package_data=True, - package_data={ - '': ['*.ini', "*.csv"] - }, - + package_data={"": ["*.ini", "*.csv"]}, python_requires=">=3.9", - install_requires=[ - 'fiona>=1.9.5', - 'geopandas>=0.14.0', - 'matplotlib>=3.8.0', - 'networkx>=3.2.1', - 'numpy>=1.26.0,<2.0a0', - 'pandas>=2.1.2', - 'pyomo>=6.6.2', - 'pyproj>=3.6.1', - 'rasterio>=1.3.9', - 'rtree>=1.1.0', - 'scipy>=1.11.3', - 'shapely>=2.0.2', - 'Deprecated>=1.2.14' + "fiona>=1.9.5", + "geopandas>=0.14.0", + "matplotlib>=3.8.0", + "networkx>=3.2.1", + "numpy>=1.26.0,<2.0a0", + "pandas>=2.1.2", + "pyomo>=6.6.2", + "pyproj>=3.6.1", + "rasterio>=1.3.9", + "rtree>=1.1.0", + "scipy>=1.11.3", + "shapely>=2.0.2", + "Deprecated>=1.2.14", ], - extras_require={ - 'test': [ - 'pycodestyle>=2.6.0', - 'pytest>=3.9.0', - 'python-jose>=3.0', + "test": [ + "pycodestyle>=2.6.0", + "pytest>=3.9.0", + "python-jose>=3.0", ] }, - project_urls={ - 'Bug Reports': 'https://github.com/IN-CORE/pyincore/issues', - 'Source': 'https://github.com/IN-CORE/pyincore', + "Bug Reports": "https://github.com/IN-CORE/pyincore/issues", + "Source": "https://github.com/IN-CORE/pyincore", }, ) diff --git a/tests/conftest.py b/tests/conftest.py index f00c7196c..5d55562a2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,7 +7,14 @@ from pyincore import ( globals as pyglobals, - IncoreClient, DataService, FragilityService, 
RepairService, RestorationService, HazardService, SpaceService, SemanticService + IncoreClient, + DataService, + FragilityService, + RepairService, + RestorationService, + HazardService, + SpaceService, + SemanticService, ) @@ -18,7 +25,9 @@ def pytest_sessionstart(session): before performing collection and entering the run test loop. """ try: - with open(os.path.join(os.path.dirname(__file__), "pyincore/.incorepw"), 'r') as f: + with open( + os.path.join(os.path.dirname(__file__), "pyincore/.incorepw"), "r" + ) as f: cred = f.read().splitlines() except EnvironmentError: assert False @@ -27,7 +36,9 @@ def pytest_sessionstart(session): monkeypatch = MonkeyPatch() monkeypatch.setattr("builtins.input", lambda x: credentials["username"]) monkeypatch.setattr("getpass.getpass", lambda y: credentials["password"]) - client = IncoreClient(service_url=pyglobals.INCORE_API_DEV_URL, token_file_name=".incrtesttoken") + client = IncoreClient( + service_url=pyglobals.INCORE_API_DEV_URL, token_file_name=".incrtesttoken" + ) pytest.client = client pytest.datasvc = DataService(client) pytest.fragilitysvc = FragilityService(client) @@ -36,4 +47,6 @@ def pytest_sessionstart(session): pytest.hazardsvc = HazardService(client) pytest.spacesvc = SpaceService(client) pytest.semanticsvc = SemanticService(client) - print(f"Successfully initialized Incore client and services. Using {pyglobals.INCORE_API_DEV_URL}") + print( + f"Successfully initialized Incore client and services. Using {pyglobals.INCORE_API_DEV_URL}" + ) diff --git a/tests/pyincore/analyses/bridgedamage/test_bridgedamage.py b/tests/pyincore/analyses/bridgedamage/test_bridgedamage.py index d867aff8a..396fb3671 100644 --- a/tests/pyincore/analyses/bridgedamage/test_bridgedamage.py +++ b/tests/pyincore/analyses/bridgedamage/test_bridgedamage.py @@ -1,4 +1,11 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake, Hurricane +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, + Hurricane, +) from pyincore.analyses.bridgedamage import BridgeDamage import pyincore.globals as pyglobals @@ -16,10 +23,7 @@ def run_with_base_class(): # Default Bridge Fragility Mapping on incore-service mapping_id = "5b47bcce337d4a37755e0cb2" - # Use hazard uncertainty for computing damage - use_hazard_uncertainty = False # Use liquefaction (LIQ) column of bridges to modify fragility curve - # use_liquefaction = False use_liquefaction = True # Geology dataset @@ -34,9 +38,9 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) - bridge_dmg.set_input_hazard('hazard', eq) + bridge_dmg.set_input_hazard("hazard", eq) # Set analysis parameters # bridge_dmg.set_parameter("result_name", "bridge_result") @@ -67,9 +71,9 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) - bridge_dmg.set_input_hazard('hazard', eq) + bridge_dmg.set_input_hazard("hazard", eq) # Set analysis parameters bridge_dmg.set_parameter("result_name", "bridge_result_south_carolina") bridge_dmg.set_parameter("use_liquefaction", False) @@ -80,7 +84,9 
@@ def run_with_base_class(): ################################################################### # test Galveston Bridge Damage - hurricane = Hurricane.from_hazard_service("5f11e50cc6491311a814584c", hazard_service) + hurricane = Hurricane.from_hazard_service( + "5f11e50cc6491311a814584c", hazard_service + ) # Galveston bridge bridge_dataset_id = "6062058ac57ada48e48c31e3" @@ -96,13 +102,17 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) - refactored_mapping_set = MappingSet(fragility_service.get_mapping(refactored_mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', refactored_mapping_set) + refactored_mapping_set = MappingSet( + fragility_service.get_mapping(refactored_mapping_id) + ) + bridge_dmg.set_input_dataset("dfr3_mapping_set", refactored_mapping_set) bridge_dmg.set_input_hazard("hazard", hurricane) # Set analysis parameters - bridge_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bridge_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bridge_dmg.set_parameter("result_name", "galveston_bridge_dmg_result") bridge_dmg.set_parameter("num_cpu", 4) @@ -110,5 +120,5 @@ def run_with_base_class(): bridge_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/bridgedamage/test_bridgedamage_legacy.py b/tests/pyincore/analyses/bridgedamage/test_bridgedamage_legacy.py index 60ee33b14..749a7ebb5 100644 --- a/tests/pyincore/analyses/bridgedamage/test_bridgedamage_legacy.py +++ b/tests/pyincore/analyses/bridgedamage/test_bridgedamage_legacy.py @@ -16,10 +16,7 @@ def run_with_base_class(): # Default Bridge Fragility Mapping on incore-service mapping_id = "5b47bcce337d4a37755e0cb2" - # Use hazard uncertainty for computing damage - use_hazard_uncertainty = False # Use liquefaction (LIQ) column of bridges to modify fragility curve - # use_liquefaction = False use_liquefaction = True # Geology dataset @@ -34,7 +31,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) # Set analysis parameters # bridge_dmg.set_parameter("result_name", "bridge_result") @@ -68,7 +65,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) # Set analysis parameters bridge_dmg.set_parameter("result_name", "bridge_result_south_carolina") @@ -99,11 +96,15 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) - refactored_mapping_set = MappingSet(fragility_service.get_mapping(refactored_mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', refactored_mapping_set) + refactored_mapping_set = MappingSet( + fragility_service.get_mapping(refactored_mapping_id) + ) + bridge_dmg.set_input_dataset("dfr3_mapping_set", refactored_mapping_set) # Set analysis parameters - bridge_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bridge_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + 
) bridge_dmg.set_parameter("result_name", "galveston_bridge_dmg_result") bridge_dmg.set_parameter("hazard_type", hazard_type) bridge_dmg.set_parameter("hazard_id", hazard_id) @@ -113,5 +114,5 @@ def run_with_base_class(): bridge_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/bridgedamage/test_bridgedamage_w_local_hazard.py b/tests/pyincore/analyses/bridgedamage/test_bridgedamage_w_local_hazard.py index bec279bfd..666d21193 100644 --- a/tests/pyincore/analyses/bridgedamage/test_bridgedamage_w_local_hazard.py +++ b/tests/pyincore/analyses/bridgedamage/test_bridgedamage_w_local_hazard.py @@ -22,31 +22,43 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) - refactored_mapping_set = MappingSet(fragility_service.get_mapping(refactored_mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', refactored_mapping_set) + refactored_mapping_set = MappingSet( + fragility_service.get_mapping(refactored_mapping_id) + ) + bridge_dmg.set_input_dataset("dfr3_mapping_set", refactored_mapping_set) # try local hurricane # test with local hurricane - hurricane = Hurricane.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json")) - hurricane.hazardDatasets[0].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane = Hurricane.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json") + ) + hurricane.hazardDatasets[0].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") - hurricane.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane.hazardDatasets[1].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") - hurricane.hazardDatasets[2].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane.hazardDatasets[2].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[2].set_threshold(threshold_value=1, threshold_unit="hr") bridge_dmg.set_input_hazard("hazard", hurricane) # Set analysis parameters - bridge_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bridge_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bridge_dmg.set_parameter("result_name", "galveston_bridge_dmg_result_local_hazard") bridge_dmg.set_parameter("num_cpu", 4) @@ -55,5 +67,5 @@ def run_with_base_class(): bridge_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/bridgedamage/test_mmsa_bridgedamage.py b/tests/pyincore/analyses/bridgedamage/test_mmsa_bridgedamage.py index 3e1c99412..5de998a4f 100644 --- a/tests/pyincore/analyses/bridgedamage/test_mmsa_bridgedamage.py +++ 
b/tests/pyincore/analyses/bridgedamage/test_mmsa_bridgedamage.py @@ -1,4 +1,10 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + Earthquake, + HazardService, +) from pyincore.analyses.bridgedamage import BridgeDamage @@ -7,11 +13,11 @@ def run_with_base_class(): # New madrid earthquake using Atkinson Boore 1995 hazard_service = HazardService(client) - eq = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service=hazard_service) + eq = Earthquake.from_hazard_service( + "5b902cb273c3371e1236b36b", hazard_service=hazard_service + ) # mmsa highway bridges - highway_bridges_id_list = { - "Highway_Bridges": "60e86d6b544e944c3ce622d2" - } + highway_bridges_id_list = {"Highway_Bridges": "60e86d6b544e944c3ce622d2"} # mmsa railway bridges railway_bridges_id_list = { @@ -19,7 +25,7 @@ def run_with_base_class(): "Railway_Bridges_CN": "60e86e91544e944c3ce62376", "Railway_Bridges_CSXT": "60e86eab60b3f41243fb9e44", "Railway_Bridges_NS": "60e86ed1544e944c3ce6241a", - "Railway_Bridges_UP": "60e86ee960b3f41243fb9ee8" + "Railway_Bridges_UP": "60e86ee960b3f41243fb9ee8", } # Default Bridge Fragility Mapping on incore-service @@ -31,7 +37,7 @@ def run_with_base_class(): "Col": "60f89fdc47290977c8994578", "FBL": "60f89fdda0c8a24d7eedfa9f", "ABT": "60f89fdf6fb1bc236b68d61c", - "FBT": "60f89fdf52e10319df808b5f" + "FBT": "60f89fdf52e10319df808b5f", } railway_bridge_mapping_id_list = { @@ -42,14 +48,9 @@ def run_with_base_class(): "EBL": "60f89fd2a0c8a24d7eedf6f1", "EBT": "60f89fd36fb1bc236b68d3a6", "ABA": "60f89fd452e10319df8088e9", - "ABP": "60f89fda52e10319df808a72" + "ABP": "60f89fda52e10319df808a72", } - # Use hazard uncertainty for computing damage - use_hazard_uncertainty = False - # Use liquefaction (LIQ) column of bridges to modify fragility curve - use_liquefaction = False - # Create bridge damage bridge_dmg = BridgeDamage(client) @@ -59,9 +60,14 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) - for component_name, highway_bridge_mapping_id in highway_bridge_mapping_id_list.items(): - mapping_set = MappingSet(fragility_service.get_mapping(highway_bridge_mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + for ( + component_name, + highway_bridge_mapping_id, + ) in highway_bridge_mapping_id_list.items(): + mapping_set = MappingSet( + fragility_service.get_mapping(highway_bridge_mapping_id) + ) + bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bridge_dmg.set_input_hazard("hazard", eq) @@ -78,9 +84,14 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) - for component_name, railway_bridge_mapping_id in railway_bridge_mapping_id_list.items(): - mapping_set = MappingSet(fragility_service.get_mapping(railway_bridge_mapping_id)) - bridge_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + for ( + component_name, + railway_bridge_mapping_id, + ) in railway_bridge_mapping_id_list.items(): + mapping_set = MappingSet( + fragility_service.get_mapping(railway_bridge_mapping_id) + ) + bridge_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bridge_dmg.set_input_hazard("hazard", eq) @@ -92,5 +103,5 @@ def run_with_base_class(): bridge_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py 
b/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py index b079819ff..e8a1868b4 100644 --- a/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py +++ b/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py @@ -21,22 +21,38 @@ bldg_cluster_recovery = BuildingClusterRecovery(client) bldg_cluster_recovery.set_parameter("uncertainty", True) - bldg_cluster_recovery.set_parameter("sample_size", 35) # default none. Gets size form input dataset + bldg_cluster_recovery.set_parameter( + "sample_size", 35 + ) # default none. Gets size form input dataset bldg_cluster_recovery.set_parameter("random_sample_size", 50) # default 10000 bldg_cluster_recovery.set_parameter("no_of_weeks", 100) # default 250 bldg_cluster_recovery.set_parameter("num_cpu", 1) bldg_cluster_recovery.set_parameter("result_name", "memphis") - bldg_cluster_recovery.load_remote_input_dataset("building_data", bldg_data_dataset) - bldg_cluster_recovery.load_remote_input_dataset("occupancy_mapping", occupancy_dataset) - bldg_cluster_recovery.load_remote_input_dataset("building_damage", bldg_damage_dataset) - bldg_cluster_recovery.load_remote_input_dataset("dmg_ratios", mean_repair_dataset) + bldg_cluster_recovery.load_remote_input_dataset( + "building_data", bldg_data_dataset + ) + bldg_cluster_recovery.load_remote_input_dataset( + "occupancy_mapping", occupancy_dataset + ) + bldg_cluster_recovery.load_remote_input_dataset( + "building_damage", bldg_damage_dataset + ) + bldg_cluster_recovery.load_remote_input_dataset( + "dmg_ratios", mean_repair_dataset + ) bldg_cluster_recovery.load_remote_input_dataset("utility", utility_dataset) - bldg_cluster_recovery.load_remote_input_dataset("utility_partial", utility_partial_dataset) + bldg_cluster_recovery.load_remote_input_dataset( + "utility_partial", utility_partial_dataset + ) bldg_cluster_recovery.load_remote_input_dataset("coefFL", coefFL_dataset) bldg_cluster_recovery.run_analysis() - print(bldg_cluster_recovery.get_output_dataset("result").get_dataframe_from_csv().head()) + print( + bldg_cluster_recovery.get_output_dataset("result") + .get_dataframe_from_csv() + .head() + ) except EnvironmentError: raise diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py index 8c91969fb..b9aa79b19 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py @@ -1,5 +1,13 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, Tsunami, Hurricane, \ - Tornado +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + Earthquake, + HazardService, + Tsunami, + Hurricane, + Tornado, +) from pyincore.analyses.buildingdamage import BuildingDamage import pyincore.globals as pyglobals @@ -29,7 +37,7 @@ def run_with_base_class(): mapping_id = "5b47b350337d4a3629076f2c" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_input_hazard("hazard", eq) @@ -57,7 +65,7 @@ def run_with_base_class(): mapping_id = "5b48fb1f337d4a478e7bd54d" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + 
bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_input_hazard("hazard", tsunami) result_name = "seaside_tsunami_dmg_result" bldg_dmg.set_parameter("result_name", result_name) @@ -79,8 +87,10 @@ def run_with_base_class(): mapping_id = "602c381a1d85547cdc9f0675" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bldg_dmg.set_input_hazard("hazard", hurricane) @@ -99,7 +109,7 @@ def run_with_base_class(): mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) tornado = Tornado.from_hazard_service("5dfa32bbc0601200080893fb", hazardsvc) bldg_dmg.set_input_hazard("hazard", tornado) @@ -111,5 +121,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py index b7b2c3da4..c968de03b 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py @@ -33,7 +33,7 @@ def run_with_base_class(): mapping_id = "5b47b350337d4a3629076f2c" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) result_name = os.path.join(result_folder, "memphis_eq_bldg_dmg_result") bldg_dmg.set_parameter("result_name", result_name) @@ -62,7 +62,7 @@ def run_with_base_class(): mapping_id = "5b48fb1f337d4a478e7bd54d" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) result_name = os.path.join(result_folder, "seaside_tsunami_dmg_result") bldg_dmg.set_parameter("result_name", result_name) @@ -87,8 +87,10 @@ def run_with_base_class(): mapping_id = "602c381a1d85547cdc9f0675" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) result_name = os.path.join(result_folder, "galveston_hurr_dmg_result") bldg_dmg.set_parameter("result_name", result_name) @@ -108,7 +110,7 @@ def run_with_base_class(): mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype with retrofit fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', 
mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) hazard_type = "tornado" hazard_id = "5dfa32bbc0601200080893fb" @@ -121,5 +123,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_multihazard.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_multihazard.py index 0a7a56aba..1530e9459 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_multihazard.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_multihazard.py @@ -16,7 +16,7 @@ def run_with_base_class(): mapping_id = "648a3f88c687ae511a1814e2" # earthquake+tsunami mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") hazard_type = "earthquake+tsunami" @@ -34,5 +34,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_offline.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_offline.py index 1fb46cd0c..bf44d717b 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_offline.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_offline.py @@ -1,6 +1,13 @@ import os -from pyincore import IncoreClient, FragilityCurveSet, MappingSet, Tornado, Dataset, Mapping +from pyincore import ( + IncoreClient, + FragilityCurveSet, + MappingSet, + Tornado, + Dataset, + Mapping, +) from pyincore.analyses.buildingdamage import BuildingDamage import pyincore.globals as pyglobals @@ -11,37 +18,58 @@ def run_with_base_class(): # client.clear_cache() # building - buildings = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, - "building/joplin_commercial_bldg_v6_sample.shp"), - data_type="ergo:buildingInventoryVer6") + buildings = Dataset.from_file( + os.path.join( + pyglobals.TEST_DATA_DIR, "building/joplin_commercial_bldg_v6_sample.shp" + ), + data_type="ergo:buildingInventoryVer6", + ) # tornado - tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) - tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), - data_type="incore:tornadoWindfield") + tornado = Tornado.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json") + ) + tornado.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield", + ) # dfr3 - fragility_archetype_6 = FragilityCurveSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "fragility_curves/fragility_archetype_6.json")) - fragility_archetype_7 = FragilityCurveSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "fragility_curves/fragility_archetype_7.json")) + fragility_archetype_6 = FragilityCurveSet.from_json_file( + os.path.join( + pyglobals.TEST_DATA_DIR, "fragility_curves/fragility_archetype_6.json" + ) + ) + fragility_archetype_7 = FragilityCurveSet.from_json_file( + os.path.join( + pyglobals.TEST_DATA_DIR, "fragility_curves/fragility_archetype_7.json" + ) + ) - fragility_entry_archetype_6 = {"Non-Retrofit Fragility ID 
Code": fragility_archetype_6} + fragility_entry_archetype_6 = { + "Non-Retrofit Fragility ID Code": fragility_archetype_6 + } fragility_rules_archetype_6 = {"OR": ["int archetype EQUALS 6"]} - fragility_mapping_archetype_6 = Mapping(fragility_entry_archetype_6, fragility_rules_archetype_6) - fragility_entry_archetype_7 = {"Non-Retrofit Fragility ID Code": fragility_archetype_7} + fragility_mapping_archetype_6 = Mapping( + fragility_entry_archetype_6, fragility_rules_archetype_6 + ) + fragility_entry_archetype_7 = { + "Non-Retrofit Fragility ID Code": fragility_archetype_7 + } fragility_rules_archetype_7 = {"OR": ["int archetype EQUALS 7"]} - fragility_mapping_archetype_7 = Mapping(fragility_entry_archetype_7, fragility_rules_archetype_7) + fragility_mapping_archetype_7 = Mapping( + fragility_entry_archetype_7, fragility_rules_archetype_7 + ) fragility_mapping_set_definition = { "id": "N/A", "name": "local joplin tornado fragility mapping object", "hazardType": "tornado", "inventoryType": "building", - 'mappings': [ + "mappings": [ fragility_mapping_archetype_6, fragility_mapping_archetype_7, ], - "mappingType": "fragility" + "mappingType": "fragility", } fragility_mapping_set = MappingSet(fragility_mapping_set_definition) @@ -70,5 +98,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py index 3c7ac52bd..5d0081f61 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py @@ -8,9 +8,7 @@ def run_with_base_class(): client = IncoreClient() - dev_client = IncoreClient(pyglobals.INCORE_API_DEV_URL) hazardsvc = HazardService(client) - dev_hazardsvc = HazardService(dev_client) # Set analysis parameters result_folder = "retrofit" @@ -21,8 +19,9 @@ def run_with_base_class(): ############################## # joplin tornado # dfr3 mapping - tornado_fragility_mapping_set = MappingSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "retrofit/tornado_retrofit_mapping.json")) + tornado_fragility_mapping_set = MappingSet.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "retrofit/tornado_retrofit_mapping.json") + ) # Building Damage # Create building damage @@ -31,28 +30,35 @@ def run_with_base_class(): # Load input dataset bldg_dataset_id = "5dbc8478b9219c06dd242c0d" # joplin building v6 prod tornado_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - retrofit_strategy_plan = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, - "retrofit/tornado_retrofit_plan.csv"), - data_type="incore:retrofitStrategy") + retrofit_strategy_plan = Dataset.from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "retrofit/tornado_retrofit_plan.csv"), + data_type="incore:retrofitStrategy", + ) tornado_bldg_dmg.set_input_dataset("retrofit_strategy", retrofit_strategy_plan) tornado = Tornado.from_hazard_service("608c5b17150b5e17064030df", hazardsvc) tornado_bldg_dmg.set_input_hazard("hazard", tornado) # Load fragility mapping - tornado_bldg_dmg.set_input_dataset("dfr3_mapping_set", tornado_fragility_mapping_set) + tornado_bldg_dmg.set_input_dataset( + "dfr3_mapping_set", tornado_fragility_mapping_set + ) # Set hazard tornado_bldg_dmg.set_input_hazard("hazard", tornado) - result_name = os.path.join(result_folder, "joplin_tornado_commerical_bldg_dmg_w_retrofit") + 
result_name = os.path.join( + result_folder, "joplin_tornado_commerical_bldg_dmg_w_retrofit" + ) tornado_bldg_dmg.set_parameter("result_name", result_name) tornado_bldg_dmg.set_parameter("num_cpu", 8) tornado_bldg_dmg.run_analysis() end_time_1 = time.time() - print(f"Joplin Tornado Retrofit execution time: {end_time_1 - start_time:.5f} seconds") + print( + f"Joplin Tornado Retrofit execution time: {end_time_1 - start_time:.5f} seconds" + ) -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_w_local_hazard.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_w_local_hazard.py index 3cc05be67..cffee43be 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_w_local_hazard.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_w_local_hazard.py @@ -10,19 +10,27 @@ def run_with_base_class(): # try local hurricane # test with local hurricane - hurricane = Hurricane.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json")) - hurricane.hazardDatasets[0].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane = Hurricane.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json") + ) + hurricane.hazardDatasets[0].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") - hurricane.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane.hazardDatasets[1].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") - hurricane.hazardDatasets[2].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane.hazardDatasets[2].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[2].set_threshold(threshold_value=1, threshold_unit="hr") @@ -36,8 +44,10 @@ def run_with_base_class(): mapping_id = "602c381a1d85547cdc9f0675" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bldg_dmg.set_input_hazard("hazard", hurricane) @@ -51,11 +61,15 @@ def run_with_base_class(): ########################### # local tornado - tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) + tornado = Tornado.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json") + ) # attach dataset from local file - tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), - 
data_type="incore:tornadoWindfield") + tornado.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield", + ) bldg_dataset_id = "5df7d0de425e0b00092d0082" @@ -65,7 +79,7 @@ def run_with_base_class(): mapping_id = "5e8e3a21eaa8b80001f04f1c" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_input_hazard("hazard", tornado) @@ -78,5 +92,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingdamage/test_slc_buildingdamage.py b/tests/pyincore/analyses/buildingdamage/test_slc_buildingdamage.py index ad7215cd4..60b2be701 100644 --- a/tests/pyincore/analyses/buildingdamage/test_slc_buildingdamage.py +++ b/tests/pyincore/analyses/buildingdamage/test_slc_buildingdamage.py @@ -1,4 +1,11 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, DataService +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + Earthquake, + HazardService, + DataService, +) from pyincore.analyses.buildingdamage import BuildingDamage import time @@ -16,21 +23,27 @@ bldg_dmg = BuildingDamage(client) mapping_set = MappingSet(fragility_services.get_mapping("6309005ad76c6d0e1f6be081")) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) - bldg_dmg.load_remote_input_dataset("buildings", "62fea288f5438e1f8c515ef8") # Salt Lake County All Building + bldg_dmg.load_remote_input_dataset( + "buildings", "62fea288f5438e1f8c515ef8" + ) # Salt Lake County All Building bldg_dmg.set_parameter("result_name", "SLC_bldg_dmg_no_retrofit-withLIQ7.1") - eq = Earthquake.from_hazard_service("640a03ea73a1642180262450", hazard_services) # Mw 7.1 + eq = Earthquake.from_hazard_service( + "640a03ea73a1642180262450", hazard_services + ) # Mw 7.1 # eq = Earthquake.from_hazard_service("64108b6486a52d419dd69a41", hazard_services) # Mw 7.0 bldg_dmg.set_input_hazard("hazard", eq) bldg_dmg.set_parameter("use_liquefaction", True) - bldg_dmg.set_parameter("liquefaction_geology_dataset_id", "62fe9ab685ac6b569e372429") + bldg_dmg.set_parameter( + "liquefaction_geology_dataset_id", "62fe9ab685ac6b569e372429" + ) bldg_dmg.set_parameter("num_cpu", 8) # Run building damage without liquefaction bldg_dmg.run_analysis() end_time = time.time() - print(f"total runtime: {end_time - start_time}") \ No newline at end of file + print(f"total runtime: {end_time - start_time}") diff --git a/tests/pyincore/analyses/buildingeconloss/test_buildingeconloss.py b/tests/pyincore/analyses/buildingeconloss/test_buildingeconloss.py index 4ec5b7a5a..723d2601f 100644 --- a/tests/pyincore/analyses/buildingeconloss/test_buildingeconloss.py +++ b/tests/pyincore/analyses/buildingeconloss/test_buildingeconloss.py @@ -22,7 +22,9 @@ def run_with_base_class(): bldg_econ_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) bldg_econ_dmg.load_remote_input_dataset("building_mean_dmg", bldg_dmg_id) - bldg_econ_dmg.load_remote_input_dataset("occupancy_multiplier", bldg_occupancy_mult_id) + bldg_econ_dmg.load_remote_input_dataset( + "occupancy_multiplier", bldg_occupancy_mult_id + ) result_name = "seaside_bldg_econ_loss" 
bldg_econ_dmg.set_parameter("result_name", result_name) @@ -34,5 +36,5 @@ def run_with_base_class(): bldg_econ_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingfunctionality/test_buildingfunctionality.py b/tests/pyincore/analyses/buildingfunctionality/test_buildingfunctionality.py index 401c5d492..24fa195ef 100644 --- a/tests/pyincore/analyses/buildingfunctionality/test_buildingfunctionality.py +++ b/tests/pyincore/analyses/buildingfunctionality/test_buildingfunctionality.py @@ -1,7 +1,9 @@ import time from pyincore import IncoreClient -from pyincore.analyses.buildingfunctionality.buildingfunctionality import BuildingFunctionality +from pyincore.analyses.buildingfunctionality.buildingfunctionality import ( + BuildingFunctionality, +) import pyincore.globals as pyglobals @@ -10,10 +12,18 @@ def run_with_base_class(): bldg_func = BuildingFunctionality(client) # load datasets remotely - bldg_func.load_remote_input_dataset("building_damage_mcs_samples", "5f0f6fbfb922f96f4e989ed8") - bldg_func.load_remote_input_dataset("substations_damage_mcs_samples", "5f0f71bab922f96f4e9a7511") - bldg_func.load_remote_input_dataset("poles_damage_mcs_samples", "5f0f7231b922f96f4e9a7538") - bldg_func.load_remote_input_dataset("interdependency_dictionary", "5f0f7311feef2d758c47cfab") + bldg_func.load_remote_input_dataset( + "building_damage_mcs_samples", "5f0f6fbfb922f96f4e989ed8" + ) + bldg_func.load_remote_input_dataset( + "substations_damage_mcs_samples", "5f0f71bab922f96f4e9a7511" + ) + bldg_func.load_remote_input_dataset( + "poles_damage_mcs_samples", "5f0f7231b922f96f4e9a7538" + ) + bldg_func.load_remote_input_dataset( + "interdependency_dictionary", "5f0f7311feef2d758c47cfab" + ) bldg_func.set_parameter("result_name", "Joplin_mcs") @@ -22,5 +32,5 @@ def run_with_base_class(): print("--- %s seconds ---" % (time.time() - start_time)) -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py index fc46c1550..6833e735f 100644 --- a/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py +++ b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py @@ -1,5 +1,8 @@ from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.buildingnonstructuraldamage import BuildingNonStructDamage, BuildingNonStructUtil +from pyincore.analyses.buildingnonstructuraldamage import ( + BuildingNonStructDamage, + BuildingNonStructUtil, +) import pyincore.globals as pyglobals @@ -25,30 +28,46 @@ def run_with_base_class(): # Acceleration sensitive non_structural_building_dmg_as = BuildingNonStructDamage(client) - non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_as") + non_structural_building_dmg_as.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_as.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_as.set_parameter( + "result_name", "non_structural_building_dmg_result_as" + ) non_structural_building_dmg_as.set_parameter("hazard_type", hazard_type) 
non_structural_building_dmg_as.set_parameter("hazard_id", hazard_id) non_structural_building_dmg_as.set_parameter("num_cpu", 4) - non_structural_building_dmg_as.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter( + "fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS + ) non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_as.run_analysis() # Drift sensitive non_structural_building_dmg_ds = BuildingNonStructDamage(client) - non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_ds") + non_structural_building_dmg_ds.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_ds.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_ds.set_parameter( + "result_name", "non_structural_building_dmg_result_ds" + ) non_structural_building_dmg_ds.set_parameter("hazard_type", hazard_type) non_structural_building_dmg_ds.set_parameter("hazard_id", hazard_id) non_structural_building_dmg_ds.set_parameter("num_cpu", 4) - non_structural_building_dmg_ds.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter( + "fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_DS + ) non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_ds.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py index 4a7a3dfcc..5dd4f50aa 100644 --- a/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py @@ -1,5 +1,14 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake -from pyincore.analyses.buildingnonstructuraldamage import BuildingNonStructDamage, BuildingNonStructUtil +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, +) +from pyincore.analyses.buildingnonstructuraldamage import ( + BuildingNonStructDamage, + BuildingNonStructUtil, +) import pyincore.globals as pyglobals @@ -8,7 +17,9 @@ def run_with_base_class(): # Memphis 7.9 AB-95 hazard_service = HazardService(client) - earthquake = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service) + earthquake = Earthquake.from_hazard_service( + "5b902cb273c3371e1236b36b", hazard_service + ) # Shelby County Essential Facilities building_dataset_id = "5a284f42c7d30d13bc0821ba" @@ -26,28 +37,44 @@ def run_with_base_class(): # Acceleration sensitive 
non_structural_building_dmg_as = BuildingNonStructDamage(client) - non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_as.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_as.set_input_dataset("dfr3_mapping_set", mapping_set) non_structural_building_dmg_as.set_input_hazard("hazard", earthquake) - non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_as") + non_structural_building_dmg_as.set_parameter( + "result_name", "non_structural_building_dmg_result_w_hazard_obj_as" + ) non_structural_building_dmg_as.set_parameter("num_cpu", 4) - non_structural_building_dmg_as.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter( + "fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS + ) non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_as.run_analysis() # Drift sensitive non_structural_building_dmg_ds = BuildingNonStructDamage(client) - non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_ds") + non_structural_building_dmg_ds.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_ds.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_ds.set_parameter( + "result_name", "non_structural_building_dmg_result_w_hazard_obj_ds" + ) non_structural_building_dmg_ds.set_input_hazard("hazard", earthquake) non_structural_building_dmg_ds.set_parameter("num_cpu", 4) - non_structural_building_dmg_ds.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter( + "fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_DS + ) non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_ds.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py b/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py index 74df76ce1..b1394833d 100644 --- a/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py +++ b/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py @@ -20,14 +20,20 @@ def run_with_base_class(): mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) non_structural_building_dmg_flood = BuildingNonStructDamage(client) - non_structural_building_dmg_flood.load_remote_input_dataset("buildings", bldg_dataset_id) - 
non_structural_building_dmg_flood.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_flood.set_parameter("result_name", "non_structural_building_dmg_result_flood") + non_structural_building_dmg_flood.load_remote_input_dataset( + "buildings", bldg_dataset_id + ) + non_structural_building_dmg_flood.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_flood.set_parameter( + "result_name", "non_structural_building_dmg_result_flood" + ) non_structural_building_dmg_flood.set_input_hazard("hazard", flood) non_structural_building_dmg_flood.set_parameter("num_cpu", 4) - non_structural_building_dmg_flood.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") + non_structural_building_dmg_flood.set_parameter( + "fragility_key", "Lumberton Flood Building Fragility ID Code" + ) non_structural_building_dmg_flood.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py index 15e7d22df..259ad82ab 100644 --- a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py @@ -1,6 +1,16 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, Tsunami, Hurricane, \ - Tornado -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + Earthquake, + HazardService, + Tsunami, + Hurricane, + Tornado, +) +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import pyincore.globals as pyglobals @@ -29,7 +39,7 @@ def run_with_base_class(): mapping_id = "5b47b350337d4a3629076f2c" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_input_hazard("hazard", eq) @@ -57,7 +67,7 @@ def run_with_base_class(): mapping_id = "5b48fb1f337d4a478e7bd54d" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_input_hazard("hazard", tsunami) result_name = "seaside_tsunami_dmg_result" bldg_dmg.set_parameter("result_name", result_name) @@ -79,8 +89,10 @@ def run_with_base_class(): mapping_id = "602c381a1d85547cdc9f0675" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bldg_dmg.set_input_hazard("hazard", hurricane) @@ -99,7 +111,7 @@ def run_with_base_class(): mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - 
bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) tornado = Tornado.from_hazard_service("5dfa32bbc0601200080893fb", hazardsvc) bldg_dmg.set_input_hazard("hazard", tornado) @@ -111,5 +123,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py index eeba894ce..aaf198409 100644 --- a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py @@ -1,7 +1,9 @@ import os from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import pyincore.globals as pyglobals @@ -33,7 +35,7 @@ def run_with_base_class(): mapping_id = "5b47b350337d4a3629076f2c" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) result_name = os.path.join(result_folder, "memphis_eq_bldg_dmg_result") bldg_dmg.set_parameter("result_name", result_name) @@ -62,7 +64,7 @@ def run_with_base_class(): mapping_id = "5b48fb1f337d4a478e7bd54d" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) result_name = os.path.join(result_folder, "seaside_tsunami_dmg_result") bldg_dmg.set_parameter("result_name", result_name) @@ -87,8 +89,10 @@ def run_with_base_class(): mapping_id = "602c381a1d85547cdc9f0675" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) result_name = os.path.join(result_folder, "galveston_hurr_dmg_result") bldg_dmg.set_parameter("result_name", result_name) @@ -108,7 +112,7 @@ def run_with_base_class(): mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype with retrofit fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) hazard_type = "tornado" hazard_id = "5dfa32bbc0601200080893fb" @@ -121,5 +125,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py index 8baad5f39..240e798f8 100644 --- 
a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py @@ -1,7 +1,9 @@ import os from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import pyincore.globals as pyglobals @@ -16,7 +18,7 @@ def run_with_base_class(): mapping_id = "648a3f88c687ae511a1814e2" # earthquake+tsunami mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") hazard_type = "earthquake+tsunami" @@ -34,5 +36,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py index 0d402fcd2..84046ae38 100644 --- a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py @@ -1,7 +1,16 @@ import os -from pyincore import IncoreClient, FragilityCurveSet, MappingSet, Tornado, Dataset, Mapping -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore import ( + IncoreClient, + FragilityCurveSet, + MappingSet, + Tornado, + Dataset, + Mapping, +) +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import pyincore.globals as pyglobals @@ -11,37 +20,58 @@ def run_with_base_class(): # client.clear_cache() # building - buildings = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, - "building/joplin_commercial_bldg_v6_sample.shp"), - data_type="ergo:buildingInventoryVer6") + buildings = Dataset.from_file( + os.path.join( + pyglobals.TEST_DATA_DIR, "building/joplin_commercial_bldg_v6_sample.shp" + ), + data_type="ergo:buildingInventoryVer6", + ) # tornado - tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) - tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), - data_type="incore:tornadoWindfield") + tornado = Tornado.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json") + ) + tornado.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield", + ) # dfr3 - fragility_archetype_6 = FragilityCurveSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "fragility_curves/fragility_archetype_6.json")) - fragility_archetype_7 = FragilityCurveSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "fragility_curves/fragility_archetype_7.json")) + fragility_archetype_6 = FragilityCurveSet.from_json_file( + os.path.join( + pyglobals.TEST_DATA_DIR, "fragility_curves/fragility_archetype_6.json" + ) + ) + fragility_archetype_7 = FragilityCurveSet.from_json_file( + os.path.join( + 
pyglobals.TEST_DATA_DIR, "fragility_curves/fragility_archetype_7.json" + ) + ) - fragility_entry_archetype_6 = {"Non-Retrofit Fragility ID Code": fragility_archetype_6} + fragility_entry_archetype_6 = { + "Non-Retrofit Fragility ID Code": fragility_archetype_6 + } fragility_rules_archetype_6 = {"OR": ["int archetype EQUALS 6"]} - fragility_mapping_archetype_6 = Mapping(fragility_entry_archetype_6, fragility_rules_archetype_6) - fragility_entry_archetype_7 = {"Non-Retrofit Fragility ID Code": fragility_archetype_7} + fragility_mapping_archetype_6 = Mapping( + fragility_entry_archetype_6, fragility_rules_archetype_6 + ) + fragility_entry_archetype_7 = { + "Non-Retrofit Fragility ID Code": fragility_archetype_7 + } fragility_rules_archetype_7 = {"OR": ["int archetype EQUALS 7"]} - fragility_mapping_archetype_7 = Mapping(fragility_entry_archetype_7, fragility_rules_archetype_7) + fragility_mapping_archetype_7 = Mapping( + fragility_entry_archetype_7, fragility_rules_archetype_7 + ) fragility_mapping_set_definition = { "id": "N/A", "name": "local joplin tornado fragility mapping object", "hazardType": "tornado", "inventoryType": "building", - 'mappings': [ + "mappings": [ fragility_mapping_archetype_6, fragility_mapping_archetype_7, ], - "mappingType": "fragility" + "mappingType": "fragility", } fragility_mapping_set = MappingSet(fragility_mapping_set_definition) @@ -70,5 +100,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py index 2267eaf9a..e10836f87 100644 --- a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py @@ -1,16 +1,16 @@ import os from pyincore import IncoreClient, MappingSet, Tornado, Dataset, HazardService -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import pyincore.globals as pyglobals import time def run_with_base_class(): client = IncoreClient() - dev_client = IncoreClient(pyglobals.INCORE_API_DEV_URL) hazardsvc = HazardService(client) - dev_hazardsvc = HazardService(dev_client) # Set analysis parameters result_folder = "retrofit" @@ -21,8 +21,9 @@ def run_with_base_class(): ############################## # joplin tornado # dfr3 mapping - tornado_fragility_mapping_set = MappingSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "retrofit/tornado_retrofit_mapping.json")) + tornado_fragility_mapping_set = MappingSet.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "retrofit/tornado_retrofit_mapping.json") + ) # Building Damage # Create building damage @@ -31,28 +32,35 @@ def run_with_base_class(): # Load input dataset bldg_dataset_id = "5dbc8478b9219c06dd242c0d" # joplin building v6 prod tornado_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - retrofit_strategy_plan = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, - "retrofit/tornado_retrofit_plan.csv"), - data_type="incore:retrofitStrategy") + retrofit_strategy_plan = Dataset.from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "retrofit/tornado_retrofit_plan.csv"), + data_type="incore:retrofitStrategy", + ) 
tornado_bldg_dmg.set_input_dataset("retrofit_strategy", retrofit_strategy_plan) tornado = Tornado.from_hazard_service("608c5b17150b5e17064030df", hazardsvc) tornado_bldg_dmg.set_input_hazard("hazard", tornado) # Load fragility mapping - tornado_bldg_dmg.set_input_dataset("dfr3_mapping_set", tornado_fragility_mapping_set) + tornado_bldg_dmg.set_input_dataset( + "dfr3_mapping_set", tornado_fragility_mapping_set + ) # Set hazard tornado_bldg_dmg.set_input_hazard("hazard", tornado) - result_name = os.path.join(result_folder, "joplin_tornado_commerical_bldg_dmg_w_retrofit") + result_name = os.path.join( + result_folder, "joplin_tornado_commerical_bldg_dmg_w_retrofit" + ) tornado_bldg_dmg.set_parameter("result_name", result_name) tornado_bldg_dmg.set_parameter("num_cpu", 8) tornado_bldg_dmg.run_analysis() end_time_1 = time.time() - print(f"Joplin Tornado Retrofit execution time: {end_time_1 - start_time:.5f} seconds") + print( + f"Joplin Tornado Retrofit execution time: {end_time_1 - start_time:.5f} seconds" + ) -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py index c28a65909..1da5619e7 100644 --- a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py @@ -1,7 +1,9 @@ import os from pyincore import IncoreClient, FragilityService, MappingSet, Hurricane, Tornado -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import pyincore.globals as pyglobals @@ -10,19 +12,27 @@ def run_with_base_class(): # try local hurricane # test with local hurricane - hurricane = Hurricane.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json")) - hurricane.hazardDatasets[0].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane = Hurricane.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json") + ) + hurricane.hazardDatasets[0].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") - hurricane.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane.hazardDatasets[1].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") - hurricane.hazardDatasets[2].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), - data_type="ncsa:probabilisticHurricaneRaster") + hurricane.hazardDatasets[2].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster", + ) # Optional: set threshold to determine exposure or not hurricane.hazardDatasets[2].set_threshold(threshold_value=1, 
threshold_unit="hr") @@ -36,8 +46,10 @@ def run_with_base_class(): mapping_id = "602c381a1d85547cdc9f0675" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bldg_dmg.set_input_hazard("hazard", hurricane) @@ -51,11 +63,15 @@ def run_with_base_class(): ########################### # local tornado - tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) + tornado = Tornado.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json") + ) # attach dataset from local file - tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), - data_type="incore:tornadoWindfield") + tornado.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield", + ) bldg_dataset_id = "5df7d0de425e0b00092d0082" @@ -65,7 +81,7 @@ def run_with_base_class(): mapping_id = "5e8e3a21eaa8b80001f04f1c" fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_input_hazard("hazard", tornado) @@ -78,5 +94,5 @@ def run_with_base_class(): bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py b/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py index 0fbc23225..1f5b6c8b8 100644 --- a/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py @@ -1,5 +1,14 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, DataService -from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + Earthquake, + HazardService, + DataService, +) +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import ( + BuildingStructuralDamage, +) import time @@ -16,21 +25,27 @@ bldg_dmg = BuildingStructuralDamage(client) mapping_set = MappingSet(fragility_services.get_mapping("6309005ad76c6d0e1f6be081")) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) - bldg_dmg.load_remote_input_dataset("buildings", "62fea288f5438e1f8c515ef8") # Salt Lake County All Building + bldg_dmg.load_remote_input_dataset( + "buildings", "62fea288f5438e1f8c515ef8" + ) # Salt Lake County All Building bldg_dmg.set_parameter("result_name", "SLC_bldg_dmg_no_retrofit-withLIQ7.1") - eq = Earthquake.from_hazard_service("640a03ea73a1642180262450", hazard_services) # Mw 7.1 + eq = Earthquake.from_hazard_service( + "640a03ea73a1642180262450", hazard_services + ) # Mw 7.1 # eq = Earthquake.from_hazard_service("64108b6486a52d419dd69a41", hazard_services) # Mw 7.0 
bldg_dmg.set_input_hazard("hazard", eq) bldg_dmg.set_parameter("use_liquefaction", True) - bldg_dmg.set_parameter("liquefaction_geology_dataset_id", "62fe9ab685ac6b569e372429") + bldg_dmg.set_parameter( + "liquefaction_geology_dataset_id", "62fe9ab685ac6b569e372429" + ) bldg_dmg.set_parameter("num_cpu", 8) # Run building damage without liquefaction bldg_dmg.run_analysis() end_time = time.time() - print(f"total runtime: {end_time - start_time}") \ No newline at end of file + print(f"total runtime: {end_time - start_time}") diff --git a/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py b/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py index 843f65ca0..9c9fcd4a5 100644 --- a/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py +++ b/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py @@ -4,9 +4,8 @@ from pyincore.analyses.buyoutdecision import BuyoutDecision -from pyincore import IncoreClient, Dataset +from pyincore import IncoreClient import pyincore.globals as pyglobals -import pandas as pd def BuyoutDecisionTest(): @@ -30,9 +29,15 @@ def BuyoutDecisionTest(): buyout_decision.load_remote_input_dataset("buildings", buildings_id) buyout_decision.load_remote_input_dataset("housing_unit_allocation", hua_id) - buyout_decision.load_remote_input_dataset("past_building_damage", past_building_damage_id) - buyout_decision.load_remote_input_dataset("future_building_damage", future_building_damage_id) - buyout_decision.load_remote_input_dataset("population_dislocation", past_pop_dislocation_id) + buyout_decision.load_remote_input_dataset( + "past_building_damage", past_building_damage_id + ) + buyout_decision.load_remote_input_dataset( + "future_building_damage", future_building_damage_id + ) + buyout_decision.load_remote_input_dataset( + "population_dislocation", past_pop_dislocation_id + ) buyout_decision.run_analysis() diff --git a/tests/pyincore/analyses/capitalshocks/test_capitalshocks.py b/tests/pyincore/analyses/capitalshocks/test_capitalshocks.py index 4b93a7666..f193222c0 100644 --- a/tests/pyincore/analyses/capitalshocks/test_capitalshocks.py +++ b/tests/pyincore/analyses/capitalshocks/test_capitalshocks.py @@ -15,10 +15,12 @@ def run_base_analysis(): capital_shocks.set_parameter("result_name", "sector_shocks") capital_shocks.load_remote_input_dataset("buildings", building_inventory) - capital_shocks.load_remote_input_dataset("buildings_to_sectors", building_to_sectors) + capital_shocks.load_remote_input_dataset( + "buildings_to_sectors", building_to_sectors + ) capital_shocks.load_remote_input_dataset("failure_probability", failure_probability) capital_shocks.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_base_analysis() diff --git a/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py b/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py index 832feba09..761cdec8b 100644 --- a/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py +++ b/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py @@ -1,5 +1,7 @@ from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.combinedwindwavesurgebuildingdamage import CombinedWindWaveSurgeBuildingDamage +from pyincore.analyses.combinedwindwavesurgebuildingdamage import ( + CombinedWindWaveSurgeBuildingDamage, +) from pyincore.analyses.buildingdamage 
import BuildingDamage from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage import pyincore.globals as pyglobals @@ -25,7 +27,7 @@ def run_with_base_class(): # surge-wave building damage sw_bldg_dmg = BuildingDamage(client) sw_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - sw_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + sw_bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) sw_bldg_dmg.set_parameter("result_name", "Galveston-sw-dmg") sw_bldg_dmg.set_parameter("hazard_type", hazard_type) sw_bldg_dmg.set_parameter("hazard_id", hazard_id) @@ -39,7 +41,7 @@ def run_with_base_class(): # wind building damage w_bldg_dmg = BuildingDamage(client) w_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - w_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + w_bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) w_bldg_dmg.set_parameter("result_name", "Galveston-wind-dmg") w_bldg_dmg.set_parameter("hazard_type", hazard_type) w_bldg_dmg.set_parameter("hazard_id", hazard_id) @@ -53,7 +55,7 @@ def run_with_base_class(): # flood building damage f_bldg_dmg = NonStructBuildingDamage(client) f_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - f_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + f_bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) f_bldg_dmg.set_parameter("result_name", "Galveston-flood-dmg") f_bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") f_bldg_dmg.set_parameter("hazard_type", hazard_type) @@ -75,5 +77,5 @@ def run_with_base_class(): combined_bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py b/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py index 39e30cdb5..a08d08496 100644 --- a/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py +++ b/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py @@ -1,5 +1,7 @@ from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.combinedwindwavesurgebuildingloss import CombinedWindWaveSurgeBuildingLoss +from pyincore.analyses.combinedwindwavesurgebuildingloss import ( + CombinedWindWaveSurgeBuildingLoss, +) from pyincore.analyses.buildingdamage import BuildingDamage from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage import pyincore.globals as pyglobals @@ -7,7 +9,6 @@ def run_with_base_class(): - client = IncoreClient(pyglobals.INCORE_API_DEV_URL) # Galveston building inventory @@ -27,7 +28,7 @@ def run_with_base_class(): # surge-wave building damage sw_bldg_dmg = BuildingDamage(client) sw_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - sw_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + sw_bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) sw_bldg_dmg.set_parameter("result_name", "Galveston-sw-dmg") sw_bldg_dmg.set_parameter("hazard_type", hazard_type) sw_bldg_dmg.set_parameter("hazard_id", hazard_id) @@ -41,7 +42,7 @@ def run_with_base_class(): # wind building damage w_bldg_dmg = BuildingDamage(client) w_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - w_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + w_bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) 
w_bldg_dmg.set_parameter("result_name", "Galveston-wind-dmg") w_bldg_dmg.set_parameter("hazard_type", hazard_type) w_bldg_dmg.set_parameter("hazard_id", hazard_id) @@ -55,7 +56,7 @@ def run_with_base_class(): # flood building damage f_bldg_dmg = NonStructBuildingDamage(client) f_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - f_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + f_bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) f_bldg_dmg.set_parameter("result_name", "Galveston-flood-dmg") f_bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") f_bldg_dmg.set_parameter("hazard_type", hazard_type) @@ -76,14 +77,18 @@ def run_with_base_class(): combined_bldg_loss.set_input_dataset("surge_wave_damage", surge_wave_damage) combined_bldg_loss.set_input_dataset("wind_damage", wind_damage) combined_bldg_loss.set_input_dataset("flood_damage", flood_damage) - combined_bldg_loss.load_remote_input_dataset("structural_cost", "63fd15716d3b2a308ba914c8") - combined_bldg_loss.load_remote_input_dataset("content_cost", "63fd16956d3b2a308ba9269a") + combined_bldg_loss.load_remote_input_dataset( + "structural_cost", "63fd15716d3b2a308ba914c8" + ) + combined_bldg_loss.load_remote_input_dataset( + "content_cost", "63fd16956d3b2a308ba9269a" + ) combined_bldg_loss.set_parameter("result_name", "Galveston") combined_bldg_loss.run_analysis() end = timer() - print(f'Elapsed time: {end - start:.3f} seconds') + print(f"Elapsed time: {end - start:.3f} seconds") -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/commercialbuildingrecovery/test_commercialbuildingrecovery.py b/tests/pyincore/analyses/commercialbuildingrecovery/test_commercialbuildingrecovery.py index aa16837dd..646be022e 100644 --- a/tests/pyincore/analyses/commercialbuildingrecovery/test_commercialbuildingrecovery.py +++ b/tests/pyincore/analyses/commercialbuildingrecovery/test_commercialbuildingrecovery.py @@ -1,5 +1,7 @@ from pyincore import IncoreClient, RepairService, MappingSet -from pyincore.analyses.commercialbuildingrecovery.commercialbuildingrecovery import CommercialBuildingRecovery +from pyincore.analyses.commercialbuildingrecovery.commercialbuildingrecovery import ( + CommercialBuildingRecovery, +) import pyincore.globals as pyglobals @@ -18,13 +20,15 @@ def run_with_base_class(): # Create repair service repair_service = RepairService(client) mapping_set = MappingSet(repair_service.get_mapping(mapping_id)) - com_recovery.set_input_dataset('dfr3_mapping_set', mapping_set) + com_recovery.set_input_dataset("dfr3_mapping_set", mapping_set) # input datsets ids - sample_damage_states = "64ee146456b25759cfc599ac" # 10 samples 28k buildings - MCS output format - mcs_failure = '64ee144256b25759cfc599a5' + sample_damage_states = ( + "64ee146456b25759cfc599ac" # 10 samples 28k buildings - MCS output format + ) + mcs_failure = "64ee144256b25759cfc599a5" delay_factors = "64ee10e756b25759cfc53243" - bld_dmg = '65723c3f9bc3c806024c69b0' + bld_dmg = "65723c3f9bc3c806024c69b0" # Load input datasets com_recovery.load_remote_input_dataset("sample_damage_states", sample_damage_states) @@ -32,8 +36,6 @@ def run_with_base_class(): com_recovery.load_remote_input_dataset("delay_factors", delay_factors) com_recovery.load_remote_input_dataset("building_dmg", bld_dmg) - - # Input parameters num_samples = 10 @@ -48,5 +50,5 @@ def run_with_base_class(): com_recovery.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": 
run_with_base_class() diff --git a/tests/pyincore/analyses/cumulativebuildingdamage/test_cumulativebuildingdamage.py b/tests/pyincore/analyses/cumulativebuildingdamage/test_cumulativebuildingdamage.py index 138fd0dee..31bf80040 100644 --- a/tests/pyincore/analyses/cumulativebuildingdamage/test_cumulativebuildingdamage.py +++ b/tests/pyincore/analyses/cumulativebuildingdamage/test_cumulativebuildingdamage.py @@ -1,8 +1,9 @@ from pyincore import IncoreClient, MappingSet, FragilityService import pyincore.globals as pyglobals -from pyincore import Dataset from pyincore.analyses.buildingdamage import BuildingDamage -from pyincore.analyses.cumulativebuildingdamage.cumulativebuildingdamage import CumulativeBuildingDamage +from pyincore.analyses.cumulativebuildingdamage.cumulativebuildingdamage import ( + CumulativeBuildingDamage, +) def run_with_base_class(): @@ -26,7 +27,7 @@ def run_with_base_class(): result_name = "eq_bldg_dmg_result" bldg_dmg_eq.set_parameter("result_name", result_name) eq_mapping_set = MappingSet(fragility_service.get_mapping(eq_mapping_id)) - bldg_dmg_eq.set_input_dataset('dfr3_mapping_set', eq_mapping_set) + bldg_dmg_eq.set_input_dataset("dfr3_mapping_set", eq_mapping_set) bldg_dmg_eq.set_parameter("hazard_type", eq_hazard_type) bldg_dmg_eq.set_parameter("hazard_id", eq_hazard_id) bldg_dmg_eq.set_parameter("num_cpu", 4) @@ -46,7 +47,7 @@ def run_with_base_class(): tsunami_result_name = "tsunami_bldg_dmg_result" bldg_dmg_tsunami.set_parameter("result_name", tsunami_result_name) tsu_mapping_set = MappingSet(fragility_service.get_mapping(tsunami_mapping_id)) - bldg_dmg_tsunami.set_input_dataset('dfr3_mapping_set', tsu_mapping_set) + bldg_dmg_tsunami.set_input_dataset("dfr3_mapping_set", tsu_mapping_set) bldg_dmg_tsunami.set_parameter("hazard_type", tsunami_hazard_type) bldg_dmg_tsunami.set_parameter("hazard_id", tsunami_hazard_id) bldg_dmg_tsunami.set_parameter("num_cpu", 4) @@ -68,5 +69,5 @@ def run_with_base_class(): cumulative_bldg_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/epfdamage/test_epfdamage.py b/tests/pyincore/analyses/epfdamage/test_epfdamage.py index 41d9f06ea..8e022da66 100644 --- a/tests/pyincore/analyses/epfdamage/test_epfdamage.py +++ b/tests/pyincore/analyses/epfdamage/test_epfdamage.py @@ -24,14 +24,16 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg_eq_memphis.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_eq_memphis.set_input_dataset("dfr3_mapping_set", mapping_set) epf_dmg_eq_memphis.set_parameter("result_name", "memphis_eq_epf_dmg_result") epf_dmg_eq_memphis.set_parameter("hazard_type", hazard_type_eq) epf_dmg_eq_memphis.set_parameter("hazard_id", hazard_id_eq) epf_dmg_eq_memphis.set_parameter("use_liquefaction", use_liquefaction) epf_dmg_eq_memphis.set_parameter("use_hazard_uncertainty", use_hazard_uncertainty) - epf_dmg_eq_memphis.set_parameter("liquefaction_geology_dataset_id", liquefaction_geology_dataset_id) + epf_dmg_eq_memphis.set_parameter( + "liquefaction_geology_dataset_id", liquefaction_geology_dataset_id + ) epf_dmg_eq_memphis.set_parameter("num_cpu", 1) # Run Analysis epf_dmg_eq_memphis.run_analysis() @@ -50,7 +52,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - 
epf_dmg_eq.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_eq.set_input_dataset("dfr3_mapping_set", mapping_set) epf_dmg_eq.set_parameter("result_name", "seaside_eq_epf_dmg_result") epf_dmg_eq.set_parameter("hazard_type", hazard_type_eq) epf_dmg_eq.set_parameter("hazard_id", hazard_id_eq) @@ -73,15 +75,17 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg_tsu_re.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_tsu_re.set_input_dataset("dfr3_mapping_set", mapping_set) - epf_dmg_tsu_re.set_parameter("fragility_key", "Non-Retrofit inundationDepth Fragility ID Code") + epf_dmg_tsu_re.set_parameter( + "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code" + ) epf_dmg_tsu_re.set_parameter("result_name", "seaside_tsunami_epf_dmg_result") epf_dmg_tsu_re.set_parameter("hazard_type", hazard_type_tsu) epf_dmg_tsu_re.set_parameter("hazard_id", hazard_id_tsu) epf_dmg_tsu_re.set_parameter("num_cpu", 1) - epf_dmg_tsu_re.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_tsu_re.set_input_dataset("dfr3_mapping_set", mapping_set) # Run Analysis epf_dmg_tsu_re.run_analysis() @@ -89,21 +93,27 @@ def run_with_base_class(): # Galveston EPF damage # Run epf damage epf_dmg_hurricane_galveston = EpfDamage(client) - epf_dmg_hurricane_galveston.load_remote_input_dataset("epfs", "62fd437b18e50067942b679a") + epf_dmg_hurricane_galveston.load_remote_input_dataset( + "epfs", "62fd437b18e50067942b679a" + ) # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping("62fac92ecef2881193f22613")) - epf_dmg_hurricane_galveston.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_hurricane_galveston.set_input_dataset("dfr3_mapping_set", mapping_set) - epf_dmg_hurricane_galveston.set_parameter("result_name", "galveston_hurricane_epf_damage") + epf_dmg_hurricane_galveston.set_parameter( + "result_name", "galveston_hurricane_epf_damage" + ) epf_dmg_hurricane_galveston.set_parameter("hazard_type", "hurricane") epf_dmg_hurricane_galveston.set_parameter("hazard_id", "5fa472033c1f0c73fe81461a") - epf_dmg_hurricane_galveston.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") + epf_dmg_hurricane_galveston.set_parameter( + "fragility_key", "Non-Retrofit Fragility ID Code" + ) epf_dmg_hurricane_galveston.set_parameter("num_cpu", 8) # Run Analysis epf_dmg_hurricane_galveston.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/epfdamage/test_epfdamage_w_hazard_obj.py b/tests/pyincore/analyses/epfdamage/test_epfdamage_w_hazard_obj.py index 304ee30f7..95d245633 100644 --- a/tests/pyincore/analyses/epfdamage/test_epfdamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/epfdamage/test_epfdamage_w_hazard_obj.py @@ -1,4 +1,12 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, Tsunami, Hurricane +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + Earthquake, + HazardService, + Tsunami, + Hurricane, +) from pyincore.analyses.epfdamage import EpfDamage import pyincore.globals as pyglobals @@ -8,7 +16,9 @@ def run_with_base_class(): # ## Memphis EPF Damage with Earthquake ## hazard_service = HazardService(client) - earthquake = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service) + earthquake = Earthquake.from_hazard_service( + 
"5b902cb273c3371e1236b36b", hazard_service + ) epf_dataset_id = "6189c103d5b02930aa3efc35" # mapping_id = "61980b11e32da63f4b9d86f4" @@ -25,14 +35,18 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg_eq_memphis.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_eq_memphis.set_input_dataset("dfr3_mapping_set", mapping_set) - epf_dmg_eq_memphis.set_input_hazard('hazard', earthquake) + epf_dmg_eq_memphis.set_input_hazard("hazard", earthquake) - epf_dmg_eq_memphis.set_parameter("result_name", "memphis_eq_epf_dmg_result_w_hazard_obj") + epf_dmg_eq_memphis.set_parameter( + "result_name", "memphis_eq_epf_dmg_result_w_hazard_obj" + ) epf_dmg_eq_memphis.set_parameter("use_liquefaction", use_liquefaction) epf_dmg_eq_memphis.set_parameter("use_hazard_uncertainty", use_hazard_uncertainty) - epf_dmg_eq_memphis.set_parameter("liquefaction_geology_dataset_id", liquefaction_geology_dataset_id) + epf_dmg_eq_memphis.set_parameter( + "liquefaction_geology_dataset_id", liquefaction_geology_dataset_id + ) epf_dmg_eq_memphis.set_parameter("num_cpu", 1) # Run Analysis epf_dmg_eq_memphis.run_analysis() @@ -51,7 +65,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg_eq.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_eq.set_input_dataset("dfr3_mapping_set", mapping_set) epf_dmg_eq.set_parameter("result_name", "seaside_eq_epf_dmg_result") epf_dmg_eq.set_parameter("hazard_type", hazard_type_eq) epf_dmg_eq.set_parameter("hazard_id", hazard_id_eq) @@ -75,36 +89,48 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg_tsu_re.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_tsu_re.set_input_dataset("dfr3_mapping_set", mapping_set) - epf_dmg_tsu_re.set_input_hazard('hazard', tsunami) + epf_dmg_tsu_re.set_input_hazard("hazard", tsunami) - epf_dmg_tsu_re.set_parameter("fragility_key", "Non-Retrofit inundationDepth Fragility ID Code") - epf_dmg_tsu_re.set_parameter("result_name", "seaside_tsunami_epf_dmg_result_w_hazard_obj") + epf_dmg_tsu_re.set_parameter( + "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code" + ) + epf_dmg_tsu_re.set_parameter( + "result_name", "seaside_tsunami_epf_dmg_result_w_hazard_obj" + ) epf_dmg_tsu_re.set_parameter("num_cpu", 1) - epf_dmg_tsu_re.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_tsu_re.set_input_dataset("dfr3_mapping_set", mapping_set) # Run Analysis epf_dmg_tsu_re.run_analysis() ############################################################################# # Galveston EPF damage # Run epf damage - hurricane = Hurricane.from_hazard_service("5fa472033c1f0c73fe81461a", hazard_service) + hurricane = Hurricane.from_hazard_service( + "5fa472033c1f0c73fe81461a", hazard_service + ) epf_dmg_hurricane_galveston = EpfDamage(client) - epf_dmg_hurricane_galveston.load_remote_input_dataset("epfs", "62fd437b18e50067942b679a") + epf_dmg_hurricane_galveston.load_remote_input_dataset( + "epfs", "62fd437b18e50067942b679a" + ) # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping("62fac92ecef2881193f22613")) - epf_dmg_hurricane_galveston.set_input_dataset('dfr3_mapping_set', mapping_set) - 
epf_dmg_hurricane_galveston.set_input_hazard('hazard', hurricane) - epf_dmg_hurricane_galveston.set_parameter("result_name", "galveston_hurricane_epf_damage_w_hazard_obj") - epf_dmg_hurricane_galveston.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") + epf_dmg_hurricane_galveston.set_input_dataset("dfr3_mapping_set", mapping_set) + epf_dmg_hurricane_galveston.set_input_hazard("hazard", hurricane) + epf_dmg_hurricane_galveston.set_parameter( + "result_name", "galveston_hurricane_epf_damage_w_hazard_obj" + ) + epf_dmg_hurricane_galveston.set_parameter( + "fragility_key", "Non-Retrofit Fragility ID Code" + ) epf_dmg_hurricane_galveston.set_parameter("num_cpu", 8) # Run Analysis epf_dmg_hurricane_galveston.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/epfrepaircost/test_epfrepaircost.py b/tests/pyincore/analyses/epfrepaircost/test_epfrepaircost.py index 2dbe0c9c3..584d3f288 100644 --- a/tests/pyincore/analyses/epfrepaircost/test_epfrepaircost.py +++ b/tests/pyincore/analyses/epfrepaircost/test_epfrepaircost.py @@ -13,13 +13,19 @@ def run_with_base_class(): epf_repair_cost.load_remote_input_dataset("epfs", "5eebcaa17a00803abc85ec11") # dev # epf_repair_cost.load_remote_input_dataset("epfs", "5d263f08b9219cf93c056c68") # prod - epf_repair_cost.load_remote_input_dataset("replacement_cost", "6470c09a5bc8b26ddf99bb59") + epf_repair_cost.load_remote_input_dataset( + "replacement_cost", "6470c09a5bc8b26ddf99bb59" + ) # can be chained with MCS - epf_repair_cost.load_remote_input_dataset("sample_damage_states", "6470c23d5bc8b26ddf99bb65") + epf_repair_cost.load_remote_input_dataset( + "sample_damage_states", "6470c23d5bc8b26ddf99bb65" + ) # dmg ratios - epf_repair_cost.load_remote_input_dataset("epf_dmg_ratios", "6470c1c35bc8b26ddf99bb5f") + epf_repair_cost.load_remote_input_dataset( + "epf_dmg_ratios", "6470c1c35bc8b26ddf99bb5f" + ) epf_repair_cost.set_parameter("result_name", "seaside_epf") epf_repair_cost.set_parameter("num_cpu", 4) diff --git a/tests/pyincore/analyses/epfrestoration/test_epfrestoration.py b/tests/pyincore/analyses/epfrestoration/test_epfrestoration.py index c412bca1e..1e8bd67af 100644 --- a/tests/pyincore/analyses/epfrestoration/test_epfrestoration.py +++ b/tests/pyincore/analyses/epfrestoration/test_epfrestoration.py @@ -11,9 +11,8 @@ def run_with_base_class(): - client = IncoreClient(pyglobals.INCORE_API_DEV_URL) - + # Memphis EPF Damage with Earthquake hazard_type_eq = "earthquake" hazard_id_eq = "5b902cb273c3371e1236b36b" @@ -31,25 +30,31 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg_eq_memphis.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg_eq_memphis.set_input_dataset("dfr3_mapping_set", mapping_set) epf_dmg_eq_memphis.set_parameter("result_name", "memphis_eq_epf_dmg_result") epf_dmg_eq_memphis.set_parameter("hazard_type", hazard_type_eq) epf_dmg_eq_memphis.set_parameter("hazard_id", hazard_id_eq) epf_dmg_eq_memphis.set_parameter("use_liquefaction", use_liquefaction) epf_dmg_eq_memphis.set_parameter("use_hazard_uncertainty", use_hazard_uncertainty) - epf_dmg_eq_memphis.set_parameter("liquefaction_geology_dataset_id", liquefaction_geology_dataset_id) + epf_dmg_eq_memphis.set_parameter( + "liquefaction_geology_dataset_id", liquefaction_geology_dataset_id + ) epf_dmg_eq_memphis.set_parameter("num_cpu", 1) # Run Analysis 
epf_dmg_eq_memphis.run_analysis() - + epf_rest = EpfRestoration(client) restorationsvc = RestorationService(client) - mapping_set = MappingSet(restorationsvc.get_mapping("61f302e6e3a03e465500b3eb")) # new format of mapping + mapping_set = MappingSet( + restorationsvc.get_mapping("61f302e6e3a03e465500b3eb") + ) # new format of mapping epf_rest.load_remote_input_dataset("epfs", "6189c103d5b02930aa3efc35") epf_rest.set_input_dataset("dfr3_mapping_set", mapping_set) - epf_rest.set_input_dataset('damage', epf_dmg_eq_memphis.get_output_dataset("result")) + epf_rest.set_input_dataset( + "damage", epf_dmg_eq_memphis.get_output_dataset("result") + ) epf_rest.set_parameter("result_name", "memphis-epf") epf_rest.set_parameter("discretized_days", [1, 3, 7, 30, 90]) epf_rest.set_parameter("restoration_key", "Restoration ID Code") @@ -66,13 +71,22 @@ def run_with_base_class(): time_interval = epf_rest.get_parameter("time_interval") pf_interval = epf_rest.get_parameter("pf_interval") end_time = epf_rest.get_parameter("end_time") - epf_rest_util = EpfRestorationUtil(inventory_restoration_map, pf_results, time_results, time_interval, - pf_interval, end_time) - functionality = epf_rest_util.get_percentage_func(guid="60748fbd-67c3-4f8d-beb9-26685a53d3c5", - damage_state="DS_0", time=2.0) - time = epf_rest_util.get_restoration_time(guid="60748fbd-67c3-4f8d-beb9-26685a53d3c5", damage_state="DS_1", pf=0.81) + epf_rest_util = EpfRestorationUtil( + inventory_restoration_map, + pf_results, + time_results, + time_interval, + pf_interval, + end_time, + ) + functionality = epf_rest_util.get_percentage_func( + guid="60748fbd-67c3-4f8d-beb9-26685a53d3c5", damage_state="DS_0", time=2.0 + ) + time = epf_rest_util.get_restoration_time( + guid="60748fbd-67c3-4f8d-beb9-26685a53d3c5", damage_state="DS_1", pf=0.81 + ) print(functionality, time) -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/epnfunctionality/test_epnfunctionality.py b/tests/pyincore/analyses/epnfunctionality/test_epnfunctionality.py index bea0c7f57..9109861cb 100644 --- a/tests/pyincore/analyses/epnfunctionality/test_epnfunctionality.py +++ b/tests/pyincore/analyses/epnfunctionality/test_epnfunctionality.py @@ -1,4 +1,4 @@ -from pyincore import IncoreClient, FragilityService, MappingSet +from pyincore import IncoreClient from pyincore.analyses.epnfunctionality import EpnFunctionality import pyincore.globals as pyglobals @@ -11,7 +11,9 @@ def run_with_base_class(): # run epn functionality epn_func = EpnFunctionality(client) epn_func.load_remote_input_dataset("epn_network", epn_dataset_id) - epn_func.load_remote_input_dataset("epf_sample_failure_state", "62d03711861e370172cb0a37") + epn_func.load_remote_input_dataset( + "epf_sample_failure_state", "62d03711861e370172cb0a37" + ) epn_func.set_parameter("result_name", "mmsa_epn_functionality") epn_func.set_parameter("gate_station_node_list", [1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -20,5 +22,5 @@ def run_with_base_class(): epn_func.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/example/test_example.py b/tests/pyincore/analyses/example/test_example.py index c68aaf53a..a705e4177 100755 --- a/tests/pyincore/analyses/example/test_example.py +++ b/tests/pyincore/analyses/example/test_example.py @@ -5,7 +5,7 @@ import traceback import pyincore.globals as pyglobals -if __name__ == '__main__': +if __name__ == "__main__": cred = None # If you installed pyIncore, a folder called 
.incore should be created in your home directory along @@ -44,8 +44,10 @@ if example_bldgdmg.run_analysis(): print("Analysis finished, check for a file called " + result_name + ".csv") else: - print("There was an error running the example, you may need to check there there is a credential in " + - incore_pw) + print( + "There was an error running the example, you may need to check there there is a credential in " + + incore_pw + ) except EnvironmentError: print("exception") traceback.print_exc() diff --git a/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py b/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py index 32d0d9c87..8f40bb755 100644 --- a/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py +++ b/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py @@ -1,4 +1,10 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, +) from pyincore.analyses.gasfacilitydamage import GasFacilityDamage import pyincore.globals as pyglobals diff --git a/tests/pyincore/analyses/housingrecovery/test_housingrecovery.py b/tests/pyincore/analyses/housingrecovery/test_housingrecovery.py index 69a891289..9c736ea0e 100644 --- a/tests/pyincore/analyses/housingrecovery/test_housingrecovery.py +++ b/tests/pyincore/analyses/housingrecovery/test_housingrecovery.py @@ -34,7 +34,9 @@ def run_with_base_class(chained): result_name = "Galveston_bldg_dmg_result" - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bldg_dmg.set_parameter("result_name", result_name) bldg_dmg.set_parameter("hazard_type", hazard_type) bldg_dmg.set_parameter("hazard_id", hazard_id) @@ -177,7 +179,7 @@ def run_with_base_class(chained): housing_rec.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": chained = False run_with_base_class(chained) diff --git a/tests/pyincore/analyses/housingrecoveryserial/test_housingrecoverysequential.py b/tests/pyincore/analyses/housingrecoveryserial/test_housingrecoverysequential.py index 348439b50..e851dec69 100644 --- a/tests/pyincore/analyses/housingrecoveryserial/test_housingrecoverysequential.py +++ b/tests/pyincore/analyses/housingrecoveryserial/test_housingrecoverysequential.py @@ -26,17 +26,21 @@ def run_with_base_class(): housing_recovery = HousingRecoverySequential(client) # Parameter setup - housing_recovery.set_parameter('num_cpu', 4) - housing_recovery.set_parameter('seed', seed) - housing_recovery.set_parameter('t_delta', t_delta) - housing_recovery.set_parameter('t_final', t_final) - housing_recovery.set_parameter('result_name', 'results_hhrs_galveston.csv') + housing_recovery.set_parameter("num_cpu", 4) + housing_recovery.set_parameter("seed", seed) + housing_recovery.set_parameter("t_delta", t_delta) + housing_recovery.set_parameter("t_final", t_final) + housing_recovery.set_parameter("result_name", "results_hhrs_galveston.csv") # Dataset inputs - housing_recovery.load_remote_input_dataset("population_dislocation_block", population_dislocation) - housing_recovery.load_remote_input_dataset('tpm', transition_probability_matrix) - housing_recovery.load_remote_input_dataset('initial_stage_probabilities', initial_probability_vector) - housing_recovery.load_remote_input_dataset('sv_result', sv_result) + 
housing_recovery.load_remote_input_dataset( + "population_dislocation_block", population_dislocation + ) + housing_recovery.load_remote_input_dataset("tpm", transition_probability_matrix) + housing_recovery.load_remote_input_dataset( + "initial_stage_probabilities", initial_probability_vector + ) + housing_recovery.load_remote_input_dataset("sv_result", sv_result) housing_recovery.run() @@ -46,12 +50,12 @@ def run_with_base_class(): end = timer() - print(f'Elapsed time: {end - start:.3f} seconds') + print(f"Elapsed time: {end - start:.3f} seconds") timesteps = ["1", "7", "13", "25", "49"] # t0, t6, t12, t24, t48 print(HHRSOutputProcess.get_hhrs_stage_count(timesteps, hhrs_df)) -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/housingunitallocation/test_housingunitallocation.py b/tests/pyincore/analyses/housingunitallocation/test_housingunitallocation.py index bb3cf93a9..4e690b587 100644 --- a/tests/pyincore/analyses/housingunitallocation/test_housingunitallocation.py +++ b/tests/pyincore/analyses/housingunitallocation/test_housingunitallocation.py @@ -3,7 +3,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ from pyincore import IncoreClient -from pyincore.analyses.housingunitallocation.housingunitallocation import HousingUnitAllocation +from pyincore.analyses.housingunitallocation.housingunitallocation import ( + HousingUnitAllocation, +) import pyincore.globals as pyglobals @@ -33,5 +35,5 @@ def run_with_base_class(): return True -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py b/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py index a722556aa..0f3dd7779 100644 --- a/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py +++ b/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py @@ -3,7 +3,9 @@ from pyincore.analyses.buildingdamage.buildingdamage import BuildingDamage from pyincore.analyses.housingunitallocation import HousingUnitAllocation from pyincore.analyses.populationdislocation import PopulationDislocation -from pyincore.analyses.housingvaluationrecovery.housingvaluationrecovery import HousingValuationRecovery +from pyincore.analyses.housingvaluationrecovery.housingvaluationrecovery import ( + HousingValuationRecovery, +) import pyincore.globals as pyglobals @@ -34,7 +36,9 @@ def run_with_base_class(chained): result_name = "Galveston_bldg_dmg_result" - bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_parameter( + "fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code" + ) bldg_dmg.set_parameter("result_name", result_name) bldg_dmg.set_parameter("hazard_type", hazard_type) bldg_dmg.set_parameter("hazard_id", hazard_id) @@ -177,7 +181,7 @@ def run_with_base_class(chained): housing_rec.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": chained = False run_with_base_class(chained) diff --git a/tests/pyincore/analyses/indp/test_indp.py b/tests/pyincore/analyses/indp/test_indp.py index 257bf6ad8..e712aabcd 100644 --- a/tests/pyincore/analyses/indp/test_indp.py +++ b/tests/pyincore/analyses/indp/test_indp.py @@ -1,4 +1,11 @@ -from pyincore import IncoreClient, Dataset, RestorationService, MappingSet, FragilityService, NetworkDataset +from pyincore import ( + IncoreClient, + Dataset, + 
RestorationService, + MappingSet, + FragilityService, + NetworkDataset, +) from pyincore.analyses.buildingdamage import BuildingDamage from pyincore.analyses.epfrepaircost import EpfRepairCost from pyincore.analyses.epfrestoration import EpfRestoration @@ -35,9 +42,13 @@ def run_with_base_class(): bldg_inv_id_dev = "64c7d058a62b20774f4107b5" # dev seed = 1111 - power_network_dataset = Dataset.from_data_service("64ac73694e01de3af8fd8f2b", data_service=dev_datasvc) + power_network_dataset = Dataset.from_data_service( + "64ac73694e01de3af8fd8f2b", data_service=dev_datasvc + ) power_network = NetworkDataset.from_dataset(power_network_dataset) - water_network_dataset = Dataset.from_data_service("64ad6abb4e01de3af8fe5201", data_service=dev_datasvc) + water_network_dataset = Dataset.from_data_service( + "64ad6abb4e01de3af8fe5201", data_service=dev_datasvc + ) water_network = NetworkDataset.from_dataset(water_network_dataset) water_facilities = water_network.nodes epfs = power_network.nodes @@ -67,12 +78,20 @@ def run_with_base_class(): wterfclty_mc.set_input_dataset("damage", wterfclty_dmg_result) wterfclty_mc.set_parameter("num_cpu", num_cpu) wterfclty_mc.set_parameter("num_samples", sim_number) - wterfclty_mc.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]) + wterfclty_mc.set_parameter( + "damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"] + ) wterfclty_mc.set_parameter("failure_state_keys", ["DS_1", "DS_2", "DS_3", "DS_4"]) - wterfclty_mc.set_parameter("result_name", result_name + "_wf") # name of csv file with results + wterfclty_mc.set_parameter( + "result_name", result_name + "_wf" + ) # name of csv file with results wterfclty_mc.run() - wterfclty_sample_failure_state = wterfclty_mc.get_output_dataset("sample_failure_state") - wterfclty_sample_damage_states = wterfclty_mc.get_output_dataset("sample_damage_states") + wterfclty_sample_failure_state = wterfclty_mc.get_output_dataset( + "sample_failure_state" + ) + wterfclty_sample_damage_states = wterfclty_mc.get_output_dataset( + "sample_damage_states" + ) ################################################### # water facility repair time @@ -82,7 +101,7 @@ def run_with_base_class(): mapping_set = MappingSet(restorationsvc.get_mapping("61f075ee903e515036cee0a5")) wterfclty_rest.set_input_dataset("water_facilities", water_facilities) wterfclty_rest.set_input_dataset("dfr3_mapping_set", mapping_set) - wterfclty_rest.set_input_dataset('damage', wterfclty_dmg_result) + wterfclty_rest.set_input_dataset("damage", wterfclty_dmg_result) wterfclty_rest.set_parameter("result_name", result_name + "_wf_restoration") wterfclty_rest.set_parameter("discretized_days", [1, 3, 7, 30, 90]) wterfclty_rest.set_parameter("restoration_key", "Restoration ID Code") @@ -97,9 +116,15 @@ def run_with_base_class(): ################################################### wf_repair_cost = WaterFacilityRepairCost(prod_client) wf_repair_cost.set_input_dataset("water_facilities", water_facilities) - wf_repair_cost.load_remote_input_dataset("replacement_cost", "64833bcdd3f39a26a0c8b147") - wf_repair_cost.set_input_dataset("sample_damage_states", wterfclty_sample_damage_states) - wf_repair_cost.load_remote_input_dataset("wf_dmg_ratios", "647e423d7ae18139d9758607") + wf_repair_cost.load_remote_input_dataset( + "replacement_cost", "64833bcdd3f39a26a0c8b147" + ) + wf_repair_cost.set_input_dataset( + "sample_damage_states", wterfclty_sample_damage_states + ) + wf_repair_cost.load_remote_input_dataset( + "wf_dmg_ratios", 
"647e423d7ae18139d9758607" + ) wf_repair_cost.set_parameter("result_name", result_name + "_wf_repair_cost") wf_repair_cost.set_parameter("num_cpu", 4) wf_repair_cost.run_analysis() @@ -113,10 +138,10 @@ def run_with_base_class(): epf_dmg.set_input_dataset("epfs", epfs) mapping_id = "64ac5f3ad2122d1f95f36356" # 5 DS mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - epf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) epf_dmg.set_parameter("hazard_type", hazard_type) epf_dmg.set_parameter("num_cpu", num_cpu) - epf_dmg.set_parameter('fragility_key', "pga") + epf_dmg.set_parameter("fragility_key", "pga") epf_dmg.set_parameter("hazard_id", hazard_id) epf_dmg.set_parameter("result_name", result_name + "_epf_dmg") epf_dmg.run_analysis() @@ -130,7 +155,7 @@ def run_with_base_class(): mapping_set = MappingSet(restorationsvc.get_mapping("61f302e6e3a03e465500b3eb")) epf_rest.set_input_dataset("epfs", epfs) epf_rest.set_input_dataset("dfr3_mapping_set", mapping_set) - epf_rest.set_input_dataset('damage', epf_dmg_result) + epf_rest.set_input_dataset("damage", epf_dmg_result) epf_rest.set_parameter("result_name", result_name + "_epf_restoration") epf_rest.set_parameter("discretized_days", [1, 3, 7, 30, 90]) epf_rest.set_parameter("restoration_key", "Restoration ID Code") @@ -147,9 +172,13 @@ def run_with_base_class(): epf_mc.set_input_dataset("damage", epf_dmg_result) epf_mc.set_parameter("num_cpu", num_cpu) epf_mc.set_parameter("num_samples", sim_number) - epf_mc.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]) + epf_mc.set_parameter( + "damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"] + ) epf_mc.set_parameter("failure_state_keys", ["DS_1", "DS_2", "DS_3", "DS_4"]) - epf_mc.set_parameter("result_name", result_name + "_epf") # name of csv file with results + epf_mc.set_parameter( + "result_name", result_name + "_epf" + ) # name of csv file with results epf_mc.run() epf_sample_failure_state = epf_mc.get_output_dataset("sample_failure_state") epf_sample_damage_states = epf_mc.get_output_dataset("sample_damage_states") @@ -159,9 +188,13 @@ def run_with_base_class(): ################################################### epf_repair_cost = EpfRepairCost(prod_client) epf_repair_cost.set_input_dataset("epfs", epfs) - epf_repair_cost.load_remote_input_dataset("replacement_cost", "647dff5b4dd25160127ca192") + epf_repair_cost.load_remote_input_dataset( + "replacement_cost", "647dff5b4dd25160127ca192" + ) epf_repair_cost.set_input_dataset("sample_damage_states", epf_sample_damage_states) - epf_repair_cost.load_remote_input_dataset("epf_dmg_ratios", "6483354b41181d20004efbd7") + epf_repair_cost.load_remote_input_dataset( + "epf_dmg_ratios", "6483354b41181d20004efbd7" + ) epf_repair_cost.set_parameter("result_name", result_name + "_epf_repair_cost") epf_repair_cost.set_parameter("num_cpu", 4) epf_repair_cost.run_analysis() @@ -175,9 +208,9 @@ def run_with_base_class(): pipeline_dmg.set_input_dataset("pipeline", pipeline) mapping_id = "5b47c227337d4a38464efea8" mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - pipeline_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) pipeline_dmg.set_parameter("hazard_type", hazard_type) - pipeline_dmg.set_parameter("fragility_key", 'pgv') + pipeline_dmg.set_parameter("fragility_key", "pgv") pipeline_dmg.set_parameter("num_cpu", num_cpu) 
pipeline_dmg.set_parameter("hazard_id", hazard_id) pipeline_dmg.set_parameter("result_name", result_name + "_pipeline_dmg") @@ -192,7 +225,9 @@ def run_with_base_class(): pipeline_func.set_parameter("result_name", result_name + "_pipeline") pipeline_func.set_parameter("num_samples", sim_number) pipeline_func.run_analysis() - pipeline_sample_failure_state = pipeline_func.get_output_dataset("sample_failure_state") + pipeline_sample_failure_state = pipeline_func.get_output_dataset( + "sample_failure_state" + ) ################################################### # pipeline repair time @@ -213,10 +248,16 @@ def run_with_base_class(): ################################################### pipeline_repair_cost = PipelineRepairCost(prod_client) pipeline_repair_cost.set_input_dataset("pipeline", pipeline) - pipeline_repair_cost.load_remote_input_dataset("replacement_cost", "6480a2787ae18139d975e919") + pipeline_repair_cost.load_remote_input_dataset( + "replacement_cost", "6480a2787ae18139d975e919" + ) pipeline_repair_cost.set_input_dataset("pipeline_dmg", pipeline_dmg_result) - pipeline_repair_cost.load_remote_input_dataset("pipeline_dmg_ratios", "6480a2d44dd25160127d2fcc") - pipeline_repair_cost.set_parameter("result_name", result_name + "_pipeline_repair_cost") + pipeline_repair_cost.load_remote_input_dataset( + "pipeline_dmg_ratios", "6480a2d44dd25160127d2fcc" + ) + pipeline_repair_cost.set_parameter( + "result_name", result_name + "_pipeline_repair_cost" + ) pipeline_repair_cost.set_parameter("num_cpu", 4) pipeline_repair_cost.run_analysis() pipeline_repair_cost_result = pipeline_repair_cost.get_output_dataset("result") @@ -230,7 +271,7 @@ def run_with_base_class(): mapping_id = "5e99c86d6129af000136defa" # 4 DS dev # mapping_id = "5d2789dbb9219c3c553c7977" # 4 DS prod mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) bldg_dmg.set_parameter("hazard_type", hazard_type) bldg_dmg.set_parameter("num_cpu", 4) bldg_dmg.set_parameter("hazard_id", hazard_id_dev) @@ -277,7 +318,9 @@ def run_with_base_class(): indp_analysis.set_parameter("return_model", "step_function") indp_analysis.set_parameter("testbed_name", "seaside") indp_analysis.set_parameter("extra_commodity", {1: ["PW"], 3: []}) - indp_analysis.set_parameter("RC", [{"budget": 240000, "time": 700}, {"budget": 300000, "time": 600}]) + indp_analysis.set_parameter( + "RC", [{"budget": 240000, "time": 700}, {"budget": 300000, "time": 600}] + ) indp_analysis.set_parameter("layers", [1, 3]) indp_analysis.set_parameter("method", "INDP") # indp_analysis.set_parameter("method", "TDINDP") @@ -301,24 +344,40 @@ def run_with_base_class(): # gurobi # indp_analysis.set_parameter("solver_engine", "gurobi") - indp_analysis.set_parameter("solver_time_limit", 3600) # if not set default to never timeout + indp_analysis.set_parameter( + "solver_time_limit", 3600 + ) # if not set default to never timeout indp_analysis.set_input_dataset("wf_restoration_time", wf_restoration_time) indp_analysis.set_input_dataset("wf_repair_cost", wf_repair_cost_result) indp_analysis.set_input_dataset("epf_restoration_time", epf_restoration_time) indp_analysis.set_input_dataset("epf_repair_cost", epf_repair_cost_result) - indp_analysis.set_input_dataset("pipeline_restoration_time", pipeline_restoration_time) + indp_analysis.set_input_dataset( + "pipeline_restoration_time", pipeline_restoration_time + ) 
indp_analysis.set_input_dataset("pipeline_repair_cost", pipeline_repair_cost_result) indp_analysis.set_input_dataset("power_network", power_network_dataset) - indp_analysis.set_input_dataset("water_network", water_network_dataset) # with distribution noes - indp_analysis.load_remote_input_dataset("powerline_supply_demand_info", "64ad8b434e01de3af8fea0ba") - indp_analysis.load_remote_input_dataset("epf_supply_demand_info", "64ad9ea54e01de3af8fea0f2") - indp_analysis.load_remote_input_dataset("wf_supply_demand_info", "64ad9e704e01de3af8fea0ec") - indp_analysis.load_remote_input_dataset("pipeline_supply_demand_info", "64ad9e274e01de3af8fea0e5") + indp_analysis.set_input_dataset( + "water_network", water_network_dataset + ) # with distribution noes + indp_analysis.load_remote_input_dataset( + "powerline_supply_demand_info", "64ad8b434e01de3af8fea0ba" + ) + indp_analysis.load_remote_input_dataset( + "epf_supply_demand_info", "64ad9ea54e01de3af8fea0f2" + ) + indp_analysis.load_remote_input_dataset( + "wf_supply_demand_info", "64ad9e704e01de3af8fea0ec" + ) + indp_analysis.load_remote_input_dataset( + "pipeline_supply_demand_info", "64ad9e274e01de3af8fea0e5" + ) indp_analysis.load_remote_input_dataset("interdep", "61c10104837ac508f9a178ef") indp_analysis.set_input_dataset("wf_failure_state", wterfclty_sample_failure_state) indp_analysis.set_input_dataset("wf_damage_state", wterfclty_sample_damage_states) - indp_analysis.set_input_dataset("pipeline_failure_state", pipeline_sample_failure_state) + indp_analysis.set_input_dataset( + "pipeline_failure_state", pipeline_sample_failure_state + ) indp_analysis.set_input_dataset("epf_failure_state", epf_sample_failure_state) indp_analysis.set_input_dataset("epf_damage_state", epf_sample_damage_states) indp_analysis.set_input_dataset("pop_dislocation", pop_dislocation_result) diff --git a/tests/pyincore/analyses/joplinempiricalbuildingrestoration/test_joplinempiricalbuildingrestoration.py b/tests/pyincore/analyses/joplinempiricalbuildingrestoration/test_joplinempiricalbuildingrestoration.py index c84799e2b..505e5c31b 100644 --- a/tests/pyincore/analyses/joplinempiricalbuildingrestoration/test_joplinempiricalbuildingrestoration.py +++ b/tests/pyincore/analyses/joplinempiricalbuildingrestoration/test_joplinempiricalbuildingrestoration.py @@ -2,7 +2,9 @@ from pyincore import IncoreClient, FragilityService, MappingSet from pyincore.analyses.buildingdamage import BuildingDamage -from pyincore.analyses.joplinempiricalbuildingrestoration import JoplinEmpiricalBuildingRestoration +from pyincore.analyses.joplinempiricalbuildingrestoration import ( + JoplinEmpiricalBuildingRestoration, +) import pyincore.globals as pyglobals @@ -46,7 +48,9 @@ def run_with_base_class(): restoration.load_remote_input_dataset("buildings", bldg_dataset_id) # restoration.load_remote_input_dataset("building_dmg", building_dmg_result) restoration.set_input_dataset("building_dmg", building_dmg_result) - restoration.load_remote_input_dataset("building_functionality_level", building_fl_id) + restoration.load_remote_input_dataset( + "building_functionality_level", building_fl_id + ) result_name = "Joplin_empirical_restoration_result" restoration.set_parameter("result_name", result_name) @@ -57,5 +61,5 @@ def run_with_base_class(): restoration.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/meandamage/test_meandamage_bridge.py b/tests/pyincore/analyses/meandamage/test_meandamage_bridge.py index d0abe10d0..f3dc152f2 
100644 --- a/tests/pyincore/analyses/meandamage/test_meandamage_bridge.py +++ b/tests/pyincore/analyses/meandamage/test_meandamage_bridge.py @@ -12,13 +12,12 @@ def run_with_base_class(): md.load_remote_input_dataset("damage", "61044165ca3e973ce13c0526") md.load_remote_input_dataset("dmg_ratios", "5a284f2cc7d30d13bc081f96") md.set_parameter("result_name", "mean_damage_bridge") - md.set_parameter("damage_interval_keys", - ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]) + md.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]) md.set_parameter("num_cpu", 1) # Run analysis md.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/meandamage/test_meandamage_building.py b/tests/pyincore/analyses/meandamage/test_meandamage_building.py index d28598ed9..a2bedcda0 100644 --- a/tests/pyincore/analyses/meandamage/test_meandamage_building.py +++ b/tests/pyincore/analyses/meandamage/test_meandamage_building.py @@ -11,13 +11,12 @@ def run_with_base_class(): md.load_remote_input_dataset("damage", "61044192ca3e973ce13c054a") md.load_remote_input_dataset("dmg_ratios", "5a284f2ec7d30d13bc08209a") md.set_parameter("result_name", "mean_damage_building") - md.set_parameter("damage_interval_keys", - ["DS_0", "DS_1", "DS_2", "DS_3"]) + md.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3"]) md.set_parameter("num_cpu", 1) # Run analysis md.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/mlenabledcgeslc/test_mlcgeslc.py b/tests/pyincore/analyses/mlenabledcgeslc/test_mlcgeslc.py index 9f90de2c9..8176ecf0b 100644 --- a/tests/pyincore/analyses/mlenabledcgeslc/test_mlcgeslc.py +++ b/tests/pyincore/analyses/mlenabledcgeslc/test_mlcgeslc.py @@ -11,4 +11,4 @@ # optional mlcgeslc.set_parameter("result_name", "slc_7_region") -mlcgeslc.run_analysis() \ No newline at end of file +mlcgeslc.run_analysis() diff --git a/tests/pyincore/analyses/montecarlofailureprobability/test_montecarlofailureprobability.py b/tests/pyincore/analyses/montecarlofailureprobability/test_montecarlofailureprobability.py index ea80764f3..b1463103e 100644 --- a/tests/pyincore/analyses/montecarlofailureprobability/test_montecarlofailureprobability.py +++ b/tests/pyincore/analyses/montecarlofailureprobability/test_montecarlofailureprobability.py @@ -1,6 +1,5 @@ from pyincore.client import IncoreClient -from pyincore.analyses.montecarlofailureprobability import \ - MonteCarloFailureProbability +from pyincore.analyses.montecarlofailureprobability import MonteCarloFailureProbability import pyincore.globals as pyglobals @@ -16,8 +15,7 @@ def run_with_base_class(): mc.set_parameter("result_name", "building_damage") mc.set_parameter("num_cpu", 8) mc.set_parameter("num_samples", 10) - mc.set_parameter("damage_interval_keys", - ["DS_0", "DS_1", "DS_2", "DS_3"]) + mc.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3"]) mc.set_parameter("failure_state_keys", ["DS_1", "DS_2", "DS_3"]) # optional parameter @@ -26,5 +24,5 @@ def run_with_base_class(): mc.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py b/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py index 9828ce8c3..70b182670 100644 --- 
a/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py +++ b/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py @@ -1,6 +1,7 @@ from pyincore.client import IncoreClient -from pyincore.analyses.montecarlolimitstateprobability import \ - MonteCarloLimitStateProbability +from pyincore.analyses.montecarlolimitstateprobability import ( + MonteCarloLimitStateProbability, +) import pyincore.globals as pyglobals @@ -16,8 +17,7 @@ def run_with_base_class(): mc.set_parameter("result_name", "building_damage") mc.set_parameter("num_cpu", 8) mc.set_parameter("num_samples", 10) - mc.set_parameter("damage_interval_keys", - ["DS_0", "DS_1", "DS_2", "DS_3"]) + mc.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3"]) mc.set_parameter("failure_state_keys", ["DS_1", "DS_2", "DS_3"]) # optional parameter @@ -26,5 +26,5 @@ def run_with_base_class(): mc.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/multiobjectiveretrofitoptimization/test_multiobjectiveretrofitoptimization.py b/tests/pyincore/analyses/multiobjectiveretrofitoptimization/test_multiobjectiveretrofitoptimization.py index 19cd42c30..a6c857eb4 100644 --- a/tests/pyincore/analyses/multiobjectiveretrofitoptimization/test_multiobjectiveretrofitoptimization.py +++ b/tests/pyincore/analyses/multiobjectiveretrofitoptimization/test_multiobjectiveretrofitoptimization.py @@ -1,5 +1,7 @@ from pyincore import IncoreClient -from pyincore.analyses.multiobjectiveretrofitoptimization import MultiObjectiveRetrofitOptimization +from pyincore.analyses.multiobjectiveretrofitoptimization import ( + MultiObjectiveRetrofitOptimization, +) import pyincore.globals as pyglobals @@ -15,11 +17,15 @@ def run_base_analysis(): retrofit_optimization.set_parameter("max_budget", "default") retrofit_optimization.set_parameter("scale_data", False) - retrofit_optimization.load_remote_input_dataset("building_related_data", building_related_data) - retrofit_optimization.load_remote_input_dataset("strategy_costs_data", strategy_costs_data) + retrofit_optimization.load_remote_input_dataset( + "building_related_data", building_related_data + ) + retrofit_optimization.load_remote_input_dataset( + "strategy_costs_data", strategy_costs_data + ) retrofit_optimization.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_base_analysis() diff --git a/tests/pyincore/analyses/ncifunctionality/test_ncifunctionality.py b/tests/pyincore/analyses/ncifunctionality/test_ncifunctionality.py index d54e3ed21..50d29ed20 100644 --- a/tests/pyincore/analyses/ncifunctionality/test_ncifunctionality.py +++ b/tests/pyincore/analyses/ncifunctionality/test_ncifunctionality.py @@ -1,11 +1,15 @@ -from pyincore import IncoreClient, Dataset, FragilityService, MappingSet, RestorationService +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + RestorationService, +) from pyincore.analyses.epfdamage.epfdamage import EpfDamage from pyincore.analyses.epfrestoration import EpfRestoration from pyincore.analyses.waterfacilitydamage import WaterFacilityDamage from pyincore.analyses.waterfacilityrestoration import WaterFacilityRestoration from pyincore.analyses.montecarlofailureprobability import MonteCarloFailureProbability from pyincore.analyses.ncifunctionality import NciFunctionality -import pyincore.globals as pyglobals def run_with_base_class(): @@ -52,7 +56,7 @@ def run_with_base_class(): 
epn_sub_dmg.run_analysis() - substation_dmg_result = epn_sub_dmg.get_output_dataset('result') + substation_dmg_result = epn_sub_dmg.get_output_dataset("result") # EPF substation functionality analysis print("Electric power facility MonteCarlo failure analysis...") @@ -62,12 +66,14 @@ def run_with_base_class(): mc_sub.set_input_dataset("damage", substation_dmg_result) mc_sub.set_parameter("num_cpu", 16) mc_sub.set_parameter("num_samples", num_samples) - mc_sub.set_parameter("damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"]) + mc_sub.set_parameter( + "damage_interval_keys", ["DS_0", "DS_1", "DS_2", "DS_3", "DS_4"] + ) mc_sub.set_parameter("failure_state_keys", ["DS_3", "DS_4"]) mc_sub.set_parameter("result_name", result_name) # name of csv file with results mc_sub.run_analysis() - epf_subst_failure_results = mc_sub.get_output_dataset('failure_probability') + epf_subst_failure_results = mc_sub.get_output_dataset("failure_probability") # EPF restoration analysis print("Electric power facility restoration analysis...") @@ -75,7 +81,7 @@ def run_with_base_class(): restorationsvc = RestorationService(client) mapping_set = MappingSet(restorationsvc.get_mapping(epf_restoration_mapping_id)) epf_rest.load_remote_input_dataset("epfs", epf_dataset) - epf_rest.set_input_dataset('dfr3_mapping_set', mapping_set) + epf_rest.set_input_dataset("dfr3_mapping_set", mapping_set) epf_rest.set_input_dataset("damage", substation_dmg_result) result_name = "4_MMSA_epf_restoration_result" epf_rest.set_parameter("result_name", result_name) @@ -110,9 +116,13 @@ def run_with_base_class(): # WDS restoration print("Water facility restoration analysis...") wf_rest = WaterFacilityRestoration(client) - mapping_set = MappingSet(restorationsvc.get_mapping(wds_restoration_mapping_id)) # new format of mapping - wf_rest.load_remote_input_dataset("water_facilities", "5a284f2ac7d30d13bc081e52") # water facility - wf_rest.set_input_dataset('dfr3_mapping_set', mapping_set) + mapping_set = MappingSet( + restorationsvc.get_mapping(wds_restoration_mapping_id) + ) # new format of mapping + wf_rest.load_remote_input_dataset( + "water_facilities", "5a284f2ac7d30d13bc081e52" + ) # water facility + wf_rest.set_input_dataset("dfr3_mapping_set", mapping_set) wf_rest.set_input_dataset("damage", wds_dmg_results) wf_rest.set_parameter("result_name", "wf_restoration") wf_rest.set_parameter("restoration_key", "Restoration ID Code") @@ -142,5 +152,5 @@ def run_with_base_class(): nic_func.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py b/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py index 8297309d3..c4bbf0944 100644 --- a/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py +++ b/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py @@ -20,14 +20,20 @@ def run_with_base_class(): mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) non_structural_building_dmg_flood = NonStructBuildingDamage(client) - non_structural_building_dmg_flood.load_remote_input_dataset("buildings", bldg_dataset_id) - non_structural_building_dmg_flood.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_flood.set_parameter("result_name", "non_structural_building_dmg_result_flood") + non_structural_building_dmg_flood.load_remote_input_dataset( + "buildings", bldg_dataset_id + ) 
+ non_structural_building_dmg_flood.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_flood.set_parameter( + "result_name", "non_structural_building_dmg_result_flood" + ) non_structural_building_dmg_flood.set_input_hazard("hazard", flood) non_structural_building_dmg_flood.set_parameter("num_cpu", 4) - non_structural_building_dmg_flood.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") + non_structural_building_dmg_flood.set_parameter( + "fragility_key", "Lumberton Flood Building Fragility ID Code" + ) non_structural_building_dmg_flood.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py index 637698d97..361bd7c48 100644 --- a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py +++ b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py @@ -1,5 +1,8 @@ from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage, NonStructBuildingUtil +from pyincore.analyses.nonstructbuildingdamage import ( + NonStructBuildingDamage, + NonStructBuildingUtil, +) import pyincore.globals as pyglobals @@ -25,30 +28,46 @@ def run_with_base_class(): # Acceleration sensitive non_structural_building_dmg_as = NonStructBuildingDamage(client) - non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_as") + non_structural_building_dmg_as.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_as.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_as.set_parameter( + "result_name", "non_structural_building_dmg_result_as" + ) non_structural_building_dmg_as.set_parameter("hazard_type", hazard_type) non_structural_building_dmg_as.set_parameter("hazard_id", hazard_id) non_structural_building_dmg_as.set_parameter("num_cpu", 4) - non_structural_building_dmg_as.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter( + "fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS + ) non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_as.run_analysis() # Drift sensitive non_structural_building_dmg_ds = NonStructBuildingDamage(client) - non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_ds") + non_structural_building_dmg_ds.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_ds.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_ds.set_parameter( + "result_name", "non_structural_building_dmg_result_ds" + ) 
non_structural_building_dmg_ds.set_parameter("hazard_type", hazard_type) non_structural_building_dmg_ds.set_parameter("hazard_id", hazard_id) non_structural_building_dmg_ds.set_parameter("num_cpu", 4) - non_structural_building_dmg_ds.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter( + "fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS + ) non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_ds.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py index dcc5107a4..a88efa36c 100644 --- a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py @@ -1,5 +1,14 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake -from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage, NonStructBuildingUtil +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, +) +from pyincore.analyses.nonstructbuildingdamage import ( + NonStructBuildingDamage, + NonStructBuildingUtil, +) import pyincore.globals as pyglobals @@ -8,7 +17,9 @@ def run_with_base_class(): # Memphis 7.9 AB-95 hazard_service = HazardService(client) - earthquake = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service) + earthquake = Earthquake.from_hazard_service( + "5b902cb273c3371e1236b36b", hazard_service + ) # Shelby County Essential Facilities building_dataset_id = "5a284f42c7d30d13bc0821ba" @@ -26,28 +37,44 @@ def run_with_base_class(): # Acceleration sensitive non_structural_building_dmg_as = NonStructBuildingDamage(client) - non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_as.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_as.set_input_dataset("dfr3_mapping_set", mapping_set) non_structural_building_dmg_as.set_input_hazard("hazard", earthquake) - non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_as") + non_structural_building_dmg_as.set_parameter( + "result_name", "non_structural_building_dmg_result_w_hazard_obj_as" + ) non_structural_building_dmg_as.set_parameter("num_cpu", 4) - non_structural_building_dmg_as.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter( + "fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS + ) non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_as.run_analysis() # Drift sensitive 
non_structural_building_dmg_ds = NonStructBuildingDamage(client) - non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) - non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) - non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_ds") + non_structural_building_dmg_ds.load_remote_input_dataset( + "buildings", building_dataset_id + ) + non_structural_building_dmg_ds.set_input_dataset("dfr3_mapping_set", mapping_set) + non_structural_building_dmg_ds.set_parameter( + "result_name", "non_structural_building_dmg_result_w_hazard_obj_ds" + ) non_structural_building_dmg_ds.set_input_hazard("hazard", earthquake) non_structural_building_dmg_ds.set_parameter("num_cpu", 4) - non_structural_building_dmg_ds.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter( + "fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS + ) non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.set_parameter( + "liq_geology_dataset_id", liq_geology_dataset_id + ) non_structural_building_dmg_ds.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage.py b/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage.py index b69d2b5ef..c15bfe186 100644 --- a/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage.py +++ b/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage.py @@ -13,25 +13,26 @@ def run_with_base_class(): pipeline_dmg = PipelineDamage(client) # test tsunami pipeline - pipeline_dmg.load_remote_input_dataset("pipeline", - "5ef1171b2367ff111d082f0c") + pipeline_dmg.load_remote_input_dataset("pipeline", "5ef1171b2367ff111d082f0c") # Load fragility mapping fragility_service = FragilityService(client) - mapping_set = MappingSet(fragility_service.get_mapping("60b124e01f2b7d4a916ba456")) # new format fragility curves + mapping_set = MappingSet( + fragility_service.get_mapping("60b124e01f2b7d4a916ba456") + ) # new format fragility curves # mapping_set = MappingSet(fragility_service.get_mapping("5ef11888da15730b13b84353")) # legacy fragility curves - pipeline_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) - pipeline_dmg.set_parameter("result_name", - "seaside_tsunami_pipeline_result") + pipeline_dmg.set_parameter("result_name", "seaside_tsunami_pipeline_result") pipeline_dmg.set_parameter("hazard_type", "tsunami") - pipeline_dmg.set_parameter("fragility_key", - "Non-Retrofit inundationDepth Fragility ID Code") + pipeline_dmg.set_parameter( + "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code" + ) pipeline_dmg.set_parameter("hazard_id", "5bc9eaf7f7b08533c7e610e1") pipeline_dmg.set_parameter("num_cpu", 4) # Run pipeline damage analysis - result = pipeline_dmg.run_analysis() + _ = pipeline_dmg.run_analysis() if __name__ == "__main__": diff --git a/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage_w_hazard_obj.py b/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage_w_hazard_obj.py index bd0e8343c..79600cfb4 100644 --- a/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage_w_hazard_obj.py +++ 
b/tests/pyincore/analyses/pipelinedamage/test_pipelinedamage_w_hazard_obj.py @@ -16,25 +16,28 @@ def run_with_base_class(): pipeline_dmg = PipelineDamage(client) # test tsunami pipeline - pipeline_dmg.load_remote_input_dataset("pipeline", - "5ef1171b2367ff111d082f0c") + pipeline_dmg.load_remote_input_dataset("pipeline", "5ef1171b2367ff111d082f0c") # Load fragility mapping fragility_service = FragilityService(client) - mapping_set = MappingSet(fragility_service.get_mapping("60b124e01f2b7d4a916ba456")) # new format fragility curves + mapping_set = MappingSet( + fragility_service.get_mapping("60b124e01f2b7d4a916ba456") + ) # new format fragility curves # mapping_set = MappingSet(fragility_service.get_mapping("5ef11888da15730b13b84353")) # legacy fragility curves - pipeline_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + pipeline_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) - pipeline_dmg.set_input_hazard('hazard', tsunami) + pipeline_dmg.set_input_hazard("hazard", tsunami) - pipeline_dmg.set_parameter("result_name", - "seaside_tsunami_pipeline_result_w_hazard_obj") - pipeline_dmg.set_parameter("fragility_key", - "Non-Retrofit inundationDepth Fragility ID Code") + pipeline_dmg.set_parameter( + "result_name", "seaside_tsunami_pipeline_result_w_hazard_obj" + ) + pipeline_dmg.set_parameter( + "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code" + ) pipeline_dmg.set_parameter("num_cpu", 4) # Run pipeline damage analysis - result = pipeline_dmg.run_analysis() + _ = pipeline_dmg.run_analysis() if __name__ == "__main__": diff --git a/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate.py b/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate.py index d807f18c4..44fbfd43a 100644 --- a/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate.py +++ b/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate.py @@ -30,7 +30,7 @@ def test_pipeline_dmg_w_repair_rate(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - test_pipeline_dmg_w_rr.set_input_dataset('dfr3_mapping_set', mapping_set) + test_pipeline_dmg_w_rr.set_input_dataset("dfr3_mapping_set", mapping_set) # Specify the result name result_name = "pipeline_result" # Set analysis parameters @@ -39,10 +39,13 @@ def test_pipeline_dmg_w_repair_rate(): test_pipeline_dmg_w_rr.set_parameter("hazard_id", hazard_id) test_pipeline_dmg_w_rr.set_parameter("liquefaction_fragility_key", "pgd") # test_pipeline_dmg_w_rr.set_parameter("use_liquefaction", False) - test_pipeline_dmg_w_rr.set_parameter("use_liquefaction", True) # toggle on and off to see liquefaction + test_pipeline_dmg_w_rr.set_parameter( + "use_liquefaction", True + ) # toggle on and off to see liquefaction test_pipeline_dmg_w_rr.set_parameter("num_cpu", 4) - test_pipeline_dmg_w_rr.set_parameter("liquefaction_geology_dataset_id", - liq_geology_dataset_id) + test_pipeline_dmg_w_rr.set_parameter( + "liquefaction_geology_dataset_id", liq_geology_dataset_id + ) # Run pipeline damage analysis result = test_pipeline_dmg_w_rr.run_analysis() diff --git a/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate_w_hazard_obj.py b/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate_w_hazard_obj.py index ebe09b0bf..05c83ccbe 100644 --- a/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate_w_hazard_obj.py +++ 
b/tests/pyincore/analyses/pipelinedamagerepairrate/test_pipelinedamagerepairrate_w_hazard_obj.py @@ -3,7 +3,13 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, +) from pyincore.analyses.pipelinedamagerepairrate import PipelineDamageRepairRate import pyincore.globals as pyglobals @@ -18,7 +24,9 @@ def test_pipeline_dmg_w_repair_rate(): # New madrid earthquake using Atkinson Boore 1995 hazard_service = HazardService(client) - earthquake = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service) + earthquake = Earthquake.from_hazard_service( + "5b902cb273c3371e1236b36b", hazard_service + ) # Geology dataset liq_geology_dataset_id = "5a284f53c7d30d13bc08249c" @@ -30,19 +38,22 @@ def test_pipeline_dmg_w_repair_rate(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - test_pipeline_dmg_w_rr.set_input_dataset('dfr3_mapping_set', mapping_set) + test_pipeline_dmg_w_rr.set_input_dataset("dfr3_mapping_set", mapping_set) # set hazard - test_pipeline_dmg_w_rr.set_input_hazard('hazard', earthquake) + test_pipeline_dmg_w_rr.set_input_hazard("hazard", earthquake) # Specify the result name result_name = "pipeline_result_w_hazard_obj" # Set analysis parameters test_pipeline_dmg_w_rr.set_parameter("result_name", result_name) test_pipeline_dmg_w_rr.set_parameter("liquefaction_fragility_key", "pgd") # test_pipeline_dmg_w_rr.set_parameter("use_liquefaction", False) - test_pipeline_dmg_w_rr.set_parameter("use_liquefaction", True) # toggle on and off to see liquefaction + test_pipeline_dmg_w_rr.set_parameter( + "use_liquefaction", True + ) # toggle on and off to see liquefaction test_pipeline_dmg_w_rr.set_parameter("num_cpu", 4) - test_pipeline_dmg_w_rr.set_parameter("liquefaction_geology_dataset_id", - liq_geology_dataset_id) + test_pipeline_dmg_w_rr.set_parameter( + "liquefaction_geology_dataset_id", liq_geology_dataset_id + ) # Run pipeline damage analysis result = test_pipeline_dmg_w_rr.run_analysis() diff --git a/tests/pyincore/analyses/pipelinefunctionality/test_pipelinefunctionality.py b/tests/pyincore/analyses/pipelinefunctionality/test_pipelinefunctionality.py index a7ca3f59c..5afa535eb 100644 --- a/tests/pyincore/analyses/pipelinefunctionality/test_pipelinefunctionality.py +++ b/tests/pyincore/analyses/pipelinefunctionality/test_pipelinefunctionality.py @@ -15,7 +15,9 @@ def test_pipeline_functionality(): pipline_func = PipelineFunctionality(client) # Load input datasets - pipline_func.load_remote_input_dataset("pipeline_repair_rate_damage", "62cdec9c861e370172c8da77") + pipline_func.load_remote_input_dataset( + "pipeline_repair_rate_damage", "62cdec9c861e370172c8da77" + ) # Load fragility mapping # Set analysis parameters diff --git a/tests/pyincore/analyses/pipelinerepaircost/test_pipelinerepaircost.py b/tests/pyincore/analyses/pipelinerepaircost/test_pipelinerepaircost.py index 156844822..d89725abb 100644 --- a/tests/pyincore/analyses/pipelinerepaircost/test_pipelinerepaircost.py +++ b/tests/pyincore/analyses/pipelinerepaircost/test_pipelinerepaircost.py @@ -14,7 +14,9 @@ def run_with_base_class(): # Seaside pipeline pipeline_repair_cost.load_remote_input_dataset("pipeline", pipeline_id) # dev - pipeline_repair_cost.load_remote_input_dataset("replacement_cost", 
"647782c95bc8b26ddfa11c2f") + pipeline_repair_cost.load_remote_input_dataset( + "replacement_cost", "647782c95bc8b26ddfa11c2f" + ) # can be chained with pipeline repair rate damage test_pipeline_dmg_w_rr = PipelineDamageRepairRate(client) @@ -23,12 +25,14 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping("5b47c227337d4a38464efea8")) - test_pipeline_dmg_w_rr.set_input_dataset('dfr3_mapping_set', mapping_set) + test_pipeline_dmg_w_rr.set_input_dataset("dfr3_mapping_set", mapping_set) test_pipeline_dmg_w_rr.set_parameter("result_name", "seaside_eq_pipeline_result") test_pipeline_dmg_w_rr.set_parameter("fragility_key", "pgv") test_pipeline_dmg_w_rr.set_parameter("hazard_type", "earthquake") - test_pipeline_dmg_w_rr.set_parameter("hazard_id", "5ba8f2b9ec2309043520906e") # seaside probability 5000 yr + test_pipeline_dmg_w_rr.set_parameter( + "hazard_id", "5ba8f2b9ec2309043520906e" + ) # seaside probability 5000 yr test_pipeline_dmg_w_rr.set_parameter("num_cpu", 4) test_pipeline_dmg_w_rr.run_analysis() pipeline_dmg = test_pipeline_dmg_w_rr.get_output_dataset("result") @@ -37,7 +41,9 @@ def run_with_base_class(): pipeline_repair_cost.set_input_dataset("pipeline_dmg", pipeline_dmg) # pipeline damage ratio - pipeline_repair_cost.load_remote_input_dataset("pipeline_dmg_ratios", "647783ad5bc8b26ddfa11c5f") + pipeline_repair_cost.load_remote_input_dataset( + "pipeline_dmg_ratios", "647783ad5bc8b26ddfa11c5f" + ) pipeline_repair_cost.set_parameter("result_name", "seaside_pipeline") pipeline_repair_cost.set_parameter("num_cpu", 4) diff --git a/tests/pyincore/analyses/pipelinerestoration/test_pipelinerestoration.py b/tests/pyincore/analyses/pipelinerestoration/test_pipelinerestoration.py index a317c55e3..7492ebf32 100644 --- a/tests/pyincore/analyses/pipelinerestoration/test_pipelinerestoration.py +++ b/tests/pyincore/analyses/pipelinerestoration/test_pipelinerestoration.py @@ -14,13 +14,19 @@ def run_with_base_class(): pipeline_restoration = PipelineRestoration(client) # shelby county pipelines - pipeline_restoration.load_remote_input_dataset("pipeline", "5a284f28c7d30d13bc081d14") - pipeline_restoration.load_remote_input_dataset("pipeline_damage", "61f36023c53b3620b6b614c6") + pipeline_restoration.load_remote_input_dataset( + "pipeline", "5a284f28c7d30d13bc081d14" + ) + pipeline_restoration.load_remote_input_dataset( + "pipeline_damage", "61f36023c53b3620b6b614c6" + ) # Load fragility mapping restoration_service = RestorationService(client) - mapping_set = MappingSet(restoration_service.get_mapping("61f35f09903e515036cee106")) - pipeline_restoration.set_input_dataset('dfr3_mapping_set', mapping_set) + mapping_set = MappingSet( + restoration_service.get_mapping("61f35f09903e515036cee106") + ) + pipeline_restoration.set_input_dataset("dfr3_mapping_set", mapping_set) pipeline_restoration.set_parameter("result_name", "pipeline_restoration_times") diff --git a/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py b/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py index f879fcf3d..c2c7fa522 100644 --- a/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py +++ b/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py @@ -2,7 +2,7 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore import IncoreClient, 
FragilityService, MappingSet +from pyincore import IncoreClient from pyincore.analyses.populationdislocation import PopulationDislocation import pyincore.globals as pyglobals @@ -40,5 +40,5 @@ def run_with_base_class(): return True -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/residentialbuildingrecovery/test_residentialbuildingrecovery.py b/tests/pyincore/analyses/residentialbuildingrecovery/test_residentialbuildingrecovery.py index ae4805af9..5a0ac2379 100644 --- a/tests/pyincore/analyses/residentialbuildingrecovery/test_residentialbuildingrecovery.py +++ b/tests/pyincore/analyses/residentialbuildingrecovery/test_residentialbuildingrecovery.py @@ -3,7 +3,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ from pyincore import IncoreClient, RepairService, MappingSet -from pyincore.analyses.residentialbuildingrecovery.residentialbuildingrecovery import ResidentialBuildingRecovery +from pyincore.analyses.residentialbuildingrecovery.residentialbuildingrecovery import ( + ResidentialBuildingRecovery, +) import pyincore.globals as pyglobals @@ -11,10 +13,14 @@ def run_with_base_class(): client = IncoreClient(pyglobals.INCORE_API_DEV_URL) # Joplin - buildings = "5df7d0de425e0b00092d0082" # joplin ergo:buildingInventoryVer6 28k buildings + buildings = ( + "5df7d0de425e0b00092d0082" # joplin ergo:buildingInventoryVer6 28k buildings + ) # sample_damage_states = "6112d9ccca3e973ce144b4d9" # 500 samples 28k buildings - MCS output format - sample_damage_states = "60f883c059a8cc52bab4dd77" # 10 samples 28k buildings - MCS output format + sample_damage_states = ( + "60f883c059a8cc52bab4dd77" # 10 samples 28k buildings - MCS output format + ) socio_demographic_data = "60dbd77602897f12fcd449c3" financial_resources = "60dbd64702897f12fcd448f5" delay_factors = "60eca71302897f12fcd70843" # DS_0, etc. 
@@ -29,10 +35,12 @@ def run_with_base_class(): mapping_id = "60edfa3efc0f3a7af53a21b5" repair_service = RepairService(client) mapping_set = MappingSet(repair_service.get_mapping(mapping_id)) - res_recovery.set_input_dataset('dfr3_mapping_set', mapping_set) + res_recovery.set_input_dataset("dfr3_mapping_set", mapping_set) res_recovery.load_remote_input_dataset("sample_damage_states", sample_damage_states) - res_recovery.load_remote_input_dataset("socio_demographic_data", socio_demographic_data) + res_recovery.load_remote_input_dataset( + "socio_demographic_data", socio_demographic_data + ) res_recovery.load_remote_input_dataset("financial_resources", financial_resources) res_recovery.load_remote_input_dataset("delay_factors", delay_factors) @@ -44,5 +52,5 @@ def run_with_base_class(): return True -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/roaddamage/test_roaddamage.py b/tests/pyincore/analyses/roaddamage/test_roaddamage.py index 810b0001a..881b99862 100644 --- a/tests/pyincore/analyses/roaddamage/test_roaddamage.py +++ b/tests/pyincore/analyses/roaddamage/test_roaddamage.py @@ -13,7 +13,7 @@ def run_with_base_class(): # hazard_type = "earthquake" liq_geology_dataset_id = None - if hazard_type == 'earthquake': + if hazard_type == "earthquake": # Seaside Earthquake hazard_id = "5ba8f379ec2309043520906f" @@ -22,7 +22,7 @@ def run_with_base_class(): fragility_key = "pgd" liquefaction = False - elif hazard_type == 'tsunami': + elif hazard_type == "tsunami": # Seaside Tsunami hazard_id = "5bc9eaf7f7b08533c7e610e1" @@ -32,7 +32,9 @@ def run_with_base_class(): fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" liquefaction = False else: - raise ValueError("Earthquake and tsunami are the only testable hazards with road damage currently") + raise ValueError( + "Earthquake and tsunami are the only testable hazards with road damage currently" + ) uncertainty = False @@ -43,7 +45,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + road_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) road_dmg.set_parameter("result_name", "seaside_road_dmg_" + hazard_type) road_dmg.set_parameter("hazard_type", hazard_type) @@ -53,7 +55,9 @@ def run_with_base_class(): road_dmg.set_parameter("num_cpu", 1) road_dmg.set_parameter("use_liquefaction", liquefaction) if liquefaction and liq_geology_dataset_id is not None: - road_dmg.set_parameter("liquefaction_geology_dataset_id", liq_geology_dataset_id) + road_dmg.set_parameter( + "liquefaction_geology_dataset_id", liq_geology_dataset_id + ) road_dmg.set_parameter("use_hazard_uncertainty", uncertainty) # Run Analysis @@ -77,13 +81,15 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - hurr_road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + hurr_road_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) # Specify the result name result_name = "galveston_hurricane_road_result" # Set analysis parameters hurr_road_dmg.set_parameter("result_name", result_name) hurr_road_dmg.set_parameter("hazard_type", hazard_type) - hurr_road_dmg.set_parameter("fragility_key", "Non-Retrofit inundationDepth Fragility ID Code") + hurr_road_dmg.set_parameter( + "fragility_key", "Non-Retrofit 
inundationDepth Fragility ID Code" + ) hurr_road_dmg.set_parameter("hazard_id", hazard_id) hurr_road_dmg.set_parameter("num_cpu", 4) @@ -91,5 +97,5 @@ def run_with_base_class(): hurr_road_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py b/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py index efb14356d..6f2c7a861 100755 --- a/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py @@ -1,4 +1,12 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake, Tsunami, Hurricane +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, + Tsunami, + Hurricane, +) from pyincore.analyses.roaddamage import RoadDamage import pyincore.globals as pyglobals @@ -12,7 +20,9 @@ def run_with_base_class(): hazard_service = HazardService(client) liq_geology_dataset_id = None - earthquake = Earthquake.from_hazard_service("5ba8f379ec2309043520906f", hazard_service) + earthquake = Earthquake.from_hazard_service( + "5ba8f379ec2309043520906f", hazard_service + ) tsunami = Tsunami.from_hazard_service("5bc9eaf7f7b08533c7e610e1", hazard_service) # Earthquake mapping @@ -25,7 +35,6 @@ def run_with_base_class(): tsu_mapping_id = "5ee7b2c9c54361000148de37" tsu_fragility_key = "Non-Retrofit inundationDepth Fragility ID Code" - tsu_liquefaction = False uncertainty = False @@ -37,15 +46,17 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) eq_mapping_set = MappingSet(fragility_service.get_mapping(eq_mapping_id)) - eq_road_dmg.set_input_dataset('dfr3_mapping_set', eq_mapping_set) - eq_road_dmg.set_input_hazard('hazard', earthquake) + eq_road_dmg.set_input_dataset("dfr3_mapping_set", eq_mapping_set) + eq_road_dmg.set_input_hazard("hazard", earthquake) eq_road_dmg.set_parameter("result_name", "seaside_road_dmg_earthquake_w_hazard_obj") if eq_fragility_key is not None: eq_road_dmg.set_parameter("fragility_key", eq_fragility_key) eq_road_dmg.set_parameter("num_cpu", 1) eq_road_dmg.set_parameter("use_liquefaction", liquefaction) if liquefaction and liq_geology_dataset_id is not None: - eq_road_dmg.set_parameter("liquefaction_geology_dataset_id", liq_geology_dataset_id) + eq_road_dmg.set_parameter( + "liquefaction_geology_dataset_id", liq_geology_dataset_id + ) eq_road_dmg.set_parameter("use_hazard_uncertainty", uncertainty) # Run Analysis @@ -59,8 +70,8 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) tsu_mapping_set = MappingSet(fragility_service.get_mapping(tsu_mapping_id)) - tsu_road_dmg.set_input_dataset('dfr3_mapping_set', tsu_mapping_set) - tsu_road_dmg.set_input_hazard('hazard', tsunami) + tsu_road_dmg.set_input_dataset("dfr3_mapping_set", tsu_mapping_set) + tsu_road_dmg.set_input_hazard("hazard", tsunami) tsu_road_dmg.set_parameter("result_name", "seaside_road_dmg_tsunami_w_hazard_obj") if tsu_fragility_key is not None: tsu_road_dmg.set_parameter("fragility_key", tsu_fragility_key) @@ -79,7 +90,9 @@ def run_with_base_class(): # road damage by hurricane inundation mapping mapping_id = "60ba583b1f2b7d4a916faf03" # Galveston Deterministic Hurricane - Kriging inundationDuration - hurricane = Hurricane.from_hazard_service("5f10837c01d3241d77729a4f", hazard_service) + hurricane = Hurricane.from_hazard_service( + 
"5f10837c01d3241d77729a4f", hazard_service + ) # Create road damage hurr_road_dmg = RoadDamage(client) @@ -88,18 +101,20 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - hurr_road_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + hurr_road_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) hurr_road_dmg.set_input_hazard("hazard", hurricane) # Specify the result name result_name = "galveston_hurricane_road_result_w_hazard_obj" # Set analysis parameters hurr_road_dmg.set_parameter("result_name", result_name) - hurr_road_dmg.set_parameter("fragility_key", "Non-Retrofit inundationDepth Fragility ID Code") + hurr_road_dmg.set_parameter( + "fragility_key", "Non-Retrofit inundationDepth Fragility ID Code" + ) hurr_road_dmg.set_parameter("num_cpu", 4) # Run road damage by hurricane inundation analysis hurr_road_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/socialvulnerability/test_socialvulnerability.py b/tests/pyincore/analyses/socialvulnerability/test_socialvulnerability.py index f38f3f79c..4ebcda2cf 100644 --- a/tests/pyincore/analyses/socialvulnerability/test_socialvulnerability.py +++ b/tests/pyincore/analyses/socialvulnerability/test_socialvulnerability.py @@ -9,25 +9,29 @@ def run_with_base_class(): - census_geo_level = "tract" - # For a full example, datasets should be obtained using the Census API in pyincore_data national_vulnerability_feature_averages = "6241d9c653302c512d67ef26" # social_vulnerability_demographic_factors = "6241e58653302c512d67fb38" - social_vulnerability_demographic_factors = "62b4d2f7be53de4a4737e52d" # calculated from censusutil + social_vulnerability_demographic_factors = ( + "62b4d2f7be53de4a4737e52d" # calculated from censusutil + ) client = IncoreClient(pyglobals.INCORE_API_DEV_URL) social_vulnerability = SocialVulnerability(client) social_vulnerability.set_parameter("result_name", "social_vulnerabilty") - social_vulnerability.load_remote_input_dataset("national_vulnerability_feature_averages", - national_vulnerability_feature_averages) - social_vulnerability.load_remote_input_dataset("social_vulnerability_demographic_factors", - social_vulnerability_demographic_factors) + social_vulnerability.load_remote_input_dataset( + "national_vulnerability_feature_averages", + national_vulnerability_feature_averages, + ) + social_vulnerability.load_remote_input_dataset( + "social_vulnerability_demographic_factors", + social_vulnerability_demographic_factors, + ) # Run pipeline damage analysis - result = social_vulnerability.run_analysis() + _ = social_vulnerability.run_analysis() if __name__ == "__main__": diff --git a/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py b/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py index 77a731c8d..436bfb943 100644 --- a/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py +++ b/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py @@ -9,25 +9,29 @@ def run_with_base_class(): - census_geo_level = "tract" - # For a full example, datasets should be obtained using the Census API in pyincore_data national_vulnerability_feature_averages = "6241d9c653302c512d67ef26" # social_vulnerability_demographic_factors = "6241e58653302c512d67fb38" - social_vulnerability_demographic_factors = "62b4d2f7be53de4a4737e52d" 
# calculated from censusutil + social_vulnerability_demographic_factors = ( + "62b4d2f7be53de4a4737e52d" # calculated from censusutil + ) client = IncoreClient(pyglobals.INCORE_API_DEV_URL) social_vulnerability_score = SocialVulnerabilityScore(client) social_vulnerability_score.set_parameter("result_name", "social_vulnerabilty_score") - social_vulnerability_score.load_remote_input_dataset("national_vulnerability_feature_averages", - national_vulnerability_feature_averages) - social_vulnerability_score.load_remote_input_dataset("social_vulnerability_demographic_factors", - social_vulnerability_demographic_factors) + social_vulnerability_score.load_remote_input_dataset( + "national_vulnerability_feature_averages", + national_vulnerability_feature_averages, + ) + social_vulnerability_score.load_remote_input_dataset( + "social_vulnerability_demographic_factors", + social_vulnerability_demographic_factors, + ) # Run pipeline damage analysis - result = social_vulnerability_score.run_analysis() + _ = social_vulnerability_score.run_analysis() if __name__ == "__main__": diff --git a/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage.py b/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage.py index 5da7e179b..6caaa0e94 100644 --- a/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage.py +++ b/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage.py @@ -1,5 +1,4 @@ -from pyincore.analyses.tornadoepndamage.tornadoepndamage import \ - TornadoEpnDamage +from pyincore.analyses.tornadoepndamage.tornadoepndamage import TornadoEpnDamage from pyincore import IncoreClient import pyincore.globals as pyglobals @@ -10,17 +9,17 @@ def run_with_base_class(): ted = TornadoEpnDamage(client) epn_network_id = "62719fc857f1d94b047447e6" - tornado_id = '5df913b83494fe000861a743' + tornado_id = "5df913b83494fe000861a743" ted.load_remote_input_dataset("epn_network", epn_network_id) result_name = "tornado_dmg_result" ted.set_parameter("result_name", result_name) - ted.set_parameter('tornado_id', tornado_id) - ted.set_parameter('seed', 1001) + ted.set_parameter("tornado_id", tornado_id) + ted.set_parameter("seed", 1001) ted.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage_w_hazard_obj.py b/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage_w_hazard_obj.py index f92ed2d9c..641d4caf4 100644 --- a/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/tornadoepndamage/test_tornadoepndamage_w_hazard_obj.py @@ -1,5 +1,4 @@ -from pyincore.analyses.tornadoepndamage.tornadoepndamage import \ - TornadoEpnDamage +from pyincore.analyses.tornadoepndamage.tornadoepndamage import TornadoEpnDamage from pyincore import IncoreClient, HazardService, Tornado import pyincore.globals as pyglobals @@ -14,13 +13,13 @@ def run_with_base_class(): tornado = Tornado.from_hazard_service("5df913b83494fe000861a743", hazard_service) ted.load_remote_input_dataset("epn_network", epn_network_id) - ted.set_input_hazard('hazard', tornado) + ted.set_input_hazard("hazard", tornado) result_name = "tornado_dmg_result_w_hazard_obj" ted.set_parameter("result_name", result_name) - ted.set_parameter('seed', 1001) + ted.set_parameter("seed", 1001) ted.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py 
b/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py index 357fa81bd..786f983a7 100644 --- a/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py +++ b/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py @@ -13,20 +13,22 @@ def run_with_base_class(): traffic_flow_recovery.load_remote_input_dataset("links", links) bridges = "5a284f2dc7d30d13bc082040" - traffic_flow_recovery.load_remote_input_dataset('bridges', bridges) + traffic_flow_recovery.load_remote_input_dataset("bridges", bridges) bridge_damage = "5c5ddff0c5c0e488fc0355df" - traffic_flow_recovery.load_remote_input_dataset('bridge_damage_value', bridge_damage) + traffic_flow_recovery.load_remote_input_dataset( + "bridge_damage_value", bridge_damage + ) unrepaired = "5c5de0c5c5c0e488fc0355eb" - traffic_flow_recovery.load_remote_input_dataset('unrepaired_bridge', unrepaired) + traffic_flow_recovery.load_remote_input_dataset("unrepaired_bridge", unrepaired) ADT_data = "5c5dde00c5c0e488fc032d7f" - traffic_flow_recovery.load_remote_input_dataset('ADT', ADT_data) + traffic_flow_recovery.load_remote_input_dataset("ADT", ADT_data) traffic_flow_recovery.set_parameter("num_cpu", 4) traffic_flow_recovery.set_parameter("pm", 1) - traffic_flow_recovery.set_parameter('ini_num_population', 5) + traffic_flow_recovery.set_parameter("ini_num_population", 5) traffic_flow_recovery.set_parameter("population_size", 3) traffic_flow_recovery.set_parameter("num_generation", 2) traffic_flow_recovery.set_parameter("mutation_rate", 0.1) @@ -35,5 +37,5 @@ def run_with_base_class(): traffic_flow_recovery.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/transportationrecovery/test_transportationrecovery.py b/tests/pyincore/analyses/transportationrecovery/test_transportationrecovery.py index 72b339985..d97e1eab7 100644 --- a/tests/pyincore/analyses/transportationrecovery/test_transportationrecovery.py +++ b/tests/pyincore/analyses/transportationrecovery/test_transportationrecovery.py @@ -13,20 +13,22 @@ def run_with_base_class(): transportation_recovery.load_remote_input_dataset("links", links) bridges = "5a284f2dc7d30d13bc082040" - transportation_recovery.load_remote_input_dataset('bridges', bridges) + transportation_recovery.load_remote_input_dataset("bridges", bridges) bridge_damage = "5c5ddff0c5c0e488fc0355df" - transportation_recovery.load_remote_input_dataset('bridge_damage_value', bridge_damage) + transportation_recovery.load_remote_input_dataset( + "bridge_damage_value", bridge_damage + ) unrepaired = "5c5de0c5c5c0e488fc0355eb" - transportation_recovery.load_remote_input_dataset('unrepaired_bridge', unrepaired) + transportation_recovery.load_remote_input_dataset("unrepaired_bridge", unrepaired) ADT_data = "5c5dde00c5c0e488fc032d7f" - transportation_recovery.load_remote_input_dataset('ADT', ADT_data) + transportation_recovery.load_remote_input_dataset("ADT", ADT_data) transportation_recovery.set_parameter("num_cpu", 4) transportation_recovery.set_parameter("pm", 1) - transportation_recovery.set_parameter('ini_num_population', 5) + transportation_recovery.set_parameter("ini_num_population", 5) transportation_recovery.set_parameter("population_size", 3) transportation_recovery.set_parameter("num_generation", 2) transportation_recovery.set_parameter("mutation_rate", 0.1) @@ -35,5 +37,5 @@ def run_with_base_class(): transportation_recovery.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": 
run_with_base_class() diff --git a/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage.py b/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage.py index bd615f3bc..72c5f781d 100644 --- a/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage.py +++ b/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage.py @@ -28,7 +28,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - wf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + wf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) result_name = "wf-dmg-results.csv" wf_dmg.set_parameter("result_name", result_name) @@ -44,5 +44,5 @@ def run_with_base_class(): wf_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage_w_hazard_obj.py b/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage_w_hazard_obj.py index a909f2386..62094c04f 100644 --- a/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/waterfacilitydamage/test_waterfacilitydamage_w_hazard_obj.py @@ -3,7 +3,13 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake +from pyincore import ( + IncoreClient, + FragilityService, + MappingSet, + HazardService, + Earthquake, +) from pyincore.analyses.waterfacilitydamage import WaterFacilityDamage import pyincore.globals as pyglobals @@ -11,7 +17,9 @@ def run_with_base_class(): client = IncoreClient(pyglobals.INCORE_API_DEV_URL) hazard_service = HazardService(client) - earthquake = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service) + earthquake = Earthquake.from_hazard_service( + "5b902cb273c3371e1236b36b", hazard_service + ) facility_datasetid = "5a284f2ac7d30d13bc081e52" mapping_id = "5b47c383337d4a387669d592" @@ -28,7 +36,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - wf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + wf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) wf_dmg.set_input_hazard("hazard", earthquake) result_name = "wf-dmg-results_w_hazard_obj" wf_dmg.set_parameter("result_name", result_name) @@ -42,5 +50,5 @@ def run_with_base_class(): wf_dmg.run_analysis() -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/waterfacilityrepaircost/test_waterfacilityrepaircost.py b/tests/pyincore/analyses/waterfacilityrepaircost/test_waterfacilityrepaircost.py index ac907d8b5..4e5b88c0e 100644 --- a/tests/pyincore/analyses/waterfacilityrepaircost/test_waterfacilityrepaircost.py +++ b/tests/pyincore/analyses/waterfacilityrepaircost/test_waterfacilityrepaircost.py @@ -9,15 +9,23 @@ def run_with_base_class(): wf_repair_cost = WaterFacilityRepairCost(client) # Seaside wf - wf_repair_cost.load_remote_input_dataset("water_facilities", "647644fe5bc8b26ddf9c5ddb") # dev + wf_repair_cost.load_remote_input_dataset( + "water_facilities", "647644fe5bc8b26ddf9c5ddb" + ) # dev - wf_repair_cost.load_remote_input_dataset("replacement_cost", "647645c75bc8b26ddf9c8f66") + wf_repair_cost.load_remote_input_dataset( + "replacement_cost", 
"647645c75bc8b26ddf9c8f66" + ) # can be chained with MCS - wf_repair_cost.load_remote_input_dataset("sample_damage_states", "647646bb5bc8b26ddf9cb775") + wf_repair_cost.load_remote_input_dataset( + "sample_damage_states", "647646bb5bc8b26ddf9cb775" + ) # dmg ratiose - wf_repair_cost.load_remote_input_dataset("wf_dmg_ratios", "647646705bc8b26ddf9cb747") + wf_repair_cost.load_remote_input_dataset( + "wf_dmg_ratios", "647646705bc8b26ddf9cb747" + ) wf_repair_cost.set_parameter("result_name", "seaside_wf") wf_repair_cost.set_parameter("num_cpu", 4) diff --git a/tests/pyincore/analyses/waterfacilityrestoration/test_waterfacilityrestoration.py b/tests/pyincore/analyses/waterfacilityrestoration/test_waterfacilityrestoration.py index 6645dd46c..0806d10bd 100644 --- a/tests/pyincore/analyses/waterfacilityrestoration/test_waterfacilityrestoration.py +++ b/tests/pyincore/analyses/waterfacilityrestoration/test_waterfacilityrestoration.py @@ -31,7 +31,7 @@ def run_with_base_class(): # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - wf_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + wf_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) result_name = "wf-dmg-results.csv" wf_dmg.set_parameter("result_name", result_name) @@ -50,10 +50,14 @@ def run_with_base_class(): # Load restoration mapping restorationsvc = RestorationService(client) - mapping_set = MappingSet(restorationsvc.get_mapping("61f075ee903e515036cee0a5")) # new format of mapping - wf_rest.load_remote_input_dataset("water_facilities", "5a284f2ac7d30d13bc081e52") # water facility + mapping_set = MappingSet( + restorationsvc.get_mapping("61f075ee903e515036cee0a5") + ) # new format of mapping + wf_rest.load_remote_input_dataset( + "water_facilities", "5a284f2ac7d30d13bc081e52" + ) # water facility wf_rest.set_input_dataset("damage", wf_dmg.get_output_dataset("result")) - wf_rest.set_input_dataset('dfr3_mapping_set', mapping_set) + wf_rest.set_input_dataset("dfr3_mapping_set", mapping_set) wf_rest.set_parameter("discretized_days", [1, 3, 7, 30, 90]) wf_rest.set_parameter("result_name", "shelby-water-facility") wf_rest.set_parameter("restoration_key", "Restoration ID Code") @@ -70,13 +74,22 @@ def run_with_base_class(): time_interval = wf_rest.get_parameter("time_interval") pf_interval = wf_rest.get_parameter("pf_interval") end_time = wf_rest.get_parameter("end_time") - wf_util = WaterFacilityRestorationUtil(inventory_restoration_map, pf_results, time_results, time_interval, - pf_interval, end_time) - functionality = wf_util.get_percentage_func(guid="e1bce78d-00a1-4605-95f3-3776ff907f73", - damage_state="DS_0", time=2.0) - time = wf_util.get_restoration_time(guid="e1bce78d-00a1-4605-95f3-3776ff907f73", damage_state="DS_1", pf=0.95) + wf_util = WaterFacilityRestorationUtil( + inventory_restoration_map, + pf_results, + time_results, + time_interval, + pf_interval, + end_time, + ) + functionality = wf_util.get_percentage_func( + guid="e1bce78d-00a1-4605-95f3-3776ff907f73", damage_state="DS_0", time=2.0 + ) + time = wf_util.get_restoration_time( + guid="e1bce78d-00a1-4605-95f3-3776ff907f73", damage_state="DS_1", pf=0.95 + ) print(functionality, time) -if __name__ == '__main__': +if __name__ == "__main__": run_with_base_class() diff --git a/tests/pyincore/analyses/wfnfunctionality/test_wfnfunctionality.py b/tests/pyincore/analyses/wfnfunctionality/test_wfnfunctionality.py index af62ba841..defab8b6a 100644 --- 
a/tests/pyincore/analyses/wfnfunctionality/test_wfnfunctionality.py +++ b/tests/pyincore/analyses/wfnfunctionality/test_wfnfunctionality.py @@ -74,7 +74,9 @@ def run_with_base_class(): pipeline_dmg_w_rr.set_parameter("hazard_type", hazard_type) pipeline_dmg_w_rr.set_parameter("hazard_id", hazard_id) pipeline_dmg_w_rr.set_parameter("liquefaction_fragility_key", liq_fragility_key) - pipeline_dmg_w_rr.set_parameter("liquefaction_geology_dataset_id", liq_geology_dataset_id) + pipeline_dmg_w_rr.set_parameter( + "liquefaction_geology_dataset_id", liq_geology_dataset_id + ) pipeline_dmg_w_rr.set_parameter("use_liquefaction", use_liq) pipeline_dmg_w_rr.set_parameter("num_cpu", 4) @@ -124,5 +126,5 @@ def run_with_base_class(): wfn_func.run_analysis() -if __name__ == '__main__': - run_with_base_class() \ No newline at end of file +if __name__ == "__main__": + run_with_base_class() diff --git a/tests/pyincore/models/test_dataset.py b/tests/pyincore/models/test_dataset.py index 4ae2fd8fc..b2e5da55e 100644 --- a/tests/pyincore/models/test_dataset.py +++ b/tests/pyincore/models/test_dataset.py @@ -17,5 +17,7 @@ def test_from_csv_data(): def test_from_json_data(): result_data = {} - dataset = Dataset.from_json_data(result_data, "empty.json", "incore:buildingDamageSupplement") + dataset = Dataset.from_json_data( + result_data, "empty.json", "incore:buildingDamageSupplement" + ) assert dataset.data_type == "incore:buildingDamageSupplement" diff --git a/tests/pyincore/models/test_dfr3curve.py b/tests/pyincore/models/test_dfr3curve.py index dd309393e..68db26d95 100644 --- a/tests/pyincore/models/test_dfr3curve.py +++ b/tests/pyincore/models/test_dfr3curve.py @@ -7,7 +7,13 @@ import pytest from pyincore.utils import evaluateexpression -from pyincore import globals as pyglobals, FragilityCurveSet, RepairCurveSet, RestorationCurveSet, AnalysisUtil +from pyincore import ( + globals as pyglobals, + FragilityCurveSet, + RepairCurveSet, + RestorationCurveSet, + AnalysisUtil, +) import numpy as np @@ -15,26 +21,36 @@ def test_fragility_set_small_overlap(): fragility_set = get_fragility_set("fragility_curve.json") # Test Case 1 - single overlap - limit_states = collections.OrderedDict([("LS_0", 0.9692754643), ("LS_1", 0.0001444974), ("LS_2", 0.0004277083)]) + limit_states = collections.OrderedDict( + [("LS_0", 0.9692754643), ("LS_1", 0.0001444974), ("LS_2", 0.0004277083)] + ) limit_states = AnalysisUtil.float_dict_to_decimal(limit_states) damage_states = fragility_set._3ls_to_4ds(limit_states) - assert damage_states['DS_0'] == AnalysisUtil.float_to_decimal(0.0307245357) and \ - damage_states['DS_1'] == AnalysisUtil.float_to_decimal(0.968847756) and \ - damage_states['DS_2'] == AnalysisUtil.float_to_decimal(0.0) and \ - damage_states['DS_3'] == AnalysisUtil.float_to_decimal(0.0004277083) + assert ( + damage_states["DS_0"] == AnalysisUtil.float_to_decimal(0.0307245357) + and damage_states["DS_1"] == AnalysisUtil.float_to_decimal(0.968847756) + and damage_states["DS_2"] == AnalysisUtil.float_to_decimal(0.0) + and damage_states["DS_3"] == AnalysisUtil.float_to_decimal(0.0004277083) + ) # Test Case 2 - double overlap - limit_states = collections.OrderedDict([("LS_0", 0.12), ("LS_1", 0.64), ("LS_2", 0.8)]) + limit_states = collections.OrderedDict( + [("LS_0", 0.12), ("LS_1", 0.64), ("LS_2", 0.8)] + ) limit_states = AnalysisUtil.float_dict_to_decimal(limit_states) damage_states = fragility_set._3ls_to_4ds(limit_states) - assert damage_states['DS_0'] == AnalysisUtil.float_to_decimal(0.2) and \ - damage_states['DS_1'] == 
AnalysisUtil.float_to_decimal(0.0) and \ - damage_states['DS_2'] == AnalysisUtil.float_to_decimal(0.0) and \ - damage_states['DS_3'] == AnalysisUtil.float_to_decimal(0.8) + assert ( + damage_states["DS_0"] == AnalysisUtil.float_to_decimal(0.2) + and damage_states["DS_1"] == AnalysisUtil.float_to_decimal(0.0) + and damage_states["DS_2"] == AnalysisUtil.float_to_decimal(0.0) + and damage_states["DS_3"] == AnalysisUtil.float_to_decimal(0.8) + ) def get_fragility_set(fragility_dir: str): - with open(os.path.join(pyglobals.TEST_DATA_DIR, fragility_dir), 'r', encoding='utf-8') as f: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, fragility_dir), "r", encoding="utf-8" + ) as f: fragility_curve = json.load(f) fragility_set = FragilityCurveSet(fragility_curve) return fragility_set @@ -52,7 +68,9 @@ def test_create_fragility_set(): def get_repair_set(repair_dir: str): - with open(os.path.join(pyglobals.TEST_DATA_DIR, repair_dir), 'r', encoding='utf-8') as f: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, repair_dir), "r", encoding="utf-8" + ) as f: repair_curveset = json.load(f) repair_set = RepairCurveSet(repair_curveset) return repair_set @@ -65,92 +83,148 @@ def get_remote_repair_set(repair_id: str): def get_restoration_set(restoration_dir: str): - with open(os.path.join(pyglobals.TEST_DATA_DIR, restoration_dir), 'r', encoding='utf-8') as f: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, restoration_dir), "r", encoding="utf-8" + ) as f: restoration_curveset = json.load(f) restoration_set = RestorationCurveSet(restoration_curveset) return restoration_set -@pytest.mark.parametrize("fragility_set,hazard_values,args,expected", [ - (get_fragility_set("fragility_curve.json"), {}, {}, 0.2619967240482869), - (get_fragility_set("fragility_curve.json"), {"surgeLevel": 6, "waveHeight": 4}, {}, 1.0), - (get_fragility_set("fragility_curve.json"), {"waveHeight": 4}, {}, 1.0), - (get_fragility_set("fragility_curve.json"), {"surgeLevel": 6}, {}, 0.9999999950124077), - (get_remote_fragility_set("606221fe618178207f6608a1"), - {"waveHeight": 1.1111, "surgeLevel": 3}, - {"clearance": 4, "span_mass": 12, "g_elev": 0.2}, - 0.142618908), - # test case sensitivity - (get_remote_fragility_set("606221fe618178207f6608a1"), - {"WAVEheight": 1.1111, "sURgeLEVEL": 3}, - {"CLEARANCE": 4, "span_maSS": 12, "g_ELEV": 0.2}, - 0.142618908), - (get_fragility_set("fragility_curves/PeriodStandardFragilityCurve_refactored.json"), {"0.2 sec Sa": 4}, {}, - 0.9905435183), - # test liquefaction - (get_remote_fragility_set("5b47bcce337d4a37755e0c85"), - {"pga": 0.314128903}, - {"inventory_type": "bridge"}, - 0.8097974088), -]) -def test_calculate_limit_state_probability(fragility_set, hazard_values, args, expected): +@pytest.mark.parametrize( + "fragility_set,hazard_values,args,expected", + [ + (get_fragility_set("fragility_curve.json"), {}, {}, 0.2619967240482869), + ( + get_fragility_set("fragility_curve.json"), + {"surgeLevel": 6, "waveHeight": 4}, + {}, + 1.0, + ), + (get_fragility_set("fragility_curve.json"), {"waveHeight": 4}, {}, 1.0), + ( + get_fragility_set("fragility_curve.json"), + {"surgeLevel": 6}, + {}, + 0.9999999950124077, + ), + ( + get_remote_fragility_set("606221fe618178207f6608a1"), + {"waveHeight": 1.1111, "surgeLevel": 3}, + {"clearance": 4, "span_mass": 12, "g_elev": 0.2}, + 0.142618908, + ), + # test case sensitivity + ( + get_remote_fragility_set("606221fe618178207f6608a1"), + {"WAVEheight": 1.1111, "sURgeLEVEL": 3}, + {"CLEARANCE": 4, "span_maSS": 12, "g_ELEV": 0.2}, + 0.142618908, + ), + ( + 
get_fragility_set( + "fragility_curves/PeriodStandardFragilityCurve_refactored.json" + ), + {"0.2 sec Sa": 4}, + {}, + 0.9905435183, + ), + # test liquefaction + ( + get_remote_fragility_set("5b47bcce337d4a37755e0c85"), + {"pga": 0.314128903}, + {"inventory_type": "bridge"}, + 0.8097974088, + ), + ], +) +def test_calculate_limit_state_probability( + fragility_set, hazard_values, args, expected +): result = fragility_set.calculate_limit_state(hazard_values, **args) print(result) assert np.isclose(result["LS_0"], expected) -@pytest.mark.parametrize("repair_set,args,expected", [ - (get_repair_set("repairset.json"), {"repair_time": [15, 30]}, [0.227, 0.729]), - (get_repair_set("repairset.json"), {"repair_time": 80}, 0.9943516689414926), - (get_remote_repair_set("60edf9a4fc0f3a7af53a2194"), {"repair_time": [15, 30]}, [0.989, 1]) -]) +@pytest.mark.parametrize( + "repair_set,args,expected", + [ + (get_repair_set("repairset.json"), {"repair_time": [15, 30]}, [0.227, 0.729]), + (get_repair_set("repairset.json"), {"repair_time": 80}, 0.9943516689414926), + ( + get_remote_repair_set("60edf9a4fc0f3a7af53a2194"), + {"repair_time": [15, 30]}, + [0.989, 1], + ), + ], +) def test_calculate_repair_rates(repair_set, args, expected): result = repair_set.calculate_repair_rates(**args) - if type(result["PF_0"]) == numpy.ndarray: + if type(result["PF_0"]) is numpy.ndarray: assert numpy.allclose(result["PF_0"], expected, rtol=1e-03, atol=1e-03) - elif type(result["PF_0"]) == numpy.float64: + elif type(result["PF_0"]) is numpy.float64: assert result["PF_0"] == expected else: assert False -@pytest.mark.parametrize("repair_set,args,expected", [ - (get_repair_set("repairset.json"), {"repair_time": [0.5, 0.2]}, [21.977, 14.307]), - (get_repair_set("repairset.json"), {"repair_time": 0.67}, 27.50466741611462) -]) +@pytest.mark.parametrize( + "repair_set,args,expected", + [ + ( + get_repair_set("repairset.json"), + {"repair_time": [0.5, 0.2]}, + [21.977, 14.307], + ), + (get_repair_set("repairset.json"), {"repair_time": 0.67}, 27.50466741611462), + ], +) def test_calculate_inverse_repair_rates(repair_set, args, expected): result = repair_set.calculate_inverse_repair_rates(**args) print(result) - if type(result["PF_0"]) == numpy.ndarray: + if type(result["PF_0"]) is numpy.ndarray: assert numpy.allclose(result["PF_0"], expected, rtol=1e-03, atol=1e-03) - elif type(result["PF_0"]) == numpy.float64: + elif type(result["PF_0"]) is numpy.float64: assert result["PF_0"] == expected else: assert False -@pytest.mark.parametrize("restoration_set,args,expected", [ - (get_restoration_set("restorationset.json"), {"time": [15, 30]}, [0.227, 0.729]), - (get_restoration_set("restorationset.json"), {"time": 80}, 0.9943516689414926) -]) +@pytest.mark.parametrize( + "restoration_set,args,expected", + [ + ( + get_restoration_set("restorationset.json"), + {"time": [15, 30]}, + [0.227, 0.729], + ), + (get_restoration_set("restorationset.json"), {"time": 80}, 0.9943516689414926), + ], +) def test_calculate_restoration_rates(restoration_set, args, expected): result = restoration_set.calculate_restoration_rates(**args) - if type(result["PF_0"]) == numpy.ndarray: + if type(result["PF_0"]) is numpy.ndarray: assert numpy.allclose(result["PF_0"], expected, rtol=1e-03, atol=1e-03) - elif type(result["PF_0"]) == numpy.float64: + elif type(result["PF_0"]) is numpy.float64: assert result["PF_0"] == expected else: assert False -@pytest.mark.parametrize("restoration_set,args,expected", [ - (get_restoration_set("pipe_restorationset.json"), {"break_rate": 
0.2, "leak_rate": 0.4, - "pipe_length": 80, "num_workers": 8}, 3.3000000000000003) -]) +@pytest.mark.parametrize( + "restoration_set,args,expected", + [ + ( + get_restoration_set("pipe_restorationset.json"), + {"break_rate": 0.2, "leak_rate": 0.4, "pipe_length": 80, "num_workers": 8}, + 3.3000000000000003, + ) + ], +) def test_calculate_pipeline_restoration_rates(restoration_set, args, expected): result = restoration_set.calculate_restoration_rates(**args) print(result) - if type(result["RT"]) == numpy.ndarray: + if type(result["RT"]) is numpy.ndarray: assert numpy.allclose(result["RT"], expected, rtol=1e-03, atol=1e-03) else: assert result["RT"] == expected diff --git a/tests/pyincore/models/test_hazard.py b/tests/pyincore/models/test_hazard.py index ad0d719da..ee793e6b5 100644 --- a/tests/pyincore/models/test_hazard.py +++ b/tests/pyincore/models/test_hazard.py @@ -1,7 +1,17 @@ import pytest import os -from pyincore import Dataset, HurricaneDataset, FloodDataset, TsunamiDataset, Hurricane, Flood, Earthquake, \ - EarthquakeDataset, Tornado, TornadoDataset +from pyincore import ( + Dataset, + HurricaneDataset, + FloodDataset, + TsunamiDataset, + Hurricane, + Flood, + Earthquake, + EarthquakeDataset, + Tornado, + TornadoDataset, +) from pyincore import globals as pyglobals from pyincore.models.hazard.tsunami import Tsunami @@ -22,17 +32,24 @@ def test_create_hurricane_from_remote(): def test_create_hurricane_from_local(): - # create the hurricane object - hurricane = Hurricane.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json")) + hurricane = Hurricane.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json") + ) # attach dataset from local file - hurricane.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif")), - data_type="ncsa:deterministicHurricaneRaster") - hurricane.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), - data_type="ncsa:deterministicHurricaneRaster") - hurricane.hazardDatasets[2].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), - data_type="ncsa:deterministicHurricaneRaster") + hurricane.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif")), + data_type="ncsa:deterministicHurricaneRaster", + ) + hurricane.hazardDatasets[1].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), + data_type="ncsa:deterministicHurricaneRaster", + ) + hurricane.hazardDatasets[2].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), + data_type="ncsa:deterministicHurricaneRaster", + ) assert len(hurricane.hazardDatasets) != 0 assert isinstance(hurricane.hazardDatasets[0], HurricaneDataset) @@ -47,32 +64,31 @@ def test_create_flood_from_remote(): assert isinstance(flood.hazardDatasets[0], FloodDataset) payload = [ - { - "demands": ["waterSurfaceElevation"], - "units": ["m"], - "loc": "34.60,-79.16" - } + {"demands": ["waterSurfaceElevation"], "units": ["m"], "loc": "34.60,-79.16"} ] - values = flood.read_hazard_values(payload, hazard_service=hazardsvc, timeout=(30, 600)) - assert values[0]['hazardValues'] == [41.970442822265625] + values = flood.read_hazard_values( + payload, hazard_service=hazardsvc, timeout=(30, 600) + ) + assert values[0]["hazardValues"] == [41.970442822265625] def test_create_flood_from_local(): - - flood = Flood.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "flood-dataset.json")) + flood = Flood.from_json_file( + 
os.path.join(pyglobals.TEST_DATA_DIR, "flood-dataset.json") + ) # attach dataset from local file - flood.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "flood-inundationDepth-50ft.tif")), - data_type="ncsa:probabilisticFloodRaster") - flood.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "flood-WSE-50ft.tif"), - data_type="ncsa:probabilisticFloodRaster") + flood.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "flood-inundationDepth-50ft.tif")), + data_type="ncsa:probabilisticFloodRaster", + ) + flood.hazardDatasets[1].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "flood-WSE-50ft.tif"), + data_type="ncsa:probabilisticFloodRaster", + ) payload = [ - { - "demands": ["waterSurfaceElevation"], - "units": ["m"], - "loc": "34.60,-79.16" - } + {"demands": ["waterSurfaceElevation"], "units": ["m"], "loc": "34.60,-79.16"} ] assert len(flood.hazardDatasets) != 0 assert not isinstance(flood.hazardDatasets[0], HurricaneDataset) @@ -80,7 +96,7 @@ def test_create_flood_from_local(): assert isinstance(flood.hazardDatasets[0].dataset, Dataset) values = flood.read_hazard_values(payload) - assert values[0]['hazardValues'] == [41.970442822265625] + assert values[0]["hazardValues"] == [41.970442822265625] def test_create_tsunami_from_remote(): @@ -88,43 +104,40 @@ def test_create_tsunami_from_remote(): assert len(tsunami.hazardDatasets) == 3 assert isinstance(tsunami.hazardDatasets[0], TsunamiDataset) - payload = [ - { - "demands": ["hmax"], - "units": ["m"], - "loc": "46.006,-123.935" - } - ] - values = tsunami.read_hazard_values(payload, hazard_service=hazardsvc, timeout=(30, 600)) - assert values[0]['hazardValues'] == [5.900000095367432] + payload = [{"demands": ["hmax"], "units": ["m"], "loc": "46.006,-123.935"}] + values = tsunami.read_hazard_values( + payload, hazard_service=hazardsvc, timeout=(30, 600) + ) + assert values[0]["hazardValues"] == [5.900000095367432] def test_create_tsunami_from_local(): - - tsunami = Tsunami.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tsunami.json")) + tsunami = Tsunami.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "tsunami.json") + ) # attach dataset from local file - tsunami.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Vmax.tif")), - data_type="ncsa:probabilisticTsunamiRaster") - tsunami.hazardDatasets[1].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Mmax.tif")), - data_type="ncsa:probabilisticTsunamiRaster") - tsunami.hazardDatasets[2].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Hmax.tif")), - data_type="ncsa:probabilisticTsunamiRaster") - - payload = [ - { - "demands": ["hmax"], - "units": ["m"], - "loc": "46.006,-123.935" - } - ] + tsunami.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Vmax.tif")), + data_type="ncsa:probabilisticTsunamiRaster", + ) + tsunami.hazardDatasets[1].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Mmax.tif")), + data_type="ncsa:probabilisticTsunamiRaster", + ) + tsunami.hazardDatasets[2].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Hmax.tif")), + data_type="ncsa:probabilisticTsunamiRaster", + ) + + payload = [{"demands": ["hmax"], "units": ["m"], "loc": "46.006,-123.935"}] assert len(tsunami.hazardDatasets) != 0 assert not isinstance(tsunami.hazardDatasets[0], FloodDataset) assert isinstance(tsunami.hazardDatasets[0], TsunamiDataset) assert isinstance(tsunami.hazardDatasets[0].dataset, Dataset) values = 
tsunami.read_hazard_values(payload) - assert values[0]['hazardValues'] == [2.9000000953674316] + assert values[0]["hazardValues"] == [2.9000000953674316] def test_create_eq_from_remote(): @@ -135,40 +148,53 @@ def test_create_eq_from_remote(): { "demands": ["PGA", "0.2 SD", "0.9 SA", "0.2 SA", "PGV"], "units": ["g", "cm", "g", "g", "in/s"], - "loc": "35.84,-89.90" + "loc": "35.84,-89.90", }, - { - "demands": ["1.0 SD", "0.2 SA"], - "units": ["cm", "g"], - "loc": "35.84,-89.90" - } + {"demands": ["1.0 SD", "0.2 SA"], "units": ["cm", "g"], "loc": "35.84,-89.90"}, ] - response = eq.read_hazard_values(payload, hazard_service=hazardsvc, timeout=(30, 600)) - - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['hazardValues']) \ - and response[0]['hazardValues'] == [1.5411689639186665, 2.5719942615949374, 0.9241786244448712, - 2.5884360071121133, 34.445240752324956] + response = eq.read_hazard_values( + payload, hazard_service=hazardsvc, timeout=(30, 600) + ) + + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and all( + isinstance(hazardval, float) for hazardval in response[0]["hazardValues"] + ) + and response[0]["hazardValues"] + == [ + 1.5411689639186665, + 2.5719942615949374, + 0.9241786244448712, + 2.5884360071121133, + 34.445240752324956, + ] + ) def test_create_eq_from_local(): - - eq = Earthquake.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset.json")) + eq = Earthquake.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset.json") + ) # attach dataset from local file - eq.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-SA.tif")), - data_type="ergo:probabilisticEarthquakeRaster") - eq.hazardDatasets[1].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-PGA.tif")), - data_type="ergo:probabilisticEarthquakeRaster") + eq.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-SA.tif")), + data_type="ergo:probabilisticEarthquakeRaster", + ) + eq.hazardDatasets[1].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-PGA.tif")), + data_type="ergo:probabilisticEarthquakeRaster", + ) payload = [ { "demands": ["pga", "0.2 SD", "0.9 SA", "0.2 SA", "PGV"], "units": ["g", "cm", "g", "g", "in/s"], - "loc": "35.03,-89.93" + "loc": "35.03,-89.93", } ] assert len(eq.hazardDatasets) != 0 @@ -177,7 +203,13 @@ def test_create_eq_from_local(): assert isinstance(eq.hazardDatasets[0].dataset, Dataset) values = eq.read_hazard_values(payload) - assert values[0]['hazardValues'] == [0.3149999976158142, -9999.2, -9999.2, 0.4729999899864197, -9999.2] + assert values[0]["hazardValues"] == [ + 0.3149999976158142, + -9999.2, + -9999.2, + 0.4729999899864197, + -9999.2, + ] def test_create_tornado_from_remote(): @@ -188,67 +220,56 @@ def test_create_tornado_from_remote(): # attach dataset from remote tornado.hazardDatasets[0].from_data_service(datasvc) assert isinstance(tornado.hazardDatasets[0].dataset, Dataset) - payload = [ - { - "demands": ["wind"], - "units": ["mph"], - "loc": "37.04, -94.37" - } - ] + payload = [{"demands": ["wind"], "units": ["mph"], "loc": "37.04, -94.37"}] seed = 1234 # 
Should be an EF1 values = tornado.read_hazard_values(payload, seed=seed) - assert values[0]['hazardValues'][0] > tornado.EF_WIND_SPEED[1] - assert values[0]['hazardValues'][0] < tornado.EF_WIND_SPEED[2] + assert values[0]["hazardValues"][0] > tornado.EF_WIND_SPEED[1] + assert values[0]["hazardValues"][0] < tornado.EF_WIND_SPEED[2] def test_create_model_based_tornado_from_remote(): tornado = Tornado.from_hazard_service("5df913b83494fe000861a743", hazardsvc) # point out of coverage, should return none - payload = [ - { - "demands": ["wind"], - "units": ["mph"], - "loc": "37.07,-94.50" - } - ] - response = tornado.read_hazard_values(payload, hazard_service=hazardsvc, timeout=(30, 600)) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and response[0]['hazardValues'] == [None] + payload = [{"demands": ["wind"], "units": ["mph"], "loc": "37.07,-94.50"}] + response = tornado.read_hazard_values( + payload, hazard_service=hazardsvc, timeout=(30, 600) + ) + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and response[0]["hazardValues"] == [None] + ) # this should fail with pytest.raises(ValueError) as exc_info: tornado.read_hazard_values(payload) - assert str(exc_info.value) == "Local Tornado type \"model\" is not supported yet" + assert str(exc_info.value) == 'Local Tornado type "model" is not supported yet' def test_create_tornado_from_local(): - - tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) + tornado = Tornado.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json") + ) # attach dataset from local file - tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), - data_type="incore:tornadoWindfield") + tornado.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield", + ) - payload = [ - { - "demands": ["wind"], - "units": ["mph"], - "loc": "37.04, -94.37" - } - ] + payload = [{"demands": ["wind"], "units": ["mph"], "loc": "37.04, -94.37"}] assert len(tornado.hazardDatasets) != 0 assert isinstance(tornado.hazardDatasets[0], TornadoDataset) assert isinstance(tornado.hazardDatasets[0].dataset, Dataset) values = tornado.read_hazard_values(payload, seed=1234) # Should be an EF1 - assert values[0]['hazardValues'][0] > tornado.EF_WIND_SPEED[1] - assert values[0]['hazardValues'][0] < tornado.EF_WIND_SPEED[2] + assert values[0]["hazardValues"][0] > tornado.EF_WIND_SPEED[1] + assert values[0]["hazardValues"][0] < tornado.EF_WIND_SPEED[2] def test_read_hazard_values_from_remote(): @@ -256,27 +277,29 @@ def test_read_hazard_values_from_remote(): { "demands": ["waveHeight", "surgeLevel"], "units": ["m", "m"], - "loc": "29.22,-95.06" + "loc": "29.22,-95.06", }, { "demands": ["waveHeight", "surgeLevel"], "units": ["cm", "cm"], - "loc": "29.23,-95.05" + "loc": "29.23,-95.05", }, { "demands": ["waveHeight", "inundationDuration"], "units": ["in", "hr"], - "loc": "29.22,-95.06" - } + "loc": "29.22,-95.06", + }, ] hurricane = Hurricane.from_hazard_service("5f10837c01d3241d77729a4f", hazardsvc) values = 
hurricane.read_hazard_values(payload, hazard_service=hazardsvc) - assert len(values) == len(payload) \ - and len(values[0]['demands']) == len(payload[0]['demands']) \ - and values[0]['units'] == payload[0]['units'] \ - and len(values[0]['hazardValues']) == len(values[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in values[0]['hazardValues']) \ - and values[0]['hazardValues'] == [1.54217780024576, 3.663398872786693] + assert ( + len(values) == len(payload) + and len(values[0]["demands"]) == len(payload[0]["demands"]) + and values[0]["units"] == payload[0]["units"] + and len(values[0]["hazardValues"]) == len(values[0]["demands"]) + and all(isinstance(hazardval, float) for hazardval in values[0]["hazardValues"]) + and values[0]["hazardValues"] == [1.54217780024576, 3.663398872786693] + ) def test_read_hazard_values_from_local(): @@ -284,45 +307,61 @@ def test_read_hazard_values_from_local(): { "demands": ["waveHeight", "surgeLevel"], "units": ["m", "m"], - "loc": "29.22,-95.06" + "loc": "29.22,-95.06", }, { "demands": ["waveHeight", "surgeLevel"], "units": ["cm", "in"], - "loc": "29.22,-95.06" + "loc": "29.22,-95.06", }, { "demands": ["inundationDuration", "inundationDuration"], "units": ["hr", "s"], - "loc": "29.22,-95.06" + "loc": "29.22,-95.06", }, { "demands": ["waveHeight", "surgeLevel"], "units": ["m", "m"], - "loc": "29.34,-94.94" - } + "loc": "29.34,-94.94", + }, ] # create the hurricane object - hurricane = Hurricane.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json")) + hurricane = Hurricane.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json") + ) # attach dataset from local file - hurricane.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif")), - data_type="ncsa:deterministicHurricaneRaster") - hurricane.hazardDatasets[0].set_threshold(threshold_value=3.28084, threshold_unit="ft") - - hurricane.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), - data_type="ncsa:deterministicHurricaneRaster") - hurricane.hazardDatasets[2].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), - data_type="ncsa:deterministicHurricaneRaster") + hurricane.hazardDatasets[0].from_file( + (os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif")), + data_type="ncsa:deterministicHurricaneRaster", + ) + hurricane.hazardDatasets[0].set_threshold( + threshold_value=3.28084, threshold_unit="ft" + ) + + hurricane.hazardDatasets[1].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), + data_type="ncsa:deterministicHurricaneRaster", + ) + hurricane.hazardDatasets[2].from_file( + os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), + data_type="ncsa:deterministicHurricaneRaster", + ) values = hurricane.read_hazard_values(payload) assert len(values) == len(payload) - assert len(values[0]['demands']) == len(payload[0]['demands']) - assert values[0]['units'] == payload[0]['units'] - assert len(values[0]['hazardValues']) == len(values[0]['demands']) - assert all(isinstance(hazardval, float) for hazardval in values[0]['hazardValues']) - assert values[0]['hazardValues'] == [1.54217780024576, 3.663398872786693] - assert values[1]['hazardValues'] == [1.54217780024576*100, 3.663398872786693*39.3701] # unit conversion - assert values[2]['hazardValues'] == [18.346923306935572, 18.346923306935572*3600] # unit conversion - assert values[3]['hazardValues'] == [None, 3.471035889851387] # test threshold + assert 
len(values[0]["demands"]) == len(payload[0]["demands"]) + assert values[0]["units"] == payload[0]["units"] + assert len(values[0]["hazardValues"]) == len(values[0]["demands"]) + assert all(isinstance(hazardval, float) for hazardval in values[0]["hazardValues"]) + assert values[0]["hazardValues"] == [1.54217780024576, 3.663398872786693] + assert values[1]["hazardValues"] == [ + 1.54217780024576 * 100, + 3.663398872786693 * 39.3701, + ] # unit conversion + assert values[2]["hazardValues"] == [ + 18.346923306935572, + 18.346923306935572 * 3600, + ] # unit conversion + assert values[3]["hazardValues"] == [None, 3.471035889851387] # test threshold diff --git a/tests/pyincore/models/test_networkdataset.py b/tests/pyincore/models/test_networkdataset.py index bda0bb2a2..c54521644 100644 --- a/tests/pyincore/models/test_networkdataset.py +++ b/tests/pyincore/models/test_networkdataset.py @@ -38,9 +38,14 @@ def test_from_data_service(datasvc): def test_from_json_str(): print(PYINCORE_ROOT_FOLDER) - with open(os.path.join(PYINCORE_ROOT_FOLDER, "tests/data/network/network_dataset.json"), "r") as f: + with open( + os.path.join(PYINCORE_ROOT_FOLDER, "tests/data/network/network_dataset.json"), + "r", + ) as f: json_dict = json.load(f) - network = NetworkDataset.from_json_str(json.dumps(json_dict), folder_path="../data/network/") + network = NetworkDataset.from_json_str( + json.dumps(json_dict), folder_path="../data/network/" + ) assert network.nodes is not None assert network.graph is not None assert network.links is not None @@ -61,8 +66,15 @@ def test_from_files(): link_data_type = "incore:epnLinkVer1" node_data_type = "incore:epnNodeVer1" graph_data_type = "incore:epnGraph" - network = NetworkDataset.from_files(node_file_path, link_file_path, graph_file_path, network_data_type, - link_data_type, node_data_type, graph_data_type) + network = NetworkDataset.from_files( + node_file_path, + link_file_path, + graph_file_path, + network_data_type, + link_data_type, + node_data_type, + graph_data_type, + ) assert network.nodes is not None assert network.graph is not None assert network.links is not None @@ -79,14 +91,14 @@ def test_get_node_inventory(datasvc): dataset_id = "62719fc857f1d94b047447e6" network = NetworkDataset.from_data_service(dataset_id, datasvc) nodes = list(network.get_nodes()) - assert nodes[0]['properties']["guid"] == "9c39623d-920e-49e6-b272-83b2ec954b84" + assert nodes[0]["properties"]["guid"] == "9c39623d-920e-49e6-b272-83b2ec954b84" def test_get_link_inventory(datasvc): dataset_id = "62719fc857f1d94b047447e6" network = NetworkDataset.from_data_service(dataset_id, datasvc) links = list(network.get_links()) - assert links[0]['properties']["guid"] == "a4f63126-bb4b-45e7-9029-47984155f859" + assert links[0]["properties"]["guid"] == "a4f63126-bb4b-45e7-9029-47984155f859" def test_get_graph_table(datasvc): @@ -109,10 +121,15 @@ def test_set_input_dataset(datasvc, client): dataset_id = "62719fc857f1d94b047447e6" network = NetworkDataset.from_data_service(dataset_id, datasvc) base_analysis = BaseAnalysis(client) - base_analysis.input_datasets = {"network": {"spec": { - 'id': 'network', - 'required': True, - 'description': 'network', - 'type': ['incore:epnNetwork'], - }, "value": None}} + base_analysis.input_datasets = { + "network": { + "spec": { + "id": "network", + "required": True, + "description": "network", + "type": ["incore:epnNetwork"], + }, + "value": None, + } + } assert base_analysis.set_input_dataset("network", network) is True diff --git a/tests/pyincore/models/test_units.py 
b/tests/pyincore/models/test_units.py index c9bfced5c..54444a457 100644 --- a/tests/pyincore/models/test_units.py +++ b/tests/pyincore/models/test_units.py @@ -7,13 +7,25 @@ def test_unit_conversion(): hazard_value = 1.54217780024576 original_demand_units = "m" requested_demand_units = "cm" - assert Units.convert_hazard(hazard_value, original_demand_units, requested_demand_units) == hazard_value * 100 + assert ( + Units.convert_hazard( + hazard_value, original_demand_units, requested_demand_units + ) + == hazard_value * 100 + ) original_demand_units = "m/s" requested_demand_units = "ft/s" - assert Units.convert_hazard(hazard_value, original_demand_units, requested_demand_units) == hazard_value * 3.28084 + assert ( + Units.convert_hazard( + hazard_value, original_demand_units, requested_demand_units + ) + == hazard_value * 3.28084 + ) original_demand_units = "m/s" requested_demand_units = "non-existent-unit" with pytest.raises(ValueError): - Units.convert_hazard(hazard_value, original_demand_units, requested_demand_units) + Units.convert_hazard( + hazard_value, original_demand_units, requested_demand_units + ) diff --git a/tests/pyincore/test_client.py b/tests/pyincore/test_client.py index fa77cdad7..f0ba056c3 100644 --- a/tests/pyincore/test_client.py +++ b/tests/pyincore/test_client.py @@ -6,9 +6,8 @@ import os.path import pytest -import requests -from pyincore import Client, IncoreClient, InsecureIncoreClient, DataService +from pyincore import IncoreClient def test_client_success(monkeypatch): diff --git a/tests/pyincore/test_dataservice.py b/tests/pyincore/test_dataservice.py index 2412b35c7..1abad2696 100644 --- a/tests/pyincore/test_dataservice.py +++ b/tests/pyincore/test_dataservice.py @@ -26,7 +26,7 @@ def datasvc(): def test_get_dataset_metadata(datasvc): dataset_id = "5a284f0ac7d30d13bc0819c4" metadata = datasvc.get_dataset_metadata(dataset_id) - assert metadata['id'] == dataset_id + assert metadata["id"] == dataset_id def test_get_dataset_files_metadata(datasvc): @@ -35,14 +35,13 @@ def test_get_dataset_files_metadata(datasvc): metadata = datasvc.get_dataset_metadata(dataset_id) fileDescriptor = datasvc.get_dataset_files_metadata(dataset_id) - if 'id' not in fileDescriptor[0].keys(): + if "id" not in fileDescriptor[0].keys(): errors.append("response does not seem right!") # compare the id in fileDescriptors field in metadata with the # id returned by /file endpoint - if metadata['fileDescriptors'][0]['id'] != fileDescriptor[0]['id']: - errors.append( - "it doesn't fetch the right fileDescriptors for this id!") + if metadata["fileDescriptors"][0]["id"] != fileDescriptor[0]["id"]: + errors.append("it doesn't fetch the right fileDescriptors for this id!") assert not errors, "errors occured:\n{}".format("\n".join(errors)) @@ -51,7 +50,7 @@ def test_get_dataset_file_metadata(datasvc): dataset_id = "5a284f0bc7d30d13bc081a28" file_id = "5a284f0bc7d30d13bc081a2b" metadata = datasvc.get_dataset_file_metadata(dataset_id, file_id) - assert 'id' in metadata.keys() and metadata['id'] == file_id + assert "id" in metadata.keys() and metadata["id"] == file_id def test_get_dataset_blob(datasvc): @@ -59,7 +58,7 @@ def test_get_dataset_blob(datasvc): dataset_id = "5a284f0ac7d30d13bc0819c4" fname = datasvc.get_dataset_blob(dataset_id, join=True) - if type(fname) != str: + if type(fname) is not str: errors.append("doesn't return the correct filename!") # check if file or folder exists locally, which means successfully downloaded if not os.path.exists(fname): @@ -71,13 +70,17 @@ def 
test_get_dataset_blob(datasvc): def test_get_datasets(datasvc): errors = [] datatype = "ergo:buildingDamageVer4" - metadata = datasvc.get_datasets(datatype=datatype, title="Shelby_County_Essential_Facilities") + metadata = datasvc.get_datasets( + datatype=datatype, title="Shelby_County_Essential_Facilities" + ) - if 'id' not in metadata[0].keys(): + if "id" not in metadata[0].keys(): errors.append("response is not right!") - if not re.search(r'shelby_county_essential_facilities', metadata[0]['title'].lower()): + if not re.search( + r"shelby_county_essential_facilities", metadata[0]["title"].lower() + ): errors.append("title doesn't match!") - if not re.search(datatype, metadata[0]['dataType']): + if not re.search(datatype, metadata[0]["dataType"]): errors.append("datatype doesn't match!") assert not errors, "errors occured:\n{}".format("\n".join(errors)) @@ -87,19 +90,19 @@ def test_create_dataset_geotif(datasvc): """ Testing create dataset with geotif file """ - with open(os.path.join(pyglobals.TEST_DATA_DIR, 'geotif_sample.json'), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "geotif_sample.json"), "r") as file: dataset_prop = ast.literal_eval(file.read()) response = datasvc.create_dataset(dataset_prop) - if 'id' not in response: + if "id" not in response: assert False - dataset_id = response['id'] - print('dataset is created with id ' + dataset_id) - files = [str(os.path.join(pyglobals.TEST_DATA_DIR, 'geotif_sample.tif'))] + dataset_id = response["id"] + print("dataset is created with id " + dataset_id) + files = [str(os.path.join(pyglobals.TEST_DATA_DIR, "geotif_sample.tif"))] response = datasvc.add_files_to_dataset(dataset_id, files) - assert response['id'] == dataset_id + assert response["id"] == dataset_id r = datasvc.delete_dataset(dataset_id) assert r["id"] == dataset_id @@ -109,21 +112,23 @@ def test_create_dataset_shpfile(datasvc): """ Testing create dataset with shapefile """ - with open(pyglobals.TEST_DATA_DIR + '/shp_sample.json', 'r') as file: + with open(pyglobals.TEST_DATA_DIR + "/shp_sample.json", "r") as file: dataset_prop = ast.literal_eval(file.read()) response = datasvc.create_dataset(dataset_prop) - if 'id' not in response: + if "id" not in response: assert False - dataset_id = response['id'] - print('dataset is created with id ' + dataset_id) - files = [str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/shp_sample.shp')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/shp_sample.dbf')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/shp_sample.shx')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/shp_sample.prj'))] + dataset_id = response["id"] + print("dataset is created with id " + dataset_id) + files = [ + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/shp_sample.shp")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/shp_sample.dbf")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/shp_sample.shx")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/shp_sample.prj")), + ] response = datasvc.add_files_to_dataset(dataset_id, files) - assert response['id'] == dataset_id + assert response["id"] == dataset_id r = datasvc.delete_dataset(dataset_id) assert r["id"] == dataset_id @@ -140,12 +145,12 @@ def test_update_dataset(datasvc): def test_get_files(datasvc): metadata = datasvc.get_files() - assert 'id' in metadata[0] + assert "id" in metadata[0] def test_get_file_metadata(datasvc): metadata = datasvc.get_file_metadata("5a284f24c7d30d13bc081adb") - assert metadata['id'] == 
"5a284f24c7d30d13bc081adb" + assert metadata["id"] == "5a284f24c7d30d13bc081adb" def test_get_file_blob(datasvc): @@ -153,7 +158,7 @@ def test_get_file_blob(datasvc): dataset_id = "5a284f24c7d30d13bc081adb" fname = datasvc.get_file_blob(dataset_id) - if type(fname) != str: + if type(fname) is not str: errors.append("doesn't return the correct filename!") # check if file or folder exists locally, which means successfully downloaded if not os.path.exists(fname): @@ -164,27 +169,31 @@ def test_get_file_blob(datasvc): @pytest.mark.skip(reason="service currently in refactoring...") def test_create_network_dataset(datasvc): - with open(pyglobals.TEST_DATA_DIR + '/network/network_dataset.json', 'r') as file: + with open(pyglobals.TEST_DATA_DIR + "/network/network_dataset.json", "r") as file: dataset_prop = ast.literal_eval(file.read()) response = datasvc.create_dataset(dataset_prop) - if 'id' not in response: + if "id" not in response: assert False - dataset_id = response['id'] - print('network dataset is created with id ' + dataset_id) - files = [str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_links.shp')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_links.dbf')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_links.shx')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_links.prj')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_node.shp')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_node.dbf')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_node.shx')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_node.prj')), - str(os.path.join(pyglobals.TEST_DATA_DIR, 'shp_sample/network_graph.csv'))] + dataset_id = response["id"] + print("network dataset is created with id " + dataset_id) + files = [ + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_links.shp")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_links.dbf")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_links.shx")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_links.prj")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_node.shp")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_node.dbf")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_node.shx")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_node.prj")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "shp_sample/network_graph.csv")), + ] linkname = dataset_prop["networkDataset"]["link"]["fileName"] nodename = dataset_prop["networkDataset"]["node"]["fileName"] graphname = dataset_prop["networkDataset"]["graph"]["fileName"] - response = datasvc.add_files_to_network_dataset(dataset_id, files, nodename, linkname, graphname) + response = datasvc.add_files_to_network_dataset( + dataset_id, files, nodename, linkname, graphname + ) dataset = Dataset.from_data_service(response["id"], data_service=datasvc) network_dataset = NetworkDataset(dataset) @@ -205,11 +214,24 @@ def test_create_network_dataset(datasvc): def test_create_dataset_from_json_str(datasvc): - with open(os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/dataset_metadata.json"), 'r') as f: + with open( + os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/dataset_metadata.json" + ), + "r", + ) as f: metadata_json_str = f.read() with pytest.raises(Exception): Dataset.from_json_str(metadata_json_str) - assert Dataset.from_json_str(metadata_json_str, 
data_service=datasvc).local_file_path is not None - assert Dataset.from_json_str(metadata_json_str, file_path="some_local_file_path").local_file_path is not None + assert ( + Dataset.from_json_str(metadata_json_str, data_service=datasvc).local_file_path + is not None + ) + assert ( + Dataset.from_json_str( + metadata_json_str, file_path="some_local_file_path" + ).local_file_path + is not None + ) diff --git a/tests/pyincore/test_dfr3service.py b/tests/pyincore/test_dfr3service.py index 7499b3536..38d8c898b 100644 --- a/tests/pyincore/test_dfr3service.py +++ b/tests/pyincore/test_dfr3service.py @@ -35,93 +35,113 @@ def restorationsvc(monkeypatch): def test_get_fragility_sets(fragilitysvc): metadata = fragilitysvc.get_dfr3_sets(demand_type="PGA", creator="cwang138") - assert 'id' in metadata[0].keys() + assert "id" in metadata[0].keys() def test_get_fragility_set(fragilitysvc): set_id = "5b47b2d7337d4a36187c61c9" metadata = fragilitysvc.get_dfr3_set(set_id) - assert metadata['id'] == set_id + assert metadata["id"] == set_id def test_search_fragility_sets(fragilitysvc): text = "Elnashai and Jeong" fragility_sets = fragilitysvc.search_dfr3_sets(text) - assert len(fragility_sets) > 0 and text in fragility_sets[0]['authors'] + assert len(fragility_sets) > 0 and text in fragility_sets[0]["authors"] def test_match_fragilities_single_inventory(fragilitysvc): inventory = {} - with open(os.path.join(pyglobals.TEST_DATA_DIR, "single_inventory.json"), 'r') as file: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "single_inventory.json"), "r" + ) as file: inventory = ast.literal_eval(file.read()) - mapping_id = '5b47b2d9337d4a36187c7564' + mapping_id = "5b47b2d9337d4a36187c7564" key = "High-Retrofit Drift-Sensitive Fragility ID Code" mapping = MappingSet(fragilitysvc.get_mapping(mapping_id)) frag_set = fragilitysvc.match_inventory(mapping, [inventory], key) - assert inventory['id'] in frag_set.keys() + assert inventory["id"] in frag_set.keys() def test_match_fragilities_multiple_inventory(fragilitysvc): inventories = [] - with open(os.path.join(pyglobals.TEST_DATA_DIR, "multiple_inventory.json"), 'r') as file: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "multiple_inventory.json"), "r" + ) as file: inventories = ast.literal_eval(file.read()) - mapping_id = '5b47b350337d4a3629076f2c' + mapping_id = "5b47b350337d4a3629076f2c" key = "Non-Retrofit Fragility ID Code" mapping = MappingSet(fragilitysvc.get_mapping(mapping_id)) frag_set = fragilitysvc.match_inventory(mapping, inventories, key) - assert (inventories[0]['id'] in frag_set.keys()) and (len(frag_set) == len(inventories)) + assert (inventories[0]["id"] in frag_set.keys()) and ( + len(frag_set) == len(inventories) + ) def test_match_fragilities_multiple_inventories_new_format(fragilitysvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "multiple_inventory.json"), 'r') as file: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "multiple_inventory.json"), "r" + ) as file: inventories = ast.literal_eval(file.read()) key = "Non-Retrofit Fragility ID Code" - mapping = MappingSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "local_mapping_new_format.json"), - "fragility") + mapping = MappingSet.from_json_file( + os.path.join(pyglobals.TEST_DATA_DIR, "local_mapping_new_format.json"), + "fragility", + ) frag_set = fragilitysvc.match_inventory(mapping, inventories, key) - assert (inventories[0]['id'] in frag_set.keys()) and (len(frag_set) == len(inventories)) + assert (inventories[0]["id"] in frag_set.keys()) and ( + len(frag_set) 
== len(inventories) + ) def test_extract_inventory_class(restorationsvc): - rules = [["java.lang.String utilfcltyc EQUALS 'EESL'"], ["java.lang.String utilfcltyc EQUALS 'ESSH'"]] + rules = [ + ["java.lang.String utilfcltyc EQUALS 'EESL'"], + ["java.lang.String utilfcltyc EQUALS 'ESSH'"], + ] assert restorationsvc.extract_inventory_class_legacy(rules) == "EESL/ESSH" rules = [["java.lang.String utilfcltyc EQUALS 'EDC'"]] assert restorationsvc.extract_inventory_class_legacy(rules) == "EDC" - rules = [["java.lang.String utilfcltyc EQUALS 'EDFLT'"], - ["java.lang.String utilfcltyc EQUALS 'EPPL'"], - ["java.lang.String utilfcltyc EQUALS 'EPPM'"], - ["java.lang.String utilfcltyc EQUALS 'EPPS'"] - ] - assert restorationsvc.extract_inventory_class_legacy(rules) == "EDFLT/EPPL/EPPM/EPPS" - - rules = {"AND": [ - {"OR": [ - "java.lang.String utilfcltyc EQUALS 'EESL'", - "java.lang.String utilfcltyc EQUALS 'ESSH'" - ] - }, - { - "AND": [ - "java.lang.String utilfcltyc EQUALS 'EDC'" - ] - }, - { - "OR": [ - "java.lang.String utilfcltyc EQUALS 'EDFLT'", - "java.lang.String utilfcltyc EQUALS 'EPPL'", - "java.lang.String utilfcltyc EQUALS 'EPPM'", - "java.lang.String utilfcltyc EQUALS 'EPPS'" - ] - } + rules = [ + ["java.lang.String utilfcltyc EQUALS 'EDFLT'"], + ["java.lang.String utilfcltyc EQUALS 'EPPL'"], + ["java.lang.String utilfcltyc EQUALS 'EPPM'"], + ["java.lang.String utilfcltyc EQUALS 'EPPS'"], + ] + assert ( + restorationsvc.extract_inventory_class_legacy(rules) == "EDFLT/EPPL/EPPM/EPPS" + ) + + rules = { + "AND": [ + { + "OR": [ + "java.lang.String utilfcltyc EQUALS 'EESL'", + "java.lang.String utilfcltyc EQUALS 'ESSH'", ] - } - assert restorationsvc.extract_inventory_class(rules) == "EESL/ESSH+EDC+EDFLT/EPPL/EPPM/EPPS" + }, + {"AND": ["java.lang.String utilfcltyc EQUALS 'EDC'"]}, + { + "OR": [ + "java.lang.String utilfcltyc EQUALS 'EDFLT'", + "java.lang.String utilfcltyc EQUALS 'EPPL'", + "java.lang.String utilfcltyc EQUALS 'EPPM'", + "java.lang.String utilfcltyc EQUALS 'EPPS'", + ] + }, + ] + } + assert ( + restorationsvc.extract_inventory_class(rules) + == "EESL/ESSH+EDC+EDFLT/EPPL/EPPM/EPPS" + ) + def test_get_fragility_mappings(fragilitysvc): mappings = fragilitysvc.get_mappings(hazard_type="earthquake", creator="cwang138") @@ -137,7 +157,7 @@ def test_get_fragility_mapping(fragilitysvc): def test_create_and_delete_fragility_set(fragilitysvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "fragility_curve.json"), 'r') as f: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "fragility_curve.json"), "r") as f: fragility_set = json.load(f) created = fragilitysvc.create_dfr3_set(fragility_set) assert "id" in created.keys() @@ -147,7 +167,9 @@ def test_create_and_delete_fragility_set(fragilitysvc): def test_create_and_delete_fragility_mapping(fragilitysvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "fragility_mappingset.json"), 'r') as f: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "fragility_mappingset.json"), "r" + ) as f: mapping_set = json.load(f) created = fragilitysvc.create_mapping(mapping_set) assert "id" in created.keys() @@ -157,7 +179,7 @@ def test_create_and_delete_fragility_mapping(fragilitysvc): def test_create_and_delete_repair_set(repairsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "repairset.json"), 'r') as f: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "repairset.json"), "r") as f: repair_set = json.load(f) created = repairsvc.create_dfr3_set(repair_set) assert "id" in created.keys() @@ -167,7 +189,9 @@ def 
test_create_and_delete_repair_set(repairsvc): def test_create_and_delete_repair_mapping(repairsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "repair_mappingset.json"), 'r') as f: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "repair_mappingset.json"), "r" + ) as f: mapping_set = json.load(f) created = repairsvc.create_mapping(mapping_set) assert "id" in created.keys() @@ -179,11 +203,11 @@ def test_create_and_delete_repair_mapping(repairsvc): def test_get_repair_sets(repairsvc): metadata = repairsvc.get_dfr3_sets(hazard_type="tornado") - assert 'id' in metadata[0].keys() + assert "id" in metadata[0].keys() def test_create_and_delete_restoration_set(restorationsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "restorationset.json"), 'r') as f: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "restorationset.json"), "r") as f: restoration_set = json.load(f) created = restorationsvc.create_dfr3_set(restoration_set) assert "id" in created.keys() @@ -193,10 +217,12 @@ def test_create_and_delete_restoration_set(restorationsvc): def test_create_and_delete_restoration_mapping(restorationsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "restoration_mappingset.json"), 'r') as f: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "restoration_mappingset.json"), "r" + ) as f: mapping_set = json.load(f) created = restorationsvc.create_mapping(mapping_set) assert "id" in created.keys() del_response = restorationsvc.delete_mapping(created["id"]) - assert del_response["id"] is not None \ No newline at end of file + assert del_response["id"] is not None diff --git a/tests/pyincore/test_hazardservice.py b/tests/pyincore/test_hazardservice.py index b9339175f..515649a98 100644 --- a/tests/pyincore/test_hazardservice.py +++ b/tests/pyincore/test_hazardservice.py @@ -21,26 +21,31 @@ def test_get_earthquake_hazard_metadata_list(hazardsvc): test get /earthquakes endpoint """ response = hazardsvc.get_earthquake_hazard_metadata_list() - assert len(response) > 0 and 'id' in response[0].keys() + assert len(response) > 0 and "id" in response[0].keys() def test_get_earthquake_hazard_metadata(hazardsvc): """ Testing get earthquake/{id} """ - response = hazardsvc.get_earthquake_hazard_metadata( - "5b902cb273c3371e1236b36b") - assert response['id'] == "5b902cb273c3371e1236b36b" + response = hazardsvc.get_earthquake_hazard_metadata("5b902cb273c3371e1236b36b") + assert response["id"] == "5b902cb273c3371e1236b36b" def test_get_earthquake_hazard_value_set(hazardsvc): # raster?demandType=0.2+SA&demandUnits=g&minX=-90.3099&minY=34.9942&maxX=-89.6231&maxY=35.4129&gridSpacing=0.01696 x, y, hazard_val = hazardsvc.get_earthquake_hazard_value_set( "5ba92505ec23090435209071", - "0.2 SA", "g", - [[-90.3099, 34.9942], [-89.6231, 35.4129]], 0.01696) - assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray) \ - and isinstance(hazard_val, np.ndarray) + "0.2 SA", + "g", + [[-90.3099, 34.9942], [-89.6231, 35.4129]], + 0.01696, + ) + assert ( + isinstance(x, np.ndarray) + and isinstance(y, np.ndarray) + and isinstance(hazard_val, np.ndarray) + ) def test_post_earthquake_hazard_values(hazardsvc): @@ -48,58 +53,51 @@ def test_post_earthquake_hazard_values(hazardsvc): { "demands": ["PGA", "0.2 SD", "0.9 SA", "0.2 SA", "PGV"], "units": ["g", "cm", "g", "g", "in/s"], - "loc": "35.84,-89.90" + "loc": "35.84,-89.90", }, - { - "demands": ["1.0 SD", "0.2 SA"], - "units": ["cm", "g"], - "loc": "35.84,-89.90" - } + {"demands": ["1.0 SD", "0.2 SA"], "units": ["cm", "g"], "loc": "35.84,-89.90"}, ] response = 
hazardsvc.post_earthquake_hazard_values( - "5b902cb273c3371e1236b36b", - payload + "5b902cb273c3371e1236b36b", payload ) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['hazardValues']) \ - and response[0]['hazardValues'] == [1.5411689639186665, 2.5719942615949374, 0.9241786244448712, - 2.5884360071121133, 34.445240752324956] + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and all( + isinstance(hazardval, float) for hazardval in response[0]["hazardValues"] + ) + and response[0]["hazardValues"] + == [ + 1.5411689639186665, + 2.5719942615949374, + 0.9241786244448712, + 2.5884360071121133, + 34.445240752324956, + ] + ) def test_bad_units_post_earthquake_hazard_values(hazardsvc): payload = [ - { - "demands": ["1.0 SD", "0.2 SA"], - "units": ["cm", "zzz"], - "loc": "35.84,-89.90" - } + {"demands": ["1.0 SD", "0.2 SA"], "units": ["cm", "zzz"], "loc": "35.84,-89.90"} ] response = hazardsvc.post_earthquake_hazard_values( - "5b902cb273c3371e1236b36b", - payload + "5b902cb273c3371e1236b36b", payload ) - assert len(response) == len(payload) and response[0]['hazardValues'][1] == -9999.3 + assert len(response) == len(payload) and response[0]["hazardValues"][1] == -9999.3 def test_bad_format_post_earthquake_hazard_values(hazardsvc): payload = [ - { - "demands": ["1.0 SD", "0.2 SA"], - "units": ["cm", "g"], - "loc": "35.84-89.90" - } + {"demands": ["1.0 SD", "0.2 SA"], "units": ["cm", "g"], "loc": "35.84-89.90"} ] try: - hazardsvc.post_earthquake_hazard_values( - "5b902cb273c3371e1236b36b", - payload - ) + hazardsvc.post_earthquake_hazard_values("5b902cb273c3371e1236b36b", payload) except requests.exceptions.HTTPError as e: assert e.response.status_code == 400 else: @@ -107,48 +105,44 @@ def test_bad_format_post_earthquake_hazard_values(hazardsvc): def test_get_liquefaction_values(hazardsvc): - liq_vals = hazardsvc.get_liquefaction_values("5b902cb273c3371e1236b36b", - "5a284f53c7d30d13bc08249c", - "in", - ["35.18,-90.076", - "35.19,-90.0178"]) - assert liq_vals[0]['pgd'] == 94.28155130685825 and liq_vals[1][ - 'pgd'] == 103.2176731165868 + liq_vals = hazardsvc.get_liquefaction_values( + "5b902cb273c3371e1236b36b", + "5a284f53c7d30d13bc08249c", + "in", + ["35.18,-90.076", "35.19,-90.0178"], + ) + assert ( + liq_vals[0]["pgd"] == 94.28155130685825 + and liq_vals[1]["pgd"] == 103.2176731165868 + ) def test_post_liquefaction_values(hazardsvc): payload = [ - { - "demands": ["pgd", "pgd"], - "units": ["cm", "in"], - "loc": "35.19,-90.0178" - } + {"demands": ["pgd", "pgd"], "units": ["cm", "in"], "loc": "35.19,-90.0178"} ] response = hazardsvc.post_liquefaction_values( - "5b902cb273c3371e1236b36b", - "5a284f53c7d30d13bc08249c", - payload + "5b902cb273c3371e1236b36b", "5a284f53c7d30d13bc08249c", payload ) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['pgdValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['pgdValues']) \ - and response[0]['pgdValues'] == [262.17288971613044, 103.2176731165868] + 
assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["pgdValues"]) == len(response[0]["demands"]) + and all(isinstance(hazardval, float) for hazardval in response[0]["pgdValues"]) + and response[0]["pgdValues"] == [262.17288971613044, 103.2176731165868] + ) def test_get_soil_amplification_value(hazardsvc): """ test /earthquakes/soil/amplification endpoint """ - soil_amplification_value = hazardsvc.get_soil_amplification_value("NEHRP", - "5a284f20c7d30d13bc081aa6", - 32.3547, - -89.3985, - "pga", - 0.2, "A") + soil_amplification_value = hazardsvc.get_soil_amplification_value( + "NEHRP", "5a284f20c7d30d13bc081aa6", 32.3547, -89.3985, "pga", 0.2, "A" + ) assert soil_amplification_value == 0.8 @@ -168,20 +162,25 @@ def test_create_and_delete_earthquake(hazardsvc): Test creating both model and dataset based earthquakes. Also deletes the created earthquakes """ # Dataset Based Earthquake - with open(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset.json"), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset.json"), "r") as file: eq_dataset_json = file.read() - file_paths = [str(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-SA.tif")), - str(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-PGA.tif"))] + file_paths = [ + str(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-SA.tif")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "eq-dataset-PGA.tif")), + ] dataset_response = hazardsvc.create_earthquake(eq_dataset_json, file_paths) - assert dataset_response["id"] is not None and dataset_response["hazardDatasets"][1]["datasetId"] is not None + assert ( + dataset_response["id"] is not None + and dataset_response["hazardDatasets"][1]["datasetId"] is not None + ) del_response = hazardsvc.delete_earthquake(dataset_response["id"]) assert del_response["id"] is not None # Model Based Earthquake without files - with open(os.path.join(pyglobals.TEST_DATA_DIR, "eq-model.json"), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "eq-model.json"), "r") as file: eqmodel_json = file.read() model_response = hazardsvc.create_earthquake(eqmodel_json) @@ -194,10 +193,12 @@ def test_create_and_delete_earthquake(hazardsvc): def test_get_earthquake_aleatory_uncertainty(hazardsvc): hazard_id = "5c535f57c5c0e4ccead71a1a" demand_type = "PGA" - model_response = hazardsvc. 
\ - get_earthquake_aleatory_uncertainty(hazard_id, demand_type) - assert model_response[demand_type] is not None and \ - (0 < model_response[demand_type] <= 1) + model_response = hazardsvc.get_earthquake_aleatory_uncertainty( + hazard_id, demand_type + ) + assert model_response[demand_type] is not None and ( + 0 < model_response[demand_type] <= 1 + ) def test_get_earthquake_variance(hazardsvc): @@ -208,27 +209,26 @@ def test_get_earthquake_variance(hazardsvc): points = ["35.927, -89.919"] model_response = hazardsvc.get_earthquake_variance( - hazard_id, variance_type, demand_type, demand_unit, points) - assert model_response[0] is not None and \ - (0 < model_response[0]["variance"] <= 1) + hazard_id, variance_type, demand_type, demand_unit, points + ) + assert model_response[0] is not None and (0 < model_response[0]["variance"] <= 1) def test_get_tornado_hazard_metadata_list(hazardsvc): response = hazardsvc.get_tornado_hazard_metadata_list() - assert len(response) > 0 and 'id' in response[0].keys() + assert len(response) > 0 and "id" in response[0].keys() def test_get_tornado_hazard_metadata(hazardsvc): """ Testing get tornado/{id} """ - response = hazardsvc.get_tornado_hazard_metadata( - "5df913b83494fe000861a743") - assert response['id'] == "5df913b83494fe000861a743" + response = hazardsvc.get_tornado_hazard_metadata("5df913b83494fe000861a743") + assert response["id"] == "5df913b83494fe000861a743" def test_create_and_delete_tornado_scenario(hazardsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "tornado.json"), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "tornado.json"), "r") as file: scenario = file.read() response = hazardsvc.create_tornado_scenario(scenario) assert response["id"] is not None @@ -239,71 +239,62 @@ def test_create_and_delete_tornado_scenario(hazardsvc): def test_post_tornado_hazard_values(hazardsvc): # point out of coverage, should return none - payload = [ - { - "demands": ["wind"], - "units": ["mph"], - "loc": "37.07,-94.50" - } - ] - response = hazardsvc.post_tornado_hazard_values( - "5df913b83494fe000861a743", - payload + payload = [{"demands": ["wind"], "units": ["mph"], "loc": "37.07,-94.50"}] + response = hazardsvc.post_tornado_hazard_values("5df913b83494fe000861a743", payload) + + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and response[0]["hazardValues"] == [None] ) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and response[0]['hazardValues'] == [None] - def test_get_tsunami_hazard_metadata_list(hazardsvc): response = hazardsvc.get_tsunami_hazard_metadata_list() - assert len(response) > 0 and 'id' in response[0].keys() + assert len(response) > 0 and "id" in response[0].keys() def test_get_tsunami_hazard_metadata(hazardsvc): """ Testing get tsunami/{id} """ - response = hazardsvc.get_tsunami_hazard_metadata( - "5bc9e25ef7b08533c7e610dc") - assert response['id'] == "5bc9e25ef7b08533c7e610dc" + response = hazardsvc.get_tsunami_hazard_metadata("5bc9e25ef7b08533c7e610dc") + assert response["id"] == "5bc9e25ef7b08533c7e610dc" def test_post_tsunami_hazard_values(hazardsvc): - payload = [ - { - "demands": ["hmax"], - "units": ["m"], - "loc": "46.006,-123.935" - } - ] + payload = 
[{"demands": ["hmax"], "units": ["m"], "loc": "46.006,-123.935"}] - response = hazardsvc.post_tsunami_hazard_values( - "5bc9ead7f7b08533c7e610e0", - payload - ) + response = hazardsvc.post_tsunami_hazard_values("5bc9ead7f7b08533c7e610e0", payload) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['hazardValues']) \ - and response[0]['hazardValues'] == [5.900000095367432] + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and all( + isinstance(hazardval, float) for hazardval in response[0]["hazardValues"] + ) + and response[0]["hazardValues"] == [5.900000095367432] + ) def test_create_and_delete_tsunami_hazard(hazardsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "tsunami.json"), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "tsunami.json"), "r") as file: tsunami_json = file.read() - file_paths = [str(os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Vmax.tif")), - str(os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Mmax.tif")), - str(os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Hmax.tif"))] + file_paths = [ + str(os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Vmax.tif")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Mmax.tif")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "Tsu_100yr_Hmax.tif")), + ] response = hazardsvc.create_tsunami_hazard(tsunami_json, file_paths) - assert response["id"] is not None and response["hazardDatasets"][1][ - "datasetId"] is not None + assert ( + response["id"] is not None + and response["hazardDatasets"][1]["datasetId"] is not None + ) del_response = hazardsvc.delete_tsunami(response["id"]) assert del_response["id"] is not None @@ -311,17 +302,23 @@ def test_create_and_delete_tsunami_hazard(hazardsvc): def test_create_and_delete_hurricane(hazardsvc): """ - Also deletes the created dataset + Also deletes the created dataset """ - with open(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json"), 'r') as file: + with open( + os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json"), "r" + ) as file: hurricane_json = file.read() - file_paths = [str(os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif")), - str(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif")), - str(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"))] + file_paths = [ + str(os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif")), + ] post_response = hazardsvc.create_hurricane(hurricane_json, file_paths) - assert post_response["id"] is not None and post_response["hazardDatasets"][1][ - "datasetId"] is not None + assert ( + post_response["id"] is not None + and post_response["hazardDatasets"][1]["datasetId"] is not None + ) del_response = hazardsvc.delete_hurricane(post_response["id"]) assert del_response["id"] is not None @@ -329,12 +326,12 @@ def test_create_and_delete_hurricane(hazardsvc): def test_get_hurricane_metadata(hazardsvc): response = hazardsvc.get_hurricane_metadata("5f10837c01d3241d77729a4f") - assert response['id'] == "5f10837c01d3241d77729a4f" + 
assert response["id"] == "5f10837c01d3241d77729a4f" def test_get_hurricane_metadata_list(hazardsvc): response = hazardsvc.get_hurricane_metadata_list() - assert len(response) > 0 and 'id' in response[0].keys() + assert len(response) > 0 and "id" in response[0].keys() def test_post_hurricane_values(hazardsvc): @@ -342,31 +339,34 @@ def test_post_hurricane_values(hazardsvc): { "demands": ["waveHeight", "surgeLevel"], "units": ["m", "m"], - "loc": "29.22,-95.06" + "loc": "29.22,-95.06", }, { "demands": ["waveHeight", "surgeLevel"], "units": ["cm", "cm"], - "loc": "29.23,-95.05" + "loc": "29.23,-95.05", }, { "demands": ["waveHeight", "inundationDuration"], "units": ["in", "hr"], - "loc": "29.22,-95.06" - } + "loc": "29.22,-95.06", + }, ] response = hazardsvc.post_hurricane_hazard_values( - "5f10837c01d3241d77729a4f", - payload + "5f10837c01d3241d77729a4f", payload ) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['hazardValues']) \ - and response[0]['hazardValues'] == [1.54217780024576, 3.663398872786693] + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and all( + isinstance(hazardval, float) for hazardval in response[0]["hazardValues"] + ) + and response[0]["hazardValues"] == [1.54217780024576, 3.663398872786693] + ) def test_search_hurricanes(hazardsvc): @@ -376,16 +376,20 @@ def test_search_hurricanes(hazardsvc): def test_create_and_delete_flood(hazardsvc): """ - Also deletes the created dataset + Also deletes the created dataset """ - with open(os.path.join(pyglobals.TEST_DATA_DIR, "flood-dataset.json"), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "flood-dataset.json"), "r") as file: flood_json = file.read() - file_paths = [str(os.path.join(pyglobals.TEST_DATA_DIR, "flood-inundationDepth-50ft.tif")), - str(os.path.join(pyglobals.TEST_DATA_DIR, "flood-WSE-50ft.tif"))] + file_paths = [ + str(os.path.join(pyglobals.TEST_DATA_DIR, "flood-inundationDepth-50ft.tif")), + str(os.path.join(pyglobals.TEST_DATA_DIR, "flood-WSE-50ft.tif")), + ] post_response = hazardsvc.create_flood(flood_json, file_paths) - assert post_response["id"] is not None and post_response["hazardDatasets"][1][ - "datasetId"] is not None + assert ( + post_response["id"] is not None + and post_response["hazardDatasets"][1]["datasetId"] is not None + ) del_response = hazardsvc.delete_flood(post_response["id"]) assert del_response["id"] is not None @@ -394,34 +398,31 @@ def test_create_and_delete_flood(hazardsvc): def test_get_flood_metadata(hazardsvc): # TODO add id of published flood response = hazardsvc.get_flood_metadata("5f4d02e99f43ee0dde768406") - assert response['id'] == "5f4d02e99f43ee0dde768406" + assert response["id"] == "5f4d02e99f43ee0dde768406" def test_get_flood_metadata_list(hazardsvc): response = hazardsvc.get_flood_metadata_list() - assert len(response) > 0 and 'id' in response[0].keys() + assert len(response) > 0 and "id" in response[0].keys() def test_post_flood_hazard_values(hazardsvc): payload = [ - { - "demands": ["waterSurfaceElevation"], - "units": ["m"], - "loc": "34.60,-79.16" - } + {"demands": ["waterSurfaceElevation"], "units": ["m"], "loc": "34.60,-79.16"} ] 
- response = hazardsvc.post_flood_hazard_values( - "5f4d02e99f43ee0dde768406", - payload - ) + response = hazardsvc.post_flood_hazard_values("5f4d02e99f43ee0dde768406", payload) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['hazardValues']) \ - and response[0]['hazardValues'] == [41.970442822265625] + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and all( + isinstance(hazardval, float) for hazardval in response[0]["hazardValues"] + ) + and response[0]["hazardValues"] == [41.970442822265625] + ) def test_search_floods(hazardsvc): @@ -431,7 +432,7 @@ def test_search_floods(hazardsvc): @pytest.mark.skip(reason="performance issues") def test_create_and_delete_hurricane_windfield(hazardsvc): - with open(os.path.join(pyglobals.TEST_DATA_DIR, "hurricanewf.json"), 'r') as file: + with open(os.path.join(pyglobals.TEST_DATA_DIR, "hurricanewf.json"), "r") as file: hurr_wf_inputs = file.read() post_response = hazardsvc.create_hurricane_windfield(hurr_wf_inputs) @@ -444,45 +445,41 @@ def test_create_and_delete_hurricane_windfield(hazardsvc): @pytest.mark.skip(reason="performance issues") def test_get_hurricanewf_metadata(hazardsvc): response = hazardsvc.get_hurricanewf_metadata("5bd3d6a1f242fe0cf903cb0e") - assert response['id'] == "5bd3d6a1f242fe0cf903cb0e" + assert response["id"] == "5bd3d6a1f242fe0cf903cb0e" @pytest.mark.skip(reason="performance issues") def test_get_hurricanewf_metadata_list(hazardsvc): response = hazardsvc.get_hurricanewf_metadata_list(coast="florida") - assert len(response) > 0 and 'id' in response[0].keys() + assert len(response) > 0 and "id" in response[0].keys() def test_post_hurricanewf_hazard_values(hazardsvc): - payload = [ - { - "demands": ["3s", "60s"], - "units": ["mph", "mph"], - "loc": "28,-81" - } - ] + payload = [{"demands": ["3s", "60s"], "units": ["mph", "mph"], "loc": "28,-81"}] elevation = 10 roughness = 0.03 response = hazardsvc.post_hurricanewf_hazard_values( - "5cffdcf35648c404a6414f7e", - payload, - elevation, - roughness + "5cffdcf35648c404a6414f7e", payload, elevation, roughness ) - assert len(response) == len(payload) \ - and len(response[0]['demands']) == len(payload[0]['demands']) \ - and response[0]['units'] == payload[0]['units'] \ - and len(response[0]['hazardValues']) == len(response[0]['demands']) \ - and all(isinstance(hazardval, float) for hazardval in response[0]['hazardValues']) \ - and response[0]['hazardValues'] == [81.57440785011988, 66.83292578974765] + assert ( + len(response) == len(payload) + and len(response[0]["demands"]) == len(payload[0]["demands"]) + and response[0]["units"] == payload[0]["units"] + and len(response[0]["hazardValues"]) == len(response[0]["demands"]) + and all( + isinstance(hazardval, float) for hazardval in response[0]["hazardValues"] + ) + and response[0]["hazardValues"] == [81.57440785011988, 66.83292578974765] + ) @pytest.mark.skip(reason="performance issues") def test_get_hurricanewf_json(hazardsvc): - hjson = hazardsvc.get_hurricanewf_json("florida", 1, -83, "28,-81", "3s", "kmph", 6, 10, - "circular") + hjson = hazardsvc.get_hurricanewf_json( + "florida", 1, -83, "28,-81", "3s", 
"kmph", 6, 10, "circular" + ) assert len(hjson["hurricaneSimulations"]) > 0 diff --git a/tests/pyincore/test_semanticservice.py b/tests/pyincore/test_semanticservice.py index f76b502e4..49ae64d2d 100644 --- a/tests/pyincore/test_semanticservice.py +++ b/tests/pyincore/test_semanticservice.py @@ -4,19 +4,15 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import json -import os import pytest from requests.exceptions import HTTPError -from pyincore import globals as pyglobals -from pyincore import SemanticService - from pyincore.globals import LOGGER logger = LOGGER + @pytest.fixture def semanticsvc(): return pytest.semanticsvc @@ -30,22 +26,20 @@ def test_get_all_semantic_types(semanticsvc): # get all semantic types without hyperlink hyperlink = False semantic_types = semanticsvc.get_all_semantic_types( - hyperlink=hyperlink, - order=order, - skip=skip, - limit=limit + hyperlink=hyperlink, order=order, skip=skip, limit=limit ) - assert len(semantic_types) == limit and len(semantic_types[0].split("/")) <= 1, "Should not have hyperlink" + assert ( + len(semantic_types) == limit and len(semantic_types[0].split("/")) <= 1 + ), "Should not have hyperlink" # get all semantic types with hyperlink hyperlink = True semantic_types = semanticsvc.get_all_semantic_types( - hyperlink=hyperlink, - order=order, - skip=skip, - limit=limit + hyperlink=hyperlink, order=order, skip=skip, limit=limit ) - assert len(semantic_types) == limit and len(semantic_types[0].split("/")) > 1, "Should have hyperlink" + assert ( + len(semantic_types) == limit and len(semantic_types[0].split("/")) > 1 + ), "Should have hyperlink" def test_get_semantic_type_by_name(semanticsvc): @@ -55,12 +49,16 @@ def test_get_semantic_type_by_name(semanticsvc): # find semantic type by name which exists semantic_types = semanticsvc.get_semantic_type_by_name(semantic_type_exists) # Checks semantic dictionary is not empty - assert type(semantic_types) == dict and bool(dict), f"Should find one semantic type as {semantic_type_exists} exists" + assert type(semantic_types) is dict and bool( + dict + ), f"Should find one semantic type as {semantic_type_exists} exists" # find semantic type by name which does not exist # this should raise error with pytest.raises(Exception) as excinfo: semantic_types = semanticsvc.get_semantic_type_by_name(semantic_type_not_exists) - assert excinfo == HTTPError, f"Should raise HTTPError as {semantic_type_not_exists} does not exist" + assert ( + excinfo == HTTPError + ), f"Should raise HTTPError as {semantic_type_not_exists} does not exist" def test_search_semantic_types(semanticsvc): @@ -68,8 +66,11 @@ def test_search_semantic_types(semanticsvc): search_term_not_exists = "asdwerueidj" # search for term that should find an entry semantic_types = semanticsvc.search_semantic_type(search_term_exists) - assert len(semantic_types) > 0, f"Should find at least one semantic type as {search_term_exists} exists" + assert ( + len(semantic_types) > 0 + ), f"Should find at least one semantic type as {search_term_exists} exists" # search for term that should not find an entry semantic_types = semanticsvc.search_semantic_type(search_term_not_exists) - assert len(semantic_types) == 0, f"Should not find any semantic type as {search_term_not_exists} does not exist" - + assert ( + len(semantic_types) == 0 + ), f"Should not find any semantic type as {search_term_not_exists} does not exist" diff --git a/tests/pyincore/test_spaceservice.py 
b/tests/pyincore/test_spaceservice.py index aa71ddd8f..766428f66 100644 --- a/tests/pyincore/test_spaceservice.py +++ b/tests/pyincore/test_spaceservice.py @@ -3,7 +3,7 @@ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import os + import pytest import json @@ -14,9 +14,12 @@ def spacesvc(monkeypatch): return pytest.spacesvc -@pytest.mark.parametrize("space,expected", [ - ({'mettadatta': {'name': 'bad'}}, 400), -]) +@pytest.mark.parametrize( + "space,expected", + [ + ({"mettadatta": {"name": "bad"}}, 400), + ], +) def test_create_space_failed(spacesvc, space, expected): # assert that trying to create a space with invalid data throws an error with pytest.raises(Exception): @@ -28,22 +31,19 @@ def test_create_update_and_delete_space(spacesvc): # assert that trying to create a space with invalid data throws an error space_json = { "privileges": { - "groupPrivileges": { - "incore_admin": "ADMIN", - "incore_ncsa": "ADMIN" - } - }, - "metadata": { - "name": "test-space" + "groupPrivileges": {"incore_admin": "ADMIN", "incore_ncsa": "ADMIN"} }, - "members": [] + "metadata": {"name": "test-space"}, + "members": [], } create_json = spacesvc.create_space(json.dumps(space_json)) assert create_json["id"] is not None assert create_json["metadata"]["name"] == "test-space" # update the space - update_json = spacesvc.update_space(create_json["id"], json.dumps({'metadata': {'name': 'test-space-updated'}})) + update_json = spacesvc.update_space( + create_json["id"], json.dumps({"metadata": {"name": "test-space-updated"}}) + ) assert update_json["id"] is not None assert update_json["metadata"]["name"] == "test-space-updated" @@ -57,10 +57,13 @@ def test_add_and_remove_member(spacesvc): assert member_id in space["members"] -@pytest.mark.parametrize("space_id,space,expected", [ - ("5c89287d5648c42a917569d8", {'metadata': {'name': 'test-space'}}, 400), - ("5c75bd1a9e503f2ea0500000", {'metadata': {'name': 'not found'}}, 404), -]) +@pytest.mark.parametrize( + "space_id,space,expected", + [ + ("5c89287d5648c42a917569d8", {"metadata": {"name": "test-space"}}, 400), + ("5c75bd1a9e503f2ea0500000", {"metadata": {"name": "not found"}}, 404), + ], +) def test_update_space(spacesvc, space_id, space, expected): """ If the new name already exists, it will throw a bad request exception @@ -71,7 +74,7 @@ def test_update_space(spacesvc, space_id, space, expected): def test_get_spaces(spacesvc): metadata = spacesvc.get_spaces() - assert 'members' in metadata[0].keys() + assert "members" in metadata[0].keys() def test_get_space(spacesvc): diff --git a/tests/pyincore/utils/test_analysisutil.py b/tests/pyincore/utils/test_analysisutil.py index 7bc2b8926..ec507a90a 100644 --- a/tests/pyincore/utils/test_analysisutil.py +++ b/tests/pyincore/utils/test_analysisutil.py @@ -2,17 +2,21 @@ import pytest -@pytest.mark.parametrize("hazard_vals, hazard_type, expected", [ - ([], "tornado", "error"), - ([1.5, 2], "tornado", "yes"), - ([None, None], "tsunami", "no"), - ([1.5, None], "hurricane", "partial"), - ([1.5], "earthquake", "yes"), - ([1, None, -9999.1], "tornado", "error"), - ([1.5, 2.5], "hurricanewindfield", "n/a"), - ([-9999.99], "flood", "error"), - -]) +@pytest.mark.parametrize( + "hazard_vals, hazard_type, expected", + [ + ([], "tornado", "error"), + ([1.5, 2], "tornado", "yes"), + ([None, None], "tsunami", "no"), + ([1.5, None], "hurricane", "partial"), + ([1.5], 
"earthquake", "yes"), + ([1, None, -9999.1], "tornado", "error"), + ([1.5, 2.5], "hurricanewindfield", "n/a"), + ([-9999.99], "flood", "error"), + ], +) def test_get_exposure_from_hazard_values(hazard_vals, hazard_type, expected): - hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) + hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values( + hazard_vals, hazard_type + ) assert hazard_exposure == expected diff --git a/tests/pyincore/utils/test_cgecsvoutputjson.py b/tests/pyincore/utils/test_cgecsvoutputjson.py index 9916b17e6..8ab7efd0b 100644 --- a/tests/pyincore/utils/test_cgecsvoutputjson.py +++ b/tests/pyincore/utils/test_cgecsvoutputjson.py @@ -1,7 +1,7 @@ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import pytest + from pyincore.analyses.galvestoncge.galvestoncge import GalvestonCGEModel from pyincore.analyses.saltlakecge.saltlakecge import SaltLakeCGEModel @@ -26,28 +26,65 @@ def run_convert_SLC_cge_json_path(testpath): for r in region: categories.append(h + "_" + r) - cge_json.get_cge_household_count(None, - os.path.join(testpath, "household-count.csv"), - "slc_cge_total_household_count.json", income_categories=categories) - cge_json.get_cge_gross_income(None, - os.path.join(testpath, "gross-income.csv"), - "slc_cge_total_household_income.json", income_categories=categories) + cge_json.get_cge_household_count( + None, + os.path.join(testpath, "household-count.csv"), + "slc_cge_total_household_count.json", + income_categories=categories, + ) + cge_json.get_cge_gross_income( + None, + os.path.join(testpath, "gross-income.csv"), + "slc_cge_total_household_income.json", + income_categories=categories, + ) categories = [] - for d in ["AG_MI", "UTIL", "CONS", "MANU", "COMMER", "EDU", "HEALTH", "ART_ACC", "RELIG"]: + for d in [ + "AG_MI", + "UTIL", + "CONS", + "MANU", + "COMMER", + "EDU", + "HEALTH", + "ART_ACC", + "RELIG", + ]: for r in region: categories.append(d + "_" + r) - cge_json.get_cge_employment(None, None, os.path.join(testpath, "pre-disaster-factor-demand.csv"), - os.path.join(testpath, "post-disaster-factor-demand.csv"), - "slc_cge_employment.json", demand_categories=categories) + cge_json.get_cge_employment( + None, + None, + os.path.join(testpath, "pre-disaster-factor-demand.csv"), + os.path.join(testpath, "post-disaster-factor-demand.csv"), + "slc_cge_employment.json", + demand_categories=categories, + ) categories = [] - for d in ["AG_MI", "UTIL", "CONS", "MANU", "COMMER", "EDU", "HEALTH", "ART_ACC", "RELIG", "HS1", "HS2", "HS3"]: + for d in [ + "AG_MI", + "UTIL", + "CONS", + "MANU", + "COMMER", + "EDU", + "HEALTH", + "ART_ACC", + "RELIG", + "HS1", + "HS2", + "HS3", + ]: for r in region: categories.append(d + "_" + r) - cge_json.get_cge_domestic_supply(None, - os.path.join(testpath, "domestic-supply.csv"), "slc_cge_domestic_supply.json", - supply_categories=categories) + cge_json.get_cge_domestic_supply( + None, + os.path.join(testpath, "domestic-supply.csv"), + "slc_cge_domestic_supply.json", + supply_categories=categories, + ) return True @@ -56,22 +93,36 @@ def run_convert_Joplin_cge_json_path(testpath): cge_json = CGEOutputProcess() categories = ["HH1", "HH2", "HH3", "HH4", "HH5"] - cge_json.get_cge_household_count(None, - os.path.join(testpath, "household-count.csv"), - "joplin_cge_total_household_count.json", income_categories=categories) - 
cge_json.get_cge_gross_income(None, - os.path.join(testpath, "gross-income.csv"), - "joplin_cge_total_household_income.json", income_categories=categories) + cge_json.get_cge_household_count( + None, + os.path.join(testpath, "household-count.csv"), + "joplin_cge_total_household_count.json", + income_categories=categories, + ) + cge_json.get_cge_gross_income( + None, + os.path.join(testpath, "gross-income.csv"), + "joplin_cge_total_household_income.json", + income_categories=categories, + ) categories = ["GOODS", "TRADE", "OTHER"] - cge_json.get_cge_employment(None, None, os.path.join(testpath, "pre-disaster-factor-demand.csv"), - os.path.join(testpath, "post-disaster-factor-demand.csv"), - "joplin_cge_employment.json", demand_categories=categories) + cge_json.get_cge_employment( + None, + None, + os.path.join(testpath, "pre-disaster-factor-demand.csv"), + os.path.join(testpath, "post-disaster-factor-demand.csv"), + "joplin_cge_employment.json", + demand_categories=categories, + ) categories = ["Goods", "Trades", "Others", "HS1", "HS2", "HS3"] - cge_json.get_cge_domestic_supply(None, - os.path.join(testpath, "domestic-supply.csv"), "joplin_cge_domestic_supply.json", - supply_categories=categories) + cge_json.get_cge_domestic_supply( + None, + os.path.join(testpath, "domestic-supply.csv"), + "joplin_cge_domestic_supply.json", + supply_categories=categories, + ) return True @@ -81,40 +132,89 @@ def run_convert_Galveston_cge_json_path(testpath): cge_json = CGEOutputProcess() region = ["I", "M"] - categories = ["IHH1", "IHH2", "IHH3", "IHH4", "IHH5", "MHH1", "MHH2", "MHH3", "MHH4", "MHH5"] - cge_json.get_cge_household_count(None, - os.path.join(testpath, "household-count.csv"), - "galveston_cge_total_household_count.json", income_categories=categories) - cge_json.get_cge_gross_income(None, - os.path.join(testpath, "gross-income.csv"), - "galveston_cge_total_household_income.json", income_categories=categories) + categories = [ + "IHH1", + "IHH2", + "IHH3", + "IHH4", + "IHH5", + "MHH1", + "MHH2", + "MHH3", + "MHH4", + "MHH5", + ] + cge_json.get_cge_household_count( + None, + os.path.join(testpath, "household-count.csv"), + "galveston_cge_total_household_count.json", + income_categories=categories, + ) + cge_json.get_cge_gross_income( + None, + os.path.join(testpath, "gross-income.csv"), + "galveston_cge_total_household_income.json", + income_categories=categories, + ) categories = [] - for d in ["AGMIN", "UTIL", "CONS", "MANU", "WHOLE", "RETAIL", "TRANS", "PROFSER", "REALE", "EDU", "HEALTH", "ART", - "ACCO"]: + for d in [ + "AGMIN", + "UTIL", + "CONS", + "MANU", + "WHOLE", + "RETAIL", + "TRANS", + "PROFSER", + "REALE", + "EDU", + "HEALTH", + "ART", + "ACCO", + ]: for r in region: categories.append(r + d) - cge_json.get_cge_employment(None, None, os.path.join(testpath, "pre-disaster-factor-demand.csv"), - os.path.join(testpath, "post-disaster-factor-demand.csv"), - "galveston_cge_employment.json", demand_categories=categories) + cge_json.get_cge_employment( + None, + None, + os.path.join(testpath, "pre-disaster-factor-demand.csv"), + os.path.join(testpath, "post-disaster-factor-demand.csv"), + "galveston_cge_employment.json", + demand_categories=categories, + ) categories = [] - for d in ["AGMIN", "UTIL", "CONS", "MANU", "WHOLE", "RETAIL", "TRANS", "PROFSER", "REALE", "EDU", "HEALTH", "ART", - "ACCO"]: + for d in [ + "AGMIN", + "UTIL", + "CONS", + "MANU", + "WHOLE", + "RETAIL", + "TRANS", + "PROFSER", + "REALE", + "EDU", + "HEALTH", + "ART", + "ACCO", + ]: for r in region: categories.append(r + 
d) # e.g.iAGMIN for d in ["HS1", "HS2", "HS3"]: for r in region: categories.append(d + r) # e.g.HS1I - cge_json.get_cge_domestic_supply(None, - os.path.join(testpath, "domestic-supply.csv"), - "galveston_cge_domestic_supply.json", - supply_categories=categories) + cge_json.get_cge_domestic_supply( + None, + os.path.join(testpath, "domestic-supply.csv"), + "galveston_cge_domestic_supply.json", + supply_categories=categories, + ) return True -if __name__ == '__main__': - +if __name__ == "__main__": # # run slc cge client = IncoreClient(pyglobals.INCORE_API_DEV_URL) saltlake_cge = SaltLakeCGEModel(client) diff --git a/tests/pyincore/utils/test_dataprocessutil.py b/tests/pyincore/utils/test_dataprocessutil.py index 83b888cf9..4d94a65ac 100644 --- a/tests/pyincore/utils/test_dataprocessutil.py +++ b/tests/pyincore/utils/test_dataprocessutil.py @@ -19,10 +19,17 @@ def test_get_mapped_result(client): archetype_id = "5fca915fb34b193f7a44059b" dmg_ret_json, func_ret_json, max_state_df = util.get_mapped_result_from_dataset_id( - client, bldg_dataset_id, bldg_dmg_dataset_id, bldg_func_state_dataset_id, archetype_id) + client, + bldg_dataset_id, + bldg_dmg_dataset_id, + bldg_func_state_dataset_id, + archetype_id, + ) assert "by_cluster" in dmg_ret_json.keys() and "by_category" in dmg_ret_json.keys() - assert "by_cluster" in func_ret_json.keys() and "by_category" in func_ret_json.keys() + assert ( + "by_cluster" in func_ret_json.keys() and "by_category" in func_ret_json.keys() + ) assert "max_state" in max_state_df._info_axis.values @@ -31,30 +38,46 @@ def test_get_mapped_result_from_analysis(client): bldg_dmg_dataset_id = "5f9868c00ace240b22a7f2a5" # legacy DS_name # bldg_dmg_dataset_id = "602d96e4b1db9c28aeeebdce" # new DS_name - dmg_result_dataset = Dataset.from_data_service(bldg_dmg_dataset_id, DataService(client)) + dmg_result_dataset = Dataset.from_data_service( + bldg_dmg_dataset_id, DataService(client) + ) archetype_id = "5fca915fb34b193f7a44059b" bldg_func_state_dataset_id = "660d9435ce705a7e547a664e" - bldg_func_state_dataset = Dataset.from_data_service(bldg_func_state_dataset_id, DataService(client)) + bldg_func_state_dataset = Dataset.from_data_service( + bldg_func_state_dataset_id, DataService(client) + ) dmg_ret_json, func_ret_json, max_state_df = util.get_mapped_result_from_analysis( - client, bldg_dataset_id, dmg_result_dataset, bldg_func_state_dataset, archetype_id) + client, + bldg_dataset_id, + dmg_result_dataset, + bldg_func_state_dataset, + archetype_id, + ) assert "by_cluster" in dmg_ret_json.keys() and "by_category" in dmg_ret_json.keys() - assert "by_cluster" in func_ret_json.keys() and "by_category" in func_ret_json.keys() + assert ( + "by_cluster" in func_ret_json.keys() and "by_category" in func_ret_json.keys() + ) assert "max_state" in max_state_df._info_axis.values -def _functionality_cluster(client, archetype_mapping="5fca915fb34b193f7a44059b", - building_dataset_id="5fa0b132cc6848728b66948d", - bldg_func_state_id="5f0f6fbfb922f96f4e989ed8", - arch_column="archetype", - title="joplin_mcs"): +def _functionality_cluster( + client, + archetype_mapping="5fca915fb34b193f7a44059b", + building_dataset_id="5fa0b132cc6848728b66948d", + bldg_func_state_id="5f0f6fbfb922f96f4e989ed8", + arch_column="archetype", + title="joplin_mcs", +): dataservice = DataService(client) # Archetype mapping file - archetype_mapping_dataset = Dataset.from_data_service(archetype_mapping, dataservice) + archetype_mapping_dataset = Dataset.from_data_service( + archetype_mapping, dataservice + ) 
archetype_mapping_path = archetype_mapping_dataset.get_file_path() arch_mapping = pd.read_csv(archetype_mapping_path) @@ -73,39 +96,53 @@ def _functionality_cluster(client, archetype_mapping="5fca915fb34b193f7a44059b", bldg_func_state.loc[0, "failure"] = "" bldg_func_state.loc[1, "failure"] = "" - ret_json = util.create_mapped_func_result(buildings, bldg_func_state, arch_mapping, arch_column) + ret_json = util.create_mapped_func_result( + buildings, bldg_func_state, arch_mapping, arch_column + ) with open(title + "_cluster.json", "w") as f: json.dump(ret_json, f, indent=2) def test_joplin_mcs_cluster(client): - _functionality_cluster(client, archetype_mapping="5fca915fb34b193f7a44059b", - building_dataset_id="5fa0b132cc6848728b66948d", - bldg_func_state_id="5f0f6fbfb922f96f4e989ed8", - arch_column="archetype", - title="joplin_mcs") + _functionality_cluster( + client, + archetype_mapping="5fca915fb34b193f7a44059b", + building_dataset_id="5fa0b132cc6848728b66948d", + bldg_func_state_id="5f0f6fbfb922f96f4e989ed8", + arch_column="archetype", + title="joplin_mcs", + ) def test_joplin_bldg_func_cluster(client): - _functionality_cluster(client, archetype_mapping="5fca915fb34b193f7a44059b", - building_dataset_id="5fa0b132cc6848728b66948d", - bldg_func_state_id="660d9435ce705a7e547a664e", - arch_column="archetype", - title="joplin_bldg_func") + _functionality_cluster( + client, + archetype_mapping="5fca915fb34b193f7a44059b", + building_dataset_id="5fa0b132cc6848728b66948d", + bldg_func_state_id="660d9435ce705a7e547a664e", + arch_column="archetype", + title="joplin_bldg_func", + ) def test_galveston_mcs_cluster(client): - _functionality_cluster(client, archetype_mapping="6420befbb18d026e7c7dbafc", - building_dataset_id="63ff69a96d3b2a308baaca12", - bldg_func_state_id="660d95bece705a7e547a6654", - arch_column="arch_wind", - title="galveston_mcs") + _functionality_cluster( + client, + archetype_mapping="6420befbb18d026e7c7dbafc", + building_dataset_id="63ff69a96d3b2a308baaca12", + bldg_func_state_id="660d95bece705a7e547a6654", + arch_column="arch_wind", + title="galveston_mcs", + ) def test_galveston_bldg_func_cluster(client): - _functionality_cluster(client, archetype_mapping="6420befbb18d026e7c7dbafc", - building_dataset_id="63ff69a96d3b2a308baaca12", - bldg_func_state_id="660d97bfce705a7e547a6659", - arch_column="arch_wind", - title="galveston_bldg_func") + _functionality_cluster( + client, + archetype_mapping="6420befbb18d026e7c7dbafc", + building_dataset_id="63ff69a96d3b2a308baaca12", + bldg_func_state_id="660d97bfce705a7e547a6659", + arch_column="arch_wind", + title="galveston_bldg_func", + ) diff --git a/tests/pyincore/utils/test_datasetutil.py b/tests/pyincore/utils/test_datasetutil.py index c9dc7374c..56693352f 100644 --- a/tests/pyincore/utils/test_datasetutil.py +++ b/tests/pyincore/utils/test_datasetutil.py @@ -10,21 +10,23 @@ def client(): def test_join_table_dataset_with_source_dataset(client): - building_damage_id = '5a296b53c7d30d4af5378cd5' + building_damage_id = "5a296b53c7d30d4af5378cd5" dataset = Dataset.from_data_service(building_damage_id, DataService(client)) joined_gdf = util.join_table_dataset_with_source_dataset(dataset, client) # assert if the fields from each dataset exist # note that guid is out in here since it got indexed - assert 'geometry' in joined_gdf.keys() and 'meandamage' in joined_gdf.keys() + assert "geometry" in joined_gdf.keys() and "meandamage" in joined_gdf.keys() def test_join_datasets(client): - building_id = '5a284f0bc7d30d13bc081a28' - building_damage_id 
= '5a296b53c7d30d4af5378cd5' + building_id = "5a284f0bc7d30d13bc081a28" + building_damage_id = "5a296b53c7d30d4af5378cd5" bldg_dataset = Dataset.from_data_service(building_id, DataService(client)) - bldg_dmg_dataset = Dataset.from_data_service(building_damage_id, DataService(client)) + bldg_dmg_dataset = Dataset.from_data_service( + building_damage_id, DataService(client) + ) joined_gdf = util.join_datasets(bldg_dataset, bldg_dmg_dataset) # assert if the fields from each dataset exist - assert 'geometry' in joined_gdf.keys() and 'meandamage' in joined_gdf.keys() + assert "geometry" in joined_gdf.keys() and "meandamage" in joined_gdf.keys() diff --git a/tests/pyincore/utils/test_networkutil.py b/tests/pyincore/utils/test_networkutil.py index c1d48d5b5..7c26b003f 100644 --- a/tests/pyincore/utils/test_networkutil.py +++ b/tests/pyincore/utils/test_networkutil.py @@ -22,39 +22,63 @@ def client(): def test_build_link_by_node(): - node_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/epn_nodes.shp") - graph_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/graph.csv") - out_link_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/out_links.shp") + node_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/epn_nodes.shp" + ) + graph_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/graph.csv" + ) + out_link_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/out_links.shp" + ) node_id = "NODENWID" - networkutil.build_link_by_node(node_file_path, graph_file_path, node_id, out_link_file_path) + networkutil.build_link_by_node( + node_file_path, graph_file_path, node_id, out_link_file_path + ) assert True def test_build_node_by_link(): - link_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/epn_links.shp") - out_node_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/out_nodes.shp") - out_graph_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/out_graph.csv") + link_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/epn_links.shp" + ) + out_node_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/out_nodes.shp" + ) + out_graph_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/out_graph.csv" + ) link_id = "linknwid" fromnode = "fromnode" tonode = "tonode" - networkutil.build_node_by_link(link_file_path, link_id, fromnode, tonode, out_node_file_path, - out_graph_file_path) + networkutil.build_node_by_link( + link_file_path, + link_id, + fromnode, + tonode, + out_node_file_path, + out_graph_file_path, + ) assert True def test_create_network_graph_from_link(): - link_file_path = os.path.join(pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/epn_links.shp") + link_file_path = os.path.join( + pyglobals.PYINCORE_ROOT_FOLDER, "tests/data/network/epn_links.shp" + ) fromnode = "fromnode" tonode = "tonode" - graph, coords = networkutil.create_network_graph_from_link(link_file_path, fromnode, tonode) + graph, coords = networkutil.create_network_graph_from_link( + link_file_path, fromnode, tonode + ) if graph is not None and coords is not None: assert True @@ -66,6 +90,8 @@ def test_validate_network_node_ids(datasvc): node_id = "NODENWID" fromnode = "fromnode" tonode = "tonode" - validate = networkutil.validate_network_node_ids(network_dataset, fromnode, tonode, node_id) + validate = 
networkutil.validate_network_node_ids( + network_dataset, fromnode, tonode, node_id + ) assert validate diff --git a/tests/pyincore/utils/test_parser.py b/tests/pyincore/utils/test_parser.py index 8b9a1d010..a38f362a9 100644 --- a/tests/pyincore/utils/test_parser.py +++ b/tests/pyincore/utils/test_parser.py @@ -33,20 +33,23 @@ def test_exec_evaluator_repair(): def test_parser(): parser = Parser() - result = parser.parse("x^2").evaluate({'x': 4}) + result = parser.parse("x^2").evaluate({"x": 4}) assert result == 16 variable = parser.parse("log(x)*3").variables() - assert variable == ['x'] + assert variable == ["x"] - assert parser.parse("pow(x,y)").variables() == ['x', 'y'] + assert parser.parse("pow(x,y)").variables() == ["x", "y"] assert parser.parse("1").evaluate({}) == 1 - assert parser.parse('a').evaluate({'a': 2}) == 2 + assert parser.parse("a").evaluate({"a": 2}) == 2 - assert parser.parse("(a**2-b**2)==((a+b)*(a-b))").evaluate({'a': 4859, 'b': 13150}) is True + assert ( + parser.parse("(a**2-b**2)==((a+b)*(a-b))").evaluate({"a": 4859, "b": 13150}) + is True + ) - assert parser.parse('log(16,2)').evaluate({}) == 4.0 + assert parser.parse("log(16,2)").evaluate({}) == 4.0 - assert parser.parse("x^2").variables() == ['x'] + assert parser.parse("x^2").variables() == ["x"] diff --git a/tests/pyincore/utils/test_popdisloutputprocess.py b/tests/pyincore/utils/test_popdisloutputprocess.py index a2188b1ca..e9969245b 100644 --- a/tests/pyincore/utils/test_popdisloutputprocess.py +++ b/tests/pyincore/utils/test_popdisloutputprocess.py @@ -19,19 +19,23 @@ def upload_shapefile_to_services(client): "contributors": [], "dataType": "incore:popdislocationShp", "storedUrl": "", - "format": "shapefile" + "format": "shapefile", } response = datasvc.create_dataset(dataset_prop) - dataset_id = response['id'] - files = ['joplin-pop-disl-numprec.shp', - 'joplin-pop-disl-numprec.dbf', - 'joplin-pop-disl-numprec.shx', - 'joplin-pop-disl-numprec.prj'] + dataset_id = response["id"] + files = [ + "joplin-pop-disl-numprec.shp", + "joplin-pop-disl-numprec.dbf", + "joplin-pop-disl-numprec.shx", + "joplin-pop-disl-numprec.prj", + ] datasvc.add_files_to_dataset(dataset_id, files) # add to space spacesvc = SpaceService(client) - spacesvc.add_dataset_to_space("5f99ba8b0ace240b22a82e00", dataset_id=dataset_id) # commresilience + spacesvc.add_dataset_to_space( + "5f99ba8b0ace240b22a82e00", dataset_id=dataset_id + ) # commresilience print(dataset_id + " successfully uploaded and move to commresilience space!") @@ -63,7 +67,7 @@ def run_convert_pd_json_chained(client): return pd_result -if __name__ == '__main__': +if __name__ == "__main__": # test chaining with population dislocation client = IncoreClient(pyglobals.INCORE_API_DEV_URL) @@ -89,7 +93,9 @@ def run_convert_pd_json_chained(client): testpath = "" # testpath = "/Users///pyincore/tests/pyincore/utils" if testpath: - pd_process = PopDislOutputProcess(None, os.path.join(testpath, "joplin-pop-disl-results.csv")) + pd_process = PopDislOutputProcess( + None, os.path.join(testpath, "joplin-pop-disl-results.csv") + ) # pd_process = PopDislOutputProcess(None, # os.path.join(testpath, "joplin-pop-disl-results.csv"), # filter_name="Joplin", diff --git a/tests/test_format.py b/tests/test_format.py index 0c5eb26a2..c94819124 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -9,10 +9,10 @@ from pyincore.globals import PYINCORE_ROOT_FOLDER paths = [ - os.path.join(PYINCORE_ROOT_FOLDER, 'pyincore/'), - os.path.join(PYINCORE_ROOT_FOLDER, 'tests/'), - 
os.path.join(PYINCORE_ROOT_FOLDER, 'recipes/'), - os.path.join(PYINCORE_ROOT_FOLDER, 'scripts/') + os.path.join(PYINCORE_ROOT_FOLDER, "pyincore/"), + os.path.join(PYINCORE_ROOT_FOLDER, "tests/"), + os.path.join(PYINCORE_ROOT_FOLDER, "recipes/"), + os.path.join(PYINCORE_ROOT_FOLDER, "scripts/"), ]
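
For context on the final hunk above: tests/test_format.py builds a `paths` list over the project directories that this patch series reformats, importing PYINCORE_ROOT_FOLDER from pyincore.globals as shown in the diff. The diff does not show the test body itself, so the following is only a minimal sketch of how such a check might be written, assuming Black is invoked in check mode through subprocess; the real test may use flake8 or Black's Python API instead, and the test function name here is hypothetical.

    import os
    import subprocess
    import sys

    import pytest

    from pyincore.globals import PYINCORE_ROOT_FOLDER

    paths = [
        os.path.join(PYINCORE_ROOT_FOLDER, "pyincore/"),
        os.path.join(PYINCORE_ROOT_FOLDER, "tests/"),
        os.path.join(PYINCORE_ROOT_FOLDER, "recipes/"),
        os.path.join(PYINCORE_ROOT_FOLDER, "scripts/"),
    ]


    @pytest.mark.parametrize("path", paths)
    def test_black_check(path):
        # Hypothetical check: `black --check` exits with a non-zero status when
        # any file under `path` would be reformatted, which fails the test.
        result = subprocess.run(
            [sys.executable, "-m", "black", "--check", "--quiet", path],
            capture_output=True,
            text=True,
        )
        assert result.returncode == 0, result.stdout + result.stderr

Run with `pytest tests/test_format.py` from the repository root; skipping directories that do not exist (e.g. recipes/ or scripts/ in a minimal checkout) would be a straightforward extension.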