diff --git a/scripts/uberenv_configs/packages/conduit/package.py b/scripts/uberenv_configs/packages/conduit/package.py index 365947b97..91b53df46 100644 --- a/scripts/uberenv_configs/packages/conduit/package.py +++ b/scripts/uberenv_configs/packages/conduit/package.py @@ -43,6 +43,10 @@ class Conduit(CMakePackage): # is to bridge any spack dependencies that are still using the name master version("master", branch="develop", submodules=True) # note: 2021-05-05 latest tagged release is now preferred instead of develop + version("0.9.3", sha256="45d5a4eccd0fc978d153d29c440c53c483b8f29dfcf78ddcc9aa15c59b257177") + version("0.9.2", sha256="45d5a4eccd0fc978d153d29c440c53c483b8f29dfcf78ddcc9aa15c59b257177") + version("0.9.1", sha256="a3f1168738dcf72f8ebf83299850301aaf56e803f40618fc1230a755d0d05363") + version("0.9.0", sha256="844e012400ab820967eef6cec15e1aa9a68cb05119d0c1f292d3c01630111a58") version("0.8.8", sha256="99811e9c464b6f841f52fcd47e982ae47cbb01cba334cff43eabe13eea58c0df") version("0.8.7", sha256="f3bf44d860783f4e0d61517c5e280c88144af37414569f4cf86e2d29b3ba5293") version("0.8.6", sha256="8ca5d37033143ed7181c7286dd25a3f6126ba0358889066f13a2b32f68fc647e") @@ -128,6 +132,7 @@ class Conduit(CMakePackage): extends("python", when="+python") depends_on("py-numpy", when="+python", type=("build", "run")) depends_on("py-mpi4py", when="+python+mpi", type=("build", "run")) + depends_on("py-pip", when="+python", type="build") ####################### # I/O Packages diff --git a/src/docs/sphinx/Actions/Pipelines.rst b/src/docs/sphinx/Actions/Pipelines.rst index f3c616ed6..3ab1f535e 100644 --- a/src/docs/sphinx/Actions/Pipelines.rst +++ b/src/docs/sphinx/Actions/Pipelines.rst @@ -1089,33 +1089,26 @@ among many simulation ranks. Ascent utilizes the ``partition()`` functions provi that can be used to split or recombine Blueprint meshes in serial or parallel. Full M:N repartioning is supported. The ``partition()`` functions are in the serial and parallel Blueprint libraries, respectively. +Funtionality and further descriptions of optional parameters can be found in the Conduit::Blueprint `documentation `_. .. code:: cpp - // Serial - void conduit::blueprint::mesh::partition(const Node &mesh, - const Node &options, - Node &output); - - // Parallel - void conduit::blueprint::mpi::mesh::partition(const Node &mesh, - const Node &options, - Node &output, - MPI_Comm comm); - - -Partitioning meshes using Blueprint will use any options present to determine -how the partitioning process will behave. Typically, a caller would pass options -containing selections if pieces of domains are desired. The partitioner processes -any selections and then examines the desired target number of domains and will then -decide whether domains must be moved among ranks (only in parallel version) and -then locally combined to achieve the target number of domains. The combining -process will attempt to preserve the input topology type for the output topology. -However, in cases where lower topologies cannot be used, the algorithm will promote -the extracted domain parts towards more general topologies and use the one most -appropriate to contain the inputs. 
- -In parallel, the ``partition()`` function will make an effort to redistribute data across MPI + conduit::Node pipelines; + // pipeline 1 + pipelines["pl1/f1/type"] = "partition"; + //params optional + pipelines["pl1/f1/params/target"] = 1; + pipelines["pl1/f1/params/fields"].append() = "pink"; + pipelines["pl1/f1/params/fields"].append() = "pony"; + pipelines["pl1/f1/params/fields"].append() = "club"; + pipelines["pl1/f1/params/merge_tolerance"] = 0.000001; + pipelines["pl1/f1/params/mapping"] = 0; //turns off; on by default + pipelines["pl1/f1/params/build_adjsets"] = 1; + + + + +In parallel, the Partition filter will make an effort to redistribute data across MPI ranks to attempt to balance how data are assigned. Domains produced from selections are assigned round-robin across ranks from rank 0 through rank N-1 until all domains have been assigned. This assignment is carried out after extracting @@ -1132,86 +1125,103 @@ before being combined into the target number of domains. Options +++++++ -The ``partition()`` functions accept a node containing options. The options node -can be empty and all options are optional. If no options are given, each input mesh -domain will be fully selected. It is more useful to pass selections as part of the -option node with additional options that tell the algorithm how to split or combine -the inputs. If no selections are present in the options node then the partitioner -will create selections of an appropriate type that selects all elements in each +The Partition filter accepts optional parameters. +If no optional parameters are given, each input mesh +domain will be fully selected. +If no ``selections`` are specifed as ``params`` then the partitioner +will create selections of an appropriate type that selects all elements in each input domain. -The ``target`` option is useful for setting the target number of domains in the +The ``target`` parameter is useful for setting the target number of domains in the final output mesh. If the target value is larger than the number of input domains or selections then the mesh will be split to achieve that target number of domains. This may require further subdividing selections. Alternatively, if the target is smaller than the number of selections then the selections will be combined to yield the target number of domains. The combining is done such that smaller element -count domains are combined first. Additionally, Ascent provides an optional boolean parameter, ``distributed``, which dictates if the number of chosen target domains is applied across ranks (``true``, default), or to each rank individually (``false``). +count domains are combined first. +Additionally, Ascent provides an optional boolean parameter, ``distributed``, which dictates if the number +of chosen target domains is applied across ranks (``true``, default), or to each rank individually (``false``). .. tabularcolumns:: |p{1.5cm}|p{4cm}|L| -+------------------+-----------------------------------------+------------------------------------------+ -| **Option** | **Description** | **Example** | -+------------------+-----------------------------------------+------------------------------------------+ -| selections | A list of selection objects that | .. code:: yaml | -| | identify regions of interest from the | | -| | input domains. Selections can be | selections: | -| | different on each MPI rank. 
| - | -| | | type: logical | -| | | start: [0,0,0] | -| | | end: [9,9,9] | -| | | domain_id: 10 | -+------------------+-----------------------------------------+------------------------------------------+ -| target | An optional integer that determines the | .. code:: yaml | -| | fields containing original domains and | | -| | number of domains in the output. If | target: 4 | -| | given, the value must be greater than 0.| | -| | Values larger than the number of | | -| | selections cause domains to be split. | | -| | Values smaller than the number of | | -| | selections cause domains to be combined.| | -| | Invalid values are ignored. | | -| | | | -| | If not given, the output will contain | | -| | the number of selections. In parallel, | | -| | the largest target value from the ranks | | -| | will be used for all ranks. | | -+------------------+-----------------------------------------+------------------------------------------+ -| fields | An list of strings that indicate the | .. code:: yaml | -| | names of the fields to extract in the | | -| | output. If this option is not provided, | fields: ["dist", "pressure"] | -| | all fields will be extracted. | | -+------------------+-----------------------------------------+------------------------------------------+ -| mapping | An integer that determines whether | .. code:: yaml | -| | fields containing original domains and | | -| | ids will be added in the output. These | mapping: 0 | -| | fields enable one to know where each | | -| | vertex and element came from originally.| | -| | Mapping is on by default. A non-zero | | -| | value turns it on and a zero value turns| | -| | it off. | | -+------------------+-----------------------------------------+------------------------------------------+ -| merge_tolerance | A double value that indicates the max | .. code:: yaml | -| | allowable distance between 2 points | | -| | before they are considered to be | merge_tolerance: 0.000001 | -| | separate. 2 points spaced smaller than | | -| | this distance will be merged when | | -| | explicit coordsets are combined. | | -+------------------+-----------------------------------------+------------------------------------------+ -| distributed | An optional boolean value for parallel | .. code:: yaml | -| | execution. If true, the chosen number | | -| | of target domains will be applied | distributed: "false" | -| | across all ranks. If false, the chosen | | -| | number of target domains will be | | -| | applied to each rank individually. | | -| | | | -| | If not given, the default is true. | | -+------------------+-----------------------------------------+------------------------------------------+ ++---------------------+-----------------------------------------+------------------------------------------+ +| **Option** | **Description** | **Example** | ++---------------------+-----------------------------------------+------------------------------------------+ +| selections | A list of selection objects that | .. code:: yaml | +| | identify regions of interest from the | | +| | input domains. Selections can be | selections: | +| | different on each MPI rank. | type: "logical" | +| | | start: [0,0,0] | +| | | end: [9,9,9] | +| | | domain_id: 10 | ++---------------------+-----------------------------------------+------------------------------------------+ +| target | An optional integer that determines the | .. code:: yaml | +| | fields containing original domains and | | +| | number of domains in the output. 
If | target: 4 | +| | given, the value must be greater than 0.| | +| | Values larger than the number of | | +| | selections cause domains to be split. | | +| | Values smaller than the number of | | +| | selections cause domains to be combined.| | +| | Invalid values are ignored. | | +| | | | +| | If not given, the output will contain | | +| | the number of selections. In parallel, | | +| | the largest target value from the ranks | | +| | will be used for all ranks. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| fields | An list of strings that indicate the | .. code:: yaml | +| | names of the fields to extract in the | | +| | output. If this option is not provided, | fields: ["dist", "pressure"] | +| | all fields will be extracted. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| mapping | An integer that determines whether | .. code:: yaml | +| | fields containing original domains and | | +| | ids will be added in the output. These | mapping: 0 | +| | fields enable one to know where each | | +| | vertex and element came from originally.| | +| | Mapping is on by default. A non-zero | | +| | value turns it on and a zero value turns| | +| | it off. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| merge_tolerance | A double value that indicates the max | .. code:: yaml | +| | allowable distance between 2 points | | +| | before they are considered to be | merge_tolerance: 0.000001 | +| | separate. 2 points spaced smaller than | | +| | this distance will be merged when | | +| | explicit coordsets are combined. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| distributed | An optional boolean value for parallel | .. code:: yaml | +| | execution. If true, the chosen number | | +| | of target domains will be applied | distributed: "false" | +| | across all ranks. If false, the chosen | | +| | number of target domains will be | | +| | applied to each rank individually. | | +| | | | +| | If not given, the default is true. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| build_adjsets | An integer that determines whether | .. code:: yaml | +| | the partitioner should build adjsets, | | +| | if they are present in the selected | build_adjsets: 1 | +| | topology. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| original_element_ids| A string value that provides desired | .. code::yaml | +| | field name used to contain original | | +| | element ids created from partitioning. | original_element_ids: "elem_name" | +| | The default value is | | +| | original_element_ids. | | ++---------------------+-----------------------------------------+------------------------------------------+ +| original_vertex_ids | A string value that provides desired | .. code::yaml | +| | field name used to contain original | | +| | vertex ids created from partitioning. | original_vertex_ids: "vert_name" | +| | The default value is | | +| | original_vertex_ids. 
| | ++---------------------+-----------------------------------------+------------------------------------------+ Selections ++++++++++ -Selections can be specified in the options for the ``partition()`` function to +Selections can be specified in the options for the Partition Filter to select regions of interest that will participate in mesh partitioning. If selections are not used then all elements from the input meshes will be selected to partitipate in the partitioning process. Selections can be further @@ -1220,7 +1230,7 @@ target specific domains and topologies as well. If a selection does not apply to the input mesh domains then no geometry is produced in the output for that selection. -The ``partition()`` function's options support 4 types of selections: +The Partition filter supports 4 types of selections: .. tabularcolumns:: |p{1.5cm}|p{2cm}|L| @@ -1252,31 +1262,29 @@ operate on the specified topology only. | type | The selection type | .. code:: yaml | | | | | | | | selections: | -| | | - | -| | | type: logical | +| | | type: "logical" | +------------------+-----------------------------------------+------------------------------------------+ | domain_id | The domain_id to which the selection | .. code:: yaml | | | will apply. This is almost always an | | | | unsigned integer value. | selections: | -| | | - | -| | | type: logical | +| | | type: "logical" | | | | domain_id: 10 | | | | | | | | .. code:: yaml | | | | | | | For field selections, domain_id is | selections: | -| | allowed to be a string "any" so a single| - | -| | selection can apply to many domains. | type: logical | -| | | domain_id: any | +| | allowed to be a string "any" so a single| type: "logical" | +| | selection can apply to many domains. | domain_id = "any" | +| | | | | | | | +------------------+-----------------------------------------+------------------------------------------+ | topology | The topology to which the selection | .. code:: yaml | | | will apply. | | | | | selections: | -| | | - | -| | | type: logical | +| | | type: "logical" | | | | domain_id: 10 | -| | | topology: mesh | +| | | topology: "mesh" | +| | | | +------------------+-----------------------------------------+------------------------------------------+ Logical Selection @@ -1287,13 +1295,17 @@ beyond the actual mesh's logical extents, they will be clipped. The partitioner automatically subdivide logical selections into smaller logical selections, if needed, preserving the logical structure of the input topology into the output. -.. code:: yaml +.. code:: cpp - selections: - - - type: logical - start: [0,0,0] - end: [9,9,9] + conduit::Node pipelines; + // pipeline 1 + pipelines["pl1/f1/type"] = "partition"; + //params optional + pipelines["pl1/f1/params/selections/type"] = "logical"; + const float start[3] = {0,0,0}; + const float end[3] = {10,10,10}; + pipelines["pl1/f1/params/selections/start"].set(start,3); + pipelines["pl1/f1/params/selections/end"].set(end,3); Explicit Selection ****************** @@ -1301,12 +1313,15 @@ The explicit selection allows the partitioner to extract a list of elements. This is used when the user wants to target a specific set of elements. The output will result in an explicit topology. -.. code:: yaml +.. 
code:: cpp - selections: - - - type: explicit - elements: [0,1,2,3,100,101,102] + conduit::Node pipelines; + // pipeline 1 + pipelines["pl1/f1/type"] = "partition"; + //params optional + pipelines["pl1/f1/params/selections/type"] = "explicit"; + const int elements[6] = [0,1,2,3,100,101,102]; + pipelines["pl1/f1/params/selections/elements"].set(elements,6); Ranges Selection @@ -1315,12 +1330,16 @@ The ranges selection is similar to the explicit selection except that it identif ranges of elements using pairs of numbers. The list of ranges must be a multiple of 2 in length. The output will result in an explicit topology. -.. code:: yaml +.. code:: cpp + + conduit::Node pipelines; + // pipeline 1 + pipelines["pl1/f1/type"] = "partition"; + //params optional + pipelines["pl1/f1/params/selections/type"] = "ranges"; + const int elements[4] = [0,3,100,102]; + pipelines["pl1/f1/params/selections/elements"].set(elements,4); - selections: - - - type: ranges - ranges: [0,3,100,102] Field Selection *************** @@ -1335,11 +1354,34 @@ can be set to "any" if it is desired that the field selection will be applied to all domains in the input mesh. The domain_id value can still be set to specific integer values to limit the set of domains over which the selection will be applied. -.. code:: yaml ++------------------+-----------------------------------------+------------------------------------------+ +| **Option** | **Description** | **Example** | ++------------------+-----------------------------------------+------------------------------------------+ +| field | The name of the element field that will | .. code:: yaml | +| | be used for partitioning. The field | | +| | shall contain non-negative domain | selections: | +| | numbers. | type: "field" | +| | | domain_id: "any" | +| | | | ++------------------+-----------------------------------------+------------------------------------------+ +| destination_ranks| An optional list of integers | .. code:: yaml | +| | representing the MPI rank where the | | +| | domain will be sent after partitioning. | selections: | +| | This option can help ensure domains for | type: "field" | +| | topologies partitioned via multiple | field: "albatraoz" | +| | calls to partition() end up together on | domain_id: "any" | +| | a target MPI rank. The example shows | destination_ranks: [0,1,2,3] | +| | domain 0 going to MPI rank 0 and so on. | | +| | | | ++------------------+-----------------------------------------+------------------------------------------+ + +.. 
code:: cpp - selections: - - - type: field - domain_id: any - field: fieldname + conduit::Node pipelines; + // pipeline 1 + pipelines["pl1/f1/type"] = "partition"; + //params optional + pipelines["pl1/f1/params/selections/type"] = "field"; + pipelines["pl1/f1/params/selections/domain_id"] = "any"; + pipelines["pl1/f1/params/selections/field"] = "padam_padam"; diff --git a/src/libs/ascent/runtimes/ascent_data_object.cpp b/src/libs/ascent/runtimes/ascent_data_object.cpp index 1177469cb..c856b7437 100644 --- a/src/libs/ascent/runtimes/ascent_data_object.cpp +++ b/src/libs/ascent/runtimes/ascent_data_object.cpp @@ -389,7 +389,7 @@ std::shared_ptr DataObject::as_node() { conduit::Node *out_data = new conduit::Node(); bool zero_copy = true; - VTKHDataAdapter::VTKHCollectionToBlueprintDataSet(m_vtkh.get(), *out_data, true); + VTKHDataAdapter::VTKHCollectionToBlueprintDataSet(m_vtkh.get(), *out_data, zero_copy); detail::add_metadata(*out_data); std::shared_ptr bp(out_data); diff --git a/src/libs/ascent/runtimes/flow_filters/ascent_runtime_blueprint_filters.cpp b/src/libs/ascent/runtimes/flow_filters/ascent_runtime_blueprint_filters.cpp index 20393b137..d6957545d 100644 --- a/src/libs/ascent/runtimes/flow_filters/ascent_runtime_blueprint_filters.cpp +++ b/src/libs/ascent/runtimes/flow_filters/ascent_runtime_blueprint_filters.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #endif #if defined(ASCENT_VTKM_ENABLED) @@ -319,22 +320,53 @@ BlueprintPartition::verify_params(const conduit::Node ¶ms, info.reset(); bool res = true; - if(! params.has_child("target") || - ! params["target"].dtype().is_int() ) + res &= check_numeric("target",params, info, false, false); + + if(params.has_child("selections")) + { + res &= check_string("selections/type",params, info, true); + //domain_id can be int or "any" + res &= (check_string("selections/domain_id",params, info, false) || check_numeric("selections/domain_id", params, info, false, false)); + res &= check_string("selections/topology",params, info, false); + } + + if(params.has_child("fields")) { - info["errors"].append() = "Missing required int parameter 'target'"; + if(!params["fields"].dtype().is_list()) + { + res = false; + info["errors"].append() = "fields is not a list"; + } } + res &= check_numeric("mapping",params, info, false, false); + res &= check_numeric("merge_tolerance",params, info, false, false); + res &= check_numeric("build_adjsets",params, info, false, false); + res &= check_string("original_element_ids",params, info, false); + res &= check_string("original_vertex_ids",params, info, false); + std::vector valid_paths; valid_paths.push_back("target"); - valid_paths.push_back("selections"); - valid_paths.push_back("fields"); + valid_paths.push_back("selections/type"); + valid_paths.push_back("selections/domain_id"); + valid_paths.push_back("selections/field"); + valid_paths.push_back("selections/topology"); + valid_paths.push_back("selections/start"); + valid_paths.push_back("selections/end"); + valid_paths.push_back("selections/elements"); + valid_paths.push_back("selections/ranges"); + valid_paths.push_back("selections/field"); valid_paths.push_back("mapping"); valid_paths.push_back("merge_tolerance"); + valid_paths.push_back("build_adjsets"); + valid_paths.push_back("original_element_ids"); + valid_paths.push_back("original_vertex_ids"); valid_paths.push_back("distributed"); - - std::string surprises = surprise_check(valid_paths, params); - + + std::vector ingore_paths = {"fields"}; + + std::string surprises = 
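+    // note: "fields" holds a list of arbitrarily named entries, so that
+    // subtree is routed through the ignore list rather than valid_paths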
surprise_check(valid_paths,ingore_paths, params); + if(surprises != "") { res = false; @@ -359,8 +391,21 @@ BlueprintPartition::execute() conduit::Node *n_output = new conduit::Node(); - conduit::Node n_options = params(); + conduit::Node n_options; + int target = 1; + if(params().has_child("target")) + { + target = params()["target"].to_int32(); + } + + n_options.set_external(params()); + if(n_options.has_child("distributed")) + { + n_options.remove_child("distributed"); + } + + conduit::Node tmp; #ifdef ASCENT_MPI_ENABLED MPI_Comm mpi_comm = MPI_Comm_f2c(flow::Workspace::default_mpi_comm()); if(params().has_child("distributed") && @@ -368,20 +413,32 @@ BlueprintPartition::execute() { conduit::blueprint::mesh::partition(*n_input, n_options, - *n_output); + tmp); } else { conduit::blueprint::mpi::mesh::partition(*n_input, n_options, - *n_output, + tmp, mpi_comm); } #else conduit::blueprint::mesh::partition(*n_input, n_options, - *n_output); + tmp); #endif + + if(tmp.number_of_children() > 0) + { + if(target == 1) + { + n_output->append().move(tmp); + } + else + { + n_output->move(tmp); + } + } DataObject *d_output = new DataObject(n_output); set_output(d_output); } diff --git a/src/libs/ascent/runtimes/flow_filters/ascent_runtime_vtkh_filters.cpp b/src/libs/ascent/runtimes/flow_filters/ascent_runtime_vtkh_filters.cpp index 1d0e8b213..e91ac51db 100644 --- a/src/libs/ascent/runtimes/flow_filters/ascent_runtime_vtkh_filters.cpp +++ b/src/libs/ascent/runtimes/flow_filters/ascent_runtime_vtkh_filters.cpp @@ -1414,7 +1414,7 @@ VTKHAddRanks::execute() this->name(), collection, throw_error); - std::cerr << "topo_name: " << topo_name << std::endl; + if(topo_name == "") { // this creates a data object with an invalid source @@ -1517,7 +1517,7 @@ VTKHAddDomains::execute() this->name(), collection, throw_error); - std::cerr << "topo_name: " << topo_name << std::endl; + if(topo_name == "") { // this creates a data object with an invalid source @@ -1530,8 +1530,8 @@ VTKHAddDomains::execute() vtkh::DataSet &data = collection->dataset_by_topology(topo_name); data.AddDomainIdField(output_field); + new_coll->add(data, topo_name); - // re wrap in data object DataObject *res = new DataObject(new_coll); set_output(res); diff --git a/src/tests/ascent/t_ascent_mpi_add_domain_ids.cpp b/src/tests/ascent/t_ascent_mpi_add_domain_ids.cpp index 8c990c81d..2e6c67f75 100644 --- a/src/tests/ascent/t_ascent_mpi_add_domain_ids.cpp +++ b/src/tests/ascent/t_ascent_mpi_add_domain_ids.cpp @@ -26,8 +26,6 @@ #include "t_utils.hpp" - - using namespace std; using namespace conduit; using namespace ascent; @@ -81,48 +79,34 @@ TEST(ascent_mpi_add_domain_ids, test_mpi_add_domain_ids) // Create the actions. 
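+    // the add_domain_ids pipeline below writes an element field (named by
+    // "output") holding each element's domain id, which the pseudocolor
+    // scene then renders with a discrete color table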
// - conduit::Node pipelines; - // pipeline 1 - pipelines["pl1/f1/type"] = "add_domain_ids"; - conduit::Node ¶ms = pipelines["pl1/f1/params"]; - params["topology"] = "topo"; - params["output"] = "domain_ids"; - - conduit::Node scenes; - scenes["s1/plots/p1/type"] = "pseudocolor"; - scenes["s1/plots/p1/field"] = "domain_ids"; - scenes["s1/plots/p1/pipeline"] = "pl1"; - scenes["s1/plots/p1/color_table/discrete"] = "true"; - - scenes["s1/image_prefix"] = image_file; - conduit::Node actions; // add the pipeline conduit::Node &add_pipelines = actions.append(); add_pipelines["action"] = "add_pipelines"; - add_pipelines["pipelines"] = pipelines; + conduit::Node &pipelines = add_pipelines["pipelines"]; // add the scenes conduit::Node &add_scenes= actions.append(); add_scenes["action"] = "add_scenes"; - add_scenes["scenes"] = scenes; + conduit::Node &scenes=add_scenes["scenes"]; + + // pipeline 1 + pipelines["pl1/f1/type"] = "add_domain_ids"; + conduit::Node ¶ms = pipelines["pl1/f1/params"]; + params["topology"] = "topo"; + params["output"] = "domain_id"; -// conduit::Node extracts; -// -// extracts["e1/type"] = "relay"; -// extracts["e1/params/path"] = output_file; -// extracts["e1/params/protocol"] = "blueprint/mesh/hdf5"; -// conduit::Node &add_ext= actions.append(); -// add_ext["action"] = "add_extracts"; -// add_ext["extracts"] = extracts; + scenes["s1/plots/p1/type"] = "pseudocolor"; + scenes["s1/plots/p1/field"] = "domain_id"; + scenes["s1/plots/p1/pipeline"] = "pl1"; + scenes["s1/plots/p1/color_table/discrete"] = "true"; + scenes["s1/image_prefix"] = image_file; // // Run Ascent // - Ascent ascent; Node ascent_opts; - ascent_opts["runtime/type"] = "ascent"; ascent_opts["mpi_comm"] = MPI_Comm_c2f(comm); ascent_opts["exceptions"] = "forward"; ascent.open(ascent_opts); diff --git a/src/tests/ascent/t_ascent_mpi_add_ranks.cpp b/src/tests/ascent/t_ascent_mpi_add_ranks.cpp index ec13241e7..691517d14 100644 --- a/src/tests/ascent/t_ascent_mpi_add_ranks.cpp +++ b/src/tests/ascent/t_ascent_mpi_add_ranks.cpp @@ -80,49 +80,34 @@ TEST(ascent_mpi_add_mpi_ranks, test_mpi_add_mpi_ranks) // // Create the actions. 
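+    // add_mpi_ranks writes a field (named by "output") recording the MPI
+    // rank that owns each piece of the mesh; the scene colors by that field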
// + conduit::Node actions; + // add the pipeline + conduit::Node &add_pipelines = actions.append(); + add_pipelines["action"] = "add_pipelines"; + conduit::Node &pipelines = add_pipelines["pipelines"]; + // add the scenes + conduit::Node &add_scenes= actions.append(); + add_scenes["action"] = "add_scenes"; + conduit::Node &scenes = add_scenes["scenes"]; - conduit::Node pipelines; // pipeline 1 pipelines["pl1/f1/type"] = "add_mpi_ranks"; conduit::Node ¶ms = pipelines["pl1/f1/params"]; - params["topology"] = "topo"; - params["output"] = "ranks"; + params["topology"] = "topo"; + params["output"] = "rank"; - conduit::Node scenes; scenes["s1/plots/p1/type"] = "pseudocolor"; - scenes["s1/plots/p1/field"] = "ranks"; + scenes["s1/plots/p1/field"] = "rank"; scenes["s1/plots/p1/pipeline"] = "pl1"; scenes["s1/plots/p1/color_table/discrete"] = "true"; - scenes["s1/image_prefix"] = image_file; - conduit::Node actions; - // add the pipeline - conduit::Node &add_pipelines = actions.append(); - add_pipelines["action"] = "add_pipelines"; - add_pipelines["pipelines"] = pipelines; - // add the scenes - conduit::Node &add_scenes= actions.append(); - add_scenes["action"] = "add_scenes"; - add_scenes["scenes"] = scenes; - -// conduit::Node extracts; -// -// extracts["e1/type"] = "relay"; -// extracts["e1/params/path"] = output_file; -// extracts["e1/params/protocol"] = "blueprint/mesh/hdf5"; -// conduit::Node &add_ext= actions.append(); -// add_ext["action"] = "add_extracts"; -// add_ext["extracts"] = extracts; - // // Run Ascent // Ascent ascent; - Node ascent_opts; - ascent_opts["runtime/type"] = "ascent"; ascent_opts["mpi_comm"] = MPI_Comm_c2f(comm); ascent_opts["exceptions"] = "forward"; ascent.open(ascent_opts); diff --git a/src/tests/ascent/t_ascent_mpi_partition.cpp b/src/tests/ascent/t_ascent_mpi_partition.cpp index d434724d7..97a89eee6 100644 --- a/src/tests/ascent/t_ascent_mpi_partition.cpp +++ b/src/tests/ascent/t_ascent_mpi_partition.cpp @@ -21,6 +21,7 @@ #include #include +#include #include "t_config.hpp" #include "t_utils.hpp" @@ -30,16 +31,21 @@ using namespace std; using namespace conduit; using namespace ascent; +int NUM_DOMAINS = 8; + //----------------------------------------------------------------------------- -TEST(ascent_partition, test_distributed_mpi_partition_2D_multi_dom) +TEST(ascent_partition, test_mpi_partition_target_1) { Node n; ascent::about(n); - // - // Create an example mesh. - // - Node data, verify_info; + // only run this test if ascent was built with hdf5 + vtkm support + if(n["runtimes/ascent/vtkm/status"].as_string() == "disabled" || + n["runtimes/ascent/hdf5/status"].as_string() == "disabled" ) + { + ASCENT_INFO("Ascent Rendering and/or HDF5 support are disabled, skipping test"); + return; + } // //Set Up MPI @@ -50,36 +56,36 @@ TEST(ascent_partition, test_distributed_mpi_partition_2D_multi_dom) MPI_Comm_rank(comm, &par_rank); MPI_Comm_size(comm, &par_size); + // + // Create an example mesh. 
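+    // spiral_round_robin builds the blueprint spiral example and deals its
+    // NUM_DOMAINS domains out to the MPI ranks in round-robin order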
+ // + Node data, verify_info; + // use spiral , with 7 domains - conduit::blueprint::mesh::examples::spiral(7,data); + conduit::blueprint::mpi::mesh::examples::spiral_round_robin(NUM_DOMAINS,data,comm); EXPECT_TRUE(conduit::blueprint::mesh::verify(data,verify_info)); - int root = 0; - if(par_rank == root) - ASCENT_INFO("Testing blueprint partition of multi-domain mesh with MPI where the target is distributed amongst all ranks"); + ASCENT_INFO("Testing blueprint partition of multi-domain mesh with MPI"); - string output_path = prepare_output_dir(); - std::ostringstream oss; + string output_path; + if(par_rank == 0) + { + output_path = prepare_output_dir(); + } + else + { + output_path = output_dir(); + } - oss << "tout_distributed_partition_multi_dom_mpi"; string output_base = conduit::utils::join_file_path(output_path, - oss.str()); - std::ostringstream ossjson; - ossjson << "tout_distributed_partition_multi_dom_mpi_json"; - string output_json = conduit::utils::join_file_path(output_base, - ossjson.str()); - // remove existing files - if(par_rank == root) + "tout_partition_target_1_mpi"); + string output_root = output_base + "_result.cycle_000000.root"; + + // remove existing file + if(utils::is_file(output_root)) { - if(utils::is_file(output_base)) - { - utils::remove_file(output_base); - } - if(utils::is_file(output_json)) - { - utils::remove_file(output_json); - } + utils::remove_file(output_root); } conduit::Node actions; @@ -90,14 +96,30 @@ TEST(ascent_partition, test_distributed_mpi_partition_2D_multi_dom) conduit::Node &pipelines = add_pipelines["pipelines"]; pipelines["pl1/f1/type"] = "partition"; pipelines["pl1/f1/params/target"] = target; - + //add the extract conduit::Node &add_extracts = actions.append(); add_extracts["action"] = "add_extracts"; conduit::Node &extracts = add_extracts["extracts"]; extracts["e1/type"] = "relay"; extracts["e1/pipeline"] = "pl1"; - extracts["e1/params/path"] = output_base; + extracts["e1/params/path"] = output_base + "_result"; + extracts["e1/params/protocol"] = "hdf5"; + + extracts["einput/type"] = "relay"; + extracts["einput/params/path"] = output_base + "_input"; + extracts["einput/params/protocol"] = "hdf5"; + + // Add a scene that shows domain id + // + //add the scene + conduit::Node &add_scenes= actions.append(); + add_scenes["action"] = "add_scenes"; + conduit::Node &scenes = add_scenes["scenes"]; + scenes["s1/plots/p1/type"] = "pseudocolor"; + scenes["s1/plots/p1/field"] = "rank"; + scenes["s1/plots/p1/pipeline"] = "pl1"; + scenes["s1/image_prefix"] = conduit::utils::join_file_path(output_path, "tout_mpi_partition_target_1_result_render"); // // Run Ascent @@ -106,38 +128,159 @@ TEST(ascent_partition, test_distributed_mpi_partition_2D_multi_dom) Ascent ascent; Node ascent_opts; - ascent_opts["runtime"] = "ascent"; ascent_opts["mpi_comm"] = MPI_Comm_c2f(comm); ascent.open(ascent_opts); ascent.publish(data); ascent.execute(actions); ascent.close(); - if(par_rank == root) + MPI_Barrier(comm); + + if(par_rank == 0) { - //Two files in _output directory: - //tout_partition_multi_dom_mpi - //tout_partition_multi_dom_mpi_json - EXPECT_TRUE(conduit::utils::is_file(output_base)); - Node read_csv; - conduit::relay::io::load(output_base,read_csv); - - int num_doms = conduit::blueprint::mesh::number_of_domains(read_csv); - EXPECT_TRUE(num_doms == target); + EXPECT_TRUE(conduit::utils::is_file(output_root)); + Node read_mesh; + conduit::relay::io::blueprint::load_mesh(output_root,read_mesh); + + int num_doms = 
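+        // the repartitioned mesh written by the relay extract should come
+        // back with exactly `target` domains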
conduit::blueprint::mesh::number_of_domains(read_mesh); + EXPECT_TRUE(num_doms == target); } } -//----------------------------------------------------------------------------- -TEST(ascent_partition, test_nondistributed_mpi_partition_2D_multi_dom) +// ---------------------------------------------------------- +TEST(ascent_partition, test_mpi_partition_target_10) { Node n; ascent::about(n); + // only run this test if ascent was built with hdf5 + vtkm support + if(n["runtimes/ascent/vtkm/status"].as_string() == "disabled" || + n["runtimes/ascent/hdf5/status"].as_string() == "disabled" ) + { + ASCENT_INFO("Ascent Rendering and/or HDF5 support are disabled, skipping test"); + return; + } + + // + //Set Up MPI + // + int par_rank; + int par_size; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm, &par_rank); + MPI_Comm_size(comm, &par_size); + // // Create an example mesh. // Node data, verify_info; + // use spiral , with 7 domains + conduit::blueprint::mpi::mesh::examples::spiral_round_robin(NUM_DOMAINS,data,comm); + + EXPECT_TRUE(conduit::blueprint::mesh::verify(data,verify_info)); + + ASCENT_INFO("Testing blueprint partition of multi-domain mesh with MPI"); + + string output_path; + if(par_rank == 0) + { + output_path = prepare_output_dir(); + } + else + { + output_path = output_dir(); + } + + string output_base = conduit::utils::join_file_path(output_path, + "tout_partition_target_10_mpi"); + string output_root = output_base + "_result.cycle_000000.root"; + + if(par_rank == 0) + { + // remove existing file + if(utils::is_file(output_root)) + { + utils::remove_file(output_root); + } + } + + conduit::Node actions; + int target = 10; + // add the pipeline + conduit::Node &add_pipelines = actions.append(); + add_pipelines["action"] = "add_pipelines"; + conduit::Node &pipelines = add_pipelines["pipelines"]; + pipelines["pl1/f1/type"] = "partition"; + pipelines["pl1/f1/params/target"] = target; + + //add the extract + conduit::Node &add_extracts = actions.append(); + add_extracts["action"] = "add_extracts"; + conduit::Node &extracts = add_extracts["extracts"]; + extracts["e1/type"] = "relay"; + extracts["e1/pipeline"] = "pl1"; + extracts["e1/params/path"] = output_base + "_result"; + extracts["e1/params/protocol"] = "hdf5"; + + extracts["einput/type"] = "relay"; + extracts["einput/params/path"] = output_base + "_input"; + extracts["einput/params/protocol"] = "hdf5"; + + // Add a scene that shows domain id + // + //add the scene + conduit::Node &add_scenes= actions.append(); + add_scenes["action"] = "add_scenes"; + conduit::Node &scenes = add_scenes["scenes"]; + scenes["s1/plots/p1/type"] = "pseudocolor"; + scenes["s1/plots/p1/field"] = "rank"; + scenes["s1/plots/p1/pipeline"] = "pl1"; + scenes["s1/image_prefix"] = conduit::utils::join_file_path(output_path, "tout_mpi_partition_target_10_result_render"); + + // + // Run Ascent + // + + Ascent ascent; + + Node ascent_opts; + ascent_opts["mpi_comm"] = MPI_Comm_c2f(comm); + ascent_opts["exceptions"] = "forward"; + ascent.open(ascent_opts); + ascent.publish(data); + ascent.execute(actions); + ascent.close(); + + MPI_Barrier(comm); + + if(par_rank == 0) + { + EXPECT_TRUE(conduit::utils::is_file(output_root)); + Node read_mesh; + conduit::relay::io::blueprint::load_mesh(output_root,read_mesh); + + int num_doms = conduit::blueprint::mesh::number_of_domains(read_mesh); + EXPECT_TRUE(num_doms == target); + } + +} + + +// ---------------------------------------------------------- +TEST(ascent_partition, test_mpi_partition_fields) +{ + Node n; + 
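+    // ascent::about() fills n with this build's runtime configuration; the
+    // checks below use it to skip the test when VTK-m or HDF5 are disabled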
ascent::about(n); + + // only run this test if ascent was built with hdf5 + vtkm support + if(n["runtimes/ascent/vtkm/status"].as_string() == "disabled" || + n["runtimes/ascent/hdf5/status"].as_string() == "disabled" ) + { + ASCENT_INFO("Ascent Rendering and/or HDF5 support are disabled, skipping test"); + return; + } + // //Set Up MPI // @@ -147,36 +290,39 @@ TEST(ascent_partition, test_nondistributed_mpi_partition_2D_multi_dom) MPI_Comm_rank(comm, &par_rank); MPI_Comm_size(comm, &par_size); + // + // Create an example mesh. + // + Node data, verify_info; + // use spiral , with 7 domains - conduit::blueprint::mesh::examples::spiral(7,data); + conduit::blueprint::mpi::mesh::examples::spiral_round_robin(NUM_DOMAINS,data,comm); EXPECT_TRUE(conduit::blueprint::mesh::verify(data,verify_info)); - int root = 0; - if(par_rank == root) - ASCENT_INFO("Testing blueprint partition of multi-domain mesh with MPI but the target is not distributed and applies to each rank"); - - string output_path = prepare_output_dir(); - std::ostringstream oss; + ASCENT_INFO("Testing blueprint partition of multi-domain mesh with MPI"); + + string output_path; + if(par_rank == 0) + { + output_path = prepare_output_dir(); + } + else + { + output_path = output_dir(); + } - oss << "tout_nondistributed_partition_multi_dom_mpi"; string output_base = conduit::utils::join_file_path(output_path, - oss.str()); - std::ostringstream ossjson; - ossjson << "tout_nondistributed_partition_multi_dom_mpi_json"; - string output_json = conduit::utils::join_file_path(output_base, - ossjson.str()); - // remove existing files - if(par_rank == root) + "tout_partition_target_1_fields"); + string output_root = output_base + "_result.cycle_000000.root"; + + if(par_rank == 0) { - if(utils::is_file(output_base)) - { - utils::remove_file(output_base); - } - if(utils::is_file(output_json)) - { - utils::remove_file(output_json); - } + // remove existing file + if(utils::is_file(output_root)) + { + utils::remove_file(output_root); + } } conduit::Node actions; @@ -187,15 +333,31 @@ TEST(ascent_partition, test_nondistributed_mpi_partition_2D_multi_dom) conduit::Node &pipelines = add_pipelines["pipelines"]; pipelines["pl1/f1/type"] = "partition"; pipelines["pl1/f1/params/target"] = target; - pipelines["pl1/f1/params/distributed"] = "false"; - + pipelines["pl1/f1/params/fields"].append()="dist"; + //add the extract conduit::Node &add_extracts = actions.append(); add_extracts["action"] = "add_extracts"; conduit::Node &extracts = add_extracts["extracts"]; extracts["e1/type"] = "relay"; extracts["e1/pipeline"] = "pl1"; - extracts["e1/params/path"] = output_base; + extracts["e1/params/path"] = output_base + "_result"; + extracts["e1/params/protocol"] = "hdf5"; + + extracts["einput/type"] = "relay"; + extracts["einput/params/path"] = output_base + "_input"; + extracts["einput/params/protocol"] = "hdf5"; + + // Add a scene that shows domain id + // + //add the scene + conduit::Node &add_scenes= actions.append(); + add_scenes["action"] = "add_scenes"; + conduit::Node &scenes = add_scenes["scenes"]; + scenes["s1/plots/p1/type"] = "pseudocolor"; + scenes["s1/plots/p1/field"] = "dist"; + scenes["s1/plots/p1/pipeline"] = "pl1"; + scenes["s1/image_prefix"] = conduit::utils::join_file_path(output_path, "tout_partition_target_1_fields_result_render"); // // Run Ascent @@ -204,27 +366,27 @@ TEST(ascent_partition, test_nondistributed_mpi_partition_2D_multi_dom) Ascent ascent; Node ascent_opts; - ascent_opts["runtime"] = "ascent"; ascent_opts["mpi_comm"] = 
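+    // Ascent expects the MPI communicator as a Fortran handle (an int), so
+    // the C communicator is converted with MPI_Comm_c2f before ascent.open()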
MPI_Comm_c2f(comm); ascent.open(ascent_opts); ascent.publish(data); ascent.execute(actions); ascent.close(); - if(par_rank == root) + MPI_Barrier(comm); + + if(par_rank == 0) { - //Two files in _output directory: - //tout_partition_multi_dom_mpi - //tout_partition_multi_dom_mpi_json - EXPECT_TRUE(conduit::utils::is_file(output_base)); - Node read_csv; - conduit::relay::io::load(output_base,read_csv); - - int num_doms = conduit::blueprint::mesh::number_of_domains(read_csv); - EXPECT_TRUE(num_doms == target); + EXPECT_TRUE(conduit::utils::is_file(output_root)); + Node read_mesh; + conduit::relay::io::blueprint::load_mesh(output_root,read_mesh); + + int num_doms = conduit::blueprint::mesh::number_of_domains(read_mesh); + EXPECT_TRUE(num_doms == target); } + } + //----------------------------------------------------------------------------- int main(int argc, char* argv[]) { diff --git a/src/tests/ascent/t_ascent_partition.cpp b/src/tests/ascent/t_ascent_partition.cpp index 6f5a49244..7733906ae 100644 --- a/src/tests/ascent/t_ascent_partition.cpp +++ b/src/tests/ascent/t_ascent_partition.cpp @@ -35,6 +35,14 @@ TEST(ascent_partition, test_partition_2D_multi_dom) Node n; ascent::about(n); + // only run this test if ascent was built with hdf5 \support + if(n["runtimes/ascent/hdf5/status"].as_string() == "disabled" ) + { + ASCENT_INFO("Ascent HDF5 support is disabled, skipping test"); + return; + } + + // // Create an example mesh. // @@ -47,24 +55,17 @@ TEST(ascent_partition, test_partition_2D_multi_dom) ASCENT_INFO("Testing blueprint partition of multi-domain mesh in serial"); - string output_path = prepare_output_dir(); - std::ostringstream oss; + // work around for windows roundtrip conduit read bug + // string output_path = prepare_output_dir(); + // string output_base = conduit::utils::join_file_path(output_path, + // "tout_partition_multi_dom_serial"); + string output_base = "tout_partition_multi_dom_serial"; + string output_root = output_base + ".cycle_000000.root"; - oss << "tout_partition_multi_dom_serial"; - string output_base = conduit::utils::join_file_path(output_path, - oss.str()); - std::ostringstream ossjson; - ossjson << "tout_partition_multi_dom_serial_json"; - string output_json = conduit::utils::join_file_path(output_base, - ossjson.str()); // remove existing file - if(utils::is_file(output_base)) - { - utils::remove_file(output_base); - } - if(utils::is_file(output_json)) + if(utils::is_file(output_root)) { - utils::remove_file(output_json); + utils::remove_file(output_root); } conduit::Node actions; @@ -73,16 +74,27 @@ TEST(ascent_partition, test_partition_2D_multi_dom) conduit::Node &add_pipelines = actions.append(); add_pipelines["action"] = "add_pipelines"; conduit::Node &pipelines = add_pipelines["pipelines"]; - pipelines["pl1/f1/type"] = "partition"; - pipelines["pl1/f1/params/target"] = target; - + // pipelines["pl1/f1/type"] = "add_domain_ids"; + // pipelines["pl1/f1/params/output"] = "d_id_pre"; + + pipelines["pl1/f2/type"] = "partition"; + pipelines["pl1/f2/params/target"] = target; + + // pipelines["pl1/f3/type"] = "add_domain_ids"; + // pipelines["pl1/f3/params/output"] = "d_id_post"; + //add the extract conduit::Node &add_extracts = actions.append(); add_extracts["action"] = "add_extracts"; conduit::Node &extracts = add_extracts["extracts"]; - extracts["e1/type"] = "relay"; - extracts["e1/pipeline"] = "pl1"; - extracts["e1/params/path"] = output_base; + extracts["eout/type"] = "relay"; + extracts["eout/pipeline"] = "pl1"; + extracts["eout/params/path"] = 
output_base;
+    extracts["eout/params/protocol"] = "blueprint/mesh/yaml";
+
+    extracts["einput/type"] = "relay";
+    extracts["einput/params/path"] = output_base + "_input";
+    extracts["einput/params/protocol"] = "blueprint/mesh/yaml";
 
     //
     // Run Ascent
     //
@@ -97,14 +109,11 @@
     ascent.execute(actions);
     ascent.close();
 
-    //Two files in _output directory:
-    //tout_partition_multi_dom_serial
-    //tout_partition_multi_dom_serial_json
-    EXPECT_TRUE(conduit::utils::is_file(output_base));
-    Node read_csv;
-    conduit::relay::io::load(output_base,read_csv);
+    EXPECT_TRUE(conduit::utils::is_file(output_root));
+    Node read_mesh;
+    conduit::relay::io::blueprint::load_mesh(output_root,read_mesh);
 
-    int num_doms = conduit::blueprint::mesh::number_of_domains(read_csv);
+    int num_doms = conduit::blueprint::mesh::number_of_domains(read_mesh);
     EXPECT_TRUE(num_doms == target);
 }