diff --git a/.autotools b/.autotools
new file mode 100644
index 000000000..3a9f307fe
--- /dev/null
+++ b/.autotools
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.gitignore b/.gitignore
index 12b79ab77..dc6c55f66 100644
--- a/.gitignore
+++ b/.gitignore
@@ -62,3 +62,4 @@ coverage
# ignoring uq tests
datadriven/tests/uq/
+/Debug/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index 56da1c666..000000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,152 +0,0 @@
-# For use with docker, build this image and use it
-#image: sgpp_img
-
-stages:
- - build
- - platforms
- - switches
- - doc
-
-
-Compile Standard:
- stage: build
- script:
- - scons -j $(nproc) VERBOSE=1 COMPILER=gnu ARCH=avx OPT=0 PYDOC=0
- tags:
- - ubuntu
- - gcc
- - scons
- - swig
- - avx
- only:
- - branches
- except:
- - master
-
-
-#Compile Intel:
-# stage: platforms
-# script:
-# - . /opt/intel/bin/compilervars.sh intel64
-# - scons -j $(nproc) VERBOSE=1 COMPILER=Intel ARCH=avx OPT=1 PYDOC=0
-# tags:
-# - ubuntu
-# - intel
-# - scons
-# - swig
-# - avx
-# except:
-# - master
-
-
-#TODO validate scons flags
-#Compile OSX:
-# stage: platforms
-# script:
-# - scons -j4 CXX="g++-5" CC="g++-5" COMPILER=GNU ARCH=SSE3 VERBOSE=1 OPT=1 PYDOC=0 NO_UNIT_TESTS=1 SG_JAVA=0 CPPFLAGS="-D_GLIBCXX_USE_CXX11_ABI=0" RUN_BOOST_TESTS=0
-# tags:
-# - osx
-# - gcc
-# - scons
-# - swig
-# - sse3
-# except:
-# - master
-
-
-Test PYDOC:
- stage: switches
- script:
- - scons -j $(nproc) VERBOSE=1 COMPILER=gnu ARCH=avx OPT=0 PYDOC=1 COMPILE_BOOST_TESTS=0
- tags:
- - ubuntu
- - gcc
- - scons
- - swig
- - avx
- except:
- - master
-
-
-Test SinglePrecision:
- stage: switches
- script:
- - scons -j $(nproc) VERBOSE=1 COMPILER=gnu ARCH=avx OPT=1 PYDOC=0 USE_DOUBLE_PRECISION=0
- tags:
- - ubuntu
- - gcc
- - scons
- - swig
- - avx
- except:
- - master
-
-
-Test StaticLib:
- stage: switches
- script:
- - scons -j $(nproc) VERBOSE=1 COMPILER=gnu ARCH=avx OPT=1 PYDOC=0 USE_STATICLIB=1
- tags:
- - ubuntu
- - gcc
- - scons
- - swig
- - avx
- except:
- - master
-
-
-##############################################
-#TODO all of these
-##############################################
-
-#Compile Win32-msvc:
-# stage: platforms
-# script:
-# - scons COMPILER=vcc ARCH=sse3 PYDOC=0 VERBOSE=1 SG_PARALLEL=0 SG_COMBIGRID=0 MSVC_USE_SCRIPT="C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\vcvars32.bat"
-# tags:
-# - windows
-# - msvc
-# - scons
-# - swig
-# - sse3
-# except:
-# - master
-
-#Test Win32-msvc-StaticLib:
-# stage: switches
-# script:
-# - scons COMPILER=vcc ARCH=sse3 PYDOC=0 VERBOSE=1 USE_STATICLIB=1 SG_PARALLEL=0 SG_COMBIGRID=0 MSVC_USE_SCRIPT="C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\vcvars32.bat"
-# tags:
-# - windows
-# - msvc
-# - scons
-# - swig
-# - sse3
-# except:
-# - master
-
-
-Build Documentation:
- stage: doc
- script:
- - scons -h #generating the Doxyfile
- - doxygen Doxyfile
- - cat doxygen_warnings.log
- tags:
- - doxygen
- - scons
- except:
- - master
-
-
-#Style Guide:
-# stage: doc
-# script:
-# - exit 1
-# tags:
-# - someflagforstylechecking
-# except:
-# - master
-
-
diff --git a/SConstruct b/SConstruct
index 913c4d337..e3df90991 100644
--- a/SConstruct
+++ b/SConstruct
@@ -113,6 +113,10 @@ vars.Add("OCL_INCLUDE_PATH", "Set path to the OpenCL header files (parent direct
vars.Add("OCL_LIBRARY_PATH", "Set path to the OpenCL library")
vars.Add("BOOST_INCLUDE_PATH", "Set path to the Boost header files", "/usr/include")
vars.Add("BOOST_LIBRARY_PATH", "Set path to the Boost library", "/usr/lib/x86_64-linux-gnu")
+vars.Add("GLPK_INCLUDE_PATH", 'Specifies the location of the GLPK header files.', '/usr/include')
+vars.Add("GLPK_LIBRARY_PATH", 'Specifies the location of the GLPK library.', '/usr/lib64')
+vars.Add("GSL_INCLUDE_PATH", 'Specifies the location of the GLPK header files.', '/usr/include')
+vars.Add("GSL_LIBRARY_PATH", 'Specifies the location of the GLPK library.', '/usr/lib64')
vars.Add(BoolVariable("COMPILE_BOOST_TESTS",
"Compile the test cases written using Boost Test", True))
vars.Add(BoolVariable("COMPILE_BOOST_PERFORMANCE_TESTS",
@@ -133,6 +137,7 @@ vars.Add(BoolVariable("USE_GMMPP", "Set if Gmm++ should be used " +
"(only relevant for sgpp::optimization)", False))
vars.Add(BoolVariable("USE_UMFPACK", "Set if UMFPACK should be used " +
"(only relevant for sgpp::optimization)", False))
+vars.Add(BoolVariable('USE_STATICLIB', 'Sets if a static library should be built.', False))
vars.Add(BoolVariable("BUILD_STATICLIB", "Set if static libraries should be built " +
"instead of shared libraries", False))
vars.Add(BoolVariable("PRINT_INSTRUCTIONS", "Print instructions for installing SG++", True))
diff --git a/compile.sh b/compile.sh
new file mode 100644
index 000000000..9d5829cc4
--- /dev/null
+++ b/compile.sh
@@ -0,0 +1 @@
+scons -j 4 SG_ALL=0 SG_DISTRIBUTEDCOMBIGRID=1 VERBOSE=1 RUN_BOOST_TESTS=0 RUN_CPPLINT=0 BUILD_STATICLIB=0 CXX=mpic++.mpich OPT=1
diff --git a/distributedcombigrid/SConscript b/distributedcombigrid/SConscript
index 12ca21b7c..b26ef825a 100755
--- a/distributedcombigrid/SConscript
+++ b/distributedcombigrid/SConscript
@@ -17,4 +17,4 @@ module.generatePythonDocstrings()
module.buildExamples()
module.buildBoostTests()
module.runBoostTests()
-module.runCpplint()
\ No newline at end of file
+module.runCpplint()
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/HelperFunctions.hpp b/distributedcombigrid/examples/combi_example_soft_faults/HelperFunctions.hpp
new file mode 100644
index 000000000..0f121f0e9
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/HelperFunctions.hpp
@@ -0,0 +1,173 @@
+/*
+ * HelperFunctions.cpp
+ *
+ * Created on: Jan 13, 2016
+ * Author: sccs
+ */
+#ifndef HELPERFUNCTIONS_HPP_
+#define HELPERFUNCTIONS_HPP_
+
+#include "mpi.h"
+#include <cassert>
+#include <cmath>
+#include <fstream>
+#include <string>
+#include <vector>
+#include <boost/property_tree/ini_parser.hpp>
+#include <boost/property_tree/ptree.hpp>
+
+namespace combigrid {
+
+void createCommunicators( size_t ngroup, size_t nprocs, int grank, int gsize,
+ int& key, int& managerID, MPI_Comm& gcomm, MPI_Comm& lcomm){
+ /* determine global rank of each process
+ * the manager process always has the highest rank
+ * all other processes are worker processes */
+
+
+ /* create a local communicator for each process group
+ * lcomm is the local communicator of its own process group for each worker process
+ * for manager, lcomm is a group which contains only manager process and can be ignored
+ */
+ int color = grank / nprocs;
+ key = grank - color*nprocs;
+ MPI_Comm_split(MPI_COMM_WORLD, color, key, &lcomm);
+
+ // create global communicator containing manager and pgroup roots
+ MPI_Group worldGroup;
+ MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);
+
+ std::vector<int> ranks(ngroup+1);
+ for (size_t i = 0; i < ngroup; ++i) {
+ ranks[i] = i*nprocs;
+ }
+ ranks.back() = managerID;
+
+ MPI_Group rootGroup;
+ MPI_Group_incl(worldGroup, (int)ranks.size(), &ranks[0], &rootGroup);
+
+ MPI_Comm_create(MPI_COMM_WORLD, rootGroup, &gcomm);
+}
+
+void readParameterFile(const std::string& fileName, size_t &ngroup, size_t &nprocs, DimType &dim,
+ LevelVector &lmin, LevelVector &lmax, LevelVector &leval,
+ IndexVector &p, double &time_step, double &time_start,
+ double &time_end, size_t &ncombi, FaultsInfo &faultsInfo, bool &plot ){
+
+ // parser for the ini parameter file
+
+ boost::property_tree::ptree cfg;
+ boost::property_tree::ini_parser::read_ini(fileName, cfg);
+ // there are ngroup*nprocs+1 processes needed
+ ngroup = cfg.get<size_t>("manager.ngroup");
+ nprocs = cfg.get<size_t>("manager.nprocs");
+
+ time_step = cfg.get<double>("simulation.time_step");
+ time_start = cfg.get<double>("simulation.time_start");
+ time_end = cfg.get<double>("simulation.time_end");
+ ncombi = cfg.get<size_t>("simulation.ncombi");
+ plot = cfg.get<bool>("simulation.plot");
+
+ dim = cfg.get<DimType>("ct.dim");
+
+ lmin.resize(dim);
+ lmax.resize(dim);
+ leval.resize(dim);
+ p.resize(dim);
+
+ cfg.get("ct.lmin") >> lmin;
+ cfg.get("ct.lmax") >> lmax;
+ cfg.get("ct.leval") >> leval;
+ cfg.get("ct.p") >> p;
+
+ faultsInfo.numFaults_ = cfg.get<int>("faults.num_faults");
+
+ if ( faultsInfo.numFaults_ == 0 ){
+ faultsInfo.iterationFaults_.resize(1);
+ faultsInfo.taskFaults_.resize(1);
+ } else {
+ faultsInfo.iterationFaults_.resize(faultsInfo.numFaults_);
+ faultsInfo.taskFaults_.resize(faultsInfo.numFaults_);
+ }
+
+ faultsInfo.sdcIndex_.resize(dim);
+
+ cfg.get("faults.iteration_faults") >> faultsInfo.iterationFaults_;
+ cfg.get("faults.task_faults") >> faultsInfo.taskFaults_;
+ cfg.get("faults.sdc_index") >> faultsInfo.sdcIndex_;
+
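+ // map the sdc_mag setting to an error magnitude: 1 = near-underflow (1e-300),
+ // 2 = moderate (10^-0.5), 3 = near-overflow (1e150); see the switch below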
+ int sdcMag;
+ sdcMag = cfg.get<int>("faults.sdc_mag");
+ switch( sdcMag ){
+ case 1 : faultsInfo.sdcMag_ = 1e-300; break;
+ case 2 : faultsInfo.sdcMag_ = pow(10,-0.5); break;
+ case 3 : faultsInfo.sdcMag_ = 1e150; break;
+ }
+
+ faultsInfo.sdcMethod_ = cfg.get<int>("method.sdc_method");
+
+ // parameter for gnuplot
+ // TODO: use VTK
+ std::ofstream paramfile;
+ paramfile.open("param.plt");
+ paramfile << "time_step = " << time_step << std::endl;
+ paramfile << "time_start = " << time_start << std::endl;
+ paramfile << "time_end = " << time_end << std::endl;
+ paramfile << "dim = " << dim << std::endl;
+ paramfile.close();
+}
+
+// TODO: redundant definition of exact solution,
+// use solution from Problem.hh
+double exact(std::vector<double>& x, double t)
+{
+ const int dim = x.size();
+
+ double exponent = 0;
+ for (int d = 0; d < dim; d++) {
+ x[d] -= 0.5 * t;
+ exponent -= std::pow(x[d]-0.5,2);
+ }
+
+ return std::exp(exponent*100.0);
+}
+
+void writeSolutionToFile(std::ofstream& outFile, const FullGrid<CombiDataType>& fg){
+
+ DimType dim = fg.getDimension();
+ if ( dim == 2 ) {
+ std::vector<double> coords(dim, 0.0);
+
+ for (int i = 0; i < fg.getNrElements(); ++i) {
+ if (i % fg.length(0) == 0 && i > 0) {
+ outFile << std::endl;
+ }
+ fg.getCoords(i, coords);
+ outFile << coords[0] << "\t"
+ << coords[1] << "\t"
+ << fg.getElementVector()[i] << std::endl;
+ }
+ outFile << std::endl << std::endl;
+ }
+}
+
+void writeErrorToFile(std::ofstream& outFile, FullGrid<CombiDataType>& fg,
+ const double &time_step, const double &step){
+
+ // calculate error
+ std::vector<double> coords(fg.getDimension(), 0.0);
+ for (int i = 0; i < fg.getNrElements(); ++i) {
+ fg.getCoords(i, coords);
+ fg.getElementVector()[i] -= exact(coords, time_step*step);
+ }
+
+ // output for approximation error in gnuplot
+ outFile << time_step*step << "\t"
+ << fg.getlpNorm(0) << "\t"
+ << fg.getlpNorm(2) << std::endl;
+}
+
+void checkProcs(const size_t &nprocs, const IndexVector &p){
+ IndexType check = 1;
+ for (auto k : p)
+ check *= k;
+ assert(check == IndexType(nprocs));
+}
+
+} // namespace combigrid
+
+#endif /* HELPERFUNCTIONS_HPP_ */
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/Makefile.sample b/distributedcombigrid/examples/combi_example_soft_faults/Makefile.sample
new file mode 100644
index 000000000..dfb4c42e2
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/Makefile.sample
@@ -0,0 +1,21 @@
+CC=mpic++.mpich
+CFLAGS=-std=c++11 -g -fopenmp -Wno-deprecated-declarations -Wno-unused-local-typedefs -Wno-deprecated -Wno-uninitialized -Wall
+
+SGPP_DIR=path/to/sgpp
+
+LD_SGPP=-L$(SGPP_DIR)/lib/sgpp
+
+INC_SGPP=-I$(SGPP_DIR)/distributedcombigrid/src/
+
+LDIR=$(LD_SGPP)
+INC=$(INC_SGPP)
+
+LIBS=-lsgppdistributedcombigrid -lboost_serialization -lglpk -lgsl -lcblas
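+# GLPK, GSL and CBLAS are linked because the fault-tolerance code of the library depends on them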
+
+all: combi_example_soft_faults
+
+combi_example_soft_faults: combi_example_soft_faults.cpp TaskExample.hpp
+ $(CC) $(CFLAGS) $(LDIR) $(INC) -o combi_example_soft_faults combi_example_soft_faults.cpp $(LIBS)
+
+clean:
+ rm -f *.o out/* combi_example_soft_faults
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/TaskExample.hpp b/distributedcombigrid/examples/combi_example_soft_faults/TaskExample.hpp
new file mode 100644
index 000000000..68177f8ce
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/TaskExample.hpp
@@ -0,0 +1,297 @@
+/*
+ * TaskExample.hpp
+ *
+ * Created on: Sep 25, 2015
+ * Author: heenemo
+ */
+
+#ifndef TASKEXAMPLE_HPP_
+#define TASKEXAMPLE_HPP_
+
+#include "sgpp/distributedcombigrid/fullgrid/DistributedFullGrid.hpp"
+#include "sgpp/distributedcombigrid/task/Task.hpp"
+#include "sgpp/distributedcombigrid/fault_tolerance/FTUtils.hpp"
+
+namespace combigrid {
+
+class TaskExample: public Task {
+
+ public:
+ /* if the constructor of the base task class is not sufficient we can provide an
+ * own implementation. here, we add dt, nsteps, p, and faultsInfo as new parameters.
+ */
+ TaskExample(DimType dim, LevelVector& l, std::vector<bool>& boundary,
+ real coeff, LoadModel* loadModel, real dt,
+ size_t nsteps, IndexVector p = IndexVector(0),
+ FaultsInfo faultsInfo = {0,IndexVector(0),IndexVector(0)}) :
+ Task(dim, l, boundary, coeff, loadModel), dt_(dt), nsteps_(
+ nsteps), stepsTotal_(0), p_(p), faultsInfo_(faultsInfo),
+ initialized_(false), combiStep_(0), dfg_(NULL)
+ {
+ }
+
+ void init(CommunicatorType lcomm) {
+// assert(!initialized_);
+// assert(dfg_ == NULL);
+
+ int lrank;
+ MPI_Comm_rank(lcomm, &lrank);
+
+ /* create distributed full grid. we try to find a balanced ratio between
+ * the number of grid points and the number of processes per dimension
+ * by this very simple algorithm. to keep things simple we require powers
+ * of two for the number of processes here. */
+ int np;
+ MPI_Comm_size(lcomm, &np);
+
+ // check if power of two
+ if (!((np > 0) && ((np & (~np + 1)) == np)))
+ assert(false && "number of processes not power of two");
+
+ DimType dim = this->getDim();
+ IndexVector p(dim, 1);
+ const LevelVector& l = this->getLevelVector();
+
+ if (p_.size() == 0) {
+ // compute domain decomposition
+ IndexType prod_p(1);
+
+ while (prod_p != static_cast<IndexType>(np)) {
+ DimType dimMaxRatio = 0;
+ real maxRatio = 0.0;
+
+ for (DimType k = 0; k < dim; ++k) {
+ real ratio = std::pow(2.0, l[k]) / p[k];
+
+ if (ratio > maxRatio) {
+ maxRatio = ratio;
+ dimMaxRatio = k;
+ }
+ }
+
+ p[dimMaxRatio] *= 2;
+ prod_p = 1;
+
+ for (DimType k = 0; k < dim; ++k)
+ prod_p *= p[k];
+ }
+ } else {
+ p = p_;
+ }
+
+ if (lrank == 0) {
+ std::cout << "group " << theMPISystem()->getGlobalRank() << " "
+ << "computing task " << this->getID() << " with l = "
+ << this->getLevelVector() << " and p = " << p << std::endl;
+ }
+
+ // create local subgrid on each process
+ dfg_ = new DistributedFullGrid<CombiDataType>(dim, l, lcomm,
+ this->getBoundary(), p);
+
+ /* loop over local subgrid and set initial values */
+ std::vector<CombiDataType>& elements = dfg_->getElementVector();
+
+ for (size_t i = 0; i < elements.size(); ++i) {
+ IndexType globalLinearIndex = dfg_->getGlobalLinearIndex(i);
+ std::vector<real> globalCoords(dim);
+ dfg_->getCoordsGlobal(globalLinearIndex, globalCoords);
+ elements[i] = TaskExample::myfunction(globalCoords, 0.0);
+ }
+
+ initialized_ = true;
+ }
+ /* this is where the application code kicks in and all the magic happens.
+ * do whatever you have to do, but make sure that your application uses
+ * only lcomm or a subset of it as communicator.
+ * important: don't forget to set the isFinished flag at the end of the computation.
+ */
+ void run(CommunicatorType lcomm) {
+ assert(initialized_);
+
+ int lrank, globalRank;
+ MPI_Comm_rank(lcomm, &lrank);
+ MPI_Comm_rank(MPI_COMM_WORLD, &globalRank);
+
+ /* pseudo timestepping to demonstrate the behaviour of your typical
+ * time-dependent simulation problem. */
+ std::vector<CombiDataType>& elements = dfg_->getElementVector();
+
+ for (size_t step = stepsTotal_; step < stepsTotal_ + nsteps_; ++step) {
+ real time = (step + 1)* dt_;
+
+ for (size_t i = 0; i < elements.size(); ++i) {
+ IndexType globalLinearIndex = dfg_->getGlobalLinearIndex(i);
+ std::vector<real> globalCoords(this->getDim());
+ dfg_->getCoordsGlobal(globalLinearIndex, globalCoords);
+ elements[i] = TaskExample::myfunction(globalCoords, time);
+ }
+ }
+
+
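+ /* soft-fault injection: if this task and the current combination step are listed
+ * in faultsInfo_, one local grid value is scaled by sdcMag_ to emulate silent
+ * data corruption */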
+ int localRank = theMPISystem()->getLocalRank();
+ if ( failNow(localRank) && initialized_ ){
+ std::cout << "Task " << id_ << " failed at iteration " << combiStep_ << std::endl;
+ IndexType sdcGLI = dfg_->getGlobalLinearIndex(faultsInfo_.sdcIndex_);
+ IndexType sdcLinearIndex = dfg_->getLocalLinearIndex(sdcGLI);
+
+ std::cout << "Old function value: " << dfg_->getData()[sdcLinearIndex] << std::endl;
+ dfg_->getData()[sdcLinearIndex] *= faultsInfo_.sdcMag_;
+ std::cout << "New function value: " << dfg_->getData()[sdcLinearIndex] << std::endl;
+
+ std::vector<real> coords;
+ IndexType globalInd = dfg_->getGlobalLinearIndex(sdcLinearIndex);
+ LevelVector sdcSub(this->getDim()); IndexVector inds(this->getDim()); // assumed declarations, lost in the original text
+ dfg_->getGlobalLI(globalInd, sdcSub, inds);
+ std::cout << "SDC injected in subspace " << sdcSub << std::endl;
+ dfg_->getCoordsLocal( sdcLinearIndex, coords );
+ std::cout << "Coordinate: ";
+ for (auto coord : coords)
+ std::cout << coord << " ";
+ std::cout << std::endl;
+ }
+
+ /* assumed: advance the counters before marking the task finished */
+ stepsTotal_ += nsteps_;
+ combiStep_++;
+ this->setFinished(true);
+ }
+
+ /* this function evaluates the combination solution on a given full grid.
+ * here, a full grid representation of your task's solution has to be created
+ * on the process of lcomm with the rank r.
+ * typically this would require gathering your (in whatever way) distributed
+ * solution on one process and then converting it to the full grid representation.
+ * the DistributedFullGrid class offers a convenient function to do this.
+ */
+ void getFullGrid(FullGrid<CombiDataType>& fg, RankType r,
+ CommunicatorType lcomm) {
+ assert(fg.getLevels() == dfg_->getLevels());
+ dfg_->gatherFullGrid(fg, r);
+ }
+
+ DistributedFullGrid<CombiDataType>& getDistributedFullGrid() {
+ return *dfg_;
+ }
+
+ static real myfunction(std::vector<double>& coords, real t) {
+ real u = std::cos(M_PI * t);
+
+ for ( size_t d = 0; d < coords.size(); ++d )
+ u *= std::cos( 2.0 * M_PI * coords[d] );
+
+ return u;
+
+ /*
+ double res = 1.0;
+ for (size_t i = 0; i < coords.size(); ++i) {
+ res *= -4.0 * coords[i] * (coords[i] - 1);
+ }
+
+
+ return res;
+ */
+ }
+
+ inline void setStepsTotal( size_t stepsTotal );
+
+ void setZero(){
+ std::vector<CombiDataType>& data = dfg_->getElementVector();
+
+ for( size_t i=0; i<data.size(); ++i )
+ data[i] = 0.0;
+ }
+
+ inline bool failNow( const int& localRank );
+
+ private:
+ friend class boost::serialization::access;
+
+ // members set by the manager and exchanged via serialize()
+ real dt_;
+ size_t nsteps_;
+ size_t stepsTotal_;
+ IndexVector p_;
+ FaultsInfo faultsInfo_;
+
+ // purely local members, only valid on the worker processes
+ bool initialized_;
+ size_t combiStep_;
+ DistributedFullGrid<CombiDataType>* dfg_;
+
+ /**
+ * The serialize function has to be extended by the new member variables.
+ * However this concerns only member variables that need to be exchanged
+ * between manager and workers. We do not need to add "local" member variables
+ * that are only needed on either manager or worker processes.
+ * For serialization of the parent class members, the class must be
+ * registered with the BOOST_CLASS_EXPORT macro.
+ */
+ template<class Archive>
+ void serialize(Archive& ar, const unsigned int version) {
+ // handles serialization of base class
+ ar& boost::serialization::base_object<Task>(*this);
+
+ // add our new variables
+ ar& initialized_;
+ ar& dt_;
+ ar& nsteps_;
+ ar& stepsTotal_;
+ ar& p_;
+ ar& faultsInfo_;
+ }
+
+};
+
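+/* a fault is triggered only if faults are configured, the current combination step
+ * and this task's ID appear in the fault lists, and the configured SDC index lies
+ * in this process's local part of the grid */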
+inline bool TaskExample::failNow( const int& localRank ){
+ if ( faultsInfo_.numFaults_ == 0 )
+ return false;
+
+ IndexVector iF = faultsInfo_.iterationFaults_;
+ IndexVector tF = faultsInfo_.taskFaults_;
+ IndexVector indF = faultsInfo_.sdcIndex_;
+
+ std::vector<IndexType>::iterator it;
+ it = std::find(iF.begin(), iF.end(), combiStep_);
+ IndexType idx = std::distance(iF.begin(),it);
+ // Check if current iteration is in iterationFaults_
+ while (it!=iF.end() ){
+ // Check if my task should fail
+ if( this->getID() == tF[idx] ){
+ IndexType GLI = dfg_->getGlobalLinearIndex(indF);
+ IndexType LLI = dfg_->getLocalLinearIndex(GLI);
+ if (LLI != -1 )
+ return true;
+ }
+ it = std::find(++it, iF.end(), combiStep_);
+ idx = std::distance(iF.begin(),it);
+ }
+ return false;
+}
+
+inline void TaskExample::setStepsTotal( size_t stepsTotal ) {
+ stepsTotal_ = stepsTotal;
+}
+
+} // namespace combigrid
+
+#endif /* TASKEXAMPLE_HPP_ */
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/combi_example_soft_faults.cpp b/distributedcombigrid/examples/combi_example_soft_faults/combi_example_soft_faults.cpp
new file mode 100644
index 000000000..65f6052bb
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/combi_example_soft_faults.cpp
@@ -0,0 +1,218 @@
+/*
+ * combi_example.cpp
+ *
+ * Created on: Sep 23, 2015
+ * Author: heenemo
+ */
+#include <mpi.h>
+#include <vector>
+#include <boost/property_tree/ptree.hpp>
+#include <boost/property_tree/ini_parser.hpp>
+#include <boost/serialization/export.hpp>
+
+// compulsory includes for basic functionality
+#include "sgpp/distributedcombigrid/task/Task.hpp"
+#include "sgpp/distributedcombigrid/utils/Types.hpp"
+#include "sgpp/distributedcombigrid/combischeme/CombiMinMaxScheme.hpp"
+#include "sgpp/distributedcombigrid/fullgrid/FullGrid.hpp"
+#include "sgpp/distributedcombigrid/loadmodel/LinearLoadModel.hpp"
+#include "sgpp/distributedcombigrid/manager/CombiParameters.hpp"
+#include "sgpp/distributedcombigrid/manager/ProcessGroupManager.hpp"
+#include "sgpp/distributedcombigrid/manager/ProcessGroupWorker.hpp"
+#include "sgpp/distributedcombigrid/manager/ProcessManager.hpp"
+
+// include user specific task. this is the interface to your application
+#include "TaskExample.hpp"
+
+#include "HelperFunctions.hpp"
+
+using namespace combigrid;
+
+// this is necessary for correct function of task serialization
+BOOST_CLASS_EXPORT(TaskExample)
+
+void solveProblem(ProcessManager& manager, LevelVector& leval,
+ std::vector<bool>& boundary, double time_step,
+ int numSteps, std::vector<int> time_steps_combi,
+ bool plot, int sdcMethod )
+{
+ std::ofstream valueFile("out/values.dat");
+
+ WORLD_MANAGER_EXCLUSIVE_SECTION{ theStatsContainer()->setTimerStart("ct"); }
+
+ int combistep = 0;
+ std::vector<int> reinitFaultsID, recomputeFaultsID, faultsID;
+ for (int step = 0; step < numSteps; ++step) {
+
+ if( step == 0 ){
+ manager.runfirst();
+ } else {
+ manager.runnext();
+ }
+
+ if ( step == time_steps_combi[combistep]){
+
+ bool success = true;
+ success = manager.searchSDC( sdcMethod );
+ combistep++;
+ if ( !success ) {
+ std::cout << "SDC detected at combi iteration " << step << std::endl;
+ manager.getSDCFaultIDs( faultsID );
+ /* call optimization code to find new coefficients */
+ const std::string prob_name = "interpolation based optimization";
+
+ manager.recomputeOptimumCoefficients(prob_name, faultsID,
+ reinitFaultsID, recomputeFaultsID);
+
+ /* communicate new combination scheme*/
+ manager.updateCombiParameters();
+
+ /* if some tasks have to be recomputed, do so*/
+ for ( auto id : recomputeFaultsID ) {
+ TaskExample* tmp = static_cast<TaskExample*>(manager.getTask(id));
+ tmp->setStepsTotal((combistep-1)*numSteps);
+ }
+ manager.recompute(recomputeFaultsID);
+ }
+
+
+ /* combine solution */
+ std::cout<<"Combining.."< fg( leval.size(), leval, boundary);
+ std::cout << "eval solution for plotting" << std::endl;
+ manager.gridEval(fg);
+ // output for function values in gnuplot (only in 2D)
+ writeSolutionToFile(valueFile, fg);
+ }
+
+ if ( !success ) {
+ manager.reinit(reinitFaultsID);
+ for ( auto id : reinitFaultsID ) {
+ TaskExample* tmp = static_cast<TaskExample*>(manager.getTask(id));
+ tmp->setStepsTotal(combistep*numSteps);
+ }
+ manager.restoreCombischeme();
+ manager.updateCombiParameters();
+ }
+ }
+ }
+
+ valueFile.close();
+
+ /* evaluate solution */
+ FullGrid<CombiDataType> fg_eval( leval.size(), leval, boundary);
+ manager.gridEval(fg_eval);
+ std::string filename( "solution.fg" );
+ fg_eval.save( filename );
+
+ std::cout << "exiting" << std::endl;
+ manager.exit();
+}
+
+int main(int argc, char** argv) {
+ MPI_Init(&argc, &argv);
+
+ /* number of process groups and number of processes per group */
+ size_t ngroup, nprocs;
+
+ DimType dim;
+ LevelVector lmin, lmax, leval;
+ IndexVector p;
+ size_t ncombi;
+ int numSteps;
+ std::vector<int> time_steps_combi;
+ FaultsInfo faultsInfo;
+
+ double time_step, time_start, time_end;
+
+ bool plot;
+
+ /* read parameter file (ctparam) */
+ const std::string fileName = "settings.ini";
+ readParameterFile(fileName, ngroup, nprocs, dim, lmin, lmax,
+ leval, p, time_step, time_start, time_end, ncombi,
+ faultsInfo, plot);
+
+ numSteps = (time_end-time_start) / time_step;
+ for (size_t i = 0; i < ncombi; ++i)
+ time_steps_combi.push_back( numSteps - i*numSteps/ncombi - 1 );
+ std::reverse(time_steps_combi.begin(), time_steps_combi.end());
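+ // time_steps_combi now holds, in ascending order, the time-step indices after which
+ // the component grids are combined and checked for SDC (ncombi combinations in total)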
+
+ // todo: read in boundary vector from ctparam
+ std::vector<bool> boundary(dim, true);
+
+ theMPISystem()->init( ngroup, nprocs );
+
+ // ProcessGroupManager and ProcessManager Code
+ if (theMPISystem()->getWorldRank() == theMPISystem()->getManagerRankWorld()) {
+
+ /* create an abstraction of the process groups for the manager's view
+ * a pgroup is identified by the ID in gcomm
+ */
+ ProcessGroupManagerContainer pgroups;
+ for (size_t i=0; i<ngroup; ++i) {
+ int pgroupRootID(i);
+ pgroups.emplace_back(
+ std::make_shared< ProcessGroupManager > ( pgroupRootID )
+ );
+ }
+
+ // ProcessManager Code
+ // create DuneTasks
+ LoadModel* loadmodel = new LinearLoadModel();
+
+ CombiMinMaxScheme combischeme(dim, lmin, lmax);
+ combischeme.createAdaptiveCombischeme();
+ combischeme.makeFaultTolerant();
+ std::vector<LevelVector> levels = combischeme.getCombiSpaces();
+ std::vector<real> coeffs = combischeme.getCoeffs();
+
+ TaskContainer tasks;
+ std::vector<int> taskIDs;
+ for (uint i = 0; i < levels.size(); ++i) {
+ Task* t = new TaskExample(dim, levels[i], boundary, coeffs[i],
+ loadmodel, time_step, numSteps, p, faultsInfo);
+ tasks.push_back(t);
+ taskIDs.push_back( t->getID() );
+ }
+
+ // output of combination setup
+ std::cout << "lmin = " << lmin << std::endl;
+ std::cout << "lmax = " << lmax << std::endl;
+ std::cout << "CombiScheme: " << std::endl;
+ std::cout << combischeme << std::endl;
+
+ // create combiparamters
+ CombiParameters params(dim, lmin, lmax, boundary, levels, coeffs, taskIDs);
+
+ // create Manager with process groups
+ ProcessManager manager(pgroups, tasks, params);
+
+ // combiparameters need to be set before starting the computation
+ manager.updateCombiParameters();
+
+ // start calculation
+ solveProblem(manager, leval, boundary, time_step, numSteps, time_steps_combi, plot, faultsInfo.sdcMethod_ );
+ }
+
+ // worker code
+ else {
+ // create abstraction of the process group from the worker's view
+ ProcessGroupWorker pgroup;
+
+ // wait for instructions from manager
+ SignalType signal = -1;
+
+ while (signal != EXIT)
+ signal = pgroup.wait();
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/graph.gnu b/distributedcombigrid/examples/combi_example_soft_faults/graph.gnu
new file mode 100644
index 000000000..353951ccf
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/graph.gnu
@@ -0,0 +1,18 @@
+#!/bin/gnuplot
+
+#set terminal pngcairo size 800,600 enhanced font "Verdana,10"
+set mouse
+set xrange [0:1]
+set yrange [0:1]
+set zrange [-2:2]
+set cbrange [-1:1]
+set xlabel "x"
+set ylabel "y"
+set style line 1 lt -1 lw 0.3
+set pm3d hidden3d 1
+do for [i = 1:100] {
+ splot "out/values.dat" index (i-1) using 1:2:3 with pm3d
+ pause 0.02
+}
+reread
+pause mouse keypress
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/run.sh b/distributedcombigrid/examples/combi_example_soft_faults/run.sh
new file mode 100755
index 000000000..fee139dc1
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/run.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+read_properties ()
+{
+ file="$1"
+ while IFS=" = " read -r key value;
+ do
+ case "$key" in
+ "ngroup")
+ ngroup=$value;;
+ "nprocs")
+ nprocs=$value;;
+ esac
+ done < "$file"
+}
+
+ngroup=0
+nprocs=0
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+read_properties "$SCRIPT_DIR/settings.ini"
+mpiprocs=$((ngroup*nprocs+1))
+
+mpirun.mpich -n "$mpiprocs" ./combi_example_soft_faults
diff --git a/distributedcombigrid/examples/combi_example_soft_faults/settings.ini b/distributedcombigrid/examples/combi_example_soft_faults/settings.ini
new file mode 100644
index 000000000..e4cdf384d
--- /dev/null
+++ b/distributedcombigrid/examples/combi_example_soft_faults/settings.ini
@@ -0,0 +1,27 @@
+[manager]
+ngroup = 1
+nprocs = 4
+
+[simulation]
+time_step = 0.001
+time_start = 0.0
+time_end = 0.05
+ncombi = 50
+plot = 1
+
+[ct]
+dim = 2
+lmin = 3 3
+lmax = 7 7
+leval = 5 5
+p = 2 2
+
+[faults]
+num_faults = 1
+iteration_faults = 3
+task_faults = 3
+sdc_index = 32 8
+sdc_mag = 2
+
+[method]
+sdc_method = 2
diff --git a/distributedcombigrid/src/sgpp/distributedcombigrid/combicom/CombiCom.hpp b/distributedcombigrid/src/sgpp/distributedcombigrid/combicom/CombiCom.hpp
index a92b6e239..29c2f5505 100644
--- a/distributedcombigrid/src/sgpp/distributedcombigrid/combicom/CombiCom.hpp
+++ b/distributedcombigrid/src/sgpp/distributedcombigrid/combicom/CombiCom.hpp
@@ -9,6 +9,7 @@
#define COMBICOM_HPP_
#include "sgpp/distributedcombigrid/mpi/MPISystem.hpp"
+#include "sgpp/distributedcombigrid/mpi/MPIUtils.hpp"
#include "sgpp/distributedcombigrid/fullgrid/FullGrid.hpp"
#include "sgpp/distributedcombigrid/sparsegrid/SGrid.hpp"
#include "sgpp/distributedcombigrid/utils/StatsContainer.hpp"
@@ -49,7 +50,7 @@ class CombiCom {
static void FGAllreduce(FullGrid<FG_ELEMENT>& fg, MPI_Comm comm);
template<class FG_ELEMENT>
- static void BetasReduce( std::vector<FG_ELEMENT>& betas, RankType r, MPI_Comm comm);
+ static void BetasReduce( std::vector<FG_ELEMENT>& betas, std::vector<FG_ELEMENT>& betasReduced, MPI_Comm comm);
// multiply dfg with coeff and add to dsg. dfg will not be changed
template<class FG_ELEMENT>
@@ -279,20 +280,16 @@ inline void CombiCom::FGAllreduce<std::complex<double> >(
MPI_DOUBLE_COMPLEX, MPI_SUM, comm);
}
template<>
-inline void CombiCom::BetasReduce<real>( std::vector<real>& betas, RankType r, MPI_Comm comm ){
+inline void CombiCom::BetasReduce<real>( std::vector<real>& betas, std::vector<real>& betasReduced,
+ MPI_Comm comm ){
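+ // combine the betas entry-wise with a custom MAX_ABS reduction so that every rank
+ // receives the value of largest magnitude (the old code reduced onto a single root rank)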
- int myrank;
- MPI_Comm_rank(comm, &myrank);
+ MPI_Op OP_MAX_ABS;
- std::vector<real> recvBetas(betas.size());
+ MPI_Op_create((MPI_User_function *) MPIUtils::MAX_ABS, 1, &OP_MAX_ABS );
- if ( myrank == r ) {
- MPI_Reduce( MPI_IN_PLACE, &betas[0], static_cast<int>(betas.size()),
- MPI_DOUBLE, MPI_MAX, r, comm);
- } else {
- MPI_Reduce(&betas[0], &recvBetas[0], static_cast<int>(betas.size()),
- MPI_DOUBLE, MPI_MAX, r, comm);
- }
+ MPI_Allreduce(betas.data(), betasReduced.data(), static_cast<int>(betas.size()), MPI_DOUBLE, OP_MAX_ABS, comm);
+
+ MPI_Op_free( &OP_MAX_ABS );
}
template<class FG_ELEMENT>
void CombiCom::FGAllreduce(FullGrid<FG_ELEMENT>& fg, MPI_Comm comm) {
}
template<class FG_ELEMENT>
-void CombiCom::BetasReduce(std::vector<FG_ELEMENT>& betas, RankType r, MPI_Comm comm) {
+void CombiCom::BetasReduce(std::vector<FG_ELEMENT>& betas, std::vector<FG_ELEMENT>& betasReduced,
+ MPI_Comm comm) {
assert(!"this type is not yet implemented");
}
diff --git a/distributedcombigrid/src/sgpp/distributedcombigrid/combischeme/CombiMinMaxScheme.cpp b/distributedcombigrid/src/sgpp/distributedcombigrid/combischeme/CombiMinMaxScheme.cpp
index 93c679cf2..427a2929f 100644
--- a/distributedcombigrid/src/sgpp/distributedcombigrid/combischeme/CombiMinMaxScheme.cpp
+++ b/distributedcombigrid/src/sgpp/distributedcombigrid/combischeme/CombiMinMaxScheme.cpp
@@ -157,6 +157,7 @@ void CombiMinMaxScheme::computeCombiCoeffsClassical(){
LevelType p = n_ - l1;
// Classical combination coefficients
coefficients_.push_back(std::pow(-1, p)*boost::math::binomial_coefficient<real>(effDim_ - 1, p) );
+
}
}
diff --git a/distributedcombigrid/src/sgpp/distributedcombigrid/fault_tolerance/FTUtils.cpp b/distributedcombigrid/src/sgpp/distributedcombigrid/fault_tolerance/FTUtils.cpp
new file mode 100644
index 000000000..6fac0f00a
--- /dev/null
+++ b/distributedcombigrid/src/sgpp/distributedcombigrid/fault_tolerance/FTUtils.cpp
@@ -0,0 +1,503 @@
+#include "sgpp/distributedcombigrid/fault_tolerance/FTUtils.hpp"
+
+namespace combigrid {
+template<typename T>
+T str_to_number(const std::string& no) {
+ T value;
+ std::stringstream stream(no);
+ stream >> value;
+
+ if (stream.fail()) {
+ std::runtime_error e(no);
+ std::cout << "Error in the conversion of " << no << "!" << std::endl;
+ throw e;
+ }
+
+ return value;
+}
+
+template<typename T>
+void remove(std::vector<T>& vec, size_t pos) {
+ auto it = vec.begin();
+ std::advance(it, pos);
+ vec.erase(it);
+}
+
+std::string python_code_caller(const std::string& script_name,
+ const LevelVectorList& levels, const int& dim) {
+ LevelType levels_no = 0;
+ LevelType level_size = 0;
+ LevelType one_level = 0;
+ std::stringstream caller;
+
+ levels_no = static_cast<LevelType>(levels.size());
+ level_size = static_cast<LevelType>(levels[0].size());
+
+ caller << "python " << script_name << " " << dim << " ";
+
+ for (int i = 0; i < levels_no; ++i) {
+ for (int j = 0; j < level_size; ++j) {
+ one_level = levels[i][j];
+ caller << one_level << " ";
+ }
+ }
+
+ return caller.str();
+}
+
+CombigridDict get_python_data(const std::string& script_run, const int& dim) {
+ FILE* stream;
+ char buffer[256];
+ std::string level_x_str;
+ std::string level_y_str;
+ std::string coeff_str;
+
+ double coeff = 0.0;
+
+ CombigridDict dict;
+
+ stream = popen(script_run.c_str(), "r");
+
+ if (stream) {
+ while (!feof(stream)) {
+ if (fgets(buffer, sizeof(buffer), stream) != NULL) {
+ std::string one_level_str;
+ int one_level = 0;
+ LevelVector levels;
+ std::stringstream temp(buffer);
+
+ for (int i = 0; i < dim; ++i) {
+ temp >> one_level_str;
+ one_level = str_to_number<int>(one_level_str);
+ levels.push_back(one_level);
+ }
+
+ temp >> coeff_str;
+ coeff = str_to_number<double>(coeff_str);
+
+ dict.insert(std::make_pair(levels, coeff));
+ }
+ }
+
+ pclose(stream);
+ } else {
+ throw "Error reading script output!";
+ }
+
+ return dict;
+}
+
+matrix create_M_matrix(const CombigridDict& aux_downset, const int& dim) {
+ int size_downset = static_cast<int>(aux_downset.size());
+ int i = 0;
+ int j = 0;
+
+ matrix M(size_downset, std::vector<double>(size_downset, 0.0));
+
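+ // M[i][j] = 1 iff level j is component-wise greater than or equal to level i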
+ for (auto ii = aux_downset.begin(); ii != aux_downset.end(); ++ii) {
+ i = static_cast<int>(ii->second);
+ j = 0;
+
+ LevelVector w;
+ for (int it = 0; it < dim; ++it) {
+ w.push_back(ii->first[it]);
+ }
+
+ for (auto jj = aux_downset.begin(); jj != aux_downset.end(); ++jj) {
+ LevelVector c;
+ for (int it = 0; it < dim; ++it) {
+ c.push_back(jj->first[it]);
+ }
+ j = static_cast<int>(jj->second);
+
+ if (test_greater(c, w)) {
+ M[i][j] = 1.0;
+ } else {
+ M[i][j] = 0.0;
+ }
+ }
+ }
+
+ return M;
+}
+
+matrix get_inv_M(const CombigridDict& aux_downset, const int& dim) {
+ int size_downset = static_cast<int>(aux_downset.size());
+ int i = 0;
+ int j = 0;
+
+ std::valarray<int> c(dim);
+ std::valarray<int> w(dim);
+ std::valarray<int> diff(dim);
+ std::valarray<int> zeros(0, dim);
+
+ matrix M_inv(size_downset, std::vector<double>(size_downset, 0.0));
+
+ for (auto ii = aux_downset.begin(); ii != aux_downset.end(); ++ii) {
+ i = static_cast<int>(ii->second);
+ for (int it = 0; it < dim; ++it) {
+ w[it] = static_cast<int>(ii->first[it]);
+ }
+
+ for (auto jj = ii; jj != aux_downset.end(); ++jj) {
+ j = static_cast<int>(jj->second);
+ for (int it = 0; it < dim; ++it) {
+ c[it] = static_cast<int>(jj->first[it]);
+ }
+
+ diff = c - w;
+
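+ // inclusion-exclusion: the entry is (-1)^{sum(diff)} when c lies in the unit
+ // hypercube above w; all other entries stay zero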
+ if (((diff.sum() > 0) || (diff.sum() <= dim))
+ && ((diff.max() <= 1) && (diff >= zeros).min())) {
+ M_inv[i][j] = pow(-1, diff.sum());
+ }
+ }
+ }
+
+ return M_inv;
+}
+
+CombigridDict set_entire_downset_dict(const LevelVectorList levels,
+ const CombigridDict& received_dict, const int& dim) {
+ LevelVector level_min = levels.front();
+ LevelVector level_max = levels.back();
+ CombigridDict active_downset;
+ CombigridDict output;
+
+ LevelVector level_active_downset;
+
+ LevelVector level;
+ LevelVectorList all_levels;
+ LevelVectorList feasible_levels;
+
+ double key = 0.0;
+
+ all_levels = mindex(dim, level_max);
+
+ for (auto ii = received_dict.begin(); ii != received_dict.end(); ++ii) {
+ if (ii->second > 0.0) {
+ active_downset.insert(std::make_pair(ii->first, ii->second));
+ }
+ }
+
+ for (auto ii = active_downset.begin(); ii != active_downset.end(); ++ii) {
+ level_active_downset = ii->first;
+
+ for (unsigned int i = 0; i < all_levels.size(); ++i) {
+ if (test_greater(level_active_downset, all_levels[i])
+ && test_greater(all_levels[i], level_min)) {
+ feasible_levels.push_back(all_levels[i]);
+ }
+ }
+ }
+
+ for (unsigned int i = 0; i < feasible_levels.size(); ++i) {
+ level = feasible_levels[i];
+ auto ii = received_dict.find(level);
+
+ if (ii != received_dict.end()) {
+ key = ii->second;
+ output.insert(std::make_pair(level, key));
+ } else {
+ key = 0.0;
+ output.insert(std::make_pair(level, key));
+ }
+ }
+
+ return output;
+}
+
+CombigridDict create_aux_entire_dict(const CombigridDict& entire_downset,
+ const int& dim) {
+ real key = 0;
+ int i = 0;
+
+ CombigridDict aux_dict;
+
+ for (auto ii = entire_downset.begin(); ii != entire_downset.end(); ++ii) {
+ LevelVector levels;
+ key = static_cast<real>(i);
+
+ for (int i = 0; i < dim; ++i) {
+ levels.push_back(ii->first[i]);
+ }
+
+ ++i;
+ aux_dict.insert(std::make_pair(levels, key));
+ }
+
+ return aux_dict;
+}
+
+LevelVectorList get_downset_indices(const CombigridDict& entire_downset,
+ const int& dim) {
+ LevelVectorList indices;
+
+ for (auto ii = entire_downset.begin(); ii != entire_downset.end(); ++ii) {
+ LevelVector index;
+
+ for (int i = 0; i < dim; ++i) {
+ index.push_back(ii->first[i]);
+ }
+
+ indices.push_back(index);
+ }
+
+ return indices;
+}
+
+LevelVectorList filter_faults(const LevelVectorList& faults_input, const IndexType& l_max,
+ const CombigridDict& received_dict) {
+ int no_faults = 0;
+ int level_fault = 0;
+ LevelVectorList faults_output;
+
+ no_faults = static_cast<int>(faults_input.size());
+
+ for (int i = 0; i < no_faults; ++i) {
+ auto it = received_dict.find(faults_input[i]);
+
+ if (it != received_dict.end()) {
+ level_fault = std::accumulate(faults_input[i].begin(),
+ faults_input[i].end(), 0);
+ if ((level_fault == l_max) || (level_fault == (l_max - 1))) {
+ faults_output.push_back(faults_input[i]);
+ }
+ }
+ }
+
+ return faults_output;
+}
+
+CombigridDict create_out_dict(const CombigridDict& given_downset,
+ const std::vector& new_c, const int& dim) {
+ real key = 0;
+ int i = 0;
+
+ CombigridDict out_dict;
+
+ for (auto ii = given_downset.begin(); ii != given_downset.end(); ++ii) {
+ LevelVector levels;
+ key = new_c[i];
+
+ for (int i = 0; i < dim; ++i) {
+ levels.push_back(ii->first[i]);
+ }
+
+ ++i;
+ out_dict.insert(std::make_pair(levels, key));
+ }
+
+ return out_dict;
+}
+
+std::string set_aux_var_name(const std::string& var_name, const int& index) {
+ std::stringstream aux_var;
+ aux_var << var_name << index;
+
+ return aux_var.str();
+}
+// generate integer random numbers between 0 and #levels-1
+//int generate_random_fault(const int& no_of_levels) {
+// std::random_device dev;
+// std::mt19937 rng(dev());
+// std::uniform_int_distribution rand_num(0, no_of_levels - 1);
+//
+// return rand_num(rng);
+//}
+
+std::vector<double> gen_rand(const int& size) {
+ double rand_var = 0.0;
+ std::vector<double> output;
+
+ for (int i = 0; i < size; ++i) {
+ rand_var = 1e-2 * (std::rand() % 10);
+ output.push_back(rand_var);
+ }
+
+ return output;
+}
+
+int get_size_downset(const std::vector<int>& level_max, const int& dim) {
+ int size = 1;
+ int min_level_max = *std::min_element(level_max.begin(), level_max.end());
+
+ for (int i = 0; i < dim; ++i) {
+ size *= (min_level_max + i);
+ }
+
+ size = static_cast<int>(size / (factorial(dim)));
+
+ return size;
+}
+
+int l1_norm(const LevelVector& u) {
+ int norm = 0;
+
+ for (auto elem : u) {
+ norm += abs(static_cast<int>(elem));
+ }
+
+ return norm;
+}
+
+int factorial(const int& dim) {
+ int fact = 0;
+
+ if (dim == 0 || dim == 1) {
+ fact = 1;
+ } else {
+ fact = dim * factorial(dim - 1);
+ }
+
+ return fact;
+}
+
+bool test_greater(const LevelVector& b, const LevelVector& a) {
+ int dim = static_cast<int>(a.size());
+ bool test = true;
+
+ for (int i = 0; i < dim; ++i) {
+ test *= (b[i] >= a[i]) ? true : false;
+ }
+
+ return test;
+}
+
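+// enumerates all multi-indices with entries in [1, max(level_max)] whose
+// l1 norm does not exceed sum(level_max)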
+LevelVectorList mindex(const int& dimension, const LevelVector& level_max) {
+ int j = 0;
+ int norm = 0;
+ IndexType sum_level_max = 0;
+
+ LevelVector temp(dimension, 1);
+ LevelVectorList mindex_result;
+
+ auto upper_limit = std::max_element(level_max.begin(), level_max.end());
+
+ for (auto elem : level_max) {
+ sum_level_max += elem;
+ }
+
+ while (true) {
+ norm = l1_norm(temp);
+
+ if (norm <= sum_level_max) {
+ mindex_result.push_back(temp);
+ }
+
+ for (j = dimension - 1; j >= 0; --j) {
+ if (++temp[j] <= *upper_limit)
+ break;
+ else
+ temp[j] = 1;
+ }
+
+ if (j < 0)
+ break;
+ }
+
+ return mindex_result;
+}
+
+LevelVectorList check_dimensionality(const LevelVectorList& input_levels,
+ LevelVector& ignored_dimensions) {
+ LevelVector l_min = input_levels[0];
+ LevelVector l_max = input_levels[1];
+
+ LevelVector new_l_min;
+ LevelVector new_l_max;
+ LevelVectorList new_levels;
+
+ for ( size_t i = 0; i < l_min.size(); ++i) {
+ if (l_max[i] == l_min[i]) {
+ ignored_dimensions.push_back(i);
+ } else {
+ new_l_min.push_back(l_min[i]);
+ new_l_max.push_back(l_max[i]);
+ }
+ }
+
+ new_levels.push_back(new_l_min);
+ new_levels.push_back(new_l_max);
+
+ return new_levels;
+}
+
+LevelVectorList check_faults(const LevelVectorList& input_faults,
+ const LevelVector& ignored_dimensions) {
+ LevelVectorList new_faults;
+
+ for (unsigned int i = 0; i < input_faults.size(); ++i) {
+ LevelVector new_fault;
+
+ for (unsigned int j = 0; j < input_faults[0].size(); ++j) {
+ if (std::find(ignored_dimensions.begin(), ignored_dimensions.end(), j)
+ == ignored_dimensions.end()) {
+ new_fault.push_back(input_faults[i][j]);
+ }
+ }
+
+ new_faults.push_back(new_fault);
+ }
+
+ return new_faults;
+}
+
+CombigridDict set_new_given_dict(const CombigridDict& given_dict,
+ const LevelVector& ignored_dimensions, const int& dim) {
+ real key = 0.0;
+ CombigridDict new_given_dict;
+
+ for (auto ii = given_dict.begin(); ii != given_dict.end(); ++ii) {
+ LevelVector new_level;
+
+ for (int i = 0; i < dim; ++i) {
+ if (std::find(ignored_dimensions.begin(), ignored_dimensions.end(), i)
+ == ignored_dimensions.end()) {
+ new_level.push_back(ii->first[i]);
+ }
+ }
+
+ key = ii->second;
+ new_given_dict.insert(std::make_pair(new_level, key));
+ }
+
+ return new_given_dict;
+}
+
+void check_input_levels(const LevelVectorList& levels) {
+ LevelVector l_min = levels[0];
+ LevelVector l_max = levels[1];
+ LevelVector c;
+
+ for (unsigned int i = 0; i < l_min.size(); ++i) {
+ c.push_back(l_max[i] - l_min[i]);
+ }
+
+ if (std::adjacent_find(c.begin(), c.end(), std::not_equal_to<LevelType>())
+ == c.end()) {
+ std::cout << "Correct input levels!" << std::endl;
+ } else {
+ std::cout << "Input levels are incorrect!" << std::endl;
+ std::cout
+ << "Please input them of the form: l_max = l_min + c*ones(dim), c>=1, integer"
+ << std::endl;
+ exit(0);
+ }
+}
+
+std::vector<real> select_coeff_downset(const std::vector<real>& all_c,
+ const CombigridDict& given_downset, const CombigridDict& aux_downset) {
+ int given_downset_index = 0;
+ std::vector<real> downset_c;
+
+ for (auto ii = aux_downset.begin(); ii != aux_downset.end(); ++ii) {
+ if (given_downset.find(ii->first) != given_downset.end()) {
+ given_downset_index = static_cast<int>(ii->second);
+ downset_c.push_back(all_c.at(given_downset_index));
+ }
+ }
+
+ return downset_c;
+}
+} // namespace combigrid
diff --git a/distributedcombigrid/src/sgpp/distributedcombigrid/fault_tolerance/FTUtils.hpp b/distributedcombigrid/src/sgpp/distributedcombigrid/fault_tolerance/FTUtils.hpp
new file mode 100644
index 000000000..2ad94153f
--- /dev/null
+++ b/distributedcombigrid/src/sgpp/distributedcombigrid/fault_tolerance/FTUtils.hpp
@@ -0,0 +1,149 @@
+#ifndef HELPER_HPP_
+#define HELPER_HPP_
+
+#include <algorithm>
+#include <iostream>
+#include <numeric>
+#include <sstream>
+#include <string>
+#include <valarray>
+#include <vector>