From c21611ee5f5d6f56de83dd34ed69fe68a82e6c28 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Mon, 17 Feb 2025 15:17:17 +0000
Subject: [PATCH 01/13] Interface rewritten to use C bindings

---
 examples/extended/parallel/MPI/README.md      |   6 +-
 .../parallel/MPI/source/CMakeLists.txt        |   2 +-
 .../MPI/source/include/G4MPImanager.hh        |  11 +-
 .../MPI/source/include/G4MPIscorerMerger.hh   |  27 +--
 .../MPI/source/include/G4VUserMPIrunMerger.hh |  12 +-
 .../MPI/source/src/G4MPIhistoMerger.cc        |   7 +-
 .../parallel/MPI/source/src/G4MPImanager.cc   | 160 ++++++++++--------
 .../MPI/source/src/G4MPIscorerMerger.cc       |  79 ++++-----
 .../MPI/source/src/G4VUserMPIrunMerger.cc     |  48 +++---
 9 files changed, 200 insertions(+), 152 deletions(-)

diff --git a/examples/extended/parallel/MPI/README.md b/examples/extended/parallel/MPI/README.md
index 4593ed17ec3..39d1231f31c 100644
--- a/examples/extended/parallel/MPI/README.md
+++ b/examples/extended/parallel/MPI/README.md
@@ -27,9 +27,9 @@ http://www.open-mpi.org/
 MPI support:
 ------------
 G4mpi has been tested with the following MPI flavors:
- *   OpenMPI 1.8.1
- *   MPICH 3.2
- *   Intel MPI 5.0.1
+ *   OpenMPI 5.0.2 and 4.1.4
+ *   ~~MPICH 3.2~~
+ *   ~~Intel MPI 5.0.1~~
  
 ### CMake
 
diff --git a/examples/extended/parallel/MPI/source/CMakeLists.txt b/examples/extended/parallel/MPI/source/CMakeLists.txt
index fe3274214a9..a19aec284b2 100644
--- a/examples/extended/parallel/MPI/source/CMakeLists.txt
+++ b/examples/extended/parallel/MPI/source/CMakeLists.txt
@@ -10,7 +10,7 @@ project(${_projname})
 
 cmake_minimum_required(VERSION 3.16...3.27)
 find_package(MPI REQUIRED)
-find_package(Geant4 10.2.0 REQUIRED)
+find_package(Geant4 REQUIRED)
 include(${Geant4_USE_FILE})
 
 #------------------------------------------------------------------------------
diff --git a/examples/extended/parallel/MPI/source/include/G4MPImanager.hh b/examples/extended/parallel/MPI/source/include/G4MPImanager.hh
index 6fea6854039..b85ec58beb8 100644
--- a/examples/extended/parallel/MPI/source/include/G4MPImanager.hh
+++ b/examples/extended/parallel/MPI/source/include/G4MPImanager.hh
@@ -34,6 +34,7 @@
 
 #include <fstream>
 #include <pthread.h>
+#include <vector>
 
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
   TypeName(const TypeName&);               \
@@ -123,11 +124,14 @@ class G4MPImanager
     // misc
     void ShowHelp() const;
 
-    const MPI::Intracomm* GetComm() const { return &COMM_G4COMMAND_; }
+    const MPI_Comm* GetComm() const { return &COMM_G4COMMAND_; }
     const MPI_Comm* GetProcessingComm() const { return &processing_comm_; }
     const MPI_Comm* GetCollectingComm() const { return &collecting_comm_; }
     const MPI_Comm* GetAllComm() const { return &all_comm_; }
 
+    std::vector<G4String> ReturnArguments() { return _options; }
+
+
   private:
     DISALLOW_COPY_AND_ASSIGN(G4MPImanager);
 
@@ -153,11 +157,12 @@ class G4MPImanager
     G4bool is_slave_;
     G4bool is_extra_worker_;
     G4int rank_;
+    G4int provided_;
     G4int size_;  // processing comm size
     G4int world_size_;  // world comm size
 
     // MPI communicator (when no extra ranks)
-    MPI::Intracomm COMM_G4COMMAND_;
+    MPI_Comm COMM_G4COMMAND_;
     // MPI communicator (processing ranks - if ntuple merging)
     MPI_Comm processing_comm_;
     // MPI communicator (collecting ranks - if ntuple merging)
@@ -188,6 +193,8 @@ class G4MPImanager
     // parallel parameters
     G4double master_weight_;
     G4int nof_extra_workers_;
+
+    std::vector<G4String> _options;
 };
 
 // ====================================================================
diff --git a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
index cc2a9ec36b3..77117732636 100644
--- a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
+++ b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
@@ -2,24 +2,24 @@
 // ********************************************************************
 // * License and Disclaimer                                           *
 // *                                                                  *
-// * The  Geant4 software  is  copyright of the Copyright Holders  of *
-// * the Geant4 Collaboration.  It is provided  under  the terms  and *
-// * conditions of the Geant4 Software License,  included in the file *
-// * LICENSE and available at  http://cern.ch/geant4/license .  These *
+// * The Geant4 software is copyright of the Copyright Holders of *
+// * the Geant4 Collaboration. It is provided under the terms and *
+// * conditions of the Geant4 Software License, included in the file *
+// * LICENSE and available at http://cern.ch/geant4/license. These *
 // * include a list of copyright holders.                             *
 // *                                                                  *
 // * Neither the authors of this software system, nor their employing *
-// * institutes,nor the agencies providing financial support for this *
-// * work  make  any representation or  warranty, express or implied, *
-// * regarding  this  software system or assume any liability for its *
-// * use.  Please see the license in the file  LICENSE  and URL above *
+// * institutes, nor the agencies providing financial support for this *
+// * work make any representation or warranty, express or implied, *
+// * regarding this software system or assume any liability for its *
+// * use. Please see the license in the file LICENSE and URL above *
 // * for the full disclaimer and the limitation of liability.         *
 // *                                                                  *
-// * This  code  implementation is the result of  the  scientific and *
+// * This code implementation is the result of the scientific and *
 // * technical work of the GEANT4 collaboration.                      *
-// * By using,  copying,  modifying or  distributing the software (or *
-// * any work based  on the software)  you  agree  to acknowledge its *
-// * use  in  resulting  scientific  publications,  and indicate your *
+// * By using, copying, modifying or distributing the software (or *
+// * any work based on the software) you agree to acknowledge its *
+// * use in resulting scientific publications, and indicate your *
 // * acceptance of all terms of the Geant4 Software license.          *
 // ********************************************************************
 //
@@ -103,8 +103,9 @@ class G4MPIscorerMerger
     G4ScoringManager* scoringManager;
     unsigned int commSize;
     unsigned int destinationRank;
-    MPI::Intracomm comm;
+    MPI_Comm comm;  // Changed from MPI::Intracomm to MPI_Comm
     G4int verbose;
+    void G4mpi_barrier(MPI_Comm* comm) {MPI_Barrier(*comm);};
 };
 
 #endif  // G4MPISCORERMERGER_HH
diff --git a/examples/extended/parallel/MPI/source/include/G4VUserMPIrunMerger.hh b/examples/extended/parallel/MPI/source/include/G4VUserMPIrunMerger.hh
index f1cc93e9e28..32331d4d10e 100644
--- a/examples/extended/parallel/MPI/source/include/G4VUserMPIrunMerger.hh
+++ b/examples/extended/parallel/MPI/source/include/G4VUserMPIrunMerger.hh
@@ -50,16 +50,16 @@ class G4VUserMPIrunMerger
     virtual void Pack() = 0;
     virtual G4Run* UnPack() = 0;
 
-    void InputUserData(/*const*/ void* input_data, const MPI::Datatype& dt, int count)
+    void InputUserData(/*const*/ void* input_data, const MPI_Datatype& dt, int count)
     {
       input_userdata.push_back(const_registered_data{input_data, dt, count});
     }
-    void OutputUserData(void* input_data, const MPI::Datatype& dt, int count)
+    void OutputUserData(void* input_data, const MPI_Datatype& dt, int count)
     {
       output_userdata.push_back(registered_data{input_data, dt, count});
     }
 
-    // void GetUserData(void* output_data,const MPI::Datatype& dt, int count);
+    // void GetUserData(void* output_data,const MPI_Datatype& dt, int count);
 
     void SetupOutputBuffer(char* buff, G4int size, G4int position)
     {
@@ -91,7 +91,7 @@ class G4VUserMPIrunMerger
     unsigned int destinationRank;
     G4Run* run;
     unsigned int commSize;
-    MPI::Intracomm COMM_G4COMMAND_;
+    MPI_Comm COMM_G4COMMAND_;
     G4int verbose;
     long bytesSent;
 
@@ -103,7 +103,7 @@ class G4VUserMPIrunMerger
         // const_registered_data(const_registered_data&&) = default;
         // const_registered_data& operator=(const_registered_data&&) = default;
         /*const*/ void* p_data;
-        /*const*/ MPI::Datatype dt;
+        /*const*/ MPI_Datatype dt;
         /*const*/ int count;
     };
     std::vector<const_registered_data> input_userdata;
@@ -114,7 +114,7 @@ class G4VUserMPIrunMerger
         registered_data(const registered_data&) = default;
         registered_data& operator=(const registered_data&) = default;
         void* p_data;
-        /*const*/ MPI::Datatype dt;
+        /*const*/ MPI_Datatype dt;
         /*const*/ int count;
     };
     std::vector<registered_data> output_userdata;
diff --git a/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc b/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc
index 8a229e41a35..7f393ce7191 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc
@@ -52,8 +52,9 @@ void G4MPIhistoMerger::Merge()
     G4cout << "Starting merging of histograms" << G4endl;
   }
 
-  const MPI::Intracomm* parentComm = G4MPImanager::GetManager()->GetComm();
-  MPI::Intracomm comm = parentComm->Dup();
+  const MPI_Comm* parentComm = G4MPImanager::GetManager()->GetComm();
+  MPI_Comm comm;// = parentComm->Dup();
+  MPI_Comm_dup(*parentComm, &comm);
 
   G4bool verbose = (verboseLevel > 1);
   G4int tag = G4MPImanager::kTAG_HISTO;
@@ -68,5 +69,5 @@ void G4MPIhistoMerger::Merge()
   if (verboseLevel > 0) {
     G4cout << "End merging of histograms" << G4endl;
   }
-  comm.Free();
+  MPI_Comm_free(&comm);
 }
diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index 04e357970f1..bf0136ea5ff 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -44,6 +44,7 @@
 #include <getopt.h>
 #include <stdio.h>
 #include <time.h>
+#include <vector>
 
 G4MPImanager* G4MPImanager::g4mpi_ = NULL;
 
@@ -84,7 +85,10 @@ G4MPImanager::G4MPImanager(int nof_extra_workers)
     nof_extra_workers_(nof_extra_workers)
 {
   // MPI::Init();
-  MPI::Init_thread(MPI::THREAD_SERIALIZED);
+  MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided_);
+  if (provided_ < MPI_THREAD_SERIALIZED) {
+    std::cerr << "Warning: MPI does not provide the requested threading support!" << std::endl;
+  }
   Initialize();
 }
 
@@ -102,8 +106,12 @@ G4MPImanager::G4MPImanager(int argc, char** argv, int nof_extra_workers)
     master_weight_(1.),
     nof_extra_workers_(nof_extra_workers)
 {
-  // MPI::Init(argc, argv);
-  MPI::Init_thread(argc, argv, MPI::THREAD_SERIALIZED);
+  int provided;
+  MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided_);
+  if (provided_ < MPI_THREAD_SERIALIZED) {
+    std::cerr << "Warning: MPI does not provide the requested threading support!" << std::endl;
+  }
+
   Initialize();
   ParseArguments(argc, argv);
 }
@@ -133,10 +141,10 @@ G4MPImanager::~G4MPImanager()
     }
   }
   else {
-    COMM_G4COMMAND_.Free();
+    MPI_Comm_free(&COMM_G4COMMAND_);
   }
 
-  MPI::Finalize();
+  MPI_Finalize();
 }
 
 // --------------------------------------------------------------------------
@@ -173,14 +181,14 @@ void G4MPImanager::Initialize()
   g4mpi_ = this;
 
   // get rank information
-  world_size_ = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &world_size_);//world_size_ = MPI::COMM_WORLD.Get_size();
   if (world_size_ - nof_extra_workers_ <= 0) {
     G4Exception("G4MPImanager::SetExtraWorker()", "MPI001", JustWarning,
                 "Cannot reserve extra ranks: the MPI size is not sufficient.");
     nof_extra_workers_ = 0;
   }
   size_ = world_size_ - nof_extra_workers_;
-  rank_ = MPI::COMM_WORLD.Get_rank();
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank_); //rank_ = MPI::COMM_WORLD.Get_rank();
   is_master_ = (rank_ == kRANK_MASTER);
   is_slave_ = (rank_ != kRANK_MASTER);
   is_extra_worker_ = false;
@@ -221,12 +229,12 @@ void G4MPImanager::Initialize()
     MPI_Comm_create_group(MPI_COMM_WORLD, all_group_, 0, &all_comm_);
 
     // COMM_G4COMMAND_ = processing_comm_ copy
-    COMM_G4COMMAND_ = MPI::Intracomm(processing_comm_);
+    COMM_G4COMMAND_ = MPI_Comm(processing_comm_);
   }
   else {
     // G4cout << "No extra workers requested" << G4endl;
     // initialize MPI communicator
-    COMM_G4COMMAND_ = MPI::COMM_WORLD.Dup();
+    MPI_Comm_dup(MPI_COMM_WORLD, &COMM_G4COMMAND_);
   }
 
   is_extra_worker_ = (collecting_comm_ != MPI_COMM_NULL);
@@ -257,50 +265,58 @@ void G4MPImanager::Initialize()
 // --------------------------------------------------------------------------
 void G4MPImanager::ParseArguments(int argc, char** argv)
 {
+  _options.clear();
   G4int qhelp = 0;
   G4String ofprefix = "mpi";
 
-  G4int c;
-  while (1) {
-    G4int option_index = 0;
-    static struct option long_options[] = {{"help", no_argument, NULL, 'h'},
-                                           {"verbose", no_argument, NULL, 'v'},
-                                           {"init", required_argument, NULL, 'i'},
-                                           {"ofile", optional_argument, NULL, 'o'},
-                                           {NULL, 0, NULL, 0}};
-
-    opterr = 0;  // suppress message
-    c = getopt_long(argc, argv, "hvi:o", long_options, &option_index);
-    opterr = 1;
-
-    if (c == -1) break;
-
-    switch (c) {
-      case 'h':
-        qhelp = 1;
-        break;
-      case 'v':
-        verbose_ = 1;
-        break;
-      case 'i':
-        qinitmacro_ = true;
-        init_file_name_ = optarg;
-        break;
-      case 'o':
-        qfcout_ = true;
-        if (optarg) ofprefix = optarg;
-        break;
-      default:
-        G4cerr << "*** invalid options specified." << G4endl;
-        std::exit(EXIT_FAILURE);
-        break;
+  G4int optind = -1;
+
+  static struct option long_options[] = {{"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {"init", required_argument, NULL, 'i'}, {"ofile", optional_argument, NULL, 'o'}, {NULL, 0, NULL, 0}};
+
+  for (int i = 1; i < argc; i++)
+  {
+    G4String arg = argv[i];
+
+    if (arg == "help")
+    {
+      qhelp = 1;
+    }
+    else if (arg == "verbose")
+    {
+      verbose_ = 1;
+    }
+    else if (arg == "init")
+    {
+      qinitmacro_ = true;
+      init_file_name_ = optarg;
+    }
+    else if (arg == "ofile")
+    {
+      qfcout_ = true;
+      if (optarg) ofprefix = optarg;
+    }
+    else if (arg[0] == 'm')
+    {
+      G4String sub = arg.substr(0,5);
+      if (sub=="macro")
+      {
+        optind = i;
+        qbatchmode_ = true;
+      }
+    }
+    //default:
+      //G4cerr << "*** invalid options specified." << G4endl;
+      //std::exit(EXIT_FAILURE);
+    else
+    {
+      _options.push_back(arg);
     }
   }
 
   // show help
   if (qhelp) {
     if (is_master_) ShowHelp();
-    MPI::Finalize();
+    MPI_Finalize();
     std::exit(EXIT_SUCCESS);
   }
 
@@ -314,10 +330,10 @@ void G4MPImanager::ParseArguments(int argc, char** argv)
   }
 
   // non-option ARGV-elements ...
-  if (optind < argc) {
-    qbatchmode_ = true;
+  if (qbatchmode_) {
     macro_file_name_ = argv[optind];
   }
+
 }
 
 // ====================================================================
@@ -365,7 +381,7 @@ void G4MPImanager::ShowStatus()
 
     // receive from each slave
     for (G4int islave = 1; islave < size_; islave++) {
-      COMM_G4COMMAND_.Recv(buff, G4MPIstatus::kNSIZE, MPI::INT, islave, kTAG_G4STATUS);
+      MPI_Recv(buff, G4MPIstatus::kNSIZE, MPI_INT, islave, kTAG_G4STATUS, COMM_G4COMMAND_, MPI_STATUS_IGNORE);
       status_->UnPack(buff);
       status_->Print();
 
@@ -389,7 +405,7 @@ void G4MPImanager::ShowStatus()
   }
   else {
     status_->Pack(buff);
-    COMM_G4COMMAND_.Send(buff, G4MPIstatus::kNSIZE, MPI::INT, kRANK_MASTER, kTAG_G4STATUS);
+    MPI_Send(buff, G4MPIstatus::kNSIZE, MPI_INT, kRANK_MASTER, kTAG_G4STATUS, COMM_G4COMMAND_);
   }
 }
 
@@ -413,13 +429,13 @@ void G4MPImanager::ShowSeeds()
     G4cout << "* rank= " << rank_ << " seed= " << G4Random::getTheSeed() << G4endl;
     // receive from each slave
     for (G4int islave = 1; islave < size_; islave++) {
-      COMM_G4COMMAND_.Recv(&buff, 1, MPI::LONG, islave, kTAG_G4SEED);
+      MPI_Recv(&buff, 1, MPI_LONG, islave, kTAG_G4SEED, COMM_G4COMMAND_, MPI_STATUS_IGNORE);
       G4cout << "* rank= " << islave << " seed= " << buff << G4endl;
     }
   }
   else {  // slaves
     buff = G4Random::getTheSeed();
-    COMM_G4COMMAND_.Send(&buff, 1, MPI::LONG, kRANK_MASTER, kTAG_G4SEED);
+    MPI_Send(&buff, 1, MPI_LONG, kRANK_MASTER, kTAG_G4SEED, COMM_G4COMMAND_);
   }
 }
 
@@ -441,21 +457,27 @@ G4bool G4MPImanager::CheckThreadStatus()
     qstatus = (thread_id_ != 0);
     // get slave status
     for (G4int islave = 1; islave < size_; islave++) {
-      MPI::Request request = COMM_G4COMMAND_.Irecv(&buff, 1, MPI::UNSIGNED, islave, kTAG_G4STATUS);
-      while (!request.Test()) {
-        ::Wait(1000);
+      MPI_Request request;
+      MPI_Irecv(&buff, 1, MPI_UNSIGNED, islave, kTAG_G4STATUS, COMM_G4COMMAND_, &request);
+
+      int flag = 0;  // Variable to hold the status of the test
+      while (!flag) {
+        MPI_Test(&request, &flag, MPI_STATUS_IGNORE);  // Check if the operation is complete
+        ::Wait(1000);  // Optionally wait, replace with your own wait function if needed
       }
+
       qstatus |= buff;
     }
   }
   else {
     buff = (thread_id_ != 0);
-    COMM_G4COMMAND_.Send(&buff, 1, MPI::UNSIGNED, kRANK_MASTER, kTAG_G4STATUS);
+    MPI_Send(&buff, 1, MPI_UNSIGNED, kRANK_MASTER, kTAG_G4STATUS, COMM_G4COMMAND_);
+
   }
 
   // broadcast
   buff = qstatus;  // for master
-  COMM_G4COMMAND_.Bcast(&buff, 1, MPI::UNSIGNED, kRANK_MASTER);
+  MPI_Bcast(&buff, 1, MPI_UNSIGNED, kRANK_MASTER, COMM_G4COMMAND_);
   qstatus = buff;  // for slave
 
   if (qstatus != 0)
@@ -540,21 +562,23 @@ G4String G4MPImanager::BcastCommand(const G4String& command)
   // "command" is not yet fixed in slaves at this time.
 
   // waiting message exhausts CPU in LAM!
-  // COMM_G4COMMAND_.Bcast(sbuff, ssize, MPI::CHAR, RANK_MASTER);
+  // COMM_G4COMMAND_->Bcast(sbuff, ssize, MPI::CHAR, RANK_MASTER);
 
   // another implementation
   if (is_master_) {
     for (G4int islave = 1; islave < size_; islave++) {
-      COMM_G4COMMAND_.Send(sbuff, kBUFF_SIZE, MPI::CHAR, islave, kTAG_G4COMMAND);
+      MPI_Send(sbuff, kBUFF_SIZE, MPI_CHAR, islave, kTAG_G4COMMAND, COMM_G4COMMAND_);
     }
   }
   else {
     // try non-blocking receive
-    MPI::Request request =
-      COMM_G4COMMAND_.Irecv(sbuff, kBUFF_SIZE, MPI::CHAR, kRANK_MASTER, kTAG_G4COMMAND);
-    // polling...
-    while (!request.Test()) {
-      ::Wait(1000);
+    MPI_Request request;
+    MPI_Irecv(sbuff, kBUFF_SIZE, MPI_CHAR, kRANK_MASTER, kTAG_G4COMMAND, COMM_G4COMMAND_, &request);
+
+    int flag = 0;  // Variable to hold the status of the test
+    while (!flag) {
+        MPI_Test(&request, &flag, MPI_STATUS_IGNORE);  // Check if the non-blocking receive is done
+        ::Wait(1000);  // Optionally wait for a bit before checking again
     }
   }
 
@@ -654,16 +678,20 @@ void G4MPImanager::WaitBeamOn()
       // receive from each slave
       for (G4int islave = 1; islave < size_; islave++) {
         // G4cout << "calling Irecv for islave " << islave << G4endl;
-        MPI::Request request = COMM_G4COMMAND_.Irecv(&buff, 1, MPI::INT, islave, kTAG_G4STATUS);
-        while (!request.Test()) {
-          ::Wait(1000);
+        MPI_Request request;
+        MPI_Irecv(&buff, 1, MPI_INT, islave, kTAG_G4STATUS, COMM_G4COMMAND_, &request);
+
+        int flag = 0;  // Variable to hold the status of the test
+        while (!flag) {
+            MPI_Test(&request, &flag, MPI_STATUS_IGNORE);  // Check if the non-blocking receive is done
+            ::Wait(1000);  // Optionally wait for a bit before checking again
         }
       }
     }
     else {
       buff = 1;
       // G4cout << "calling send for i " << kRANK_MASTER << G4endl;
-      COMM_G4COMMAND_.Send(&buff, 1, MPI::INT, kRANK_MASTER, kTAG_G4STATUS);
+      MPI_Send(&buff, 1, MPI_INT, kRANK_MASTER, kTAG_G4STATUS, COMM_G4COMMAND_);
     }
   }
 }
diff --git a/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc b/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc
index 86428744109..bf9a0433d59 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc
@@ -49,20 +49,20 @@ namespace
 struct MPIStatDouble : public G4StatDouble
 {
     G4int verbose;
-    inline void Pack(void* buffer, int bufferSize, int* position, MPI::Intracomm& comm) const
+    inline void Pack(void* buffer, int bufferSize, int* position, MPI_Comm& comm) const
     {
       DMSG(4, "Packing G4StatDouble(n,scale,sum_w,sum_w2,sum_wx,sum_wx2): "
                 << m_n << " " << m_scale << " " << m_sum_w << " " << m_sum_w2 << " " << m_sum_wx
                 << " " << m_sum_wx2);
-      MPI_Pack(&m_n, 1, MPI::INT, buffer, bufferSize, position, comm);
+      MPI_Pack(&m_n, 1, MPI_INT, buffer, bufferSize, position, comm);
       const G4double data[]{m_scale, m_sum_w, m_sum_w2, m_sum_wx, m_sum_wx2};
-      MPI_Pack(&data, 5, MPI::DOUBLE, buffer, bufferSize, position, comm);
+      MPI_Pack(&data, 5, MPI_DOUBLE, buffer, bufferSize, position, comm);
     }
-    inline void UnPack(void* buffer, int bufferSize, int* position, MPI::Intracomm& comm)
+    inline void UnPack(void* buffer, int bufferSize, int* position, MPI_Comm& comm)
     {
-      MPI_Unpack(buffer, bufferSize, position, &m_n, 1, MPI::INT, comm);
+      MPI_Unpack(buffer, bufferSize, position, &m_n, 1, MPI_INT, comm);
       G4double data[5];
-      MPI_Unpack(buffer, bufferSize, position, data, 5, MPI::DOUBLE, comm);
+      MPI_Unpack(buffer, bufferSize, position, data, 5, MPI_DOUBLE, comm);
       m_scale = data[0];
       m_sum_w = data[1];
       m_sum_w2 = data[2];
@@ -148,8 +148,8 @@ void G4MPIscorerMerger::Merge()
     DMSG(1, "Comm world size is 1, nothing to do");
     return;
   }
-  const MPI::Intracomm* parentComm = G4MPImanager::GetManager()->GetComm();
-  comm = parentComm->Dup();
+  MPI_Comm_dup(*(G4MPImanager::GetManager()->GetComm()), &comm);
+
   DestroyBuffer();
 
   // ANDREA:->
@@ -168,14 +168,15 @@ void G4MPIscorerMerger::Merge()
   // ANDREA:<-
 
   bytesSent = 0;
-  const G4double sttime = MPI::Wtime();
+  const G4double sttime = MPI_Wtime();
 
   // Use G4MPIutils to optimize communications between ranks
   typedef std::function<void(unsigned int)> handler_t;
   using std::placeholders::_1;
   handler_t sender = std::bind(&G4MPIscorerMerger::Send, this, _1);
   handler_t receiver = std::bind(&G4MPIscorerMerger::Receive, this, _1);
-  std::function<void(void)> barrier = std::bind(&MPI::Intracomm::Barrier, &comm);
+  //std::function<void(void)> barrier = std::bind(MPI_Barrier, comm);
+  std::function<void(void)> barrier = std::bind(MPI_Barrier, comm);
   G4mpi::Merge(sender, receiver, barrier, commSize, myrank);
 
   // OLD Style p2p communications
@@ -194,9 +195,9 @@ void G4MPIscorerMerger::Merge()
       }
   }
 */
-  const G4double elapsed = MPI::Wtime() - sttime;
+  const G4double elapsed = MPI_Wtime() - sttime;
   long total = 0;
-  comm.Reduce(&bytesSent, &total, 1, MPI::LONG, MPI::SUM, destinationRank);
+  MPI_Reduce(&bytesSent, &total, 1, MPI_LONG, MPI_SUM, destinationRank, comm);
   if (verbose > 0 && myrank == destinationRank) {
     // Collect from ranks how much data was sent around
     G4cout << "G4MPIscorerMerger::Merge() -data transfer performances: "
@@ -218,7 +219,7 @@ void G4MPIscorerMerger::Merge()
   //    }
   //  }
   // ANDREA:<-
-  comm.Free();
+  MPI_Comm_free(&comm);
   DMSG(0, "G4MPIscorerMerger::Merge done.");
 }
 
@@ -227,9 +228,11 @@ void G4MPIscorerMerger::Receive(const unsigned int source)
   DMSG(1, "Receiving scorers");
   // DestroyBuffer();
   DMSG(2, "Receiving from: " << source);
-  MPI::Status status;
-  comm.Probe(source, G4MPImanager::kTAG_CMDSCR, status);
-  const G4int newbuffsize = status.Get_count(MPI::PACKED);
+  MPI_Status status;
+  MPI_Probe(source, G4MPImanager::kTAG_CMDSCR, comm, &status);
+  int newbuffsize1;
+  MPI_Get_count(&status, MPI_PACKED, &newbuffsize1);
+  const G4int newbuffsize = newbuffsize1;
   DMSG(2, "Preparing to receive buffer of size: " << newbuffsize);
   char* buffer = outputBuffer;
   if (newbuffsize > outputBufferSize) {
@@ -244,7 +247,7 @@ void G4MPIscorerMerger::Receive(const unsigned int source)
     ownsBuffer = true;
   }
   SetupOutputBuffer(buffer, newbuffsize, 0);
-  comm.Recv(buffer, newbuffsize, MPI::PACKED, source, G4MPImanager::kTAG_CMDSCR, status);
+  MPI_Recv(buffer, newbuffsize, MPI_PACKED, source, G4MPImanager::kTAG_CMDSCR, comm, &status);
   DMSG(3, "Buffer Size: " << outputBufferSize << " bytes at: " << (void*)outputBuffer);
   UnPackAndMerge(scoringManager);
   DMSG(1, "Receiving of comamnd line scorers done");
@@ -271,7 +274,7 @@ void G4MPIscorerMerger::Send(const unsigned int destination)
   assert(outputBufferSize == outputBufferPosition);
 
   // Version 1: p2p communication
-  comm.Send(outputBuffer, outputBufferSize, MPI::PACKED, destination, G4MPImanager::kTAG_CMDSCR);
+  MPI_Send(outputBuffer, outputBufferSize, MPI_PACKED, destination, G4MPImanager::kTAG_CMDSCR, comm);
   bytesSent += newbuffsize;
   // Receiver should use probe to get size of the package being sent
   DMSG(1, "Sending done");
@@ -287,10 +290,10 @@ void G4MPIscorerMerger::Pack(const G4ScoringManager* sm)
   }
   DMSG(2, "Starting packing of meshes, # meshes: " << sm->GetNumberOfMesh());
   /*const*/ size_t numMeshes = sm->GetNumberOfMesh();  // TODO: OLD MPI interface
-  MPI_Pack(&numMeshes, 1, MPI::UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition,
+  MPI_Pack(&numMeshes, 1, MPI_UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition,
            comm);
   for (size_t i = 0; i < numMeshes; ++i) {
-    MPI_Pack(&i, 1, MPI::UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
+    MPI_Pack(&i, 1, MPI_UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
     Pack(sm->GetMesh(i));
   }
 }
@@ -304,7 +307,7 @@ void G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager* sm)
     return;
   }
   size_t numMeshes = 0;
-  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &numMeshes, 1, MPI::UNSIGNED,
+  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &numMeshes, 1, MPI_UNSIGNED,
              comm);
   if (numMeshes != sm->GetNumberOfMesh()) {
     G4ExceptionDescription msg;
@@ -318,7 +321,7 @@ void G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager* sm)
 
   size_t meshid = 0;
   for (size_t i = 0; i < numMeshes; ++i) {
-    MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &meshid, 1, MPI::UNSIGNED,
+    MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &meshid, 1, MPI_UNSIGNED,
                comm);
     if (meshid != i) {
       G4ExceptionDescription msg;
@@ -342,18 +345,18 @@ void G4MPIscorerMerger::Pack(const G4VScoringMesh* mesh)
 
   auto map = mesh->GetScoreMap();
   /*const*/ size_t nummaps = map.size();  // TODO: old MPI interface
-  MPI_Pack(&nummaps, 1, MPI::UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
+  MPI_Pack(&nummaps, 1, MPI_UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
   for (const auto& ele : map) {
     const G4String& name = ele.first;
     /*const*/ size_t ss = name.size();
-    MPI_Pack(&ss, 1, MPI::UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
+    MPI_Pack(&ss, 1, MPI_UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
 #ifdef G4MPI_USE_MPI_PACK_NOT_CONST
     char* nn = new char[name.length()];
     std::copy(name.begin(), name.end(), nn);
 #else
     const char* nn = name.c_str();
 #endif
-    MPI_Pack(nn, ss, MPI::CHAR, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
+    MPI_Pack(nn, ss, MPI_CHAR, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
     Pack(ele.second);
 #ifdef G4MPI_USE_MPI_PACK_NOT_CONST
     delete[] nn;
@@ -369,17 +372,17 @@ void G4MPIscorerMerger::UnPackAndMerge(G4VScoringMesh* inmesh)
   DMSG(3, "Preparing to unpack a mesh and merge into: " << inmesh);
   const G4String& detName = inmesh->GetWorldName();
   size_t nummaps = 0;
-  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &nummaps, 1, MPI::UNSIGNED,
+  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &nummaps, 1, MPI_UNSIGNED,
              comm);
   for (size_t i = 0; i < nummaps; ++i) {
     size_t nameSize = 0;
-    MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &nameSize, 1, MPI::UNSIGNED,
+    MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &nameSize, 1, MPI_UNSIGNED,
                comm);
     // Create a null-terminated c-string: needed later when converting this to a G4String
     //(Not sure: but issue reported by valgrind with the use of MPI_Unpack)
     char* name = new char[nameSize + 1];
     std::fill(name, name + nameSize + 1, 0);
-    MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, name, nameSize, MPI::CHAR,
+    MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, name, nameSize, MPI_CHAR,
                comm);
     const G4String colname(name, nameSize);
     delete[] name;
@@ -399,7 +402,7 @@ void G4MPIscorerMerger::UnPackAndMerge(G4VScoringMesh* inmesh)
 //   assert(outputBufferPosition<=outputBufferSize);
 //   DMSG(3,"Packing hitmap: "<<sm<<" with: "<<sm->GetSize()<<" elements.");
 //   /*const*/ size_t numEl = sm->GetSize();//TODO: old MPI implementation
-//   MPI_Pack(&numEl,1,MPI::UNSIGNED,
+//   MPI_Pack(&numEl,1,MPI_UNSIGNED,
 //       outputBuffer,outputBufferSize,
 //       &outputBufferPosition,comm);
 //   const auto& theMap = *sm->GetMap();
@@ -410,10 +413,10 @@ void G4MPIscorerMerger::UnPackAndMerge(G4VScoringMesh* inmesh)
 //   std::transform(theMap.begin(),theMap.end(),std::back_inserter(vals),
 //       [](decltype(*theMap.begin())& e){ return *e.second;});
 //   assert(ids.size()==vals.size()&&ids.size()==numEl);
-//   MPI_Pack(ids.data(),ids.size(),MPI::INT,
+//   MPI_Pack(ids.data(),ids.size(),MPI_INT,
 //       outputBuffer,outputBufferSize,
 //       &outputBufferPosition,comm);
-//   MPI_Pack(vals.data(),vals.size(),MPI::DOUBLE,
+//   MPI_Pack(vals.data(),vals.size(),MPI_DOUBLE,
 //       outputBuffer,outputBufferSize,
 //       &outputBufferPosition,comm);
 // }
@@ -425,13 +428,13 @@ void G4MPIscorerMerger::Pack(const HitStatDoubleMap* sm)
   assert(outputBufferPosition <= outputBufferSize);
   DMSG(3, "Packing hitmap: " << sm << " with: " << sm->GetSize() << " elements.");
   /*const*/ size_t numEl = sm->GetSize();  // TODO: old MPI implementation
-  MPI_Pack(&numEl, 1, MPI::UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
+  MPI_Pack(&numEl, 1, MPI_UNSIGNED, outputBuffer, outputBufferSize, &outputBufferPosition, comm);
   const auto& theMap = *sm->GetMap();
   std::vector<G4int> ids;
   std::transform(theMap.begin(), theMap.end(), std::back_inserter(ids),
                  [](decltype(*theMap.begin())& e) { return e.first; });
   assert(/*ids.size()==vals.size()&&*/ ids.size() == numEl);
-  MPI_Pack(ids.data(), ids.size(), MPI::INT, outputBuffer, outputBufferSize, &outputBufferPosition,
+  MPI_Pack(ids.data(), ids.size(), MPI_INT, outputBuffer, outputBufferSize, &outputBufferPosition,
            comm);
   for (const auto& e : theMap) {
     const MPIStatDouble sd(*e.second, verbose);
@@ -446,13 +449,13 @@ void G4MPIscorerMerger::Pack(const HitStatDoubleMap* sm)
 //   DMSG(3,"Preparing to unpack a hit map for: "<<detName<<","<<colName);
 //   size_t numEl =0 ;
 //   MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
-//              &numEl,1,MPI::UNSIGNED,comm);
+//              &numEl,1,MPI_UNSIGNED,comm);
 //   G4int* ids = new G4int[numEl];
 //   MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
-//              ids,numEl,MPI::INT,comm);
+//              ids,numEl,MPI_INT,comm);
 //   G4double* vals = new G4double[numEl];
 //   MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
-//       vals,numEl,MPI::DOUBLE,comm);
+//       vals,numEl,MPI_DOUBLE,comm);
 //   HitMap* result = new HitMap(detName,colName);
 //   for ( unsigned int i = 0; i<numEl;++i) result->set(ids[i],vals[i]);
 //   delete[] ids;
@@ -467,10 +470,10 @@ HitStatDoubleMap* G4MPIscorerMerger::UnPackHitStatDoubleMap(const G4String& detN
   assert(outputBufferPosition <= outputBufferSize);
   DMSG(3, "Preparing to unpack a hit map for: " << detName << "," << colName);
   size_t numEl = 0;
-  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &numEl, 1, MPI::UNSIGNED, comm);
+  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, &numEl, 1, MPI_UNSIGNED, comm);
   DMSG(3, "Will receive " << numEl << " values");
   G4int* ids = new G4int[numEl];
-  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, ids, numEl, MPI::INT, comm);
+  MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, ids, numEl, MPI_INT, comm);
   HitStatDoubleMap* result = new HitStatDoubleMap(detName, colName);
   for (unsigned int i = 0; i < numEl; ++i) {
     MPIStatDouble sd(verbose);
diff --git a/examples/extended/parallel/MPI/source/src/G4VUserMPIrunMerger.cc b/examples/extended/parallel/MPI/source/src/G4VUserMPIrunMerger.cc
index 1c768107c29..4011f854eff 100644
--- a/examples/extended/parallel/MPI/source/src/G4VUserMPIrunMerger.cc
+++ b/examples/extended/parallel/MPI/source/src/G4VUserMPIrunMerger.cc
@@ -60,12 +60,15 @@ void G4VUserMPIrunMerger::Send(const unsigned int destination)
                                                             << " events to: " << destination);
   input_userdata.clear();
   Pack();  // User code
-  InputUserData(&nevts, MPI::INT, 1);
+  InputUserData(&nevts, MPI_INT, 1);
 
   DestroyBuffer();
   G4int newbuffsize = 0;
+  G4int size_of_type;
+
   for (const const_registered_data& el : input_userdata) {
-    newbuffsize += (el.dt.Get_size() * el.count);
+    MPI_Type_size(el.dt, &size_of_type);
+    newbuffsize += (size_of_type * el.count);
   }
   char* buffer = new char[newbuffsize];
   // Avoid complains from valgrind (i'm not really sure why this is needed, but, beside the
@@ -85,25 +88,28 @@ void G4VUserMPIrunMerger::Send(const unsigned int destination)
              outputBuffer, outputBufferSize, &outputBufferPosition, COMM_G4COMMAND_);
   }
   assert(outputBufferSize == outputBufferPosition);
-  COMM_G4COMMAND_.Send(outputBuffer, outputBufferSize, MPI::PACKED, destination,
-                       G4MPImanager::kTAG_RUN);
+  MPI_Send(outputBuffer, outputBufferSize, MPI_PACKED, destination,
+                       G4MPImanager::kTAG_RUN, COMM_G4COMMAND_);
   bytesSent += outputBufferSize;
   DMSG(2, "G4VUserMPIrunMerger::Send() : Done ");
 }
 
 void G4VUserMPIrunMerger::Receive(const unsigned int source)
 {
-  const MPI::Intracomm* parentComm = G4MPImanager::GetManager()->GetComm();
-  DMSG(1, "G4VUserMPIrunMerger::Receive(...) , this rank : " << parentComm->Get_rank()
-                                                             << " and receiving from : " << source);
+  const MPI_Comm* parentComm = G4MPImanager::GetManager()->GetComm();
+  int rank;
+  MPI_Comm_rank(*parentComm, &rank);
+  DMSG(1, "G4VUserMPIrunMerger::Receive(...) , this rank : " << rank << " and receiving from : " << source);
   // DestroyBuffer();
   // Receive from all but one
   // for (G4int rank = 0; rank < commSize-1; ++rank)
   //{
-  MPI::Status status;
-  COMM_G4COMMAND_.Probe(source, G4MPImanager::kTAG_RUN, status);
+  MPI_Status status;
+  MPI_Probe(source, G4MPImanager::kTAG_RUN, COMM_G4COMMAND_, &status);
   // const G4int source = status.Get_source();
-  const G4int newbuffsize = status.Get_count(MPI::PACKED);
+  int newbuffsize1;
+  MPI_Get_count(&status, MPI_PACKED, &newbuffsize1);
+  const G4int newbuffsize = newbuffsize1;
   DMSG(2, "Preparing to receive buffer of size: " << newbuffsize);
   char* buffer = outputBuffer;
   if (newbuffsize > outputBufferSize) {
@@ -117,7 +123,7 @@ void G4VUserMPIrunMerger::Receive(const unsigned int source)
     ownsBuffer = true;
   }
   SetupOutputBuffer(buffer, newbuffsize, 0);
-  COMM_G4COMMAND_.Recv(buffer, newbuffsize, MPI::PACKED, source, G4MPImanager::kTAG_RUN, status);
+  MPI_Recv(buffer, newbuffsize, MPI_PACKED, source, G4MPImanager::kTAG_RUN, COMM_G4COMMAND_, &status);
   DMSG(3, "Buffer Size: " << outputBufferSize << " bytes at: " << (void*)outputBuffer);
   output_userdata.clear();
   // User code, if implemented will return the concrete G4Run class
@@ -125,7 +131,7 @@ void G4VUserMPIrunMerger::Receive(const unsigned int source)
   if (aNewRun == nullptr) aNewRun = new G4Run;
   // Add number of events counter
   G4int nevets = 0;
-  OutputUserData(&nevets, MPI::INT, 1);
+  OutputUserData(&nevets, MPI_INT, 1);
   // now userdata contains all data references, do the real unpacking
   for (const registered_data& el : output_userdata) {
     MPI_Unpack(outputBuffer, outputBufferSize, &outputBufferPosition, el.p_data, el.count, el.dt,
@@ -147,8 +153,10 @@ void G4VUserMPIrunMerger::Merge()
   // G4cout << "G4VUserMPIrunMerger::Merge called" << G4endl;
 
   DMSG(0, "G4VUserMPIrunMerger::Merge called");
-  const MPI::Intracomm* parentComm = G4MPImanager::GetManager()->GetComm();
-  const unsigned int myrank = parentComm->Get_rank();
+  const MPI_Comm* parentComm = G4MPImanager::GetManager()->GetComm();
+  int rank;
+  MPI_Comm_rank(*parentComm, &rank);
+  const unsigned int myrank = rank;
   commSize = G4MPImanager::GetManager()->GetActiveSize();
   // do not include extra worker in this communication
 
@@ -156,16 +164,16 @@ void G4VUserMPIrunMerger::Merge()
     DMSG(1, "Comm world size is 1, nothing to do");
     return;
   }
-  COMM_G4COMMAND_ = parentComm->Dup();
+  MPI_Comm_dup(*parentComm, &COMM_G4COMMAND_);
   bytesSent = 0;
-  const G4double sttime = MPI::Wtime();
+  const G4double sttime = MPI_Wtime();
 
   // Use G4MPIutils to optimize communications between ranks
   typedef std::function<void(unsigned int)> handler_t;
   using std::placeholders::_1;
   handler_t sender = std::bind(&G4VUserMPIrunMerger::Send, this, _1);
   handler_t receiver = std::bind(&G4VUserMPIrunMerger::Receive, this, _1);
-  std::function<void(void)> barrier = std::bind(&MPI::Intracomm::Barrier, &COMM_G4COMMAND_);
+  std::function<void(void)> barrier = std::bind(MPI_Barrier, COMM_G4COMMAND_);
   // G4cout << "go to  G4mpi::Merge" << G4endl;
   G4mpi::Merge(sender, receiver, barrier, commSize, myrank);
 
@@ -183,9 +191,9 @@ void G4VUserMPIrunMerger::Merge()
         }
     }
   */
-  const G4double elapsed = MPI::Wtime() - sttime;
+  const G4double elapsed = MPI_Wtime() - sttime;
   long total = 0;
-  COMM_G4COMMAND_.Reduce(&bytesSent, &total, 1, MPI::LONG, MPI::SUM, destinationRank);
+  MPI_Reduce(&bytesSent, &total, 1, MPI_LONG, MPI_SUM, destinationRank, COMM_G4COMMAND_);
   if (verbose > 0 && myrank == destinationRank) {
     // Collect from ranks how much data was sent around
     G4cout << "G4VUserMPIrunMerger::Merge() - data transfer performances: "
@@ -194,6 +202,6 @@ void G4VUserMPIrunMerger::Merge()
            << G4endl;
   }
 
-  COMM_G4COMMAND_.Free();
+  MPI_Comm_free(&COMM_G4COMMAND_);
   DMSG(0, "G4VUserMPIrunMerger::Merge done");
 }
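
Note on the binding change above: the C++ bindings were deprecated in MPI-2.2 and removed in MPI-3, so the patch maps MPI::Intracomm to MPI_Comm, member calls to free functions that take the communicator as an argument, and request.Test() to MPI_Test() with an output flag. A minimal, self-contained sketch of that lifecycle (illustrative names, not G4mpi code):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  int provided = 0;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
  if (provided < MPI_THREAD_SERIALIZED) {
    std::fprintf(stderr, "MPI_THREAD_SERIALIZED not available\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  int rank = 0, size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);  // was MPI::COMM_WORLD.Get_rank()
  MPI_Comm_size(MPI_COMM_WORLD, &size);  // was MPI::COMM_WORLD.Get_size()

  MPI_Comm comm;                         // was MPI::Intracomm
  MPI_Comm_dup(MPI_COMM_WORLD, &comm);   // was COMM_WORLD.Dup()

  if (rank == 0 && size > 1) {
    // Non-blocking receive polled with MPI_Test, as in CheckThreadStatus().
    unsigned int buff = 0;
    MPI_Request request;
    MPI_Irecv(&buff, 1, MPI_UNSIGNED, 1, 0, comm, &request);
    int flag = 0;
    while (!flag) {
      MPI_Test(&request, &flag, MPI_STATUS_IGNORE);  // busy-poll; the real code sleeps between tests
    }
  }
  else if (rank == 1) {
    unsigned int buff = 42;
    MPI_Send(&buff, 1, MPI_UNSIGNED, 0, 0, comm);
  }

  MPI_Comm_free(&comm);                  // was comm.Free()
  MPI_Finalize();                        // was MPI::Finalize()
  return 0;
}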

From 5175b6ac58ccf54adc3a61a02fd3d4320f96a1f3 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.com>
Date: Wed, 19 Feb 2025 11:56:40 +0000
Subject: [PATCH 02/13] Fix macro parsing

---
 .../parallel/MPI/source/src/G4MPImanager.cc         | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index bf0136ea5ff..60c634352d6 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -277,6 +277,9 @@ void G4MPImanager::ParseArguments(int argc, char** argv)
   {
     G4String arg = argv[i];
 
+    G4String sub = arg.substr(0,5);
+    G4String endsub = arg.substr(arg.length()-4);
+    
     if (arg == "help")
     {
       qhelp = 1;
@@ -295,14 +298,10 @@ void G4MPImanager::ParseArguments(int argc, char** argv)
       qfcout_ = true;
       if (optarg) ofprefix = optarg;
     }
-    else if (arg[0] == 'm')
+    else if (sub=="macro" or endsub==".mac")
     {
-      G4String sub = arg.substr(0,5);
-      if (sub=="macro")
-      {
-        optind = i;
-        qbatchmode_ = true;
-      }
+      optind = i;
+      qbatchmode_ = true;
     }
     //default:
       //G4cerr << "*** invalid options specified." << G4endl;

From 3faf65a5ee3b9c49e11328cef5624ede4c8b899c Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Wed, 19 Feb 2025 12:02:24 +0000
Subject: [PATCH 03/13] Changed MPI::INT to MPI_INT C binding

---
 .../extended/parallel/MPI/examples/exMPI03/src/RunMerger.cc   | 4 ++--
 .../extended/parallel/MPI/examples/exMPI04/src/RunMerger.cc   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/extended/parallel/MPI/examples/exMPI03/src/RunMerger.cc b/examples/extended/parallel/MPI/examples/exMPI03/src/RunMerger.cc
index 45c2bad8765..88ae4ed812f 100644
--- a/examples/extended/parallel/MPI/examples/exMPI03/src/RunMerger.cc
+++ b/examples/extended/parallel/MPI/examples/exMPI03/src/RunMerger.cc
@@ -32,7 +32,7 @@ void RunMerger::Pack()
 {
   // Very imporant, here fMyRun is const!
   // Register a user-data in the user Run class with MPI merger
-  InputUserData(const_cast<int*>(&(fMyRun->fDummyCounter)), MPI::INT, 1);
+  InputUserData(const_cast<int*>(&(fMyRun->fDummyCounter)), MPI_INT, 1);
 }
 
 //....oooOO0OOooo........oooOO0OOooo........oooOO0OOooo........oooOO0OOooo......
@@ -40,6 +40,6 @@ G4Run* RunMerger::UnPack()
 {
   // Create a dummy user-Run, used to contain data received via MPI
   Run* aDummyRun = new Run;
-  OutputUserData(&(aDummyRun->fDummyCounter), MPI::INT, 1);
+  OutputUserData(&(aDummyRun->fDummyCounter), MPI_INT, 1);
   return aDummyRun;
 }
diff --git a/examples/extended/parallel/MPI/examples/exMPI04/src/RunMerger.cc b/examples/extended/parallel/MPI/examples/exMPI04/src/RunMerger.cc
index 45c2bad8765..88ae4ed812f 100644
--- a/examples/extended/parallel/MPI/examples/exMPI04/src/RunMerger.cc
+++ b/examples/extended/parallel/MPI/examples/exMPI04/src/RunMerger.cc
@@ -32,7 +32,7 @@ void RunMerger::Pack()
 {
   // Very imporant, here fMyRun is const!
   // Register a user-data in the user Run class with MPI merger
-  InputUserData(const_cast<int*>(&(fMyRun->fDummyCounter)), MPI::INT, 1);
+  InputUserData(const_cast<int*>(&(fMyRun->fDummyCounter)), MPI_INT, 1);
 }
 
 //....oooOO0OOooo........oooOO0OOooo........oooOO0OOooo........oooOO0OOooo......
@@ -40,6 +40,6 @@ G4Run* RunMerger::UnPack()
 {
   // Create a dummy user-Run, used to contain data received via MPI
   Run* aDummyRun = new Run;
-  OutputUserData(&(aDummyRun->fDummyCounter), MPI::INT, 1);
+  OutputUserData(&(aDummyRun->fDummyCounter), MPI_INT, 1);
   return aDummyRun;
 }
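
These MPI_INT registrations feed the same MPI_Pack/MPI_Unpack flow used by G4MPIscorerMerger in patch 01: heterogeneous fields are packed into a single MPI_PACKED buffer and unpacked in the same order on the receiving side. A self-contained round-trip sketch of those C calls (variable names are hypothetical):

#include <mpi.h>
#include <cassert>
#include <vector>

void PackRoundTrip(MPI_Comm comm)
{
  int counter = 7;
  double stats[5] = {1., 2., 3., 4., 5.};

  // Size the buffer with MPI_Pack_size, replacing MPI::Datatype::Get_size().
  int intSize = 0, dblSize = 0;
  MPI_Pack_size(1, MPI_INT, comm, &intSize);
  MPI_Pack_size(5, MPI_DOUBLE, comm, &dblSize);
  std::vector<char> buffer(intSize + dblSize);

  int position = 0;
  MPI_Pack(&counter, 1, MPI_INT, buffer.data(), (int)buffer.size(), &position, comm);
  MPI_Pack(stats, 5, MPI_DOUBLE, buffer.data(), (int)buffer.size(), &position, comm);

  // Unpack in the same order; the position argument tracks the read offset.
  int outCounter = 0;
  double outStats[5];
  int rpos = 0;
  MPI_Unpack(buffer.data(), position, &rpos, &outCounter, 1, MPI_INT, comm);
  MPI_Unpack(buffer.data(), position, &rpos, outStats, 5, MPI_DOUBLE, comm);
  assert(outCounter == counter);
}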

From 74f03d199aa711a00a0e53c3f7076d0cc8b83656 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.com>
Date: Wed, 19 Feb 2025 13:29:02 +0000
Subject: [PATCH 04/13] Remove unnecessary comment as suggested by drbenmorgan

Co-authored-by: Ben Morgan <drbenmorgan@users.noreply.github.com>
---
 .../extended/parallel/MPI/source/include/G4MPIscorerMerger.hh | 2 +-
 examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc | 2 +-
 examples/extended/parallel/MPI/source/src/G4MPImanager.cc     | 4 ++--
 .../extended/parallel/MPI/source/src/G4MPIscorerMerger.cc     | 1 -
 4 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
index 77117732636..93832211899 100644
--- a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
+++ b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
@@ -103,7 +103,7 @@ class G4MPIscorerMerger
     G4ScoringManager* scoringManager;
     unsigned int commSize;
     unsigned int destinationRank;
-    MPI_Comm comm;  // Changed from MPI::Intracomm to MPI_Comm
+    MPI_Comm comm;
     G4int verbose;
     void G4mpi_barrier(MPI_Comm* comm) {MPI_Barrier(*comm);};
 };
diff --git a/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc b/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc
index 7f393ce7191..10d3aed7b90 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPIhistoMerger.cc
@@ -53,7 +53,7 @@ void G4MPIhistoMerger::Merge()
   }
 
   const MPI_Comm* parentComm = G4MPImanager::GetManager()->GetComm();
-  MPI_Comm comm;// = parentComm->Dup();
+  MPI_Comm comm;
   MPI_Comm_dup(*parentComm, &comm);
 
   G4bool verbose = (verboseLevel > 1);
diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index 60c634352d6..ec2c60525e9 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -181,14 +181,14 @@ void G4MPImanager::Initialize()
   g4mpi_ = this;
 
   // get rank information
-  MPI_Comm_size(MPI_COMM_WORLD, &world_size_);//world_size_ = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &world_size_);
   if (world_size_ - nof_extra_workers_ <= 0) {
     G4Exception("G4MPImanager::SetExtraWorker()", "MPI001", JustWarning,
                 "Cannot reserve extra ranks: the MPI size is not sufficient.");
     nof_extra_workers_ = 0;
   }
   size_ = world_size_ - nof_extra_workers_;
-  MPI_Comm_rank(MPI_COMM_WORLD, &rank_); //rank_ = MPI::COMM_WORLD.Get_rank();
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank_);
   is_master_ = (rank_ == kRANK_MASTER);
   is_slave_ = (rank_ != kRANK_MASTER);
   is_extra_worker_ = false;
diff --git a/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc b/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc
index bf9a0433d59..2e5e7bce751 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPIscorerMerger.cc
@@ -175,7 +175,6 @@ void G4MPIscorerMerger::Merge()
   using std::placeholders::_1;
   handler_t sender = std::bind(&G4MPIscorerMerger::Send, this, _1);
   handler_t receiver = std::bind(&G4MPIscorerMerger::Receive, this, _1);
-  //std::function<void(void)> barrier = std::bind(MPI_Barrier, comm);
   std::function<void(void)> barrier = std::bind(MPI_Barrier, comm);
   G4mpi::Merge(sender, receiver, barrier, commSize, myrank);
 

From f7bd18a43ea24b130df4cb1a49abd2201dccb047 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Thu, 20 Feb 2025 12:19:50 +0000
Subject: [PATCH 05/13] Fixed license

---
 .../MPI/source/include/G4MPIscorerMerger.hh   | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
index 77117732636..01991aa5e96 100644
--- a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
+++ b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
@@ -2,24 +2,24 @@
 // ********************************************************************
 // * License and Disclaimer                                           *
 // *                                                                  *
-// * The Geant4 software is copyright of the Copyright Holders of *
-// * the Geant4 Collaboration. It is provided under the terms and *
-// * conditions of the Geant4 Software License, included in the file *
-// * LICENSE and available at http://cern.ch/geant4/license. These *
+// * The  Geant4 software  is  copyright of the Copyright Holders  of *
+// * the Geant4 Collaboration.  It is provided  under  the terms  and *
+// * conditions of the Geant4 Software License,  included in the file *
+// * LICENSE and available at  http://cern.ch/geant4/license .  These *
 // * include a list of copyright holders.                             *
 // *                                                                  *
 // * Neither the authors of this software system, nor their employing *
-// * institutes, nor the agencies providing financial support for this *
-// * work make any representation or warranty, express or implied, *
-// * regarding this software system or assume any liability for its *
-// * use. Please see the license in the file LICENSE and URL above *
+// * institutes,nor the agencies providing financial support for this *
+// * work  make  any representation or  warranty, express or implied, *
+// * regarding  this  software system or assume any liability for its *
+// * use.  Please see the license in the file  LICENSE  and URL above *
 // * for the full disclaimer and the limitation of liability.         *
 // *                                                                  *
-// * This code implementation is the result of the scientific and *
+// * This  code  implementation is the result of  the  scientific and *
 // * technical work of the GEANT4 collaboration.                      *
-// * By using, copying, modifying or distributing the software (or *
-// * any work based on the software) you agree to acknowledge its *
-// * use in resulting scientific publications, and indicate your *
+// * By using,  copying,  modifying or  distributing the software (or *
+// * any work based  on the software)  you  agree  to acknowledge its *
+// * use  in  resulting  scientific  publications,  and indicate your *
 // * acceptance of all terms of the Geant4 Software license.          *
 // ********************************************************************
 //

From 4d64fb0581f47fae67117e2fde5923358b8fde73 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Thu, 20 Feb 2025 12:20:10 +0000
Subject: [PATCH 06/13] Removed unnecessary function

---
 .../extended/parallel/MPI/source/include/G4MPIscorerMerger.hh    | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
index 01991aa5e96..fc1013b0b9c 100644
--- a/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
+++ b/examples/extended/parallel/MPI/source/include/G4MPIscorerMerger.hh
@@ -105,7 +105,6 @@ class G4MPIscorerMerger
     unsigned int destinationRank;
     MPI_Comm comm;  // Changed from MPI::Intracomm to MPI_Comm
     G4int verbose;
-    void G4mpi_barrier(MPI_Comm* comm) {MPI_Barrier(*comm);};
 };
 
 #endif  // G4MPISCORERMERGER_HH

From dbd371be7a03b49e8c9f71c7eb4fd22b8a5f3c10 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Mon, 24 Feb 2025 16:23:45 +0000
Subject: [PATCH 07/13] Removed useless comment, fixed provided_ check in
 constructor

---
 examples/extended/parallel/MPI/source/src/G4MPImanager.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index ec2c60525e9..ad7f2934825 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -84,7 +84,7 @@ G4MPImanager::G4MPImanager(int nof_extra_workers)
     master_weight_(1.),
     nof_extra_workers_(nof_extra_workers)
 {
-  // MPI::Init();
+  int provided_;
   MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided_);
   if (provided_ < MPI_THREAD_SERIALIZED) {
     std::cerr << "Warning: MPI does not provide the requested threading support!" << std::endl;
@@ -106,7 +106,7 @@ G4MPImanager::G4MPImanager(int argc, char** argv, int nof_extra_workers)
     master_weight_(1.),
     nof_extra_workers_(nof_extra_workers)
 {
-  int provided;
+  int provided_;
   MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided_);
   if (provided_ < MPI_THREAD_SERIALIZED) {
     std::cerr << "Warning: MPI does not provide the requested threading support!" << std::endl;

From c0f8e07b25e4b6863fb659045eb87ab48c080839 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Mon, 24 Feb 2025 20:33:04 +0000
Subject: [PATCH 08/13] Updated History

---
 examples/extended/parallel/MPI/History | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/examples/extended/parallel/MPI/History b/examples/extended/parallel/MPI/History
index 2f29bfb87fa..f560baf2a8b 100644
--- a/examples/extended/parallel/MPI/History
+++ b/examples/extended/parallel/MPI/History
@@ -4,6 +4,10 @@ See `CONTRIBUTING.rst` for details of **required** info/format for each entry,
 which **must** added in reverse chronological order (newest at the top). It must **not**
 be used as a substitute for writing good git commit messages!
 
+## 2025-02-24 Filippo Falezza (MPI-V11-03-00)
+- Rewritten G4MPI to use C bindings (MPI 3+)
+  - Fixes [GitHub PR #81](https://github.com/Geant4/geant4/pull/81/)
+
 ## 2023-03-11 Ben Morgan (MPI-V11-02-00)
 - Move CTests definitions to tests/ctests_examples
 

From b4494258b1be2106aa73766133980bb862be23ac Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Thu, 27 Feb 2025 11:48:44 +0000
Subject: [PATCH 09/13] Raise exception as per
 https://github.com/Geant4/geant4/pull/81#discussion_r1973415840

---
 .../extended/parallel/MPI/source/src/G4MPImanager.cc  | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index ad7f2934825..708f6ed18d0 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -87,8 +87,12 @@ G4MPImanager::G4MPImanager(int nof_extra_workers)
   int provided_;
   MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided_);
   if (provided_ < MPI_THREAD_SERIALIZED) {
-    std::cerr << "Warning: MPI does not provide the requested threading support!" << std::endl;
+    G4Exception("G4MPImanager::G4MPImanager()",
+                "G4MPImanager001",
+                FatalException,
+                "MPI Initialization failed to setup with MPI_THREAD_SERIALIZED or better");
   }
+
   Initialize();
 }
 
@@ -109,7 +113,10 @@ G4MPImanager::G4MPImanager(int argc, char** argv, int nof_extra_workers)
   int provided_;
   MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided_);
   if (provided_ < MPI_THREAD_SERIALIZED) {
-    std::cerr << "Warning: MPI does not provide the requested threading support!" << std::endl;
+    G4Exception("G4MPImanager::G4MPImanager()",
+                "G4MPImanager001",
+                FatalException,
+                "MPI Initialization failed to setup with MPI_THREAD_SERIALIZED or better");
   }
 
   Initialize();
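
The comparison made fatal here works because the MPI standard defines the four threading levels as monotonically increasing constants (MPI_THREAD_SINGLE < MPI_THREAD_FUNNELED < MPI_THREAD_SERIALIZED < MPI_THREAD_MULTIPLE), so "provided < MPI_THREAD_SERIALIZED" rejects both SINGLE and FUNNELED. For code far from the init site, the granted level can also be re-queried; a sketch (not part of the patch):

#include <mpi.h>

bool SerializedThreadingAvailable()
{
  int provided = MPI_THREAD_SINGLE;
  MPI_Query_thread(&provided);  // valid any time after MPI_Init_thread
  return provided >= MPI_THREAD_SERIALIZED;
}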

From 14a1f0cc339048470a55ebcdeb84d0ea4308ac9c Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Thu, 27 Feb 2025 12:07:33 +0000
Subject: [PATCH 10/13] Restore the Geant4 10.2.0 minimum requirement as it was;
 this should still be tested against 10.2.0

---
 examples/extended/parallel/MPI/source/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/extended/parallel/MPI/source/CMakeLists.txt b/examples/extended/parallel/MPI/source/CMakeLists.txt
index a19aec284b2..fe3274214a9 100644
--- a/examples/extended/parallel/MPI/source/CMakeLists.txt
+++ b/examples/extended/parallel/MPI/source/CMakeLists.txt
@@ -10,7 +10,7 @@ project(${_projname})
 
 cmake_minimum_required(VERSION 3.16...3.27)
 find_package(MPI REQUIRED)
-find_package(Geant4 REQUIRED)
+find_package(Geant4 10.2.0 REQUIRED)
 include(${Geant4_USE_FILE})
 
 #------------------------------------------------------------------------------

From 15f6cbbee0b3988d465efe99100117ca082ce7be Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.com>
Date: Thu, 27 Feb 2025 12:09:05 +0000
Subject: [PATCH 11/13] Update as suggested by drbenmorgan

Co-authored-by: Ben Morgan <drbenmorgan@users.noreply.github.com>
---
 examples/extended/parallel/MPI/source/include/G4MPImanager.hh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/extended/parallel/MPI/source/include/G4MPImanager.hh b/examples/extended/parallel/MPI/source/include/G4MPImanager.hh
index b85ec58beb8..c4946cdb0d6 100644
--- a/examples/extended/parallel/MPI/source/include/G4MPImanager.hh
+++ b/examples/extended/parallel/MPI/source/include/G4MPImanager.hh
@@ -157,7 +157,6 @@ class G4MPImanager
     G4bool is_slave_;
     G4bool is_extra_worker_;
     G4int rank_;
-    G4int provided_;
     G4int size_;  // processing comm size
     G4int world_size_;  // world comm size
 

From 44a601ee6681181d90b4cab7a2cee530639a917a Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Thu, 27 Feb 2025 14:49:15 +0000
Subject: [PATCH 12/13] Removed unused variable from parser

---
 examples/extended/parallel/MPI/source/src/G4MPImanager.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index 708f6ed18d0..5e652efe19a 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -278,8 +278,6 @@ void G4MPImanager::ParseArguments(int argc, char** argv)
 
   G4int optind = -1;
 
-  static struct option long_options[] = {{"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {"init", required_argument, NULL, 'i'}, {"ofile", optional_argument, NULL, 'o'}, {NULL, 0, NULL, 0}};
-
   for (int i = 1; i < argc; i++)
   {
     G4String arg = argv[i];

From 38b02ffddb313f4b9263486afa92fa48a6e65948 Mon Sep 17 00:00:00 2001
From: Filippo Falezza <filippo.falezza@outlook.it>
Date: Thu, 6 Mar 2025 12:52:51 +0000
Subject: [PATCH 13/13] Fix parser in case arguments are too short

---
 .../parallel/MPI/source/src/G4MPImanager.cc      | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
index 5e652efe19a..c50a3990dd9 100644
--- a/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
+++ b/examples/extended/parallel/MPI/source/src/G4MPImanager.cc
@@ -53,9 +53,11 @@ namespace
 {
 
 // wrappers for thread functions
-void thread_ExecuteThreadCommand(const G4String* command)
+void* thread_ExecuteThreadCommand(void* arg)
 {
+  const G4String* command = static_cast<const G4String*>(arg);
   G4MPImanager::GetManager()->ExecuteThreadCommand(*command);
+  return nullptr;
 }
 
 // --------------------------------------------------------------------------
@@ -281,10 +283,14 @@ void G4MPImanager::ParseArguments(int argc, char** argv)
   for (int i = 1; i < argc; i++)
   {
     G4String arg = argv[i];
+    G4String sub;
+    G4String endsub = sub;
+    if (arg.length() > 5)
+    {
+      sub = arg.substr(0,5);
+      endsub = arg.substr(arg.length()-4);
+    }
 
-    G4String sub = arg.substr(0,5);
-    G4String endsub = arg.substr(arg.length()-4);
-    
     if (arg == "help")
     {
       qhelp = 1;
@@ -532,7 +538,7 @@ void G4MPImanager::ExecuteBeamOnThread(const G4String& command)
   else {  // ok
     static G4String cmdstr;
     cmdstr = command;
-    G4int rc = pthread_create(&thread_id_, 0, (Func_t)thread_ExecuteThreadCommand, (void*)&cmdstr);
+    G4int rc = pthread_create(&thread_id_, 0, thread_ExecuteThreadCommand, (void*)&cmdstr);
     if (rc != 0)
       G4Exception("G4MPImanager::ExecuteBeamOnThread()", "MPI003", FatalException,
                   "Failed to create a beamOn thread.");