From 0a643dcc9917a54f6dafd1ee797cede460c5bf8c Mon Sep 17 00:00:00 2001
From: Davide Lasagna
Date: Mon, 18 Nov 2019 04:48:03 +0000
Subject: [PATCH] add `sendrecv` wrapper (#317)

* add `sendrecv` wrapper

* added docs

* added other Sendrecv methods
---
 src/pointtopoint.jl   | 59 ++++++++++++++++++++++++++++++++++++++++++++++
 test/test_sendrecv.jl | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 118 insertions(+), 1 deletion(-)

diff --git a/src/pointtopoint.jl b/src/pointtopoint.jl
index 4b350da10..1027748f4 100644
--- a/src/pointtopoint.jl
+++ b/src/pointtopoint.jl
@@ -341,6 +341,65 @@ function irecv(src::Integer, tag::Integer, comm::Comm)
     (true, MPI.deserialize(buf), stat)
 end
 
+"""
+    Sendrecv(sendbuf, sendcount::Integer, sendtype::Union{Datatype, MPI_Datatype}, dest::Integer, sendtag::Integer,
+             recvbuf, recvcount::Integer, recvtype::Union{Datatype, MPI_Datatype}, source::Integer, recvtag::Integer,
+             comm::Comm)
+
+Complete a blocking send-receive operation over the MPI communicator `comm`. Send
+`sendcount` elements of type `sendtype` from `sendbuf` to the MPI rank `dest` using message
+tag `sendtag`, and receive `recvcount` elements of type `recvtype` from MPI rank `source`
+into the buffer `recvbuf` using message tag `recvtag`. Return an MPI.Status object.
+"""
+function Sendrecv(sendbuf, sendcount::Integer, sendtype::Union{Datatype, MPI_Datatype}, dest::Integer, sendtag::Integer,
+                  recvbuf, recvcount::Integer, recvtype::Union{Datatype, MPI_Datatype}, source::Integer, recvtag::Integer,
+                  comm::Comm)
+    # int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+    #                  void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
+    #                  MPI_Comm comm, MPI_Status *status)
+    stat_ref = Ref{Status}()
+    @mpichk ccall((:MPI_Sendrecv, libmpi), Cint,
+                  (MPIPtr, Cint, MPI_Datatype, Cint, Cint,
+                   MPIPtr, Cint, MPI_Datatype, Cint, Cint,
+                   MPI_Comm, Ptr{Status}),
+                  sendbuf, sendcount, sendtype, dest, sendtag,
+                  recvbuf, recvcount, recvtype, source, recvtag, comm, stat_ref)
+    return stat_ref[]
+end
+
+"""
+    Sendrecv(sendbuf::MPIBuffertype{T}, sendcount::Integer, dest::Integer, sendtag::Integer,
+             recvbuf::MPIBuffertype{T}, recvcount::Integer, source::Integer, recvtag::Integer,
+             comm::Comm) where {T}
+
+Complete a blocking send-receive operation over the MPI communicator `comm`, sending
+`sendcount` elements of type `T` from `sendbuf` to the MPI rank `dest` using message
+tag `sendtag`, and receiving `recvcount` elements of the same type from MPI rank `source`
+into the buffer `recvbuf` using message tag `recvtag`. Return an MPI.Status object.
+"""
+function Sendrecv(sendbuf::MPIBuffertype{T}, sendcount::Integer, dest::Integer, sendtag::Integer,
+                  recvbuf::MPIBuffertype{T}, recvcount::Integer, source::Integer, recvtag::Integer,
+                  comm::Comm) where {T}
+    return Sendrecv(sendbuf, sendcount, mpitype(eltype(sendbuf)), dest, sendtag,
+                    recvbuf, recvcount, mpitype(eltype(recvbuf)), source, recvtag, comm)
+end
+
+"""
+    Sendrecv(sendbuf::AbstractArray{T}, dest::Integer, sendtag::Integer,
+             recvbuf::AbstractArray{T}, source::Integer, recvtag::Integer,
+             comm::Comm) where {T}
+
+Complete a blocking send-receive operation over the MPI communicator `comm`, sending the
+whole of `sendbuf` to the MPI rank `dest` using message tag `sendtag`, and receiving into
+the buffer `recvbuf` from MPI rank `source` using message tag `recvtag`. Return an MPI.Status object.
+"""
+function Sendrecv(sendbuf::AbstractArray{T}, dest::Integer, sendtag::Integer,
+                  recvbuf::AbstractArray{T}, source::Integer, recvtag::Integer,
+                  comm::Comm) where {T}
+    return Sendrecv(sendbuf, length(sendbuf), dest, sendtag,
+                    recvbuf, length(recvbuf), source, recvtag, comm)
+end
+
 """
     Wait!(req::Request)
 
diff --git a/test/test_sendrecv.jl b/test/test_sendrecv.jl
index 6e117ed01..e55342792 100644
--- a/test/test_sendrecv.jl
+++ b/test/test_sendrecv.jl
@@ -79,5 +79,63 @@ MPI.Cancel!(sreq)
 
 GC.gc()
 
+# ---------------------
+# MPI_Sendrecv function
+# ---------------------
+#
+# send datatype
+# ---------------------
+# We test this function by executing a left shift of the leftmost element in a 1D
+# cartesian topology with periodic boundary conditions.
+#
+# Assuming two processors, the data initially looks like this:
+# proc 0 | proc 1
+#  0 0 0 | 1 1 1
+#
+# After the shift the data will contain:
+# proc 0 | proc 1
+#  0 0 1 | 1 1 0
+#
+# init data
+comm_rank = MPI.Comm_rank(comm)
+comm_size = MPI.Comm_size(comm)
+a = Float64[comm_rank, comm_rank, comm_rank]
+
+# construct subarray types: send the first element, receive into the last element
+subarr_send = MPI.Type_Create_Subarray(1, Cint[3], Cint[1], Cint[0], MPI.MPI_ORDER_FORTRAN, Float64)
+subarr_recv = MPI.Type_Create_Subarray(1, Cint[3], Cint[1], Cint[2], MPI.MPI_ORDER_FORTRAN, Float64)
+MPI.Type_Commit!(subarr_send)
+MPI.Type_Commit!(subarr_recv)
+
+# construct cartesian communicator with 1D periodic topology
+comm_cart = MPI.Cart_create(comm, 1, Cint[comm_size], Cint[1], false)
+
+# get source and dest ranks using Cart_shift
+src_rank, dest_rank = MPI.Cart_shift(comm_cart, 0, -1)
+
+# execute left shift using subarrays
+MPI.Sendrecv(a, 1, subarr_send, dest_rank, 0,
+             a, 1, subarr_recv, src_rank, 0, comm_cart)
+
+@test a == [comm_rank, comm_rank, (comm_rank+1) % comm_size]
+
+# send elements from a buffer
+# ---------------------------
+a = Float64[comm_rank, comm_rank, comm_rank]
+b = Float64[ -1, -1, -1]
+MPI.Sendrecv(a, 2, dest_rank, 1,
+             b, 2, src_rank, 1, comm_cart)
+
+@test b == [(comm_rank+1) % comm_size, (comm_rank+1) % comm_size, -1]
+
+# send entire buffer
+# ---------------------------
+a = Float64[comm_rank, comm_rank, comm_rank]
+b = Float64[ -1, -1, -1]
+MPI.Sendrecv(a, dest_rank, 2,
+             b, src_rank, 2, comm_cart)
+
+@test b == [(comm_rank+1) % comm_size, (comm_rank+1) % comm_size, (comm_rank+1) % comm_size]
+
 MPI.Finalize()
-@test MPI.Finalized()
+# @test MPI.Finalized()
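
For reference, here is a minimal usage sketch of the whole-array `Sendrecv` method added by this patch. It is not part of the diff above: the ring-exchange setup and the names `dest`, `src`, `sendbuf`, `recvbuf` are illustrative, and it assumes the MPI.jl API of this revision (`MPI.Init`, `MPI.COMM_WORLD`, `MPI.Comm_rank`, `MPI.Comm_size`).

    using MPI
    using Test

    MPI.Init()
    comm   = MPI.COMM_WORLD
    rank   = MPI.Comm_rank(comm)
    nprocs = MPI.Comm_size(comm)

    # exchange with ring neighbours: send to the right, receive from the left
    dest = mod(rank + 1, nprocs)
    src  = mod(rank - 1, nprocs)

    sendbuf = fill(Float64(rank), 4)
    recvbuf = similar(sendbuf)

    # whole-buffer method: counts and datatypes are derived from the arrays
    status = MPI.Sendrecv(sendbuf, dest, 0,
                          recvbuf, src,  0, comm)

    @test recvbuf == fill(Float64(src), 4)

    MPI.Finalize()

Because MPI_Sendrecv pairs the send and the receive in a single call, this kind of ring exchange cannot deadlock the way two separately issued blocking `Send`/`Recv` calls can.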