add sendrecv wrapper (#317)
* add `sendrecv` wrapper
* added docs
* added other Sendrecv methods
gasagna authored and simonbyrne committed Nov 18, 2019
1 parent f8d9cde commit 0a643dc
Showing 2 changed files with 118 additions and 1 deletion.
59 changes: 59 additions & 0 deletions src/pointtopoint.jl
@@ -341,6 +341,65 @@ function irecv(src::Integer, tag::Integer, comm::Comm)
(true, MPI.deserialize(buf), stat)
end

"""
    Sendrecv(sendbuf, sendcount::Integer, sendtype::Union{Datatype, MPI_Datatype}, dest::Integer, sendtag::Integer,
             recvbuf, recvcount::Integer, recvtype::Union{Datatype, MPI_Datatype}, source::Integer, recvtag::Integer,
             comm::Comm)

Complete a blocking send-receive operation over the MPI communicator `comm`. Send
`sendcount` elements of type `sendtype` from `sendbuf` to the MPI rank `dest` using message
tag `sendtag`, and receive `recvcount` elements of type `recvtype` from MPI rank `source` into
the buffer `recvbuf` using message tag `recvtag`. Return an `MPI.Status` object.
"""
function Sendrecv(sendbuf, sendcount::Integer, sendtype::Union{Datatype, MPI_Datatype}, dest::Integer, sendtag::Integer,
                  recvbuf, recvcount::Integer, recvtype::Union{Datatype, MPI_Datatype}, source::Integer, recvtag::Integer,
                  comm::Comm)
    # int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
    #                  void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
    #                  MPI_Comm comm, MPI_Status *status)
    stat_ref = Ref{Status}()
    @mpichk ccall((:MPI_Sendrecv, libmpi), Cint,
                  (MPIPtr, Cint, MPI_Datatype, Cint, Cint,
                   MPIPtr, Cint, MPI_Datatype, Cint, Cint,
                   MPI_Comm, Ptr{Status}),
                  sendbuf, sendcount, sendtype, dest, sendtag,
                  recvbuf, recvcount, recvtype, source, recvtag, comm, stat_ref)
    return stat_ref[]
end
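
# --- Editor's illustration (not part of this commit): a minimal sketch of calling the
# datatype-level method above directly. It assumes MPI.Init() has already been called, and
# reuses `mpitype` (the same element-type lookup used by the count-based method below) to
# obtain the MPI datatype. Each rank exchanges three Float64 values with its neighbours on
# a periodic ring.
function example_sendrecv_datatype(comm::Comm)
    rank  = Comm_rank(comm)
    nproc = Comm_size(comm)
    dest  = mod(rank + 1, nproc)          # send to the right neighbour
    src   = mod(rank - 1, nproc)          # receive from the left neighbour
    sendbuf = fill(Float64(rank), 3)
    recvbuf = zeros(Float64, 3)
    status  = Sendrecv(sendbuf, 3, mpitype(Float64), dest, 0,
                       recvbuf, 3, mpitype(Float64), src,  0, comm)
    return recvbuf, status                # recvbuf now holds the left neighbour's rank
end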

"""
    Sendrecv(sendbuf::MPIBuffertype{T}, sendcount::Integer, dest::Integer, sendtag::Integer,
             recvbuf::MPIBuffertype{T}, recvcount::Integer, source::Integer, recvtag::Integer,
             comm::Comm) where {T}

Complete a blocking send-receive operation over the MPI communicator `comm`, sending
`sendcount` elements of type `T` from `sendbuf` to the MPI rank `dest` using message
tag `sendtag`, and receiving `recvcount` elements of the same type from MPI rank `source`
into the buffer `recvbuf` using message tag `recvtag`. Return an `MPI.Status` object.
"""
function Sendrecv(sendbuf::MPIBuffertype{T}, sendcount::Integer, dest::Integer, sendtag::Integer,
                  recvbuf::MPIBuffertype{T}, recvcount::Integer, source::Integer, recvtag::Integer,
                  comm::Comm) where {T}
    return Sendrecv(sendbuf, sendcount, mpitype(eltype(sendbuf)), dest, sendtag,
                    recvbuf, recvcount, mpitype(eltype(recvbuf)), source, recvtag, comm)
end
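
# --- Editor's illustration (not part of this commit): a sketch of the count-based method
# above, which infers the MPI datatype from the buffers' element type. A typical use is a
# pairwise exchange, where two ranks swap data in a single call rather than risking
# deadlock with two blocking Sends. `other` is assumed to be the rank of the partner
# process, and MPI.Init() is assumed to have been called.
function example_sendrecv_swap(other::Integer, comm::Comm)
    rank    = Comm_rank(comm)
    sendbuf = Float64[rank, rank]
    recvbuf = zeros(Float64, 2)
    Sendrecv(sendbuf, 2, other, 0,
             recvbuf, 2, other, 0, comm)
    return recvbuf                        # two copies of the partner's rank
end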

"""
    Sendrecv(sendbuf::AbstractArray{T}, dest::Integer, sendtag::Integer,
             recvbuf::AbstractArray{T}, source::Integer, recvtag::Integer,
             comm::Comm) where {T}

Complete a blocking send-receive operation over the MPI communicator `comm`, sending the
entire array `sendbuf` to the MPI rank `dest` using message tag `sendtag`, and receiving
from MPI rank `source` into the entire array `recvbuf` using message tag `recvtag`.
Return an `MPI.Status` object.
"""
function Sendrecv(sendbuf::AbstractArray{T}, dest::Integer, sendtag::Integer,
                  recvbuf::AbstractArray{T}, source::Integer, recvtag::Integer,
                  comm::Comm) where {T}
    return Sendrecv(sendbuf, length(sendbuf), dest, sendtag,
                    recvbuf, length(recvbuf), source, recvtag, comm)
end
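
# --- Editor's illustration (not part of this commit): a sketch of the whole-array
# convenience method above, which takes both counts from the array lengths, so a periodic
# ring exchange of a full buffer reduces to a single call (again assuming MPI.Init() has
# been called before invoking it).
function example_sendrecv_ring(comm::Comm)
    rank    = Comm_rank(comm)
    nproc   = Comm_size(comm)
    sendbuf = fill(Float64(rank), 3)
    recvbuf = similar(sendbuf)
    Sendrecv(sendbuf, mod(rank + 1, nproc), 0,
             recvbuf, mod(rank - 1, nproc), 0, comm)
    return recvbuf                        # three copies of the left neighbour's rank
end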

"""
Wait!(req::Request)
60 changes: 59 additions & 1 deletion test/test_sendrecv.jl
@@ -79,5 +79,63 @@ MPI.Cancel!(sreq)

GC.gc()

# ---------------------
# MPI_Sendrecv function
# ---------------------
#
# send datatype
# ---------------------
# We test this function by shifting the leftmost element of each rank into the last slot
# of its left neighbour, on a 1D Cartesian topology with periodic boundary conditions.
#
# Assuming two processes, the data initially looks like this:
# proc 0 | proc 1
# 0 0 0 | 1 1 1
#
# After the shift the data is:
# proc 0 | proc 1
# 0 0 1 | 1 1 0
#
# init data
comm_rank = MPI.Comm_rank(comm)
comm_size = MPI.Comm_size(comm)
a = Float64[comm_rank, comm_rank, comm_rank]

# construct subarray type
subarr_send = MPI.Type_Create_Subarray(1, Cint[3], Cint[1], Cint[0], MPI.MPI_ORDER_FORTRAN, Float64)
subarr_recv = MPI.Type_Create_Subarray(1, Cint[3], Cint[1], Cint[2], MPI.MPI_ORDER_FORTRAN, Float64)
MPI.Type_Commit!(subarr_send)
MPI.Type_Commit!(subarr_recv)

# construct cartesian communicator with 1D topology
comm_cart = MPI.Cart_create(comm, 1, Cint[comm_size], Cint[1], false)

# get source and dest ranks using Cart_shift
src_rank, dest_rank = MPI.Cart_shift(comm_cart, 0, -1)

# execute left shift using subarrays
MPI.Sendrecv(a, 1, subarr_send, dest_rank, 0,
             a, 1, subarr_recv, src_rank, 0, comm_cart)

@test a == [comm_rank, comm_rank, (comm_rank+1) % comm_size]

# send elements from a buffer
# ---------------------------
a = Float64[comm_rank, comm_rank, comm_rank]
b = Float64[ -1, -1, -1]
MPI.Sendrecv(a, 2, dest_rank, 1,
             b, 2, src_rank, 1, comm_cart)

@test b == [(comm_rank+1) % comm_size, (comm_rank+1) % comm_size, -1]

# send entire buffer
# ---------------------------
a = Float64[comm_rank, comm_rank, comm_rank]
b = Float64[ -1, -1, -1]
MPI.Sendrecv(a, dest_rank, 2,
             b, src_rank, 2, comm_cart)

@test b == [(comm_rank+1) % comm_size, (comm_rank+1) % comm_size, (comm_rank+1) % comm_size]

MPI.Finalize()
@test MPI.Finalized()
# @test MPI.Finalized()
