
Reorder ranks to improve locality in communication

Merged Sebastian Ohlmann requested to merge reorder_ranks into develop
+ 23 − 9
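
Before the diff itself, a minimal standalone sketch (not part of the merge request; the program name and the decomposition sizes (2, 2, 2, 1) are illustrative assumptions) of the index arithmetic the change introduces, so the transposed, column-major rank list can be printed and inspected without running Octopus.

program rank_transpose_sketch
  implicit none
  ! assumed 4-level decomposition; in Octopus this corresponds to mc%group_sizes
  integer, parameter :: group_sizes(4) = (/ 2, 2, 2, 1 /)
  integer, parameter :: nranks = group_sizes(1)*group_sizes(2)*group_sizes(3)*group_sizes(4)
  integer :: ranks(nranks)
  integer :: ii, jj, kk, ll, nn

  nn = 1
  do ii = 1, group_sizes(1)
    do jj = 1, group_sizes(2)
      do kk = 1, group_sizes(3)
        do ll = 1, group_sizes(4)
          ! same arithmetic as the corrected line in the diff: the entry at
          ! position nn is the column-major (first index fastest) linear
          ! index of the grid point (ii, jj, kk, ll)
          ranks(nn) = (ll-1)*group_sizes(3)*group_sizes(2)*group_sizes(1) &
            + (kk-1)*group_sizes(2)*group_sizes(1) &
            + (jj-1)*group_sizes(1) + ii - 1
          nn = nn + 1
        end do
      end do
    end do
  end do

  ! with the assumed sizes this prints 0 4 2 6 1 5 3 7; base ranks 0 and 1,
  ! 2 and 3, ... correspond to grid points that differ only in the first index
  print *, ranks
end program rank_transpose_sketch

Since MPI launchers usually place consecutive ranks on the same node, keeping the first (fastest-varying) level within consecutive base ranks is the locality improvement the title refers to.
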
@@ -583,7 +583,7 @@ contains
 #endif
       type(mpi_grp_t) :: reorder_grp
       integer :: base_group, reorder_group, ranks(base_grp%size)
-      integer :: ii, jj, kk, ll, nn
+      integer :: ii, jj, kk, ll, nn, reorder_comm

       PUSH_SUB(multicomm_init.group_comm_create)

@@ -600,22 +600,36 @@ contains
       ! this is done to get a column-major ordering of the ranks in the
       ! Cartesian communicator, since they are ordered row-major otherwise
       call MPI_Comm_group(base_grp%comm, base_group, mpi_err)
+      if(mpi_err /= MPI_SUCCESS) then
+        message(1) = "Error in getting MPI group!"
+        call messages_fatal(1)
+      end if

       ! now transpose the hypercube => get rank numbers in column-major order
       nn = 1
       do ii = 1, mc%group_sizes(1)
         do jj = 1, mc%group_sizes(2)
           do kk = 1, mc%group_sizes(3)
             do ll = 1, mc%group_sizes(4)
-              ranks(nn) = (ll-1)*mc%group_sizes(3) + (kk-1)*mc%group_sizes(2) + (jj-1)*mc%group_sizes(1) + ii - 1
+              ranks(nn) = (ll-1)*mc%group_sizes(3)*mc%group_sizes(2)*mc%group_sizes(1) &
+                + (kk-1)*mc%group_sizes(2)*mc%group_sizes(1) &
+                + (jj-1)*mc%group_sizes(1) + ii - 1
               nn = nn + 1
             end do
           end do
         end do
       end do
       call MPI_Group_incl(base_group, base_grp%size, ranks, reorder_group, mpi_err)
+      if(mpi_err /= MPI_SUCCESS) then
+        message(1) = "Error in creating MPI group!"
+        call messages_fatal(1)
+      end if
       ! now get the reordered communicator
-      call MPI_Comm_create(base_grp%comm, reorder_group, reorder_grp%comm, mpi_err)
-      call mpi_grp_init(reorder_grp, reorder_grp%comm)
+      call MPI_Comm_create(base_grp%comm, reorder_group, reorder_comm, mpi_err)
+      if(mpi_err /= MPI_SUCCESS) then
+        message(1) = "Error in creating reordered communicator!"
+        call messages_fatal(1)
+      end if
+      call mpi_grp_init(reorder_grp, reorder_comm)
       ! Multilevel parallelization is organized in a hypercube. We
       ! use an MPI Cartesian topology to generate the communicators
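
Between the two hunks, a self-contained sketch of the call pattern the added lines follow: obtain the base group, include its ranks in transposed order with MPI_Group_incl, create the reordered communicator with MPI_Comm_create, and check every call. All names are hypothetical, the hard-coded rank list assumes the 8-process (2, 2, 2, 1) example from the first sketch, and MPI_Abort stands in for the message/messages_fatal error handling used in Octopus.

program reorder_comm_sketch
  use mpi
  implicit none
  ! transposed rank list from the first sketch (hypothetical 2 x 2 x 2 x 1 grid)
  integer, parameter :: ranks(8) = (/ 0, 4, 2, 6, 1, 5, 3, 7 /)
  integer :: base_group, reorder_group, reorder_comm
  integer :: nprocs, ierr

  call MPI_Init(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
  if (nprocs /= size(ranks)) call MPI_Abort(MPI_COMM_WORLD, 1, ierr)

  ! group of the base communicator, checked as in the diff
  call MPI_Comm_group(MPI_COMM_WORLD, base_group, ierr)
  if (ierr /= MPI_SUCCESS) call MPI_Abort(MPI_COMM_WORLD, 2, ierr)

  ! the process with base rank ranks(i+1) becomes rank i of the new group
  call MPI_Group_incl(base_group, size(ranks), ranks, reorder_group, ierr)
  if (ierr /= MPI_SUCCESS) call MPI_Abort(MPI_COMM_WORLD, 3, ierr)

  ! communicator with the reordered ranks
  call MPI_Comm_create(MPI_COMM_WORLD, reorder_group, reorder_comm, ierr)
  if (ierr /= MPI_SUCCESS) call MPI_Abort(MPI_COMM_WORLD, 4, ierr)

  call MPI_Group_free(reorder_group, ierr)
  call MPI_Group_free(base_group, ierr)
  if (reorder_comm /= MPI_COMM_NULL) call MPI_Comm_free(reorder_comm, ierr)
  call MPI_Finalize(ierr)
end program reorder_comm_sketch

Run with exactly 8 processes (e.g. mpirun -np 8); Octopus then wraps the resulting communicator via mpi_grp_init and builds the Cartesian topology on top of it, as the surrounding comments describe.
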
@@ -697,12 +711,12 @@ contains
       if(num_slaves > 0) call create_slave_intercommunicators()
     else
       ! we initialize these communicators so we can use them even in serial
-      mc%group_comm = reorder_grp%comm
+      mc%group_comm = base_grp%comm
       mc%who_am_i = 0
-      mc%master_comm = reorder_grp%comm
-      mc%dom_st_comm = reorder_grp%comm
-      mc%st_kpt_comm = reorder_grp%comm
-      mc%dom_st_kpt_comm = reorder_grp%comm
+      mc%master_comm = base_grp%comm
+      mc%dom_st_comm = base_grp%comm
+      mc%st_kpt_comm = base_grp%comm
+      mc%dom_st_kpt_comm = base_grp%comm
     end if

     ! This is temporary debugging information.
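
Finally, a sketch of the step the comment at the end of the second hunk points to: building an MPI Cartesian topology on a communicator whose ranks have already been reordered, and deriving one sub-communicator per parallelization level with MPI_Cart_sub. The dimensions, the use of MPI_COMM_WORLD in place of the reordered communicator, and all names are assumptions for illustration, not Octopus code.

program cart_topology_sketch
  use mpi
  implicit none
  integer, parameter :: ndims = 4
  integer :: dims(ndims)
  logical :: periods(ndims), remain(ndims)
  integer :: cart_comm, dim_comm(ndims)
  integer :: idim, nprocs, cart_rank, coords(ndims), ierr

  call MPI_Init(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

  ! assumed decomposition; its product must match the number of processes
  dims = (/ 2, 2, 2, 1 /)
  if (product(dims) /= nprocs) call MPI_Abort(MPI_COMM_WORLD, 1, ierr)

  ! reorder = .false. keeps the rank order of the input communicator, so a
  ! previously prepared (column-major) ordering carries over to the grid
  periods = .false.
  call MPI_Cart_create(MPI_COMM_WORLD, ndims, dims, periods, .false., cart_comm, ierr)
  if (ierr /= MPI_SUCCESS) call MPI_Abort(MPI_COMM_WORLD, 2, ierr)

  ! one sub-communicator per parallelization level: keep a single dimension
  do idim = 1, ndims
    remain = .false.
    remain(idim) = .true.
    call MPI_Cart_sub(cart_comm, remain, dim_comm(idim), ierr)
    if (ierr /= MPI_SUCCESS) call MPI_Abort(MPI_COMM_WORLD, 3, ierr)
  end do

  ! each process can look up its coordinates in the hypercube
  call MPI_Comm_rank(cart_comm, cart_rank, ierr)
  call MPI_Cart_coords(cart_comm, cart_rank, ndims, coords, ierr)
  print '(a,i2,a,4i2)', 'cart rank ', cart_rank, ' at coordinates ', coords

  do idim = 1, ndims
    call MPI_Comm_free(dim_comm(idim), ierr)
  end do
  call MPI_Comm_free(cart_comm, ierr)
  call MPI_Finalize(ierr)
end program cart_topology_sketch

Each dim_comm(idim) groups the processes that differ only in coordinate idim, i.e. one communicator per parallelization level of the hypercube.
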