Commit aca32a50 authored by giannozz

Module mp_global split into more modules, one per parallelization level.

Module mp_global is still there for compatibility and still contains the
routine mp_startup that performs the initialization, but it should no
longer be used in new developments and should be slowly replaced by the
specific modules. A new module containing command-line options has been
added. Command-line options are read at the beginning of the run and
broadcast to all processors (there is no guarantee that command-line
options can be accessed by all MPI processes). Nothing should be broken
by these changes, but please verify that all parallelization levels work
(I have limited access to parallel machines right now).

The reason for these changes, and for those that will come soon, is to make
image parallelization easier (in particular in NEB, but also for other cases).
Right now it is a pain, partly because the initialization is inadequate and
partly because it is never clear who reads/writes what from/to where.
It will take a few weeks before everything converges to a stable state.
Meanwhile, please be patient and fix/report problems.
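
For new developments the intended style is, schematically (a minimal sketch;
the pool and image variable names follow the same conventions as the
band-group ones introduced here and are assumed, not spelled out in this diff):

    ! old style, still working through the compatibility module:
    !    USE mp_global, ONLY : intra_bgrp_comm, nbgrp
    ! new style, one USE per parallelization level:
    USE mp_bands,  ONLY : intra_bgrp_comm, nbgrp
    USE mp_pools,  ONLY : intra_pool_comm, npool
    USE mp_images, ONLY : my_image_id, nimage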



git-svn-id: http://qeforge.qe-forge.org/svn/q-e/trunk/[email protected] c92efa57-630b-4861-b058-cf58834340f0
parent aca74920
@@ -611,7 +611,7 @@ newd.o : ../../Modules/fft_interfaces.o
 newd.o : ../../Modules/ions_base.o
 newd.o : ../../Modules/kind.o
 newd.o : ../../Modules/mp.o
-newd.o : ../../Modules/mp_global.o
+newd.o : ../../Modules/mp_bands.o
 newd.o : ../../Modules/uspp.o
 newd.o : modules.o
 newd.o : smallbox.o
@@ -670,6 +670,8 @@ ortho_base.o : ../../Modules/io_global.o
 ortho_base.o : ../../Modules/ions_base.o
 ortho_base.o : ../../Modules/kind.o
 ortho_base.o : ../../Modules/mp.o
+ortho_base.o : ../../Modules/mp_bands.o
+ortho_base.o : ../../Modules/mp_diag.o
 ortho_base.o : ../../Modules/mp_global.o
 ortho_base.o : ../../Modules/ptoolkit.o
 ortho_base.o : ../../Modules/recvec.o
@@ -27,8 +27,8 @@
    USE electrons_base, ONLY: nspin
    USE control_flags, ONLY: iprint, thdyn, tfor, tprnfor
    USE mp, ONLY: mp_sum
-   USE mp_global, ONLY: intra_bgrp_comm, inter_bgrp_comm, &
-                        distribute_over_bgrp, my_bgrp_id, nbgrp
+   USE mp_bands, ONLY: intra_bgrp_comm, inter_bgrp_comm, &
+                       my_bgrp_id, nbgrp
    USE fft_interfaces, ONLY: invfft
    USE fft_base, ONLY: dfftb, dfftp
    !
@@ -282,8 +282,9 @@ END SUBROUTINE diagonalize_parallel
 SUBROUTINE mesure_mmul_perf( n )
    !
-   USE mp_global, ONLY: nproc_bgrp, me_bgrp, intra_bgrp_comm, &
-                        root_bgrp, ortho_comm, nproc_ortho, np_ortho, &
+   USE mp_bands, ONLY: nproc_bgrp, me_bgrp, intra_bgrp_comm, &
+                       root_bgrp
+   USE mp_diag, ONLY: ortho_comm, nproc_ortho, np_ortho, &
                        me_ortho, init_ortho_group, ortho_comm_id
    USE io_global, ONLY: ionode, stdout
    USE mp, ONLY: mp_sum, mp_bcast, mp_barrier
@@ -46,6 +46,13 @@ mp.o \
 mp_base.o \
 mp_global.o \
 mp_wave.o \
+mp_world.o \
+mp_images.o \
+mp_pots.o \
+mp_pools.o \
+mp_bands.o \
+mp_diag.o \
+command_line_options.o \
 noncol.o \
 open_close_input_file.o \
 parallel_include.o \
!
! Copyright (C) 2013 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!----------------------------------------------------------------------------
MODULE command_line_options
!----------------------------------------------------------------------------
!
! ... Read variables from command line and broadcast them to all processors
! ... ( this is done because there is no guarantee that all processors
! ... have access to command-line options in parallel execution )
! ... Interpret QE-specific variables, store the corresponding values
! ... Leave the rest (including the code name) in "command_line"
!
USE mp, ONLY : mp_bcast
USE mp_world, ONLY : root, world_comm
USE io_global, ONLY : meta_ionode
!
IMPLICIT NONE
SAVE
!
! ... Number of arguments in command line
INTEGER :: nargs = 0
! ... QE arguments read from command line
INTEGER :: nimage_= 1, npool_= 1, npot_= 1, ndiag_ = 0, nband_= 1, ntg_= 1
CHARACTER(LEN=80) :: input_file_ = ' '
! ... Command line arguments not identified
CHARACTER(LEN=256) :: command_line = ' '
!
!PRIVATE
!PUBLIC :: command_line
!
CONTAINS
!
SUBROUTINE get_command_line ( )
IMPLICIT NONE
INTEGER :: narg
! Do not define iargc as external: gfortran doesn't like it
INTEGER :: iargc
CHARACTER(LEN=80) :: arg
CHARACTER(LEN=6), EXTERNAL :: int_to_char
!
command_line = ' '
nargs = iargc()
!
arg = ' '
narg=0
10 CONTINUE
CALL getarg ( narg, arg )
narg = narg + 1
SELECT CASE ( TRIM(arg) )
CASE ( '-i', '-in', '-inp', '-input' )
CALL getarg ( narg, input_file_ )
IF ( TRIM (input_file_) == ' ' ) GO TO 15
narg = narg + 1
CASE ( '-ni', '-nimage', '-nimages' )
CALL getarg ( narg, arg )
READ ( arg, *, ERR = 15, END = 15) nimage_
narg = narg + 1
CASE ( '-npot', '-npots' )
CALL getarg ( narg, arg )
READ ( arg, *, ERR = 15, END = 15) npot_
narg = narg + 1
CASE ( '-nk', '-npool', '-npools')
CALL getarg ( narg, arg )
READ ( arg, *, ERR = 15, END = 15) npool_
narg = narg + 1
CASE ( '-nt', '-ntg', '-ntask_groups')
CALL getarg ( narg, arg )
READ ( arg, *, ERR = 15, END = 15) ntg_
narg = narg + 1
CASE ( '-nb', '-nband', '-nbgrp', '-nband_group')
CALL getarg ( narg, arg )
READ ( arg, *, ERR = 15, END = 15) nband_
narg = narg + 1
CASE ( '-nd', '-ndiag', '-northo', '-nproc_diag', '-nproc_ortho')
CALL getarg ( narg, arg )
READ ( arg, *, ERR = 15, END = 15) ndiag_
narg = narg + 1
CASE DEFAULT
command_line = TRIM(command_line) // ' ' // TRIM(arg)
END SELECT
IF ( narg > nargs ) GO TO 20
GO TO 10
! .... something wrong, notify and continue
15 CALL mp_bcast ( narg, 0 ) !, world_comm )
CALL infomsg ('get_command_line', 'unexpected argument # ' // &
& int_to_char(narg) // ':' //TRIM(arg), narg)
narg = narg + 1
GO TO 10
20 CONTINUE
CALL mp_bcast( command_line, root, world_comm )
CALL mp_bcast( input_file_ , root, world_comm )
CALL mp_bcast( nimage_, root, world_comm )
CALL mp_bcast( npot_ , root, world_comm )
CALL mp_bcast( npool_ , root, world_comm )
CALL mp_bcast( ntg_ , root, world_comm )
CALL mp_bcast( nband_ , root, world_comm )
CALL mp_bcast( ndiag_ , root, world_comm )
END SUBROUTINE get_command_line
!
END MODULE command_line_options
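
A minimal usage sketch (illustrative, not part of the file; it assumes MPI and
the mp_world module are already initialized, since the broadcast goes through
root and world_comm):

    USE command_line_options, ONLY : get_command_line, nimage_, npool_, input_file_
    CALL get_command_line ( )
    ! nimage_, npool_, ..., input_file_ are now set on all MPI tasks

On the command line this corresponds to invocations such as
"pw.x -ni 2 -nk 4 -nb 2 -nd 4 -i pw.in"; the code name and any options not
recognized above are left in "command_line".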
@@ -30,6 +30,9 @@ check_stop.o : set_signal.o
 clocks.o : io_global.o
 clocks.o : kind.o
 clocks.o : mp_global.o
+command_line_options.o : io_global.o
+command_line_options.o : mp.o
+command_line_options.o : mp_world.o
 compute_dipole.o : cell_base.o
 compute_dipole.o : fft_base.o
 compute_dipole.o : kind.o
@@ -131,13 +134,38 @@ mm_dispersion.o : mp_global.o
 mp.o : io_global.o
 mp.o : kind.o
 mp.o : parallel_include.o
+mp_bands.o : command_line_options.o
+mp_bands.o : mp.o
+mp_bands.o : parallel_include.o
 mp_base.o : kind.o
 mp_base.o : parallel_include.o
+mp_diag.o : command_line_options.o
+mp_diag.o : mp.o
+mp_diag.o : mp_bands.o
+mp_diag.o : mp_pools.o
+mp_diag.o : parallel_include.o
+mp_global.o : command_line_options.o
 mp_global.o : io_global.o
 mp_global.o : mp.o
-mp_global.o : parallel_include.o
+mp_global.o : mp_bands.o
+mp_global.o : mp_diag.o
+mp_global.o : mp_images.o
+mp_global.o : mp_pools.o
+mp_global.o : mp_pots.o
+mp_global.o : mp_world.o
+mp_images.o : command_line_options.o
+mp_images.o : io_global.o
+mp_images.o : mp.o
+mp_images.o : parallel_include.o
+mp_pools.o : command_line_options.o
+mp_pools.o : mp.o
+mp_pools.o : parallel_include.o
+mp_pots.o : command_line_options.o
+mp_pots.o : mp.o
+mp_pots.o : parallel_include.o
 mp_wave.o : kind.o
 mp_wave.o : parallel_include.o
+mp_world.o : io_global.o
+mp_world.o : mp.o
 noncol.o : kind.o
 noncol.o : parameters.o
 open_close_input_file.o : ../iotk/src/iotk_module.o
 !
-! Copyright (C) 2002-2009 Quantum ESPRESSO group
+! Copyright (C) 2002-2013 Quantum ESPRESSO group
 ! This file is distributed under the terms of the
 ! GNU General Public License. See the file `License'
 ! in the root directory of the present distribution,
@@ -9,7 +9,10 @@
 #if defined __HPM
 # include "/cineca/prod/hpm/include/f_hpm.h"
 #endif
 !
+! This module contains interfaces to most low-level MPI operations:
+! initialization and stopping, broadcast, parallel sum, etc.
+!
 !------------------------------------------------------------------------------!
 MODULE mp
 !------------------------------------------------------------------------------!
@@ -183,17 +186,12 @@
    ierr = 0
    taskid = 0
-#if defined __HPM
-   ! terminate the IBM Hardware performance monitor
 #if defined(__MPI)
    CALL mpi_comm_rank( mpi_comm_world, taskid, ierr)
 #endif
+#if defined __HPM
+   ! terminate the IBM Hardware performance monitor
    CALL f_hpmterminate( taskid )
 #endif
 #if defined(__MPI)
    CALL mpi_finalize(ierr)
    IF (ierr/=0) CALL mp_stop( 8004 )
 #endif
!
! Copyright (C) 2013 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!----------------------------------------------------------------------------
MODULE mp_bands
!----------------------------------------------------------------------------
!
USE mp, ONLY : mp_barrier, mp_bcast, mp_size, mp_rank
USE command_line_options, ONLY : nband_
USE parallel_include
!
IMPLICIT NONE
SAVE
!
! ... Band groups (subsets of processors within a k-point pool)
! ... Subdivision of the pool group, used for parallelization over bands
!
INTEGER :: nbgrp = 1 ! number of band groups
INTEGER :: nproc_bgrp = 1 ! number of processors within a band group
INTEGER :: me_bgrp = 0 ! index of the processor within a band group
INTEGER :: root_bgrp = 0 ! index of the root processor within a band group
INTEGER :: my_bgrp_id = 0 ! index of my band group
INTEGER :: inter_bgrp_comm = 0 ! inter band group communicator
INTEGER :: intra_bgrp_comm = 0 ! intra band group communicator
!
! ... The following variables are not set during initialization but later
!
INTEGER :: ibnd_start = 0 ! starting band index
INTEGER :: ibnd_end = 0 ! ending band index
!
CONTAINS
!
!----------------------------------------------------------------------------
SUBROUTINE mp_start_bands( parent_comm )
!---------------------------------------------------------------------------
!
! ... Divide processors (of the "parent_comm" group) into band groups
! ... Requires: nband_, read from command line
! ... parent_comm, typically processors of a k-point pool
! ... (intra_pool_comm)
!
IMPLICIT NONE
!
INTEGER, INTENT(IN) :: parent_comm
!
INTEGER :: parent_nproc = 1, parent_mype = 0, ierr = 0
!
#if defined (__MPI)
!
parent_nproc = mp_size( parent_comm )
parent_mype = mp_rank( parent_comm )
!
! ... nband_ must have been previously read from command line argument
! ... by a call to routine get_command_line
!
nbgrp = nband_
!
IF ( nbgrp < 1 .OR. nbgrp > parent_nproc ) CALL errore( 'init_bands', &
'invalid number of band groups, out of range', 1 )
IF ( MOD( parent_nproc, nbgrp ) /= 0 ) CALL errore( 'init_bands', &
'n. of band groups must be divisor of parent_nproc', 1 )
!
! ... Set number of processors per band group
!
nproc_bgrp = parent_nproc / nbgrp
!
! ... set index of band group for this processor ( 0 : nbgrp - 1 )
!
my_bgrp_id = parent_mype / nproc_bgrp
!
! ... set index of processor within the band group ( 0 : nproc_bgrp - 1 )
!
me_bgrp = MOD( parent_mype, nproc_bgrp )
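!
! ... e.g. parent_nproc = 8 with nbgrp = 2: tasks 0-3 form band group 0,
! ... tasks 4-7 form band group 1, and me_bgrp runs from 0 to 3 in each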
!
CALL mp_barrier( parent_comm )
!
! ... the intra_bgrp_comm communicator is created
!
CALL MPI_COMM_SPLIT( parent_comm, my_bgrp_id, parent_mype, intra_bgrp_comm, ierr )
!
IF ( ierr /= 0 ) CALL errore( 'init_bands', &
'intra band group communicator initialization', ABS(ierr) )
!
CALL mp_barrier( parent_comm )
!
! ... the inter_bgrp_comm communicator is created
!
CALL MPI_COMM_SPLIT( parent_comm, me_bgrp, parent_mype, inter_bgrp_comm, ierr )
!
IF ( ierr /= 0 ) CALL errore( 'init_bands', &
'inter band group communicator initialization', ABS(ierr) )
!
#endif
RETURN
!
END SUBROUTINE mp_start_bands
!
SUBROUTINE init_index_over_band (comm,nbnd)
!
IMPLICIT NONE
INTEGER, INTENT(IN) :: comm, nbnd
INTEGER :: npe, myrank, ierror, rest, k
myrank = mp_rank(comm)
npe = mp_size(comm)
rest = mod(nbnd, npe)
k = int(nbnd/npe)
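! Each task gets k = nbnd/npe bands; the first rest = MOD(nbnd,npe) tasks
! get one extra band. E.g. nbnd = 10 on npe = 4 tasks yields bands
! 1-3, 4-6, 7-8, 9-10 on tasks 0, 1, 2, 3 respectively.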
IF ( k >= 1) THEN
IF (rest > myrank) THEN
ibnd_start = (myrank)*k + (myrank+1)
ibnd_end = (myrank+1)*k + (myrank+1)
ELSE
ibnd_start = (myrank)*k + rest + 1
ibnd_end = (myrank+1)*k + rest
ENDIF
ELSE
ibnd_start = 1
ibnd_end = nbnd
ENDIF
END SUBROUTINE init_index_over_band
!
END MODULE mp_bands
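
A usage sketch (illustrative; intra_pool_comm, the communicator of a k-point
pool, and the number of bands nbnd are assumed to come from the caller):

    USE mp_bands, ONLY : mp_start_bands, init_index_over_band, &
                         inter_bgrp_comm, ibnd_start, ibnd_end
    CALL mp_start_bands ( intra_pool_comm )
    CALL init_index_over_band ( inter_bgrp_comm, nbnd )
    ! each band group now works on its own bands ibnd_start:ibnd_end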
!
! Copyright (C) 2013 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!----------------------------------------------------------------------------
MODULE mp_diag
!----------------------------------------------------------------------------
!
USE mp, ONLY : mp_size, mp_rank, mp_sum, mp_comm_free
USE command_line_options, ONLY : ndiag_
!
! The following variables are needed in order to set up the communicator
! for scalapack
!
USE mp_pools, ONLY : npool, nproc_pool, my_pool_id
USE mp_bands, ONLY : nbgrp, my_bgrp_id
!
USE parallel_include
!
IMPLICIT NONE
SAVE
!
! ... linear-algebra group (also known as "ortho" or "diag" group).
! ... Used to parallelize dense-matrix diagonalization and matrix-matrix
! ... products, as needed by iterative diagonalization/orthonormalization
!
INTEGER :: np_ortho(2) = 1 ! size of the processor grid used in ortho
INTEGER :: me_ortho(2) = 0 ! coordinates of the processors
INTEGER :: me_ortho1 = 0 ! task id for the ortho group
INTEGER :: nproc_ortho = 1 ! size of the ortho group
INTEGER :: leg_ortho = 1 ! the distance in the parent communicator
! between two neighbouring processors of ortho_comm
INTEGER :: ortho_comm = 0 ! communicator for the ortho group
INTEGER :: ortho_row_comm = 0 ! communicator for the ortho row group
INTEGER :: ortho_col_comm = 0 ! communicator for the ortho col group
INTEGER :: ortho_comm_id= 0 ! id of the ortho_comm
!
#if defined __SCALAPACK
INTEGER :: me_blacs = 0 ! BLACS processor index starting from 0
INTEGER :: np_blacs = 1 ! number of BLACS processors
INTEGER :: world_cntx = -1 ! BLACS context including all processors
INTEGER :: ortho_cntx = -1 ! BLACS context for ortho_comm
#endif
!
CONTAINS
!
!----------------------------------------------------------------------------
SUBROUTINE mp_start_diag( parent_comm )
!---------------------------------------------------------------------------
!
! ... Ortho/diag/linear algebra group initialization
!
IMPLICIT NONE
!
INTEGER, INTENT(IN) :: parent_comm ! communicator of the parent group
!
INTEGER :: nproc_ortho_try
INTEGER :: parent_nproc ! nproc of the parent group
INTEGER :: ierr = 0
!
parent_nproc = mp_size( parent_comm )
!
#if defined __SCALAPACK
! define a 1D grid containing all MPI tasks of the MPI_COMM_WORLD communicator
!
CALL BLACS_PINFO( me_blacs, np_blacs )
CALL BLACS_GET( -1, 0, world_cntx )
CALL BLACS_GRIDINIT( world_cntx, 'Row', 1, np_blacs )
!
#endif
!
IF( ndiag_ > 0 ) THEN
! command-line argument -ndiag N or -northo N set to a value N
! use the command line value ensuring that it falls in the proper range
nproc_ortho_try = MIN( ndiag_ , parent_nproc )
ELSE
! no command-line argument -ndiag N or -northo N is present
! insert here custom architecture specific default definitions
#if defined __SCALAPACK
nproc_ortho_try = MAX( parent_nproc/2, 1 )
#else
nproc_ortho_try = 1
#endif
END IF
!
! the ortho group for parallel linear algebra is a sub-group of the pool,
! so there are as many ortho groups as pools.
!
CALL init_ortho_group( nproc_ortho_try, parent_comm )
!
RETURN
!
END SUBROUTINE mp_start_diag
!
!
SUBROUTINE init_ortho_group( nproc_try_in, comm_all )
!
IMPLICIT NONE
INTEGER, INTENT(IN) :: nproc_try_in, comm_all
LOGICAL, SAVE :: first = .true.
INTEGER :: ierr, color, key, me_all, nproc_all, nproc_try
#if defined __SCALAPACK
INTEGER, ALLOCATABLE :: blacsmap(:,:)
INTEGER, ALLOCATABLE :: ortho_cntx_pe(:,:,:)
INTEGER :: nprow, npcol, myrow, mycol, i, j, k
INTEGER, EXTERNAL :: BLACS_PNUM
!
INTEGER :: nparent=1
INTEGER :: total_nproc=1
INTEGER :: total_mype=0
INTEGER :: nproc_parent=1
INTEGER :: my_parent_id=0
#endif
#if defined __MPI
me_all = mp_rank( comm_all )
!
nproc_all = mp_size( comm_all )
!
nproc_try = MIN( nproc_try_in, nproc_all )
nproc_try = MAX( nproc_try, 1 )
IF( .NOT. first ) THEN
!
! free resources associated to the communicator
!
CALL mp_comm_free( ortho_comm )
!
#if defined __SCALAPACK
IF( ortho_comm_id > 0 ) THEN
CALL BLACS_GRIDEXIT( ortho_cntx )
ENDIF
ortho_cntx = -1
#endif
!
END IF
! find the square grid closest to, but not larger than, nproc_try
!
CALL grid2d_dims( 'S', nproc_try, np_ortho(1), np_ortho(2) )
!
! now, and only now, it is possible to define the number of tasks
! in the ortho group for parallel linear algebra
!
nproc_ortho = np_ortho(1) * np_ortho(2)
!
IF( nproc_all >= 4*nproc_ortho ) THEN
!
! here we choose a processor every 4, in order not to stress memory BW
! on multi-core processors, for which further performance enhancements are
! possible using OpenMP BLAS inside regterg/cegterg/rdiaghg/cdiaghg
! (to be implemented)
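! e.g. with nproc_all = 16 and nproc_ortho = 4, tasks 0, 4, 8 and 12
! get color = 1 and enter the ortho group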
!
color = 0
IF( me_all < 4*nproc_ortho .AND. MOD( me_all, 4 ) == 0 ) color = 1
!
leg_ortho = 4
!
ELSE IF( nproc_all >= 2*nproc_ortho ) THEN
!
! here we choose a processor every 2, in order not to stress memory BW
!
color = 0
IF( me_all < 2*nproc_ortho .AND. MOD( me_all, 2 ) == 0 ) color = 1
!
leg_ortho = 2
!
ELSE
!
! here we choose the first processors
!
color = 0
IF( me_all < nproc_ortho ) color = 1
!
leg_ortho = 1
!
END IF
!
key = me_all
!
! initialize the communicator for the new group by splitting the input communicator
!
CALL MPI_COMM_SPLIT( comm_all, color, key, ortho_comm, ierr )
IF( ierr /= 0 ) &
CALL errore( " init_ortho_group ", " initializing ortho group communicator ", ierr )
!
! Computes coordinates of the processors, in row-major order
!
me_ortho1 = mp_rank( ortho_comm )
!
IF( me_all == 0 .AND. me_ortho1 /= 0 ) &
CALL errore( " init_ortho_group ", " wrong root task in ortho group ", ierr )
!
if( color == 1 ) then
ortho_comm_id = 1
CALL GRID2D_COORDS( 'R', me_ortho1, np_ortho(1), np_ortho(2), me_ortho(1), me_ortho(2) )
CALL GRID2D_RANK( 'R', np_ortho(1), np_ortho(2), me_ortho(1), me_ortho(2), ierr )
IF( ierr /= me_ortho1 ) &
CALL errore( " init_ortho_group ", " wrong task coordinates in ortho group ", ierr )
IF( me_ortho1*leg_ortho /= me_all ) &
CALL errore( " init_ortho_group ", " wrong rank assignment in ortho group ", ierr )
CALL MPI_COMM_SPLIT( ortho_comm, me_ortho(2), me_ortho(1), ortho_col_comm, ierr )
CALL MPI_COMM_SPLIT( ortho_comm, me_ortho(1), me_ortho(2), ortho_row_comm, ierr )
else
ortho_comm_id = 0
me_ortho(1) = me_ortho1
me_ortho(2) = me_ortho1
endif
#if defined __SCALAPACK
!
! This part is used to eliminate the dependency of the ortho groups on
! the image; SCALAPACK is then independent of whatever level of
! parallelization is present on top of pool parallelization
!
total_nproc = mp_size(mpi_comm_world)
total_mype = mp_rank(mpi_comm_world)
nparent = total_nproc/npool/nproc_pool
nproc_parent = total_nproc/nparent
my_parent_id = total_mype/nproc_parent
!
!
ALLOCATE( ortho_cntx_pe( npool, nbgrp, nparent ) )
ALLOCATE( blacsmap( np_ortho(1), np_ortho(2) ) )
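!
! For each (pool, band group, parent) triplet, build the map from 2D
! ortho-grid coordinates to BLACS task numbers: only the tasks belonging
! to that triplet's ortho group fill in their own entry; the mp_sum below
! then makes the map known to all tasks in the world group
!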
DO j = 1, nparent
DO k = 1, nbgrp
DO i = 1, npool
CALL BLACS_GET( -1, 0, ortho_cntx_pe( i, k, j ) ) ! take a default value
blacsmap = 0
nprow = np_ortho(1)
npcol = np_ortho(2)
IF( ( j == ( my_parent_id + 1 ) ) .and. ( k == ( my_bgrp_id + 1 ) ) .and. &
( i == ( my_pool_id + 1 ) ) .and. ( ortho_comm_id > 0 ) ) THEN
blacsmap( me_ortho(1) + 1, me_ortho(2) + 1 ) = BLACS_PNUM( world_cntx, 0, me_blacs )
END IF
! All MPI tasks defined in world comm take part in the definition of the BLACS grid
CALL mp_sum( blacsmap )
CALL BLACS_GRIDMAP( ortho_cntx_pe(i,k,j), blacsmap, nprow, nprow, npcol )
CALL BLACS_GRIDINFO( ortho_cntx_pe(i,k,j), nprow, npcol, myrow, mycol )
IF( ( j == ( my_parent_id + 1 ) ) .and. ( k == ( my_bgrp_id + 1 ) ) .and. &
( i == ( my_pool_id + 1 ) ) .and. ( ortho_comm_id > 0 ) ) THEN
IF( np_ortho(1) /= nprow ) &
CALL errore( ' init_ortho_group ', ' problem with SCALAPACK, wrong no. of task rows ', 1 )
IF( np_ortho(2) /= npcol ) &
CALL errore( ' init_ortho_group ', ' problem with SCALAPACK, wrong no. of task columns ', 1 )
IF( me_ortho(1) /= myrow ) &
CALL errore( ' init_ortho_group ', ' problem with SCALAPACK, wrong task row ID ', 1 )
IF( me_ortho(2) /= mycol ) &
CALL errore( ' init_ortho_group ', ' problem with SCALAPACK, wrong task columns ID ', 1 )
ortho_cntx = ortho_cntx_pe(i,k,j)
END IF
END DO
END DO
END DO
DEALLOCATE( blacsmap )
DEALLOCATE( ortho_cntx_pe )
#endif
#else
ortho_comm_id = 1
#endif
first = .false.
RETURN
END SUBROUTINE init_ortho_group
!
END MODULE mp_diag
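
Again a sketch of the expected call (per the comment in mp_start_diag, the
ortho group is built as a sub-group of a k-point pool, whose communicator
intra_pool_comm is assumed here):

    USE mp_diag, ONLY : mp_start_diag
    CALL mp_start_diag ( intra_pool_comm )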
!
! Copyright (C) 2013 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!----------------------------------------------------------------------------
MODULE mp_images
!----------------------------------------------------------------------------
!
USE mp, ONLY : mp_barrier, mp_bcast, mp_size, mp_rank