Commit 3632a252 authored by Alberto Garcia

Put back the MPI interfaces

The custom MPI interfaces in Src/MPI are now compiled
by default, except if the preprocessor option

 -DNO_MPI_INTERFACES

is specified.
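
As a minimal sketch of how one might opt out (assuming the flag is simply added to FPPFLAGS; the layout follows the arch.make fragments shown further down in this diff):

 # illustrative arch.make fragment: disable the custom MPI wrappers
 FPPFLAGS= $(FPPFLAGS_CDF) $(FPPFLAGS_MPI) -DNO_MPI_INTERFACES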

+ 2D arrays have to be passed as a(1,1) in the
  mpi_(all)gather calls in the pexsi modules
  (see the sketch after this list)

+ Update pexsi arch.makes in Src/Sys
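
A sketch of the a(1,1) point above, taken from the pexsi hunks below (presumably the explicit interfaces only cover certain buffer ranks, and passing the first element keeps the call valid through ordinary Fortran sequence association):

 ! before: the whole 2D receive buffer is passed directly
 !  call MPI_AllGather(pexsi_pole_ranks_in_world, npPerPole, MPI_integer, &
 !                     PEXSI_Pole_Ranks_in_World_Spin, npPerPole, &
 !                     MPI_integer, PEXSI_Spin_Comm, ierr)
 ! after: the first element of the 2D array is passed instead
 call MPI_AllGather(pexsi_pole_ranks_in_world, npPerPole, MPI_integer, &
                    PEXSI_Pole_Ranks_in_World_Spin(1,1), npPerPole, &
                    MPI_integer, PEXSI_Spin_Comm, ierr)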


parent af3efe33
------------------------------------------------------------
July 30, 2016 A. Garcia trunk-537--pexsi-v0.8-spin-12
Put back the MPI interfaces
The custom MPI interfaces in Src/MPI are now compiled
by default, except if the preprocessor option
-DNO_MPI_INTERFACES
is specified.
+ 2D arrays have to be passed as a(1,1) in the
mpi_(all)gather calls in the pexsi modules...
+ Update pexsi arch.makes in Src/Sys
------------------------------------------------------------
July 30, 2016 A. Garcia trunk-537--pexsi-v0.8-spin-11
Wrap PEXSI code within preprocessor blocks
......
@@ -6,18 +6,41 @@
! See Docs/Contributors.txt for a list of contributors.
! ---
MODULE MPI_SIESTA
#ifndef NO_MPI_INTERFACES
!
! This is an interface to supplant some MPI routines called by siesta,
! in order to time-profile them. J.M.Soler. May.2009
!
USE MPI_INTERFACES, & ! Previously called MPI_SIESTA
trueMPI_BARRIER => MPI_BARRIER, & ! Renamed to avoid conflicts
trueMPI_COMM_RANK => MPI_COMM_RANK, &
trueMPI_COMM_SIZE => MPI_COMM_SIZE, &
trueMPI_COMM_SPLIT => MPI_COMM_SPLIT, &
trueMPI_GET_COUNT => MPI_GET_COUNT, &
trueMPI_INIT => MPI_INIT, &
trueMPI_WAIT => MPI_WAIT, &
trueMPI_WAITALL => MPI_WAITALL
USE TIMER_MPI_M, only: timer_mpi
#else /* NO_MPI_INTERFACES */
! For this PEXSI version, temporarily removed timing versions of some
! MPI routines.
#undef MPI
#endif /* NO_MPI_INTERFACES */
! The following construction allows MPI_Comm_World to be supplanted within SIESTA,
! so that SIESTA can be used as a subroutine with its own internal MPI communicator.
! JMS. Oct.2010, AG, March 2013
#ifndef NO_MPI_INTERFACES
! JMS. Oct.2010
USE MPI_INTERFACES, only: true_MPI_Comm_World => MPI_Comm_World
#else /* NO_MPI_INTERFACES */
! AG, March 2013
USE MPI, true_MPI_Comm_World => MPI_Comm_World
#endif /* NO_MPI_INTERFACES */
integer, public :: MPI_Comm_World = true_MPI_Comm_World
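! With this, MPI_Comm_World becomes an ordinary module variable (initialized to
! the true world communicator), which a driver can reassign to a sub-communicator
! before running SIESTA as a subroutine.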
#ifdef NO_MPI_INTERFACES
public :: true_MPI_Comm_World
@@ -30,9 +53,10 @@ MODULE MPI_SIESTA
#endif
#endif /* NO_MPI_INTERFACES */
!
! Export explicitly some symbols to help some versions of
! the PGI compiler. It does not consider them public by default
! the PGI compiler, which do not consider them public by default
!
public :: mpi_real
public :: mpi_complex
@@ -46,10 +70,139 @@ MODULE MPI_SIESTA
public :: mpi_status_size
public :: mpi_comm_self
public :: mpi_grid_real
public :: mpi_finalize
public :: mpi_group_null, mpi_comm_null, mpi_proc_null
! public :: mpi_thread_single
public :: mpi_thread_funneled
#ifndef NO_MPI_INTERFACES
PUBLIC :: MPI_BARRIER
INTERFACE MPI_BARRIER
MODULE PROCEDURE myMPI_BARRIER
END INTERFACE
PUBLIC :: MPI_COMM_RANK
INTERFACE MPI_COMM_RANK
MODULE PROCEDURE myMPI_COMM_RANK
END INTERFACE
PUBLIC :: MPI_COMM_SIZE
INTERFACE MPI_COMM_SIZE
MODULE PROCEDURE myMPI_COMM_SIZE
END INTERFACE
PUBLIC :: MPI_COMM_SPLIT
INTERFACE MPI_COMM_SPLIT
MODULE PROCEDURE myMPI_COMM_SPLIT
END INTERFACE
PUBLIC :: MPI_GET_COUNT
INTERFACE MPI_GET_COUNT
MODULE PROCEDURE myMPI_GET_COUNT
END INTERFACE
PUBLIC :: MPI_INIT
INTERFACE MPI_INIT
MODULE PROCEDURE myMPI_INIT
END INTERFACE
PUBLIC :: MPI_WAIT
INTERFACE MPI_WAIT
MODULE PROCEDURE myMPI_WAIT
END INTERFACE
PUBLIC :: MPI_WAITALL
INTERFACE MPI_WAITALL
MODULE PROCEDURE myMPI_WAITALL
END INTERFACE
CONTAINS
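! Timing wrappers: each routine brackets the corresponding MPI call
! with timer_mpi so that time spent in MPI can be profiled.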
SUBROUTINE myMPI_BARRIER(COMM, IERROR)
INTEGER, INTENT(IN) :: COMM
INTEGER, INTENT(OUT) :: IERROR
external MPI_BARRIER
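! note: the 'external' declaration above makes this call resolve to the MPI
! library routine rather than to this module's generic MPI_BARRIER interface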
call timer_mpi('MPI_BARRIER',1)
call MPI_BARRIER(COMM, IERROR)
call timer_mpi('MPI_BARRIER',2)
END SUBROUTINE myMPI_BARRIER
SUBROUTINE myMPI_COMM_RANK(COMM, RANK, IERROR)
INTEGER, INTENT(IN) :: COMM
INTEGER, INTENT(OUT) :: RANK
INTEGER, INTENT(OUT) :: IERROR
external MPI_COMM_RANK
call timer_mpi('MPI_COMM_RANK',1)
call MPI_COMM_RANK(COMM, RANK, IERROR)
call timer_mpi('MPI_COMM_RANK',2)
END SUBROUTINE myMPI_COMM_RANK
SUBROUTINE myMPI_COMM_SIZE(COMM, SIZE, IERROR)
INTEGER, INTENT(IN) :: COMM
INTEGER, INTENT(OUT) :: SIZE
INTEGER, INTENT(OUT) :: IERROR
external MPI_COMM_SIZE
call timer_mpi('MPI_COMM_SIZE',1)
call MPI_COMM_SIZE(COMM, SIZE, IERROR)
call timer_mpi('MPI_COMM_SIZE',2)
END SUBROUTINE myMPI_COMM_SIZE
SUBROUTINE myMPI_COMM_SPLIT(COMM, COLOR, KEY, NEWCOMM, IERROR)
INTEGER, INTENT(IN) :: COMM
INTEGER, INTENT(IN) :: COLOR
INTEGER, INTENT(IN) :: KEY
INTEGER, INTENT(OUT) :: NEWCOMM
INTEGER, INTENT(OUT) :: IERROR
external MPI_COMM_SPLIT
call timer_mpi('MPI_COMM_SPLIT',1)
call MPI_COMM_SPLIT(COMM, COLOR, KEY, NEWCOMM, IERROR)
call timer_mpi('MPI_COMM_SPLIT',2)
END SUBROUTINE myMPI_COMM_SPLIT
SUBROUTINE myMPI_GET_COUNT(STATUS, DATATYPE, COUNT, IERROR)
USE MPI__INCLUDE, ONLY: MPI_STATUS_SIZE
INTEGER, INTENT(IN) :: STATUS(MPI_STATUS_SIZE)
INTEGER, INTENT(IN) :: DATATYPE
INTEGER, INTENT(OUT) :: COUNT
INTEGER, INTENT(OUT) :: IERROR
external MPI_GET_COUNT
call timer_mpi('MPI_GET_COUNT',1)
call MPI_GET_COUNT(STATUS, DATATYPE, COUNT, IERROR)
call timer_mpi('MPI_GET_COUNT',2)
END SUBROUTINE myMPI_GET_COUNT
SUBROUTINE myMPI_INIT(IERROR)
INTEGER, INTENT(OUT) :: IERROR
external MPI_INIT
call timer_mpi('MPI_INIT',1)
call MPI_INIT(IERROR)
call timer_mpi('MPI_INIT',2)
END SUBROUTINE myMPI_INIT
SUBROUTINE myMPI_WAIT(REQUEST, STATUS, IERROR)
USE MPI__INCLUDE, ONLY: MPI_STATUS_SIZE
INTEGER, INTENT(INOUT) :: REQUEST
INTEGER, INTENT(OUT) :: STATUS(MPI_STATUS_SIZE)
INTEGER, INTENT(OUT) :: IERROR
external MPI_WAIT
call timer_mpi('MPI_WAIT',1)
call MPI_WAIT(REQUEST, STATUS, IERROR)
call timer_mpi('MPI_WAIT',2)
END SUBROUTINE myMPI_WAIT
SUBROUTINE myMPI_WAITALL( &
COUNT, ARRAY_OF_REQUESTS, ARRAY_OF_STATUSES, IERROR)
USE MPI__INCLUDE, ONLY: MPI_STATUS_SIZE
INTEGER, INTENT(IN) :: COUNT
INTEGER, INTENT(INOUT) :: ARRAY_OF_REQUESTS(*)
INTEGER, INTENT(OUT) :: ARRAY_OF_STATUSES(MPI_STATUS_SIZE,*)
INTEGER, INTENT(OUT) :: IERROR
external MPI_WAITALL
call timer_mpi('MPI_WAITALL',1)
call MPI_WAITALL(COUNT, ARRAY_OF_REQUESTS, ARRAY_OF_STATUSES, IERROR)
call timer_mpi('MPI_WAITALL',2)
END SUBROUTINE myMPI_WAITALL
#endif /* ! NO_MPI_INTERFACES */
END MODULE MPI_SIESTA
@@ -26,7 +26,7 @@ KINDS="4 8"
FPPFLAGS_MPI=-DMPI
#
PEXSI_INCFLAGS = -I/project/projectdirs/m1027/PEXSI/libpexsi_interfaces/$(PEXSI_VERSION)
FPPFLAGS_PEXSI=-DPEXSI_$(PEXSI_VERSION)
FPPFLAGS_PEXSI=-DPEXSI
#
# Extended interface
PEXSI_DIR = /project/projectdirs/m1027/PEXSI/libpexsi_edison
......
#
# This file is part of the SIESTA package.
#
# Copyright (c) Fundacion General Universidad Autonoma de Madrid:
# E.Artacho, J.Gale, A.Garcia, J.Junquera, P.Ordejon, D.Sanchez-Portal
# and J.M.Soler, 1996- .
#
# Use of this software constitutes agreement with the full conditions
# given in the SIESTA license, as signed by all legitimate users.
#
SIESTA_ARCH=gfortran-macosx64-openmpi
# The only thing you should change is the location of the libraries
# on your computer
#
FC=mpif90
#
FC_ASIS=$(FC)
#
FFLAGS= -g -O0 -fbacktrace #-fcheck=all
FFLAGS_CHECKS= -O0 -g -fcheck=all
FFLAGS_DEBUG= -g -O0
RANLIB=echo
COMP_LIBS=
#
#FOX_ROOT=$(HOME)/lib/FoX/4.1.2/gfortran-4.8.3
#
NETCDF_INCFLAGS=-I$(NETCDF_INCLUDE)
NETCDF_LIBS= $(NETCDF_FORTRAN_LIBS)
FPPFLAGS_CDF=-DCDF
#
MPI_INTERFACE=libmpi_f90.a
MPI_INCLUDE=. # Note . for no-op
FPPFLAGS_MPI=-DMPI
PEXSI_LIB=$(HOME)/lib/PEXSI/0.7.3/openmpi-1.8.1-gfortran-4.8.3/lib/libpexsi_osx_v0.7.3.a
SUPERLU_LIB=$(HOME)/lib/SuperLU_DIST_3.3/lib/libsuperlu_dist_3.3.a
PARMETIS_LIB=-L$(HOME)/lib/parmetis-4.0.2/lib -lparmetis
METIS_LIB=-L$(HOME)/lib/metis/lib -lmetis
MPICXX_LIB=/opt/openmpi-1.8.1-gfortran-4.8.3/lib/libmpi_cxx.dylib -lstdc++
LIBS=-L/opt/scalapack/openmpi-1.6.1-gfortran/lib \
-lscalapack -ltmg -lreflapack -lrefblas \
$(NETCDF_LIBS) \
$(PEXSI_LIB) $(SUPERLU_LIB) \
$(PARMETIS_LIB) $(METIS_LIB) $(MPICXX_LIB)
SYS=nag
FPPFLAGS= $(FPPFLAGS_CDF) $(FPPFLAGS_MPI) -DMPI_TIMING
#
#
.F.o:
$(FC) -c $(FFLAGS) $(INCFLAGS) $(FPPFLAGS) $<
.f.o:
$(FC) -c $(FFLAGS) $(INCFLAGS) $<
.F90.o:
$(FC) -c $(FFLAGS) $(INCFLAGS) $(FPPFLAGS) $<
.f90.o:
$(FC) -c $(FFLAGS) $(INCFLAGS) $<
#
@@ -4,7 +4,7 @@
PEXSI_VERSION=0.9.0
PEXSI_LIB_DIR=/gpfs/projects/bsc21/bsc21308/SIESTA/build/pexsi_v0.9.0
FPPFLAGS_PEXSI=-DPEXSI_$(PEXSI_VERSION)
FPPFLAGS_PEXSI=-DPEXSI
PEXSI_INCFLAGS=-I$(PEXSI_LIB_DIR)/fortran
SIESTA_ARCH=MareNostrum3-intel-openmpi-pexsi$(PEXSI_VERSION)
......
@@ -20,13 +20,16 @@ FFLAGS= -g -O2 -fbacktrace #-fcheck=all
FFLAGS_CHECKS= -O0 -g -fcheck=all
FFLAGS_DEBUG= -g -O0
RANLIB=echo
#
# Put here libfdict.a and libncdf.a if -DNCDF(_4)
#
COMP_LIBS=
#
#FOX_ROOT=$(HOME)/lib/FoX/4.1.2/gfortran-4.8.3
#
PEXSI_VERSION=0.8.0
PEXSI_LIB_DIR=$(HOME)/lib/PEXSI/$(PEXSI_VERSION)/openmpi-1.8.1-gfortran-4.8.3
FPPFLAGS_PEXSI=-DPEXSI_$(PEXSI_VERSION)
FPPFLAGS_PEXSI=-DPEXSI
PEXSI_INCFLAGS=-I$(PEXSI_LIB_DIR)/include
#
NETCDF_ROOT=/usr/local
......
@@ -178,7 +178,7 @@ call MPI_Group_translate_ranks( PEXSI_Pole_Group, npPerPole, &
! in the distribution object
allocate (PEXSI_Pole_ranks_in_World_Spin(npPerPole,nspin))
call MPI_AllGather(pexsi_pole_ranks_in_world,npPerPole,MPI_integer,&
PEXSI_Pole_Ranks_in_World_Spin,npPerPole, &
PEXSI_Pole_Ranks_in_World_Spin(1,1),npPerPole, &
MPI_integer,PEXSI_Spin_Comm,ierr)
! Create distributions known to all nodes
@@ -363,7 +363,7 @@ call timer("pexsi-raw-inertia-ct", 2)
allocate(intdos_spin(npoints,nspin))
call MPI_Gather( intdos, npoints, MPI_double_precision, &
intdos_spin, npoints, MPI_double_precision, &
intdos_spin(1,1), npoints, MPI_double_precision, &
0, PEXSI_Spin_Comm, ierr )
if (mpirank == 0) then
......
@@ -217,7 +217,7 @@ call MPI_Group_translate_ranks( PEXSI_Pole_Group, npPerPole, &
allocate (PEXSI_Pole_ranks_in_World_Spin(npPerPole,nspin))
call MPI_AllGather(pexsi_pole_ranks_in_world,npPerPole,MPI_integer,&
PEXSI_Pole_Ranks_in_World_Spin,npPerPole, &
PEXSI_Pole_Ranks_in_World_Spin(1,1),npPerPole, &
MPI_integer,PEXSI_Spin_Comm,ierr)
! Create distributions known to all nodes
......
@@ -215,7 +215,7 @@
! in the distribution object
allocate (PEXSI_Pole_ranks_in_World_Spin(npPerPole,nspin))
call MPI_AllGather(pexsi_pole_ranks_in_world,npPerPole,MPI_integer,&
PEXSI_Pole_Ranks_in_World_Spin,npPerPole, &
PEXSI_Pole_Ranks_in_World_Spin(1,1),npPerPole, &
MPI_integer,PEXSI_Spin_Comm,ierr)
! Create distributions known to all nodes
......
trunk-537--pexsi-v0.8-spin-11
trunk-537--pexsi-v0.8-spin-12