Skip to content

Commit

Permalink
Implemented Yingdi's fix for the RK solver (#56)
Browse files Browse the repository at this point in the history
  • Loading branch information
martijnende authored Jul 22, 2020
1 parent aa37cd8 commit 9d68bdc
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 5 deletions.
15 changes: 15 additions & 0 deletions src/ode_rk45.f90
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,8 @@ end subroutine rkf45_d

subroutine rkfs_d ( f, neqn, y, t, tout, relerr, abserr, iflag, yp, h, &
f1, f2, f3, f4, f5, savre, savae, nfe, kop, init, jflag, kflag )

use my_mpi, only: is_MPI_parallel, max_allproc, min_allproc
!
!*******************************************************************************
!
Expand Down Expand Up @@ -464,6 +466,7 @@ subroutine rkfs_d ( f, neqn, y, t, tout, relerr, abserr, iflag, yp, h, &
double precision y(neqn)
double precision yp(neqn)
double precision ypk
double precision hminglob ! YD
!
! Check the input parameters.
!
Expand Down Expand Up @@ -627,6 +630,12 @@ subroutine rkfs_d ( f, neqn, y, t, tout, relerr, abserr, iflag, yp, h, &
end if
end do

! YD
if (is_MPI_parallel()) then
call min_allproc(h,hminglob)
h = hminglob
endif

if ( toln <= 0.0D+00 ) then
h = 0.0D+00
end if
Expand Down Expand Up @@ -773,6 +782,12 @@ subroutine rkfs_d ( f, neqn, y, t, tout, relerr, abserr, iflag, yp, h, &

end do

! YD: MPI sync max err, min overhead
if (is_MPI_parallel()) then
call max_allproc(eeoet, hminglob)
eeoet = hminglob
endif

esttol = abs ( h ) * eeoet * scale / 752400.0D+00

! SEISMIC: if the integration result contains NaNs,
Expand Down
18 changes: 13 additions & 5 deletions src/parallel.f90
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ module my_mpi
my_mpi_tag, my_mpi_rank, my_mpi_NPROCS, is_mpi_parallel, &
is_mpi_master, gather_allv, gather_allvdouble, &
gather_allvdouble_root, gather_allvi_root, gather_alli, &
synchronize_all, sum_allreduce, max_allproc
synchronize_all, sum_allreduce, max_allproc, min_allproc

contains

Expand All @@ -28,10 +28,6 @@ subroutine init_mpi()

call MPI_INIT(ier)
if (ier /= 0 ) stop 'Error initializing MPI'
call MPI_COMM_RANK(MPI_COMM_WORLD,MY_RANK,ier)
if (ier /= 0 ) stop 'Error getting MPI rank'
call MPI_COMM_SIZE(MPI_COMM_WORLD,NPROCS,ier)
if (ier /= 0 ) stop 'Error getting MPI world size'

if (NPROCS<2) call MPI_FINALIZE(ier)
if (MY_RANK==0) write(6,*) 'Number of processors = ',NPROCS
Expand Down Expand Up @@ -234,4 +230,16 @@ subroutine max_allproc(sendbuf, recvbuf)
end subroutine max_allproc

!-------------------------------------------------------------------------------------------------
!
!> Compute the global minimum of a double-precision scalar across all
!! MPI ranks (blocking collective over MPI_COMM_WORLD). Counterpart of
!! max_allproc; added so the RK45 driver can synchronize the step size h
!! to the smallest value proposed by any rank.
subroutine min_allproc(sendbuf, recvbuf)

  double precision, intent(in)  :: sendbuf  ! this rank's local value
  double precision, intent(out) :: recvbuf  ! global minimum over all ranks
  integer :: ier  ! MPI status; unchecked — default MPI error handler aborts on failure

  call MPI_ALLREDUCE(sendbuf,recvbuf,1,MPI_DOUBLE_PRECISION,MPI_MIN,MPI_COMM_WORLD,ier)

end subroutine min_allproc

!-------------------------------------------------------------------------------------------------

end module my_mpi

0 comments on commit 9d68bdc

Please sign in to comment.