Commit d404d981 authored by Sabine

Merge branch 'dev' of git.nilu.no:flexpart/flexpart into dev

parents 46706c74 861805ae
@@ -89,10 +89,13 @@ module mpi_mod
! MPI tags/requests for send/receive operation
integer :: tm1
-  integer, parameter :: nvar_async=26 !27 !29 :DBG:
+  integer, parameter :: nvar_async=26
!integer, dimension(:), allocatable :: tags
integer, dimension(:), allocatable :: reqs
! Status array used for certain MPI operations (MPI_RECV)
integer, dimension(MPI_STATUS_SIZE) :: mp_status
integer :: id_read   ! readwind/getfield process
integer :: numpart_mpi,maxpart_mpi ! number of particles per node
@@ -118,7 +121,7 @@ module mpi_mod
logical, parameter :: mp_dbg_mode = .false.
logical, parameter :: mp_dev_mode = .false.
logical, parameter :: mp_dbg_out = .false.
-  logical, parameter :: mp_time_barrier=.true.
+  logical, parameter :: mp_time_barrier=.false.
logical, parameter :: mp_measure_time=.false.
logical, parameter :: mp_exact_numpart=.true.
@@ -148,6 +151,22 @@ module mpi_mod
! dat_lun   logical unit number for i/o
integer, private :: dat_lun
! Temporary arrays for particles (allocated and deallocated as needed)
integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
& itramem_tmp, itrasplit_tmp
real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
real, allocatable, dimension(:) :: ztra1_tmp
real, allocatable, dimension(:,:) :: xmass1_tmp
! mp_redist_fract Exchange particles between processes if relative difference
! is larger. A good value is between 0.0 and 0.5
! mp_maxpart_factor Allocate more memory per process than strictly needed
! (safety factor). Recommended value between 1.5 and 2.5
! mp_min_redist Do not redistribute particles if below this limit
real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
integer,parameter :: mp_min_redist=100000
contains

subroutine mpif_init
@@ -241,7 +260,6 @@ contains
! as running with one process less but not using separate read process
!**********************************************************************
-  ! id_read = min(mp_np-1, 1)
id_read = mp_np-1
if (mp_pid.eq.id_read) lmpreader=.true.
@@ -310,8 +328,8 @@ contains
end if

! Set maxpart per process
-  if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
-  maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart
+  ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
+  maxpart_mpi=int(mp_maxpart_factor*maxpart/mp_partgroup_np)

! Do not allocate particle data arrays for readwind process
if (lmpreader.and.lmp_use_reader) then
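
The new sizing rule gives each process mp_maxpart_factor times its even share of maxpart, so a moderately unbalanced particle distribution no longer overflows the per-process arrays. The arithmetic as a minimal standalone sketch (the values of maxpart and mp_partgroup_np below are made up for illustration):

program maxpart_sizing
  implicit none
  ! Illustrative values; in FLEXPART these come from par_mod and the MPI setup
  integer, parameter :: maxpart = 1000000    ! total particle capacity
  integer, parameter :: mp_partgroup_np = 8  ! number of particle processes
  real, parameter    :: mp_maxpart_factor = 1.5
  integer :: maxpart_mpi

  ! Same expression as in mpif_init above: an even share times a safety
  ! factor, rather than the exact division-with-remainder used before
  maxpart_mpi = int(mp_maxpart_factor*maxpart/mp_partgroup_np)
  write(*,*) 'per-process capacity:', maxpart_mpi   ! prints 187500
end program maxpart_sizing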
@@ -320,14 +338,10 @@ contains
! Set random seed for each non-root process
if (mp_pid.gt.0) then
-    ! if (mp_pid.ge.0) then
-    ! call system_clock(s)
  s = 244
  mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
end if
-  if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
if (mp_dbg_mode) then
-    ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
  mp_seed=0
  if (lroot) write(*,*) 'MPI: setting seed=0'
end if

@@ -453,6 +467,366 @@ contains

end subroutine mpif_tm_send_dims
subroutine mpif_send_part_properties(num_part)
!***********************************************************************
! Distribute particle properties from root to all processes.
!
! Used by init_domainfill_mpi
!
! Variables:
!
! num_part input, number of particles per process (rounded up)
!
!***********************************************************************
use com_mod
implicit none
integer,intent(in) :: num_part
integer :: i,jj, addone
! Time for MPI communications
!****************************
if (mp_measure_time) call mpif_mtime('commtime',0)
! Distribute variables: send from pid 0 to all processes (pid 0 included)
!****************************************************************************
call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
&num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
&num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
&num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
do i=1,nspec
call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
&num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
end do
if (mp_measure_time) call mpif_mtime('commtime',1)
write(*,*) "PID ", mp_partid, "ending MPI_Scatter operation"
goto 601
600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
stop
! After the transfer of particles it is possible that the value of
! "numpart" is larger than the actual number of valid particles, so we
! reduce it if there are invalid particles at the end of the arrays
601 do i=num_part, 1, -1
if (itra1(i).eq.-999999999) then
numpart=numpart-1
else
exit
end if
end do
!601 end subroutine mpif_send_part_properties
end subroutine mpif_send_part_properties
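
For illustration, the scatter pattern used above in isolation: root distributes equal-length chunks of a temporary array to every process, itself included. A minimal standalone sketch (communicator, root rank and chunk length are made up, and mpi_mod's mp_dp/mp_sp type handles are replaced by MPI_INTEGER for brevity):

program scatter_sketch
  use mpi
  implicit none
  integer, parameter :: num_part = 4   ! chunk length per process
  integer :: my_rank, n_ranks, ierr, i
  integer, allocatable :: sendbuf(:)
  integer :: recvbuf(num_part)

  call MPI_INIT(ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, my_rank, ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, n_ranks, ierr)

  if (my_rank.eq.0) then
    ! Only the root's send buffer contents matter
    allocate(sendbuf(num_part*n_ranks))
    sendbuf = (/ (i, i=1, num_part*n_ranks) /)
  else
    allocate(sendbuf(1))
  end if

  ! Each rank (root included) receives its own num_part-element chunk
  call MPI_SCATTER(sendbuf, num_part, MPI_INTEGER, recvbuf, &
       & num_part, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
  if (ierr /= 0) stop 'scatter_sketch> MPI error'

  write(*,*) 'rank', my_rank, 'received', recvbuf
  call MPI_FINALIZE(ierr)
end program scatter_sketch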
subroutine mpif_calculate_part_redist(itime)
!***********************************************************************
! Calculate the number of particles to redistribute between processes. This
! routine can be called at regular time intervals to keep an approximately
! equal number of particles on each process.
!
! First, all processes report their local "numpart" to each other, which is
! stored in the array "numparticles_mpi(np)". This array is sorted from low to
! high values, along with a corresponding process ID array "idx_arr(np)".
! If the relative difference between the highest and lowest value
! is above a threshold, particles are transferred from process idx_arr(np-1)
! to process idx_arr(0). This is repeated for the pairs idx_arr(np-1-i) and
! idx_arr(i) for all valid i.
! Note: If np is an odd number, the process with the median
! number of particles is left unchanged
!
! VARIABLES
! itime input, current time
!***********************************************************************
use com_mod
implicit none
integer, intent(in) :: itime
real :: pmin,z
integer :: i,jj,nn, num_part=1,m,imin, num_trans
logical :: first_iter
integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
! Exit if running with only 1 process
!************************************************************************
if (mp_np.eq.1) return
! All processes exchange information on number of particles
!****************************************************************************
allocate(numparticles_mpi(0:mp_partgroup_np-1), &
&idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
& 1, MPI_INTEGER, mp_comm_used, mp_ierr)
! Sort from lowest to highest
! Initial guess: correct order
sorted(:) = numparticles_mpi(:)
do i=0, mp_partgroup_np-1
idx_arr(i) = i
end do
! For each successive element in index array, see if a lower value exists
do i=0, mp_partgroup_np-2
pmin=sorted(i)
imin=idx_arr(i)
m=i+1
do jj=m, mp_partgroup_np-1
if (pmin.le.sorted(jj)) cycle
z=pmin
pmin=sorted(jj)
sorted(jj)=z
nn=imin
imin=idx_arr(jj)
idx_arr(jj)=nn
end do
sorted(i)=pmin
idx_arr(i)=imin
end do
! For each pair of processes, transfer particles if the difference is above a
! limit and numpart at the sending process is large enough
m=mp_partgroup_np-1 ! index for last sorted process (most particles)
do i=0,mp_partgroup_np/2-1
num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
& real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
end if
end if
m=m-1
end do
deallocate(numparticles_mpi, idx_arr, sorted)
end subroutine mpif_calculate_part_redist
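
A worked example of the criterion above, with made-up counts: if the busiest process holds 150000 particles and the idlest 30000, then num_trans = 120000; the relative difference 120000/150000 = 0.8 exceeds mp_redist_fract = 0.2 and the sender holds more than mp_min_redist = 100000 particles, so num_trans/2 = 60000 particles are moved. With 90000 versus 60000 particles, 30000/90000 = 0.33 would pass the fraction test, but 90000 < mp_min_redist, so nothing is transferred. The decision in isolation, as a standalone sketch:

program redist_decision
  implicit none
  real, parameter    :: mp_redist_fract = 0.2
  integer, parameter :: mp_min_redist = 100000
  integer :: nsend, nrecv, num_trans

  ! Made-up counts for the busiest/idlest pair after sorting
  nsend = 150000
  nrecv = 30000
  num_trans = nsend - nrecv
  if (nsend.gt.mp_min_redist .and. &
       & real(num_trans)/real(nsend).gt.mp_redist_fract) then
    write(*,*) 'transfer', num_trans/2, 'particles'   ! 60000 here
  else
    write(*,*) 'no transfer'
  end if
end program redist_decision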
subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
!***********************************************************************
! Transfer particle properties between two arbitrary processes.
!
! VARIABLES
!
! itime input, current time
! src_proc input, ID of source process
! dest_proc input, ID of destination process
! num_trans input, number of particles to transfer
!
!************************************************************************
use com_mod !TODO: ,only: nclass etc
implicit none
integer, intent(in) :: itime, src_proc, dest_proc, num_trans
integer :: ll, ul ! lower and upper indices in arrays
integer :: arr_sz ! size of temporary arrays
integer :: mtag ! MPI message tag
integer :: i, j, minpart, ipart, maxnumpart
! Measure time for MPI communications
!************************************
if (mp_measure_time) call mpif_mtime('commtime',0)
! Send particles
!***************
if (mp_partid.eq.src_proc) then
mtag = 2000
! Array indices for the transferred particles
ll=numpart-num_trans+1
ul=numpart
! if (mp_dev_mode) then
! write(*,FMT='(72("#"))')
! write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
! write(*,*) "numpart before: ", numpart
! end if
call MPI_SEND(nclass(ll:ul),num_trans,&
& MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
call MPI_SEND(npoint(ll:ul),num_trans,&
& MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
call MPI_SEND(itra1(ll:ul),num_trans, &
& MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
call MPI_SEND(idt(ll:ul),num_trans, &
& MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
call MPI_SEND(itramem(ll:ul),num_trans, &
& MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
call MPI_SEND(itrasplit(ll:ul),num_trans,&
& MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
call MPI_SEND(xtra1(ll:ul),num_trans, &
& mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
call MPI_SEND(ytra1(ll:ul),num_trans,&
& mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
call MPI_SEND(ztra1(ll:ul),num_trans,&
& mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
do j=1,nspec
call MPI_SEND(xmass1(ll:ul,j),num_trans,&
& mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
end do
! Terminate transferred particles, update numpart
itra1(ll:ul) = -999999999
numpart = numpart-num_trans
! if (mp_dev_mode) then
! write(*,*) "numpart after: ", numpart
! write(*,FMT='(72("#"))')
! end if
else if (mp_partid.eq.dest_proc) then
! Receive particles
!******************
mtag = 2000
! Array indices for the transferred particles
ll=numpart+1
ul=numpart+num_trans
! if (mp_dev_mode) then
! write(*,FMT='(72("#"))')
! write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
! write(*,*) "numpart before: ", numpart
! end if
! We could receive the data directly at nclass(ll:ul) etc., but this leaves
! vacant spaces at lower indices. Using temporary arrays instead.
arr_sz = ul-ll+1
allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
& idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
& xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
& xmass1_tmp(arr_sz, maxspec))
call MPI_RECV(nclass_tmp,num_trans,&
& MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(npoint_tmp,num_trans,&
& MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(itra1_tmp,num_trans, &
& MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(idt_tmp,num_trans, &
& MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(itramem_tmp,num_trans, &
& MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(itrasplit_tmp,num_trans,&
& MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(xtra1_tmp,num_trans, &
& mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(ytra1_tmp,num_trans,&
& mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
call MPI_RECV(ztra1_tmp,num_trans,&
& mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
do j=1,nspec
call MPI_RECV(xmass1_tmp(:,j),num_trans,&
& mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
end do
! This is the maximum value numpart can possibly have after the transfer
maxnumpart=numpart+num_trans
! Search for vacant space and copy from temporary storage
!********************************************************
minpart=1
do i=1, num_trans
! Take into account that we may have transferred invalid particles
if (itra1_tmp(minpart).ne.itime) goto 200
do ipart=minpart,maxnumpart
if (itra1(ipart).ne.itime) then
itra1(ipart) = itra1_tmp(minpart)
npoint(ipart) = npoint_tmp(minpart)
nclass(ipart) = nclass_tmp(minpart)
idt(ipart) = idt_tmp(minpart)
itramem(ipart) = itramem_tmp(minpart)
itrasplit(ipart) = itrasplit_tmp(minpart)
xtra1(ipart) = xtra1_tmp(minpart)
ytra1(ipart) = ytra1_tmp(minpart)
ztra1(ipart) = ztra1_tmp(minpart)
xmass1(ipart,:) = xmass1_tmp(minpart,:)
! Increase numpart, if necessary
numpart=max(numpart,ipart)
goto 200 ! Storage space has been found, stop searching
end if
end do
200 minpart=minpart+1
end do
deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,itrasplit_tmp,&
& xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
! if (mp_dev_mode) then
! write(*,*) "numpart after: ", numpart
! write(*,FMT='(72("#"))')
! end if
else
! This routine should only be called by the two participating processes
write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
stop
return
end if
! Measure time for MPI communications
!************************************
if (mp_measure_time) call mpif_mtime('commtime',1)
end subroutine mpif_redist_part
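
The sends and receives above are matched by message tag: tags mtag+1 through mtag+9 carry the fixed particle properties and mtag+9+j carries the mass of species j, so each array travels as its own uniquely tagged message. A minimal standalone sketch of this tagged point-to-point pattern (ranks, tag base and buffer length are made up; assumes a run with at least two ranks):

program sendrecv_sketch
  use mpi
  implicit none
  integer, parameter :: n = 5, mtag = 2000
  integer :: my_rank, ierr
  integer :: buf(n), mp_status(MPI_STATUS_SIZE)

  call MPI_INIT(ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, my_rank, ierr)

  if (my_rank.eq.0) then        ! source process
    buf = 42
    call MPI_SEND(buf, n, MPI_INTEGER, 1, mtag+1, MPI_COMM_WORLD, ierr)
  else if (my_rank.eq.1) then   ! destination process
    call MPI_RECV(buf, n, MPI_INTEGER, 0, mtag+1, MPI_COMM_WORLD, &
         & mp_status, ierr)
    write(*,*) 'rank 1 received', buf
  end if

  call MPI_FINALIZE(ierr)
end program sendrecv_sketch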
subroutine mpif_tm_send_vars
!***********************************************************************
! Distribute particle variables from pid0 to all processes.
@@ -2443,4 +2817,79 @@ contains
end subroutine write_data_dbg
subroutine set_fields_synthetic()
!*******************************************************************************
! DESCRIPTION
! Set all meteorological fields to synthetic (usually constant/homogeneous)
! values.
! Used for validation and error-checking
!
! NOTE
! This version uses asynchronous communications.
!
! VARIABLES
!
!
!
!*******************************************************************************
use com_mod
implicit none
integer :: li=1, ui=2 ! wfmem indices (i.e., operate on entire field)
if (.not.lmp_sync) ui=3
! Variables transferred at initialization only
!*********************************************
! readclouds=readclouds_
oro(:,:)=0.0
excessoro(:,:)=0.0
lsm(:,:)=0.0
xlanduse(:,:,:)=0.0
! wftime
! numbwf
! nmixz
! height
! Time-varying fields:
uu(:,:,:,li:ui) = 10.0
vv(:,:,:,li:ui) = 0.0
uupol(:,:,:,li:ui) = 10.0
vvpol(:,:,:,li:ui)=0.0
ww(:,:,:,li:ui)=0.
tt(:,:,:,li:ui)=300.
rho(:,:,:,li:ui)=1.3
drhodz(:,:,:,li:ui)=.0
tth(:,:,:,li:ui)=0.0
qvh(:,:,:,li:ui)=1.0
qv(:,:,:,li:ui)=1.0
pv(:,:,:,li:ui)=1.0
clouds(:,:,:,li:ui)=0
clwc(:,:,:,li:ui)=0.0
ciwc(:,:,:,li:ui)=0.0
! 2D fields
cloudsh(:,:,li:ui)=0
vdep(:,:,:,li:ui)=0.0
ps(:,:,:,li:ui)=1.0e5
sd(:,:,:,li:ui)=0.0
tcc(:,:,:,li:ui)=0.0
tt2(:,:,:,li:ui)=300.
td2(:,:,:,li:ui)=250.
lsprec(:,:,:,li:ui)=0.0
convprec(:,:,:,li:ui)=0.0
ustar(:,:,:,li:ui)=1.0
wstar(:,:,:,li:ui)=1.0
hmix(:,:,:,li:ui)=10000.
tropopause(:,:,:,li:ui)=10000.
oli(:,:,:,li:ui)=0.01
end subroutine set_fields_synthetic
end module mpi_mod
@@ -45,7 +45,7 @@ subroutine releaseparticles(itime)
! npart(maxpoint)     number of particles to be released in total           *
! numrel              number of particles to be released during this time   *
!                     step                                                  *
-  ! addpart             extra particle assigned to MPI process if            *
+  ! addone              extra particle assigned to MPI process if            *
!                     mod(npart(i),mp_partgroup_np) .ne. 0)                 *
!*****************************************************************************
@@ -415,7 +415,8 @@ subroutine releaseparticles(itime)
      goto 34 ! Storage space has been found, stop searching
    endif
  end do
-  if (ipart.gt.maxpart) goto 996
+  ! ESO TODO: Here we could use dynamic allocation and increase array sizes
+  if (ipart.gt.maxpart_mpi) goto 996
34 minpart=ipart+1
end do
@@ -103,7 +103,7 @@ subroutine timemanager
implicit none

logical :: reqv_state=.false. ! .true. if waiting for a MPI_Irecv to complete
-  integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 !,mind
+  integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0
! integer :: ksp
integer :: ip
integer :: loutnext,loutstart,loutend
@@ -155,6 +155,7 @@ subroutine timemanager
do itime=0,ideltas,lsynctime

! Computation of wet deposition, OH reaction and mass transfer
! between two species every lsynctime seconds
! maybe wet depo frequency can be relaxed later but better be on safe side
@@ -165,7 +166,7 @@ subroutine timemanager
! changed by Petra Seibert 9/02
!********************************************************************

-  if (mp_dev_mode) write(*,*) 'myid, itime: ',mp_pid,itime
+  if (mp_dbg_mode) write(*,*) 'myid, itime: ',mp_pid,itime

if (WETDEP .and. itime .ne. 0 .and. numpart .gt. 0) then
  if (verbosity.gt.0) then
@@ -274,6 +275,12 @@ subroutine timemanager
if (mp_measure_time.and..not.(lmpreader.and.lmp_use_reader)) call mpif_mtime('getfields',1)
! For validation and tests: call the function below to set all fields to simple
! homogeneous values
! if (mp_dev_mode) call set_fields_synthetic
!*******************************************************************************
if (lmpreader.and.nstop1.gt.1) stop 'NO METEO FIELDS AVAILABLE'

! Reader process goes back to top of time loop (unless simulation end)
@@ -325,6 +332,11 @@ subroutine timemanager
endif
! Check if particles should be redistributed among processes
!***********************************************************
call mpif_calculate_part_redist(itime)
! Compute convective mixing for forward runs
! for backward runs it is done before next windfield is read in
!**************************************************************
@@ -541,20 +553,20 @@ subroutine timemanager
! Decide whether to write an estimate of the number of particles released,
! or exact number (require MPI reduce operation)
-  if (mp_dev_mode) then
+  if (mp_dbg_mode) then
  numpart_tot_mpi = numpart
else
  numpart_tot_mpi = numpart*mp_partgroup_np
end if

if (mp_exact_numpart.and..not.(lmpreader.and.lmp_use_reader).and.&
-    &.not.mp_dev_mode) then
+    &.not.mp_dbg_mode) then
  call MPI_Reduce(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, id_root, &
       & mp_comm_used, mp_ierr)
endif

!CGZ-lifetime: output species lifetime
-  if (lroot.or.mp_dev_mode) then
+  if (lroot.or.mp_dbg_mode) then
! write(*,*) 'Overview species lifetime in days', &
! real((species_lifetime(:,1)/species_lifetime(:,2))/real(3600.0*24.0))
! write(*,*) 'all info:',species_lifetime

@@ -565,6 +577,10 @@ subroutine timemanager
! end if
end if
! Write particles for all processes
if (mp_dev_mode) write(*,*) "PID, itime, numpart", mp_pid,itime,numpart
45 format(i13,' SECONDS SIMULATED: ',i13, ' PARTICLES: Uncertainty: ',3f7.3)
46 format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')

if (ipout.ge.1) then