! <Advection_mod.f90 - A component of the EMEP MSC-W Unified Eulerian
! Chemical transport Model>
!*****************************************************************************!
!*
!* Copyright (C) 2007-2011 met.no
!*
!* Contact information:
!* Norwegian Meteorological Institute
!* Box 43 Blindern
!* 0313 OSLO
!* NORWAY
!* email: [email protected]
!* http://www.emep.int
!*
!* This program is free software: you can redistribute it and/or modify
!* it under the terms of the GNU General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or
!* (at your option) any later version.
!*
!* This program is distributed in the hope that it will be useful,
!* but WITHOUT ANY WARRANTY; without even the implied warranty of
!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!* GNU General Public License for more details.
!*
!* You should have received a copy of the GNU General Public License
!* along with this program. If not, see <http://www.gnu.org/licenses/>.
!*****************************************************************************!
Module Advection_mod
!CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
! DESCRIPTION
!
! This module contains the routines for advection and diffusion.
! The sequence of advection in the x, y or z direction and of vertical
! diffusion is controlled in the advecdiff routine.
!
! The horizontal advection is performed in the advx and advy routines using
! "Bott's fourth order scheme". The routine preadvx and preadvy take care of
! the transfer of information between processors before the advection step.
!
! The advvk routine performs the vertical advection. Bott's second order
! scheme with variable grid distance is used.
! The calculation of the coefficients used for this scheme is done in the
! routine vgrid.
!
! Notes from Peter; 7/11/01
! About the division by the surface pressure (p*):
! In my opinion the best way is to divide by the advected p* (corresponding
! to the option where ADVEC_TYPE==1). This should ensure
! that, in the case of a uniform mixing ratio, we end up with a uniform mixing
! ratio, whatever the meteo. The problem is that if the meteo is not
! consistent (air is "created", or surface pressure does not correspond to the
! quantity of air) the total weight of species may vary, creating problems for
! the mass budget. I see however no simple solution for this problem.
!
!
! Peter, January 2002: The advecdiff routine has been completely reorganised,
! in order to allow for flexible timesteps. The timestep can now be large.
! The advecdiff routine will divide dt_advec in several advection steps if the
! CFL condition is not met. For small dt_advec (600 s in a 50x50 km2 grid)
! these changes should usually have no effect on the result.
!
! Peter, January 2003: The vertical diffusion and the division by p* have been
! extracted out of the advvdifvk routine. Now they are done separately.
! The number of diffusion iterations can be chosen (ndiff).
!
!CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
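!
! Schematic call sequence within one dt_advec step (a sketch only; the
! actual ordering alternates between x,y,z and y,x,z sequences, see
! advecdiff_Eta below):
!   advx/advy : horizontal advection (Bott's fourth order scheme)
!   advvk     : vertical advection (Bott's second order scheme)
!   vertdiffn : vertical diffusion (ndiff iterations)
!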
use Chemfields_mod, only: xn_adv
use ChemDims_mod, only: NSPEC_ADV
use ChemSpecs_mod, only: species,species_adv
use CheckStop_mod, only: CheckStop,StopAll
use Config_module, only: EPSIL, dt_advec
use Config_module, only : KMAX_BND,KMAX_MID,NMET, nstep, nmax, &
dt_advec, dt_advec_inv, PT,Pref, KCHEMTOP, &
NPROCX,NPROCY,NPROC, &
USES,USE_uEMEP,uEMEP,ZERO_ORDER_ADVEC
use Debug_module, only: DEBUG_ADV
use Convection_mod, only: convection_pstar,convection_Eta
use EmisDef_mod, only: NSECTORS, Nneighbors, loc_frac, loc_frac_1d
use GridValues_mod, only: GRIDWIDTH_M,xm2,xmd,xm2ji,xmdji,xm_i, Pole_Singular, &
dhs1, dhs1i, dhs2i, &
dA,dB,i_fdom,j_fdom,i_local,j_local,Eta_bnd,dEta_i,&
extendarea_N
use Io_mod, only: datewrite
use Io_Progs_mod, only: PrintLog
use MetFields_mod, only: ps,Etadot,SigmaKz,EtaKz,u_xmj,v_xmi,cnvuf,cnvdf&
,uw,ue,vs,vn
use MassBudget_mod, only: fluxin_top,fluxout_top,fluxin,fluxout
use My_Timing_mod, only: Code_timer, Add_2timing, tim_before,tim_after,NTIMING
!do not use "only", because MPI_IN_PLACE does not behave well on certain versions of gfortran(?)
use MPI_Groups_mod !, only :MPI_DOUBLE_PRECISION, MPI_MAX, MPI_SUM,MPI_INTEGER, MPI_BYTE, IERROR,&
! MPISTATUS, MPI_COMM_IO, MPI_COMM_CALC, ME_IO, ME_CALC, ME_MPI,MPI_IN_PLACE,&
! request_n,request_s,request_xn_n,request_xn_s,&
! request_e,request_w, request_xn_w, request_xn_e
use Par_mod, only: LIMAX,LJMAX,GJMAX,GIMAX,me,mex,mey,&
li0,li1,lj0,lj1 ,limax,ljmax, gi0, IRUNBEG,gj0, JRUNBEG &
,neighbor,WEST,EAST,SOUTH,NORTH,NOPROC &
,MSG_NORTH2,MSG_EAST2,MSG_SOUTH2,MSG_WEST2
use PhysicalConstants_mod, only: GRAV,ATWAIR ! gravity
use uEMEP_mod, only: uEMEP_Size1, uemep_adv_x, uemep_adv_y, uemep_adv_k, uemep_diff
implicit none
private
integer, private, parameter :: NADVS = 3
! for vertical advection (nonequidistant spacing)
real, private, save, allocatable, dimension(:,:,:) :: alfnew
real, private, save, dimension(3) :: alfbegnew,alfendnew
! real, private,save,allocatable, dimension(:,:,:) :: uw,ue
! real, private,save,allocatable, dimension(:,:,:) :: vs,vn
integer, public, parameter :: ADVEC_TYPE = 1 ! Divides by advected p*
! integer, public, parameter :: ADVEC_TYPE = 2 ! Divides by "meteorologically"
! advected p*
public :: assign_dtadvec
public :: assign_nmax
public :: alloc_adv_arrays
public :: vgrid
public :: vgrid_Eta
public :: advecdiff
public :: advecdiff_poles
public :: advecdiff_Eta
public :: vertdiff_1d
! public :: adv_var
! public :: adv_int
private :: advvk
private :: adv_vert_zero
private :: advx
private :: advy
private :: preadvx3
private :: preadvy3
!NB: vertdiffn is outside the module, because of circular dependencies with uEMEP_mod
! Checks & warnings
! introduced after getting NaN values; using "poor" meteo can give this too.
! ps3d can get zero values when the winds are extremely divergent (emptying a
! gridcell of air). This seems to happen only very occasionally (one
! gridcell, once a week for instance) and does not harm the results
! significantly, at least much less than the poor metdata does anyway.
! Still, we need to know about it.
integer, private, save :: nWarnings = 0
integer, private, parameter :: MAX_WARNINGS = 100
logical, save :: hor_adv0th=.false.
logical, save :: vert_adv0th=.false.
contains
!>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
subroutine assign_dtadvec(GRIDWIDTH_M)
!
! dt_advec is set according to the grid resolution
! The chosen timestep should lead to a Courant number < 1 for
! "normal" wind speeds, but this is not a strict limitation.
!
! The value of dt_advec must be an integer fraction of 3600 seconds
!
! The values put here are only suggestions
!
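! Illustrative examples (following the defaults below):
!   GRIDWIDTH_M = 50000. (50 km grid) gives dt_advec = 1200. s
!   GRIDWIDTH_M = 10000. (10 km grid) gives dt_advec =  600. s
!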
implicit none
real, intent(in) ::GRIDWIDTH_M
if(dt_advec<0.0)then
dt_advec=1800.0
if(GRIDWIDTH_M<61000.0) dt_advec=1200.0
if(GRIDWIDTH_M<21000.0) dt_advec= 900.0
if(GRIDWIDTH_M<11000.0) dt_advec= 600.0
if(GRIDWIDTH_M< 6000.0) dt_advec= 300.0
! GEMS025 domain 0.25 deg resol --> GRIDWIDTH_M~=27.8 km --> dt_advec=1200.0
! MACC02 domain 0.20 deg resol --> GRIDWIDTH_M~=22.2 km --> dt_advec=1200.0
if(me==0)write(*,fmt="(a,F8.1,a)")' advection time step (dt_advec) set to: ',dt_advec,' seconds'
else
!the value prescribed in the config file overrides the defaults above
if(me==0)write(*,fmt="(a,F8.1,a)")&
' advection time step (dt_advec) set by config file to: ',dt_advec,' seconds'
endif
!check that it is allowed:
call CheckStop(mod(3600,nint(dt_advec)).ne.0, "3600/dt_advec must be an integer")
dt_advec_inv=1.0/dt_advec
call alloc_adv_arrays ! should be moved elsewhere
end subroutine assign_dtadvec
!>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
subroutine assign_nmax(metstep)
implicit none
integer, intent(in) :: metstep
! Assign the number of time-steps for the inner time-loop (over metstep hours)
! from dt_advec
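! Illustrative example: metstep = 3 (hours) and dt_advec = 1200 s give
! nmax = nint(3600*3/1200.) = 9 advection steps per metstep.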
call CheckStop(mod(3600*metstep,nint(dt_advec)).ne.0, "3600*metstep/dt_advec must be an integer")
! Use nint for safety anyway:
nmax = nint( (3600*metstep)/dt_advec )
if (me .eq. 0) then
! write(6,*)
! write(6,*)'**********************************************'
write(6,fmt="(I3,a,I2,a)")nmax,' advection steps within each metstep (',metstep,' hours)'
! write(6,*)'**********************************************'
! write(6,*)
end if
end subroutine assign_nmax
!>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
subroutine advecdiff
call StopAll('advecdiff and sigma coordinates are no longer available')
end subroutine advecdiff
subroutine advecdiff_poles
call StopAll('advecdiff_poles and sigma coordinates are no longer available')
end subroutine advecdiff_poles
! <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
subroutine advecdiff_Eta
!___________________________________________________________________________________
!Uses more robust options:
!1) Advect the i and j directions independently, in 1D and with their own timestep
!2) Do not advect, but only "mix" the concentrations near the poles ("near"
!   the poles is determined by NITERXMAX).
!
!1/10/2012: divide by ps3d (p*) after each partial advection (x,y or z
!direction)
!
!Flexible timestep. Peter Wind january-2002
!
! dt_advec : time interval between two advections calls
! (controls time splitting between advection and chemistry )
!
! dt_xys : time interval between vertical and horizontal advection steps
!(controls time splitting between vertical and horizontal advection)
! There is one sequence (z),(x,y,y,x),(z) during each dt_xys
!
! dt_xy : time interval for horizontal advection iterations
!(controls time splitting between x and y advection)
! There is one sequence x,y,y,x during each dt_xy
!
! dt_s : time interval for vertical advection iterations
!
! dt_advec >= dt_xys >= max(dt_xy, dt_s)
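!
! Illustrative example (hypothetical numbers): with dt_advec = 1200 s and a
! wind field giving dt_xmax(j,k) = 500 s along some line, that line gets
! niterx = int(1200/500)+1 = 3 iterations with dt_x = 1200/3 = 400 s, so
! the CFL condition is met by construction.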
!
! March 2013: Eta coordinates
! P* = Ps-PT is replaced by (dA+dB*Ps)/(dA/Pref+dB)
! Both are defined by dP/dEta, but in general Eta coordinates dP/dEta is not height-independent
implicit none
! local
integer i,j,k,n,ix,iix
real dth
real xntop(NSPEC_ADV,LIMAX,LJMAX)
real xnw(3*NSPEC_ADV),xne(3*NSPEC_ADV)
real xnn(3*NSPEC_ADV),xns(3*NSPEC_ADV)
real dpdeta(LIMAX,LJMAX,KMAX_MID),psi
real psw(3),pse(3)
real psn(3),pss(3)
real ds3(2:KMAX_MID),ds4(2:KMAX_MID)
real xcmax(KMAX_MID,GJMAX),ycmax(KMAX_MID,GIMAX),scmax,sdcmax
real dt_smax,dt_s
real dt_x(LJMAX,KMAX_MID),dt_y(LIMAX,KMAX_MID)
real dt_xmax(LJMAX,KMAX_MID),dt_ymax(LIMAX,KMAX_MID)
integer niterx(LJMAX,KMAX_MID),nitery(LIMAX,KMAX_MID)
integer niterxys,niters,nxy,ndiff
integer iterxys,iters,iterx,itery,nxx,nxxmin,nyy,dx,dy
integer ::isum,isumtot,iproc,isec_poll1,ipoll,isec_poll
real :: xn_advjktot(NSPEC_ADV),xn_advjk(NSPEC_ADV),rfac
real :: dpdeta0,mindpdeta,xxdg,fac1
real :: xn_k(uEMEP%Nsec_poll*(1+(uEMEP%dist*2+1)*(uEMEP%dist*2+1)),kmax_mid),x
real :: fluxx(NSPEC_ADV,-1:LIMAX+1)
real :: fluxy(NSPEC_ADV,-1:LJMAX+1)
real :: fluxk(NSPEC_ADV,KMAX_MID)
logical,save :: firstcall = .true.
!NITERXMAX=max value of iterations accepted for fourth order Bott scheme.
!If the calculated number of iterations (determined from Courant number)
!exceeds NITERXMAX, the advection is not done; instead all the mixing
!ratios along that line are averaged (1D).
!This case can arise when there is a singularity close to the
!poles in long-lat coordinates.
integer,parameter :: NITERXMAX=40
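! Illustrative example (hypothetical numbers): near a lat-long pole the
! mapping factor xm2 grows very large; if dt_xmax(j,k) drops to 25 s then
! niterx(j,k) = int(1200/25)+1 = 49 > NITERXMAX, and that (j,k) line is
! averaged instead of advected.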
real :: tim_uemep_before,tim_uemep_after
xxdg=GRIDWIDTH_M*GRIDWIDTH_M/GRAV !constant used in loops
call Code_timer(tim_before)
if(firstcall)then
if(NPROCY>2.and.me==0.and.Pole_Singular>1)then
write(*,*)&
'COMMENT: Advection routine will work faster if NDY = 2 (or 1)'
elseif(NPROCY>1.and.me==0.and.Pole_Singular==1)then
write(*,*)&
'COMMENT: Advection routine will work faster if NDY = 1'
end if
!Overwrite the coefficients for vertical advection with Eta-adapted values
call vgrid_Eta
if(.not.allocated(loc_frac_1d))allocate(loc_frac_1d(0,1,1,1))!to avoid error messages
if(ZERO_ORDER_ADVEC)then
hor_adv0th = .true.
vert_adv0th = .true.
if(me==0)call PrintLog("USING ZERO ORDER ADVECTION")
endif
end if
if(KCHEMTOP==2)then
xntop(:,:,:)=xn_adv(:,:,:,1)
end if
! convert from mixing ratio to concentration before advection
do k = 1,KMAX_MID
do j = 1,ljmax
do i = 1,limax
dpdeta(i,j,k) = (dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*dpdeta(i,j,k)
end do
end do
end do
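! dpdeta = dP/dEta is proportional to the air mass per unit area of layer k,
! so after this loop xn_adv carries a mass-like quantity; dividing by the
! (advected) dpdeta after the advection steps recovers a mixing ratio, as
! discussed in the notes at the top of this module.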
call Add_2timing(22,tim_after,tim_before,"advecdiff:ps")
! time-splitting is used for the physical and chemical operators.
! second-order accuracy in time is obtained by alternating the order
! of the advx and advy operators from one time-step to another.
!
! Determine timestep for horizontal advection.
!
! Courant criterion, which takes into account the mapping factor xm2:
! left face: xm2(i) u_xmj(i) dt/dx < 1 when u_xmj(i) > 0
! right face: xm2(i) |u_xmj(i-1)| dt/dx < 1 when u_xmj(i-1) < 0
!
! In the case where the flux is streaming out of the cell i from both faces,
! then the total should be < 1:
! xm2(i) |u_xmj(i-1)| dt/dx + xm2(i) u_xmj(i) dt/dx < 1 for u_xmj(i-1)<0 and u_xmj(i)>0
!
! The three conditions can be written as:
!
! max(xm2(i)*u_xmj(i)*dt/dx , 0.0) - min(xm2(i)*u_xmj(i-1)*dt/dx , 0.0) < 1
!
! or equivalently:
! dt < dx / ( max(xm2(i)*u_xmj(i) , 0.0) - min(xm2(i)*u_xmj(i-1) , 0.0) )
!
! In the case of variable cell size, dx is defined as dx(i) in these formulas.
!
! The value 1.e-30 is to ensure that we don't divide by 0 when all the velocities are 0.
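! Illustrative example (hypothetical numbers): u_xmj = 20 m/s, xm2 = 1 and
! GRIDWIDTH_M = 50000 m give dt < 50000/20 = 2500 s, so dt_advec = 1200 s
! needs only one iteration for that line.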
dth = dt_advec/GRIDWIDTH_M
xcmax=0.0
ycmax=0.0
do k=1,KMAX_MID
do j=1,ljmax
xcmax(k,j+gj0-1) = maxval( &
max(u_xmj(1:limax ,j,k,1)*xm2(1:limax,j),1.e-30) &
-min(u_xmj(0:limax-1,j,k,1)*xm2(1:limax,j),0.0 ))
end do
do i=1,limax
ycmax(k,i+gi0-1) = maxval( &
max(v_xmi(i,1:ljmax ,k,1)*xm2(i,1:ljmax),1.e-30) &
-min(v_xmi(i,0:ljmax-1,k,1)*xm2(i,1:ljmax),0.0 ))
end do
end do
CALL MPI_ALLREDUCE(MPI_IN_PLACE,xcmax,KMAX_MID*gjmax,MPI_DOUBLE_PRECISION, &
MPI_MAX,MPI_COMM_CALC,IERROR)
CALL MPI_ALLREDUCE(MPI_IN_PLACE,ycmax,KMAX_MID*gimax,MPI_DOUBLE_PRECISION, &
MPI_MAX,MPI_COMM_CALC, IERROR)
do i=1,limax
do k=1,KMAX_MID
dt_ymax(i,k)=GRIDWIDTH_M/ycmax(k,i+gi0-1)
end do
end do
do j=1,ljmax
do k=1,KMAX_MID
dt_xmax(j,k)=GRIDWIDTH_M/xcmax(k,j+gj0-1)
end do
end do
niterx=1
do k=1,KMAX_MID
do j=1,ljmax
niterx(j,k) = int(dt_advec/dt_xmax(j,k))+1
dt_x(j,k) = dt_advec/real(niterx(j,k))
!if(me==0)write(*,*)'x',me,j,k,niterx(j,k),xcmax(k,j+gj0-1)
end do
end do
do k=1,KMAX_MID
do i=1,limax
nitery(i,k) = int(dt_advec/dt_ymax(i,k))+1
dt_y(i,k) = dt_advec/real(nitery(i,k))
end do
end do
!Courant number in vertical sigma coordinates: sigmadot*dt/deltasigma
!
!Note that dhs1(k+1) denotes thickness of layer k
! and sdot(k+1) denotes sdot at the boundary between layer k and k+1
!
!flux through wall k+1: sdot(k+1) *dt/dhs1(k+1)<1 for sdot(k+1)>0
! |sdot(k+1)|*dt/dhs1(k+2)<1 for sdot(k+1)<0
!
!layer k: sdot(k+1)*dt/dhs1(k+1) + |sdot(k)|*dt/dhs1(k+1) <1 for sdot(k+1)>0 and sdot(k)<0
!
!total out of layer k: max(sdot(1:limax,1:ljmax,k+1,1),0.0)-min(sdot(1:limax,1:ljmax,k,1),0.0)
!
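! Illustrative example (hypothetical numbers): a maximum outflow Courant
! rate scmax = 1/900 s-1 gives dt_smax = 900 s, hence niters =
! int(1200/900)+1 = 2 and dt_s = 600 s when dt_advec = 1200 s.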
scmax = 1.e-30
do k = 1,KMAX_MID
sdcmax = maxval(max(Etadot(1:limax,1:ljmax,k+1,1),0.0) &
-min(Etadot(1:limax,1:ljmax,k ,1),0.0))
scmax = max(sdcmax/dhs1(k+1),scmax)
end do
CALL MPI_ALLREDUCE(MPI_IN_PLACE,scmax,1,MPI_DOUBLE_PRECISION, &
MPI_MAX,MPI_COMM_CALC,IERROR)
dt_smax = 1./scmax
42 FORMAT(A,F10.2)
if(me==0.and. firstcall.and.DEBUG_ADV)write(*,42)'dt_smax',dt_smax
niters = int(dt_advec/dt_smax)+1
dt_s = dt_advec/real(niters)
niterxys = 1
nxy=0
nxx=0
nxxmin=0
nyy=0
do k=1,KMAX_MID
do j=1,ljmax
nxy=nxy+niterx(j,k)-1
nxx=nxx+niterx(j,k)-1
if(niterx(j,k)>NITERXMAX)then
nxxmin=nxxmin+niterx(j,k)
end if
end do
do i=1,limax
nxy=nxy+nitery(i,k)-1
nyy=nyy+nitery(i,k)-1
end do
end do
if(me.eq.0)then
! write(*,43)KMAX_MID*ljmax,nxx,nxxmin,KMAX_MID*limax,nyy,niters
end if
!43 format('total iterations x, y, k: ',I4,' +',I4,' -',I4,', ',I5,' +',I3,',',I4)
! stop
call Add_2timing(17,tim_after,tim_before,"advecdiff:synchronization")
! Start xys advection loop:
iterxys = 0
do while (iterxys < niterxys)
if(mod(nstep,2) /= 0 .or. iterxys /= 0)then !start a xys sequence
iterxys = iterxys + 1
do k = 1,KMAX_MID
fac1=(dA(k)/Pref+dB(k))*xxdg
do j = lj0,lj1
if(niterx(j,k)<=NITERXMAX)then
dth = dt_x(j,k)/GRIDWIDTH_M
do iterx=1,niterx(j,k)
! send/receive in x-direction
call preadvx3(110+k+KMAX_MID*j &
,xn_adv(1,1,j,k),dpdeta(1,j,k),u_xmj(0,j,k,1)&
,xnw,xne &
,psw,pse,j,k,loc_frac_1d)
! x-direction
call advx( &
u_xmj(0,j,k,1),uw(j,k,1),ue(j,k,1) &
,xn_adv(1,1,j,k),xnw,xne &
,dpdeta(1,j,k),psw,pse &
,xm2(0,j),xmd(0,j) &
,dth,fac1,fluxx)
do i = li0,li1
if(USE_uEMEP .and. k>KMAX_MID-uEMEP%Nvert)call uemep_adv_x(fluxx,i,j,k)
dpdeta0=(dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
psi = dpdeta0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
dpdeta(i,j,k) = dpdeta0
end do
end do !iter
end if
end do !j
! end do !k horizontal (x) advection
call Add_2timing(18,tim_after,tim_before,"advecdiff:advx")
! y-direction
! do k = 1,KMAX_MID
do i = li0,li1
dth = dt_y(i,k)/GRIDWIDTH_M
do itery=1,nitery(i,k)
! send/receive in y-direction
call preadvy3(520+k &
,xn_adv(1,1,1,k),dpdeta(1,1,k),v_xmi(1,0,k,1) &
,xns, xnn &
,pss, psn,i,k,loc_frac_1d)
call advy( &
v_xmi(i,0,k,1),vs(i,k,1),vn(i,k,1) &
,xn_adv(1,i,1,k),xns,xnn &
,dpdeta(i,1,k),pss,psn &
,xm2ji(0,i),xmdji(0,i) &
,dth,fac1,fluxy)
do j = lj0,lj1
if(USE_uEMEP .and. k>KMAX_MID-uEMEP%Nvert)call uemep_adv_y(fluxy,i,j,k)
dpdeta0=(dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
psi = dpdeta0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
dpdeta(i,j,k) = dpdeta0
end do
end do !iter
end do !i
end do !k horizontal (y) advection
call Add_2timing(20,tim_after,tim_before,"advecdiff:advy")
do iters=1,niters
! perform vertical advection
do j = lj0,lj1
do i = li0,li1
if(vert_adv0th)then
call adv_vert_zero(xn_adv(1,i,j,1),dpdeta(i,j,1),Etadot(i,j,1,1),dt_s,fluxk)
else
! call adv_vert_fourth(xn_adv(1,i,j,1),dpdeta(i,j,1),Etadot(i,j,1,1),dt_s)
call advvk(xn_adv(1,i,j,1),dpdeta(i,j,1),Etadot(i,j,1,1),dt_s,fluxk)
endif
if(USE_uEMEP)then
call uemep_adv_k(fluxk,i,j)
end if
if(iters<niters .or. iterxys < niterxys)then
do k=1,KMAX_MID
dpdeta0=(dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
psi = dpdeta0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
dpdeta(i,j,k) = dpdeta0
end do
else
!advection finished for this i,j
do k=1,KMAX_MID
psi =1.0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
end do
end if
end do
end do
end do ! vertical (s) advection
call Add_2timing(21,tim_after,tim_before,"advecdiff:advvk")
else !start a yxs sequence
iterxys = iterxys + 1
do k = 1,KMAX_MID
fac1=(dA(k)/Pref+dB(k))*xxdg
do i = li0,li1
dth = dt_y(i,k)/GRIDWIDTH_M
do itery=1,nitery(i,k)
! send/receive in y-direction
call preadvy3(13000+k+KMAX_MID*itery+1000*i &
,xn_adv(1,1,1,k),dpdeta(1,1,k),v_xmi(1,0,k,1) &
,xns, xnn &
,pss, psn,i,k,loc_frac_1d)
! y-direction
call advy( &
v_xmi(i,0,k,1),vs(i,k,1),vn(i,k,1) &
,xn_adv(1,i,1,k),xns,xnn &
,dpdeta(i,1,k),pss,psn &
,xm2ji(0,i),xmdji(0,i) &
,dth,fac1,fluxy)
do j = lj0,lj1
if(USE_uEMEP .and. k>KMAX_MID-uEMEP%Nvert)call uemep_adv_y(fluxy,i,j,k)
dpdeta0=(dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
psi = dpdeta0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
dpdeta(i,j,k) = dpdeta0
end do
end do !iter
end do !i
! end do !k horizontal (y) advection
call Add_2timing(20,tim_after,tim_before,"advecdiff:preadvy,advy")
! do k = 1,KMAX_MID
do j = lj0,lj1
if(niterx(j,k)<=NITERXMAX)then
dth = dt_x(j,k)/GRIDWIDTH_M
do iterx=1,niterx(j,k)
! send/receive in x-direction
call preadvx3(21000+k+KMAX_MID*iterx+1000*j &
,xn_adv(1,1,j,k),dpdeta(1,j,k),u_xmj(0,j,k,1)&
,xnw,xne &
,psw,pse,j,k,loc_frac_1d)
! x-direction
call advx( &
u_xmj(0,j,k,1),uw(j,k,1),ue(j,k,1) &
,xn_adv(1,1,j,k),xnw,xne &
,dpdeta(1,j,k),psw,pse &
,xm2(0,j),xmd(0,j) &
,dth,fac1,fluxx)
do i = li0,li1
if(USE_uEMEP .and. k>KMAX_MID-uEMEP%Nvert)call uemep_adv_x(fluxx,i,j,k)
dpdeta0=(dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
psi = dpdeta0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
dpdeta(i,j,k) = dpdeta0
end do
end do !iter
end if
end do !j
end do !k horizontal (x) advection
call Add_2timing(18,tim_after,tim_before,"advecdiff:preadvx,advx")
do iters=1,niters
! perform vertical advection
do j = lj0,lj1
do i = li0,li1
if(vert_adv0th)then
call adv_vert_zero(xn_adv(1,i,j,1),dpdeta(i,j,1),Etadot(i,j,1,1),dt_s,fluxk)
else
! call adv_vert_fourth(xn_adv(1,i,j,1),dpdeta(i,j,1),Etadot(i,j,1,1),dt_s)
call advvk(xn_adv(1,i,j,1),dpdeta(i,j,1),Etadot(i,j,1,1),dt_s,fluxk)
endif
if(USE_uEMEP)then
call uemep_adv_k(fluxk,i,j)
end if
if(iters<niters .or. iterxys < niterxys)then
do k=1,KMAX_MID
dpdeta0=(dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
psi = dpdeta0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
dpdeta(i,j,k) = dpdeta0
end do
else
!advection finished for this i,j
do k=1,KMAX_MID
psi =1.0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
end do
end if
end do
end do
end do ! vertical (s) advection
call Add_2timing(21,tim_after,tim_before,"advecdiff:advvk")
end if ! yxs sequence
end do
if(USES%CONVECTION)then
call CheckStop(ADVEC_TYPE/=1, "ADVEC_TYPE no longer supported")
do k=1,KMAX_MID
do j = lj0,lj1
do i = li0,li1
dpdeta(i,j,k) = (dA(k)+dB(k)*ps(i,j,1))*dEta_i(k)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*dpdeta(i,j,k)
end do
end do
end do
call convection_Eta(dpdeta,dt_advec)
do k=1,KMAX_MID
do j = lj0,lj1
do i = li0,li1
psi = 1.0/max(dpdeta(i,j,k),1.0)
xn_adv(:,i,j,k) = xn_adv(:,i,j,k)*psi
end do
end do
mindpdeta = minval( dpdeta(li0:li1, lj0:lj1, k) )
if ( nWarnings < MAX_WARNINGS .and.mindpdeta<1.0) then
call datewrite("WARNING:C dpdeta < 1",k, (/ mindpdeta /) )
nWarnings = nWarnings + 1
end if
end do
end if
do k=1,KMAX_MID
do j = lj0,lj1
if(niterx(j,k)>NITERXMAX)then
!if(mex==0)write(*,*)'Simplified advection',k,j,niterx(j,k)
!simplified "advection": average the mixing ratios over all x
!average 1D
xn_advjk=0.0
isum=0
do i = li0,li1
! psi=1.0/(ps(i,j,1)-PT)
xn_advjk(:) = xn_advjk(:)+xn_adv(:,i,j,k)!*psi
isum=isum+1
end do
!sum over all processors along i direction. mex=0 collects the sum
!me = mex + mey*NPROCX
if(mex>0)then
CALL MPI_SEND(xn_advjk,8*NSPEC_ADV,MPI_BYTE, &
mey*NPROCX,100*mey+j+1000,MPI_COMM_CALC,IERROR)
!receive averages from mex=0
CALL MPI_RECV(xn_advjk,8*NSPEC_ADV,MPI_BYTE, &
mey*NPROCX,100*mey+j+3000,MPI_COMM_CALC,MPISTATUS,IERROR)
else
xn_advjktot(:) = xn_advjk(:)
isumtot=isum
do iproc=1,NPROCX-1
CALL MPI_RECV(xn_advjk,8*NSPEC_ADV,MPI_BYTE, &
iproc+mey*NPROCX,100*mey+j+1000,MPI_COMM_CALC,MPISTATUS,IERROR)
xn_advjktot(:) = xn_advjktot(:)+xn_advjk(:)
! isumtot=isumtot+isum
end do
rfac=1.0/GIMAX
xn_advjk(:) = xn_advjktot(:)*rfac
! write(*,*)'ISUM',mey,isumtot,isum,GIMAX
!send result to all processors in i direction
do iproc=1,NPROCX-1
CALL MPI_SEND(xn_advjk,8*NSPEC_ADV,MPI_BYTE, &
iproc+mey*NPROCX,100*mey+j+3000,MPI_COMM_CALC,IERROR)
end do
end if
do i = li0,li1
xn_adv(:,i,j,k)= xn_advjk(:)
end do
end if
end do
end do
call Add_2timing(22,tim_after,tim_before,"advecdiff:ps")
!________ vertical diffusion ______
ndiff = 1 !number of vertical diffusion iterations (the larger the better)
do k = 2,KMAX_MID
ds3(k) = dt_advec*dhs1i(k)*dhs2i(k)
ds4(k) = dt_advec*dhs1i(k+1)*dhs2i(k)
end do
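! ds3(k) and ds4(k) are the geometric factors dt/(dz*dz') built from a layer
! thickness (1/dhs1i) and a centre-to-centre spacing (1/dhs2i); they
! multiply the eddy diffusivity EtaKz inside vertdiffn (our reading of the
! formulas; vertdiffn itself lives outside this module).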
! sum is conserved under vertical diffusion
! sum = 0.
! do k=1,KMAX_MID
! sum = sum + xn_adv(1,4,4,k)/dhs1i(k+1)
! end do
! write(*,*)'sum before diffusion ',me,sum
do j = lj0,lj1
do i = li0,li1
if(USE_uEMEP)call uemep_diff(i,j,ds3,ds4,ndiff)
call vertdiffn(xn_adv(1,i,j,1),NSPEC_ADV,LIMAX*LJMAX,1,EtaKz(i,j,1,1),ds3,ds4,ndiff)
end do
end do
! sum = 0.
! do k=1,KMAX_MID
! sum = sum + xn_adv(1,4,4,k)/dhs1i(k+1)
! end do
! write(*,*)'sum after diffusion ',me,sum
call Add_2timing(19,tim_after,tim_before,"advecdiff:diffusion")
if(lj0.ne.1)then
do k=KCHEMTOP,KMAX_MID
do i = 1,limax
xn_adv(:,i,1,k) = xn_adv(:,i,1,k)/((dA(k)+dB(k)*ps(i,1,1))*dEta_i(k))
end do
end do
end if
if(li0.ne.1)then
do k=KCHEMTOP,KMAX_MID
do j=lj0,lj1
xn_adv(:,1,j,k) = xn_adv(:,1,j,k)/((dA(k)+dB(k)*ps(1,j,1))*dEta_i(k))
end do
end do
end if
if(li1.ne.limax)then
do k=KCHEMTOP,KMAX_MID
do j=lj0,lj1
xn_adv(:,limax,j,k) = xn_adv(:,limax,j,k)/((dA(k)+dB(k)*ps(limax,j,1))*dEta_i(k))
end do
end do
end if
if(lj1.ne.ljmax)then
do k=KCHEMTOP,KMAX_MID
do i = 1,limax
xn_adv(:,i,ljmax,k) = xn_adv(:,i,ljmax,k)/((dA(k)+dB(k)*ps(i,ljmax,1))*dEta_i(k))
end do
end do
end if
if(KCHEMTOP==2)then
! since xn_adv is changed, this corresponds to a flux into or
! out of the system:
do i = li0,li1
do j = lj0,lj1
where(xn_adv(:,i,j,1) .gt. xntop(:,i,j))
fluxout_top(:) = fluxout_top(:) + &
(xn_adv(:,i,j,1)-xntop(:,i,j))*(dA(1)+dB(1)*ps(i,j,1))*xxdg*xmd(i,j)
elsewhere
fluxin_top(:) = fluxin_top(:) + &
(xntop(:,i,j)-xn_adv(:,i,j,1))*(dA(1)+dB(1)*ps(i,j,1))*xxdg*xmd(i,j)
end where
end do
end do
xn_adv(:,:,:,1) = xntop(:,:,:)
end if
firstcall=.false.
return
end subroutine advecdiff_Eta
! <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
subroutine vgrid
!
! inclusion of the variable grid spacing when interpolating the
! polynomial is done by introducing new local coordinates, cor.
!
!
! modified by pw january 2002: alfnew is modified such that
! a Courant number of one corresponds exactly to emptying a cell.
! (small effects on results: less than 1%)
!
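! In the notation of the code below, for layer k:
!   cor = (sigma - sigma_mid(k)) * dhs1i(k)
! so corl1(k) and corl2(k) are the cell faces of layer k in these local
! units, and the alfa1/alfa2 coefficients are the mean values of cor and
! cor**2 over the neighbouring intervals (a reading of the formulas below).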
use GridValues_mod, only : sigma_bnd,sigma_mid
implicit none
integer k
real cor1, cor2, dcorl
real hscor1(KMAX_BND+2),hscor2(KMAX_MID+2)
real alfa1(NADVS), alfa2(NADVS), dei
real corl1(KMAX_BND), corl2(KMAX_BND)
real alf(9,2:KMAX_BND)
do k=1,KMAX_MID
hscor1(k+1) = sigma_bnd(k)
hscor2(k+1) = sigma_mid(k)
end do
hscor1(KMAX_BND+1) = sigma_bnd(KMAX_BND)
hscor1(1) = - sigma_bnd(2)
hscor1(KMAX_BND+2) = 2.*sigma_bnd(KMAX_BND) - sigma_bnd(KMAX_BND-1)
hscor2(1) = 2.*sigma_mid(1) - sigma_mid(2)
hscor2(KMAX_MID+2) = 2.*sigma_mid(KMAX_MID) - sigma_mid(KMAX_MID-1)
do k=1,KMAX_BND
dhs1(k) = hscor1(k+1) - hscor1(k)
dhs1i(k) = 1./dhs1(k)
dhs2i(k) = 1./(hscor2(k+1) - hscor2(k))
end do
do k=2,KMAX_BND
corl1(k) = (hscor1(k) - hscor2(k))*dhs1i(k)
corl2(k) = (hscor1(k+1) - hscor2(k))*dhs1i(k)
dcorl = corl2(k) - corl1(k)
alfa1(NADVS-1) = (corl2(k)**2 - corl1(k)**2)/(2.*dcorl)
alfa2(NADVS-1) = (corl2(k)**3 - corl1(k)**3)/(3.*dcorl)
cor1 = (hscor1(k-1) - hscor2(k))*dhs1i(k)
! cor2 = (hscor1(k) - hscor2(k))*dhs1i(k)
dcorl = corl1(k) - cor1
alfa1(NADVS-2) = (corl1(k)**2 - cor1**2)/(2.*dcorl)
alfa2(NADVS-2) = (corl1(k)**3 - cor1**3)/(3.*dcorl)
! cor1 = (hscor1(k+1) - hscor2(k))*dhs1i(k)
cor2 = (hscor1(k+2) - hscor2(k))*dhs1i(k)
dcorl = cor2 - corl2(k)
alfa1(NADVS) = (cor2**2 - corl2(k)**2)/(2.*dcorl)
alfa2(NADVS) = (cor2**3 - corl2(k)**3)/(3.*dcorl)
dei = alfa1(NADVS-1)*alfa2(NADVS) &
- alfa1(NADVS) *alfa2(NADVS-1) &
- alfa1(NADVS-2)*(alfa2(NADVS)-alfa2(NADVS-1)) &
+ alfa2(NADVS-2)*(alfa1(NADVS)-alfa1(NADVS-1))
dei = 1./dei
alf(1,k) = dei*(alfa1(NADVS-1)*alfa2(NADVS) &
-alfa1(NADVS) *alfa2(NADVS-1))
alf(4,k) = dei*(alfa2(NADVS-2)*alfa1(NADVS) &
-alfa2(NADVS) *alfa1(NADVS-2))
alf(7,k) = dei*(alfa2(NADVS-1)*alfa1(NADVS-2) &
-alfa2(NADVS-2)*alfa1(NADVS-1))
alf(2,k) = dei*(alfa2(NADVS-1)-alfa2(NADVS)) /2.
alf(5,k) = dei*(alfa2(NADVS) -alfa2(NADVS-2))/2.
alf(8,k) = dei*(alfa2(NADVS-2)-alfa2(NADVS-1))/2.
alf(3,k) = dei*(alfa1(NADVS) -alfa1(NADVS-1))/3.
alf(6,k) = dei*(alfa1(NADVS-2)-alfa1(NADVS)) /3.
alf(9,k) = dei*(alfa1(NADVS-1)-alfa1(NADVS-2))/3.
end do
do k=2,KMAX_MID
alfnew(1,k,0) = alf(1,k) + 2.*alf(2,k)*corl2(k) &
+ 3.*alf(3,k)*corl2(k)*corl2(k)
alfnew(1,k,1) = -(alf(1,k+1) + 2.*alf(2,k+1)*corl1(k+1) &
+ 3.*alf(3,k+1)*corl1(k+1)*corl1(k+1))
alfnew(4,k,0) = alf(4,k) + 2.*alf(5,k)*corl2(k) &
+ 3.*alf(6,k)*corl2(k)*corl2(k)
alfnew(4,k,1) = -(alf(4,k+1) + 2.*alf(5,k+1)*corl1(k+1) &
+ 3.*alf(6,k+1)*corl1(k+1)*corl1(k+1))
alfnew(7,k,0) = alf(7,k) + 2.*alf(8,k)*corl2(k) &
+ 3.*alf(9,k)*corl2(k)*corl2(k)
alfnew(7,k,1) = -(alf(7,k+1) + 2.*alf(8,k+1)*corl1(k+1) &
+ 3.*alf(9,k+1)*corl1(k+1)*corl1(k+1))
!pw alfnew(2,k,0) = -(alf(2,k) + 3.*alf(3,k) *corl2(k)) *dhs2i(k)
! alfnew(2,k,1) = (alf(2,k+1) + 3.*alf(3,k+1)*corl1(k+1))*dhs2i(k)
alfnew(2,k,0) = -(alf(2,k) + 3.*alf(3,k) *corl2(k)) *dhs1i(k)
alfnew(2,k,1) = (alf(2,k+1) + 3.*alf(3,k+1)*corl1(k+1))*dhs1i(k+1)
!pw alfnew(5,k,0) = -(alf(5,k) + 3.*alf(6,k) *corl2(k)) *dhs2i(k)
! alfnew(5,k,1) = (alf(5,k+1) + 3.*alf(6,k+1)*corl1(k+1))*dhs2i(k)
alfnew(5,k,0) = -(alf(5,k) + 3.*alf(6,k) *corl2(k)) *dhs1i(k)
alfnew(5,k,1) = (alf(5,k+1) + 3.*alf(6,k+1)*corl1(k+1))*dhs1i(k+1)
!pw alfnew(8,k,0) = -(alf(8,k) + 3.*alf(9,k) *corl2(k)) *dhs2i(k)
! alfnew(8,k,1) = (alf(8,k+1) + 3.*alf(9,k+1)*corl1(k+1))*dhs2i(k)
alfnew(8,k,0) = -(alf(8,k) + 3.*alf(9,k) *corl2(k)) *dhs1i(k)
alfnew(8,k,1) = (alf(8,k+1) + 3.*alf(9,k+1)*corl1(k+1))*dhs1i(k+1)
!pw alfnew(3,k,0) = alf(3,k) *dhs2i(k)*dhs2i(k)
! alfnew(3,k,1) = -alf(3,k+1)*dhs2i(k)*dhs2i(k)
! alfnew(6,k,0) = alf(6,k) *dhs2i(k)*dhs2i(k)
! alfnew(6,k,1) = -alf(6,k+1)*dhs2i(k)*dhs2i(k)
! alfnew(9,k,0) = alf(9,k) *dhs2i(k)*dhs2i(k)
! alfnew(9,k,1) = -alf(9,k+1)*dhs2i(k)*dhs2i(k)
alfnew(3,k,0) = alf(3,k) *dhs1i(k) *dhs1i(k)
alfnew(3,k,1) = -alf(3,k+1)*dhs1i(k+1)*dhs1i(k+1)
alfnew(6,k,0) = alf(6,k) *dhs1i(k) *dhs1i(k)
alfnew(6,k,1) = -alf(6,k+1)*dhs1i(k+1)*dhs1i(k+1)
alfnew(9,k,0) = alf(9,k) *dhs1i(k) *dhs1i(k)
alfnew(9,k,1) = -alf(9,k+1)*dhs1i(k+1)*dhs1i(k+1)
end do
alfbegnew(1) = alfnew(1,2,0)+alfnew(4,2,0)
alfbegnew(2) = alfnew(2,2,0)+alfnew(5,2,0)
alfbegnew(3) = alfnew(3,2,0)+alfnew(6,2,0)
alfendnew(1) = alfnew(4,KMAX_MID,1)+alfnew(7,KMAX_MID,1)
alfendnew(2) = alfnew(5,KMAX_MID,1)+alfnew(8,KMAX_MID,1)
alfendnew(3) = alfnew(6,KMAX_MID,1)+alfnew(9,KMAX_MID,1)
end subroutine vgrid