[WAM-IPE] WAM-IPE r90097: commit changes: 1) bug fix for the adding the surfa...

Samuel.Trahan at noaa.gov Samuel.Trahan at noaa.gov
Wed Mar 22 21:13:08 UTC 2017


Friendly WAM-IPE developers,

This is an automated email about a WAM-IPE commit.

Project: WAM-IPE
URL: https://svnemc.ncep.noaa.gov/projects/gsm/branches/WAM-IPE/milestone4
Revision: 90097
Author:   weiyu.yang at noaa.gov
Date:     2017-03-22T20:59:19.375552Z
Message:
commit changes: 1) bug fix for adding the surface height twice; 2) add f107_kp_data_size for reading the real-time data; 3) add a routine that outputs the WAM-IPE coupling fields for figures, comparisons, and diagnostics; 4) bug fix in treadeo.io.f (does not affect results).
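
Regarding item 1: in the get_w_z.f hunk below, phis is changed from g0re*zs/(re+zs) (where g0re is presumably g0*re) to g0*zs*(re/(re+zs)-1), i.e. the same geopotential with the g0*zs term pre-removed, because g0*zs is added back later when zzg is formed. A minimal standalone sketch that checks this algebra (not part of the commit; the g0, re and zs values are only illustrative):

  program check_phis_fix
    ! Verify that the new phis expression plus the g0*zs term added later
    ! equals the original geopotential g0*re*zs/(re+zs), so the surface
    ! height is no longer counted twice.
    implicit none
    real(8), parameter :: g0 = 9.80665d0   ! surface gravity (illustrative)
    real(8), parameter :: re = 6.3712d6    ! Earth radius, m (illustrative)
    real(8), parameter :: zs = 1500.0d0    ! surface height, m (illustrative)
    real(8) :: phis_old, phis_new

    phis_old = g0*re*zs/(re + zs)            ! expression before the fix
    phis_new = g0*zs*(re/(re + zs) - 1.0d0)  ! expression after the fix

    print *, 'residual = ', (phis_new + g0*zs) - phis_old   ! ~ 0
  end program check_phis_fix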


See attached file for full differences.


Index: checkout/phys/compns_physics.f
===================================================================
--- checkout/phys/compns_physics.f	(revision 90054)
+++ checkout/phys/compns_physics.f	(revision 90097)
@@ -119,7 +119,7 @@
 !cmy mpi_def holds liope
       use mpi_def, only : liope
       use wam_f107_kp_mod, ONLY: f107_kp_size, f107_kp_interval,
-     &                           f107_kp_skip_size
+     &                           f107_kp_skip_size, f107_kp_data_size
 !
 !VAY NEMGSM-vwersion  "f107_kp_data_size"  check with SWPC-fst-datafiles
 !
@@ -155,6 +155,7 @@
      & isol, ico2, ialb, iems, iaer, iovr_sw, iovr_lw,ictm,
      & isubc_lw, isubc_sw, fdaer, lsidea, 
      & f107_kp_size, f107_kp_interval,f107_kp_skip_size,
+     & f107_kp_data_size,
      & ncw, crtrh,old_monin,flgmin,cnvgwd,cgwf,prslrd0,ral_ts,fixtrc,
 !    & ncw, crtrh,old_monin,flgmin,gfsio_in,gfsio_out,cnvgwd,
      & ccwf,shal_cnv,imfshalcnv,imfdeepcnv,
@@ -245,6 +246,7 @@
       lsidea           = .false.
       f107_kp_size     = 56
       f107_kp_skip_size= 0
+      f107_kp_data_size= 56
       f107_kp_interval = 10800
 
 ! Add: a2oi_out, cplflx & ngrid_a2oi
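
Regarding item 2: the compns_physics.f hunks above add f107_kp_data_size to the wam_f107_kp_mod import and to what appears to be the physics namelist declaration, with a default of 56 (matching f107_kp_size) alongside the existing 10800-second (3-hour) f107_kp_interval. A sketch of how the new entry might be set in a namelist input file; the group name nam_wam_f107_kp is a hypothetical placeholder, not taken from this commit:

   &nam_wam_f107_kp              ! hypothetical group name, illustration only
     f107_kp_size      = 56,
     f107_kp_data_size = 56,     ! new in r90097; size of the real-time data set
     f107_kp_skip_size = 0,
     f107_kp_interval  = 10800,  ! seconds (3 hours)
   /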
Index: checkout/dyn/grid_collect_ipe.f
===================================================================
--- checkout/dyn/grid_collect_ipe.f	(nonexistent)
+++ checkout/dyn/grid_collect_ipe.f	(revision 90097)
@@ -0,0 +1,240 @@
+      SUBROUTINE grid_collect_ipe(wwg,zzg,uug,vvg,ttg,rqg,n2g,
+     &                   global_lats_a,lonsperlat, lats_nodes_a, kdt)
+!!
+!! Revision history:
+!  2007           Henry Juang, original code
+!  2008           Jun Wang  modified buff for write grid component
+!  Nov 23 2009    Sarah Lu, comment out 4D tracer
+!  Sep 08 2010    Jun Wang  change gfsio to nemsio
+!  Dec 16 2010    Jun Wang  change to nemsio library
+!  Feb 20 2011    Hann-Ming Henry Juang add code for NDSL
+!  Sep 24 2014    S Moorthi - some cleanup and optimization
+!  Feb 04 2015    S. Moorthi - threading and optimization
+!  May 17 2016    Weiyu Yang - modified from grid_collect.f for WAM-IPE
+!                              coupling outputs.
+!
+
+      use gfs_dyn_resol_def
+      use gfs_dyn_layout1
+      use gfs_dyn_mpi_def
+
+      implicit none
+
+      integer, dimension(latg) :: global_lats_a, lonsperlat
+      integer, dimension(nodes_comp) :: lats_nodes_a
+!
+      real(kind=kind_grid), dimension(lonf,lats_node_a,levs) ::  uug,vvg
+     &,                                                          ttg,wwg
+     &,                                                          zzg,n2g
+      real(kind=kind_grid), dimension(lonf,lats_node_a,levh) ::  rqg
+      real(kind=kind_io4), dimension(:, :, :), allocatable :: 
+     &                                              buff_mult_pieceg_ipe
+!
+      real(kind=kind_io8), dimension(lonf,lats_node_a) :: buffo, buffi
+      integer, dimension(lonf,lats_node_a)             :: kmsk
+      integer i, j, k, ngrids_gg_ipe, kdt
+!
+      ngrids_gg_ipe = 6*levs+levh
+
+      if(.not. allocated(buff_mult_pieceg_ipe)) then
+         allocate(buff_mult_pieceg_ipe(lonf,lats_node_a,ngrids_gg_ipe))
+      endif
+!
+      do k=1,levs
+!$omp parallel do private(i,j)
+        do j=1,lats_node_a
+          do i=1,lonf
+            buffi(i,j) = wwg(i,j,k)
+          enddo
+        enddo
+        CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                   buff_mult_pieceg_ipe(1,1,k) )
+!      write(0,*)'in grid collect, buff_wwg=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,k))
+      enddo
+!!
+      do k=1,levs
+!$omp parallel do private(i,j)
+        do j=1,lats_node_a
+          do i=1,lonf
+            buffi(i,j) = zzg(i,j,k)
+          enddo
+        enddo
+        CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                   buff_mult_pieceg_ipe(1,1,levs+k) )
+!      write(0,*)'in grid collect, buff_zzg=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,levs+k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,levs+k))
+      enddo
+!
+      do k=1,levs
+!$omp parallel do private(i,j)
+        do j=1,lats_node_a
+          do i=1,lonf
+            buffi(i,j) = uug(i,j,k)
+          enddo
+        enddo
+        CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                   buff_mult_pieceg_ipe(1,1,2*levs+k) )
+!      write(0,*)'in grid collect, buff_uug=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,2*levs+k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,2*levs+k))
+      enddo
+!
+      do k=1,levs
+!$omp parallel do private(i,j)
+        do j=1,lats_node_a
+          do i=1,lonf
+            buffi(i,j) = vvg(i,j,k)
+          enddo
+        enddo
+        CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                   buff_mult_pieceg_ipe(1,1,3*levs+k) )
+!      write(0,*)'in grid collect, buff_vvg=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,3*levs+k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,3*levs+k))
+      enddo
+!
+      do k=1,levs
+!$omp parallel do private(i,j)
+        do j=1,lats_node_a
+          do i=1,lonf
+            buffi(i,j) = ttg(i,j,k)
+          enddo
+        enddo
+        CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                   buff_mult_pieceg_ipe(1,1,4*levs+k) )
+!      write(0,*)'in grid collect, buff_ttg=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,4*levs+k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,4*levs+k))
+      enddo
+!
+      if (levh > 0) then
+        do k=1,levh
+!$omp parallel do private(i,j)
+          do j=1,lats_node_a
+            do i=1,lonf
+              buffi(i,j) = rqg(i,j,k)
+!             if (abs(buffi(i,j)) < 1.0e-15) buffi(i,j) = 0.0
+            enddo
+          enddo
+          CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                     buff_mult_pieceg_ipe(1,1,5*levs+k) )
+
+!      write(0,*)'in grid collect, buff_rqg=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,5*levs+k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,5*levs+k))
+        enddo
+      endif
+!
+      do k=1,levs
+!$omp parallel do private(i,j)
+        do j=1,lats_node_a
+          do i=1,lonf
+            buffi(i,j) = n2g(i,j,k)
+          enddo
+        enddo
+        CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+     &                   buff_mult_pieceg_ipe(1,1,5*levs+levh+k) )
+!      write(0,*)'in grid collect, buff_n2g=',' me=',me,
+!    & maxval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,5*levs+levh+k)),
+!    & minval(buff_mult_pieceg_ipe(1:lonf,1:lats_node_a,5*levs+levh+k))
+      enddo
+
+      CALL atmgg_move_ipe(buff_mult_pieceg_ipe,ngrids_gg_ipe, kdt,
+     &                    global_lats_a, lats_nodes_a)
+
+      return
+      end
+
+      subroutine atmgg_move_ipe(buff_mult_pieceg_ipe,ngrids_gg_ipe, kdt,
+     &                          global_lats_a, lats_nodes_a)
+c
+c***********************************************************************
+c
+      use gfs_dyn_resol_def
+      use gfs_dyn_write_state
+      use gfs_dyn_layout1
+      use gfs_dyn_mpi_def
+      implicit none
+!
+      integer ngrids_gg_ipe
+      real(kind=kind_io4), dimension(lonf,lats_node_a,ngrids_gg_ipe)
+     &                                           :: buff_mult_pieceg_ipe
+      real(kind=kind_io4), dimension(lonf,lats_node_a_max,ngrids_gg_ipe)
+     &                                           :: grid_node
+      real(kind=kind_io4),dimension(:,:,:),  allocatable :: buff_final
+      real(kind=kind_io4),dimension(:,:,:,:),allocatable :: grid_nodes
+      integer, dimension(latg)       :: global_lats_a
+      integer, dimension(nodes_comp) :: lats_nodes_a
+      integer ioproc, kdt, lat, ipt_lats
+      integer j,k,i,ierr, node
+      integer lenrec
+!
+      ioproc = nodes_comp - 1
+      DO k = 1, ngrids_gg_ipe
+        DO j = 1, lats_node_a
+          DO i = 1, lonf
+            grid_node(i, j, k) = buff_mult_pieceg_ipe(i, j, k)
+          END DO
+        END DO
+      END DO
+!!
+      if(me == ioproc) then
+        if(.not. allocated(buff_final)) then
+           allocate(buff_final(lonf, latg, ngrids_gg_ipe))
+        endif
+        if(.not. allocated(grid_nodes)) then
+           allocate(grid_nodes(lonf, lats_node_a_max, ngrids_gg_ipe, 
+     &                         nodes_comp))
+        endif
+      else
+        if(.not. allocated(grid_nodes)) then
+           allocate(grid_nodes(1, 1, 1, 1))
+        endif
+      endif
+!
+      if(nodes_comp>1) then
+        lenrec = lonf * lats_node_a_max * ngrids_gg_ipe
+!
+        call mpi_gather( grid_node , lenrec, mpi_real4,
+     x                 grid_nodes, lenrec, mpi_real4,
+     x                 ioproc, MPI_COMM_ALL, ierr)
+      else
+        grid_nodes(:,:,:,1)=grid_node(:,:,:)
+      endif
+
+      IF(me == ioproc) THEN
+        DO k = 1, ngrids_gg_ipe
+          ipt_lats = 1
+          DO node = 1, nodes_comp
+            DO j = 1, lats_nodes_a(node)
+              lat = global_lats_a(ipt_lats-1+j)
+              DO i = 1, lonf
+                buff_final(i, lat, k) = grid_nodes(i, j, k, node)
+              END DO
+            END DO
+            ipt_lats = ipt_lats+lats_nodes_a(node)
+          END DO
+        END DO
+      END IF
+
+      call mpi_barrier(mpi_comm_all,ierr)
+      deallocate(grid_nodes)
+
+! Write out the wwg, zzg, uug, vvg, ttg, rqg, n2g full grid fields to
+! disk.
+!--------------------------------------------------------------------
+! buff_final contains wwg, zzg, uug, vvg, ttg, rqg, n2g.
+!-------------------------------------------------------
+      if(me == ioproc) then
+        write(178) kdt, lonf, latg, ngrids_gg_ipe
+        print*, 'kdt, lonf, latg, ngrids_gg_ipe=',kdt, lonf, latg,
+     &           ngrids_gg_ipe
+        write(178) buff_final
+        deallocate(buff_final)
+      end if
+!!
+      return
+      end
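
Regarding item 3: grid_collect_ipe gathers the full-grid wwg, zzg, uug, vvg, ttg, rqg and n2g fields onto the I/O task, and atmgg_move_ipe writes them to unit 178 as two unformatted records per output time (a header with kdt, lonf, latg, ngrids_gg_ipe, then the buff_final array). A minimal standalone reader sketch for such a file, assuming default unformatted sequential I/O, 4-byte reals, and the compiler-default file name fort.178 (none of which are fixed by the commit itself):

  program read_wamipe_coupling_fields
    ! Read back the diagnostic fields written by atmgg_move_ipe on unit 178.
    ! Field order along the third dimension: wwg (levs levels), zzg, uug, vvg,
    ! ttg, rqg (levh levels), n2g.
    implicit none
    integer :: kdt, lonf, latg, ngrids_gg_ipe, ios
    real(4), allocatable :: buff_final(:,:,:)

    open(178, file='fort.178', form='unformatted', status='old')
    do
      read(178, iostat=ios) kdt, lonf, latg, ngrids_gg_ipe
      if (ios /= 0) exit                   ! end of file: no more output times
      if (.not. allocated(buff_final)) allocate(buff_final(lonf,latg,ngrids_gg_ipe))
      read(178) buff_final                 ! one 3-D record per output time
      print *, 'kdt =', kdt, ' lonf x latg =', lonf, latg, &
               ' ngrids_gg_ipe =', ngrids_gg_ipe
    end do
    close(178)
  end program read_wamipe_coupling_fields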
Index: checkout/dyn/do_dynamics_two_loop.f
===================================================================
--- checkout/dyn/do_dynamics_two_loop.f	(revision 90054)
+++ checkout/dyn/do_dynamics_two_loop.f	(revision 90097)
@@ -1058,7 +1058,8 @@
      &               LATS_NODES_A,GLOBAL_LATS_A,LONSPERLAT,
      &               EPSE,EPSO,EPSEDN,EPSODN,
      &               PLNEV_A,PLNOD_A,PLNEW_A,PLNOW_A,
-     &               PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD)
+!     &               PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD)
+     &               PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD, kdt)
 
         call fillWAMFields(uug, vvg, wwg, ttg, zzg, n2g, rqg, 
      &              ipt_lats_node_a,global_lats_a)
Index: checkout/dyn/treadeo.io.f
===================================================================
--- checkout/dyn/treadeo.io.f	(revision 90054)
+++ checkout/dyn/treadeo.io.f	(revision 90097)
@@ -198,7 +198,10 @@
          enddo								! hmhj
         endif								! hmhj
         ck5p(levp1)=ck5(levp1)						! hmhj
-        do k=1,levp1							! hmhj
+! modify.Weiyu.
+!--------------
+!        do k=1,levp1							! hmhj
+        do k=1,levp1 - 1							! hmhj
           ck5p(k)=ck5(k)*(teref(k)/thref(k))**rkapi			! hmhj
         enddo								! hmhj
         if( me.eq.0 ) then						! hmhj
Index: checkout/dyn/get_w_z.f
===================================================================
--- checkout/dyn/get_w_z.f	(revision 90054)
+++ checkout/dyn/get_w_z.f	(revision 90097)
@@ -19,8 +19,8 @@
      &                 LATS_NODES_A,GLOBAL_LATS_A,LONSPERLAT,
      &                 EPSE,EPSO,EPSEDN,EPSODN,
      &                 PLNEV_A,PLNOD_A,PLNEW_A,PLNOW_A,
-     &                 PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD)
-!     &                 PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD, kdt)
+!     &                 PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD)
+     &                 PDDEV_A,PDDOD_A,SNNP1EV,SNNP1OD, kdt)
 !!
 ! Program History Log:
 ! Mar 2015    Henry Juang	use existed variables to get w hydrostatically
@@ -62,8 +62,8 @@
       REAL(KIND=KIND_EVOD) TRIO_LS(LEN_TRIO_LS,2,LOTls)
       REAL(KIND=KIND_GRID) GRID_GR(lonf*lats_node_a_max,lotgr)
 
-      integer          ls_node(ls_dim,3)
-!      integer          ls_node(ls_dim,3), kdt
+!      integer          ls_node(ls_dim,3)
+      integer          ls_node(ls_dim,3), kdt
 !
       INTEGER          LS_NODES(LS_DIM,NODES)
       INTEGER          MAX_LS_NODES   (NODES)
@@ -213,7 +213,11 @@
                 ilan=i+jlonf
                 syn_gr_s_z(i,lan)=grid_gr(ilan,g_gz)
                 zs=syn_gr_s_z(i,lan)
-                phis(i,lan)=g0re*zs/(re+zs)
+! Since later when calculate zzg, the anl_gr_a_z is phi/g+g0*zs, to
+! avoid add zs twice, need to pre-remove the term g0*zs.  Weiyu.
+!-------------------------------------------------------------------
+!                phis(i,lan)=g0re*zs/(re+zs)
+                phis(i,lan)=g0*zs*(re / (re+zs)-1)
               enddo
             else !g(z)=g0
               do i=lon,lon+njeff-1
@@ -321,9 +325,9 @@
              mmm = mmm + rqg(i, lan, k+levs4) / con_amo2   ! add    O2 weight.
              mmm = mmm + n2g(i, lan, k)       / 28.0       ! add    N2 weight.
              mmm = 1.0 / mmm                               ! final mmm.
-             IF(i == 4. and. lan == 1 .and.me == 0) THEN
-               print*, 'in get_w_z, i, lan, k, mmm=',i, lan, k, mmm
-             END IF
+!             IF(i == 4. and. lan == 1 .and.me == 0) THEN
+!               print*, 'in get_w_z, i, lan, k, mmm=',i, lan, k, mmm
+!             END IF
 
              rmdo1 = mmm * avgdr         ! Md * average number / R.
              rmdo2 = rmdo1 / con_amo2    ! Md * average number / R / Mi_o2.
@@ -445,10 +449,16 @@
 !      print*, 'In get_w_z, o2g = ', rqg(2, 2, 749), rqg(4, 1,  749)
 !      print*, 'In get_w_z, n2g = ', n2g(2, 2, 149), n2g(4, 1,  149)
 !
+! The following is only to output for making figures and comparisons. WY.
+!------------------------------------------------------------------------
+
 ! For 3 minutes time step, output field every 6 hours.
 !-----------------------------------------------------
 !      IF(MOD(kdt, 120) == 0)  CALL grid_collect_ipe(wwg,zzg,uug,vvg,
-!     &                        ttg,rqg,n2g,global_lats_a,lonsperlat, 
+! For 3 minutes time step, output field every 240 hours.
+!-------------------------------------------------------
+!      IF(MOD(kdt, 4800) == 0)  CALL grid_collect_ipe(wwg,zzg,uug,vvg,
+!     &                        ttg,rqg,n2g,global_lats_a,lonsperlat,
 !     &                        lats_nodes_a, kdt)
 
       END subroutine get_w_z
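
Regarding the get_w_z.f changes as a whole: kdt is now passed into get_w_z (see the matching do_dynamics_two_loop.f hunk above), the phis change discussed after the commit message removes the double-counted surface height, and the commented-out grid_collect_ipe call documents the intended output cadence: with the 3-minute dynamics step noted in the comments, MOD(kdt, 120) == 0 fires every 120 x 3 min = 360 min = 6 hours, while MOD(kdt, 4800) == 0 fires every 4800 x 3 min = 14400 min = 240 hours.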
Index: checkout/dyn/makefile
===================================================================
--- checkout/dyn/makefile	(revision 90054)
+++ checkout/dyn/makefile	(revision 90097)
@@ -107,6 +107,7 @@
 	ifshuff.o \
 	shuffle_grid.o \
 	grid_collect.o \
+	grid_collect_ipe.o \
 	excha.o \
 	info.o \
 	four2grid_thread.o \

