7 USE module_driver_constants
11 #if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
12 INTEGER, PARAMETER :: max_halo_width = 6
14 INTEGER, PARAMETER :: max_halo_width = 6 ! 5
17 INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
19 INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
20 INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
21 INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
22 LOGICAL :: dm_debug_flag = .FALSE.
24 INTERFACE wrf_dm_maxval
26 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
28 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
32 INTERFACE wrf_dm_minval ! gopal's doing
34 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
36 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
43 SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
45 INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
50 IF ( MOD( P, M ) .EQ. 0 ) THEN
52 IF ( ABS(M-N) .LT. MINI &
53 .AND. M .GE. PROCMIN_M &
54 .AND. N .GE. PROCMIN_N &
62 IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
63 WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
64 CALL wrf_message ( TRIM ( wrf_err_message ) )
65 WRITE(0,*)' PROCMIN_M ', PROCMIN_M
66 WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
67 CALL wrf_message ( TRIM ( wrf_err_message ) )
68 WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
69 CALL wrf_message ( TRIM ( wrf_err_message ) )
70 WRITE( wrf_err_message , * )' P ', P
71 CALL wrf_message ( TRIM ( wrf_err_message ) )
72 WRITE( wrf_err_message , * )' MINM ', MINM
73 CALL wrf_message ( TRIM ( wrf_err_message ) )
74 WRITE( wrf_err_message , * )' MINN ', MINN
75 CALL wrf_message ( TRIM ( wrf_err_message ) )
76 CALL wrf_error_fatal ( 'module_dm: mpaspect' )
79 END SUBROUTINE MPASPECT
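! Worked example (illustrative only, not used by the code): MPASPECT scans the factor
! pairs M x N of P and keeps the pair with the smallest |M-N| that still satisfies the
! PROCMIN_M / PROCMIN_N lower bounds. For P = 24 with PROCMIN_M = PROCMIN_N = 1 it
! returns MINM = 4 (i-dim) and MINN = 6 (j-dim); a prime task count such as 13 can only
! factor as 1 x 13, which is usually a poor mesh.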
81 SUBROUTINE wrf_dm_initialize
86 INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
87 INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
89 INTEGER, DIMENSION(2) :: dims, coords
90 LOGICAL, DIMENSION(2) :: isperiodic
91 LOGICAL :: reorder_mesh
93 CALL wrf_get_dm_communicator ( local_comm )
94 CALL mpi_comm_size( local_comm, ntasks, ierr )
95 CALL nl_get_nproc_x ( 1, ntasks_x )
96 CALL nl_get_nproc_y ( 1, ntasks_y )
97 CALL nl_get_reorder_mesh( 1, reorder_mesh )
99 ! check whether the user has specified the decomposition in the namelist
100 IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
101 ! if only ntasks_x is specified then make it 1-d decomp in i
102 IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
103 ntasks_y = ntasks / ntasks_x
104 ! if only ntasks_y is specified then make it 1-d decomp in j
105 ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
106 ntasks_x = ntasks / ntasks_y
108 ! make sure user knows what they're doing
109 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
110 WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
111 CALL wrf_error_fatal ( wrf_err_message )
114 ! When neither is specified, work out the mesh with MPASPECT.
115 ! Pass ntasks_x and ntasks_y so that the number of procs in the
116 ! i-dim (ntasks_x) is less than or equal to the number in the j-dim.
117 CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
119 WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
120 CALL wrf_message( wrf_err_message )
122 CALL mpi_comm_rank( local_comm, mytask, ierr )
123 ! extra code to reorder the communicator 20051212jm
124 IF ( reorder_mesh ) THEN
125 ALLOCATE (ranks(ntasks))
126 CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
127 CALL mpi_comm_group ( local_comm2, group, ierr )
130 ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
132 CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
134 CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
136 new_local_comm = local_comm
138 ! end extra code to reorder the communicator 20051212jm
139 dims(1) = ntasks_y ! rows
140 dims(2) = ntasks_x ! columns
141 isperiodic(1) = .false.
142 isperiodic(2) = .false.
143 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
144 dims(1) = ntasks_y ! rows
145 dims(2) = ntasks_x ! columns
146 isperiodic(1) = .true.
147 isperiodic(2) = .true.
148 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
150 CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
151 CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
152 ! write(0,*)'periodic coords ',mytask, coords
154 CALL mpi_comm_rank( local_communicator, mytask, ierr )
155 CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
156 ! write(0,*)'non periodic coords ',mytask, coords
157 mytask_x = coords(2) ! col task (x)
158 mytask_y = coords(1) ! row task (y)
159 CALL nl_set_nproc_x ( 1, ntasks_x )
160 CALL nl_set_nproc_y ( 1, ntasks_y )
162 ! 20061228 set up subcommunicators for processors in X, Y coords of mesh
163 ! note that local_comm_x has all the processors in a row (X=0:nproc_x-1);
164 ! in other words, local_comm_x has all the processes with the same rank in Y
165 CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
166 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
167 CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
168 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
169 CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
170 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
172 CALL wrf_set_dm_communicator ( local_communicator )
183 END SUBROUTINE wrf_dm_initialize
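! Illustrative namelist usage (assuming a standard namelist.input): the decomposition
! chosen above can be forced from the &domains group instead of being computed by
! MPASPECT, e.g. for 24 MPI tasks
!
!   &domains
!     nproc_x = 4,     ! tasks along i (west-east)
!     nproc_y = 6,     ! tasks along j (south-north)
!   /
!
! Setting only one of the two (leaving the other at its default of -1) produces the
! 1-d decomposition handled at the top of wrf_dm_initialize.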
185 SUBROUTINE get_dm_max_halo_width( id, width )
187 INTEGER, INTENT(IN) :: id
188 INTEGER, INTENT(OUT) :: width
189 IF ( id .EQ. 1 ) THEN ! this is coarse domain
190 width = max_halo_width
192 width = max_halo_width + 3
195 END SUBROUTINE get_dm_max_halo_width
197 SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
198 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
199 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
200 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
201 sp1x , ep1x , sm1x , em1x , &
202 sp2x , ep2x , sm2x , em2x , &
203 sp3x , ep3x , sm3x , em3x , &
204 sp1y , ep1y , sm1y , em1y , &
205 sp2y , ep2y , sm2y , em2y , &
206 sp3y , ep3y , sm3y , em3y , &
209 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
213 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
214 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
215 sm1 , em1 , sm2 , em2 , sm3 , em3
216 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
217 sm1x , em1x , sm2x , em2x , sm3x , em3x
218 INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
219 sm1y , em1y , sm2y , em2y , sm3y , em3y
220 INTEGER, INTENT(IN) :: id, parent_id
221 TYPE(domain),POINTER :: parent
224 INTEGER :: ids, ide, jds, jde, kds, kde
225 INTEGER :: ims, ime, jms, jme, kms, kme
226 INTEGER :: ips, ipe, jps, jpe, kps, kpe
227 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
228 INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
229 INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
230 INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
232 INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
233 INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
234 c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
235 INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
236 c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
237 INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
238 c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
240 INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
241 INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
242 INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
244 INTEGER :: idim , jdim , kdim , rem , a, b
245 INTEGER :: i, j, ni, nj, Px, Py, P
247 INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
249 INTEGER :: idim_cd, jdim_cd, ierr
252 TYPE(domain), POINTER :: intermediate_grid
253 TYPE(domain), POINTER :: nest_grid
254 CHARACTER*256 :: mess
256 INTEGER parent_max_halo_width
257 INTEGER thisdomain_max_halo_width
259 SELECT CASE ( model_data_order )
260 ! need to finish other cases
261 CASE ( DATA_ORDER_ZXY )
262 ids = sd2 ; ide = ed2
263 jds = sd3 ; jde = ed3
264 kds = sd1 ; kde = ed1
265 CASE ( DATA_ORDER_XYZ )
266 ids = sd1 ; ide = ed1
267 jds = sd2 ; jde = ed2
268 kds = sd3 ; kde = ed3
269 CASE ( DATA_ORDER_XZY )
270 ids = sd1 ; ide = ed1
271 jds = sd3 ; jde = ed3
272 kds = sd2 ; kde = ed2
273 CASE ( DATA_ORDER_YXZ)
274 ids = sd2 ; ide = ed2
275 jds = sd1 ; jde = ed1
276 kds = sd3 ; kde = ed3
279 CALL nl_get_max_dom( 1 , max_dom )
281 CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
282 IF ( id .GT. 1 ) THEN
283 CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
286 CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
287 ids, ide, jds, jde, kds, kde, &
288 ims, ime, jms, jme, kms, kme, &
289 imsx, imex, jmsx, jmex, kmsx, kmex, &
290 imsy, imey, jmsy, jmey, kmsy, kmey, &
291 ips, ipe, jps, jpe, kps, kpe, &
292 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
293 ipsy, ipey, jpsy, jpey, kpsy, kpey )
295 ! ensure that every parent domain point has a full set of nested points under it,
296 ! even at the borders. Do this by making sure the number of nest points is a multiple of
297 ! the nesting ratio. Note that this is important mostly to the intermediate domain, which
298 ! is the subject of the scatter/gather comms with the parent
300 IF ( id .GT. 1 ) THEN
301 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
302 if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
303 if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
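! Worked example (illustrative): with parent_grid_ratio = 3 and a computed ime of 101,
! mod(101,3) = 2, so ime is bumped to 101 + 3 - 2 = 102, a multiple of the nesting
! ratio; jme is rounded up the same way.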
306 SELECT CASE ( model_data_order )
307 CASE ( DATA_ORDER_ZXY )
308 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
309 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
310 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
311 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
312 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
313 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
314 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
315 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
316 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
317 CASE ( DATA_ORDER_ZYX )
318 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
319 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
320 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
321 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
322 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
323 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
324 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
325 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
326 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
327 CASE ( DATA_ORDER_XYZ )
328 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
329 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
330 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
331 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
332 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
333 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
334 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
335 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
336 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
337 CASE ( DATA_ORDER_YXZ)
338 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
339 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
340 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
341 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
342 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
343 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
344 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
345 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
346 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
347 CASE ( DATA_ORDER_XZY )
348 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
349 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
350 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
351 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
352 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
353 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
354 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
355 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
356 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
357 CASE ( DATA_ORDER_YZX )
358 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
359 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
360 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
361 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
362 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
363 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
364 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
365 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
366 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
370 WRITE(wrf_err_message,*)'*************************************'
371 CALL wrf_message( TRIM(wrf_err_message) )
372 WRITE(wrf_err_message,*)'Parent domain'
373 CALL wrf_message( TRIM(wrf_err_message) )
374 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
375 CALL wrf_message( TRIM(wrf_err_message) )
376 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
377 CALL wrf_message( TRIM(wrf_err_message) )
378 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
379 CALL wrf_message( TRIM(wrf_err_message) )
380 WRITE(wrf_err_message,*)'*************************************'
381 CALL wrf_message( TRIM(wrf_err_message) )
384 IF ( id .GT. 1 ) THEN
386 CALL nl_get_shw( id, shw )
387 CALL nl_get_i_parent_start( id , i_parent_start )
388 CALL nl_get_j_parent_start( id , j_parent_start )
389 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
391 SELECT CASE ( model_data_order )
392 CASE ( DATA_ORDER_ZXY )
396 c_kds = sd1 ; c_kde = ed1
397 CASE ( DATA_ORDER_ZYX )
401 c_kds = sd1 ; c_kde = ed1
402 CASE ( DATA_ORDER_XYZ )
406 c_kds = sd3 ; c_kde = ed3
407 CASE ( DATA_ORDER_YXZ)
411 c_kds = sd3 ; c_kde = ed3
412 CASE ( DATA_ORDER_XZY )
416 c_kds = sd2 ; c_kde = ed2
417 CASE ( DATA_ORDER_YZX )
421 c_kds = sd2 ; c_kde = ed2
424 idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
425 jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
427 c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
428 c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
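! Worked example (illustrative, hypothetical numbers): for a nest spanning idim = 100
! i-points with parent_grid_ratio = 5, shw = 2 and i_parent_start = 31,
! idim_cd = 100/5 + 1 + 2*2 + 1 = 26, c_ids = 31 - 2 = 29 and c_ide = 29 + 26 - 1 = 54,
! i.e. the intermediate domain covers the nest's footprint on the parent grid plus the
! stencil half-width shw on either side.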
430 ! we want the intermediate domain to be decomposed
431 ! the same as the underlying nest. So try this:
434 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
437 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
438 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
439 thisdomain_max_halo_width, thisdomain_max_halo_width, ierr )
440 IF ( Px .EQ. mytask_x ) THEN
442 IF ( c_ips .EQ. -1 ) c_ips = i
445 IF ( ierr .NE. 0 ) THEN
446 CALL tfp_message(__FILE__,__LINE__)
448 IF (c_ips .EQ. -1 ) THEN
454 ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
457 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
458 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
459 thisdomain_max_halo_width, thisdomain_max_halo_width, ierr )
462 IF ( Py .EQ. mytask_y ) THEN
464 IF ( c_jps .EQ. -1 ) c_jps = j
467 IF ( ierr .NE. 0 ) THEN
468 CALL tfp_message(__FILE__,__LINE__)
470 IF (c_jps .EQ. -1 ) THEN
475 IF ( c_jps < c_jpe .AND. c_ips < c_ipe ) THEN
476 ! extend the patch dimensions out shw along edges of domain
477 IF ( mytask_x .EQ. 0 ) THEN
480 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
483 c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
484 c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
487 ! extend the patch dimensions out shw along edges of domain
488 IF ( mytask_y .EQ. 0 ) THEN
491 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
494 c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdx ) - 1
495 c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdx ) + 1
508 WRITE(wrf_err_message,*)'*************************************'
509 CALL wrf_message( TRIM(wrf_err_message) )
510 WRITE(wrf_err_message,*)'Nesting domain'
511 CALL wrf_message( TRIM(wrf_err_message) )
512 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
513 CALL wrf_message( TRIM(wrf_err_message) )
514 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
515 CALL wrf_message( TRIM(wrf_err_message) )
516 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
517 CALL wrf_message( TRIM(wrf_err_message) )
518 WRITE(wrf_err_message,*)'INTERMEDIATE domain'
519 CALL wrf_message( TRIM(wrf_err_message) )
520 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
521 CALL wrf_message( TRIM(wrf_err_message) )
522 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
523 CALL wrf_message( TRIM(wrf_err_message) )
524 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
525 CALL wrf_message( TRIM(wrf_err_message) )
526 WRITE(wrf_err_message,*)'*************************************'
527 CALL wrf_message( TRIM(wrf_err_message) )
529 SELECT CASE ( model_data_order )
530 CASE ( DATA_ORDER_ZXY )
531 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
532 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
533 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
534 CASE ( DATA_ORDER_ZYX )
535 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
536 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
537 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
538 CASE ( DATA_ORDER_XYZ )
539 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
540 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
541 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
542 CASE ( DATA_ORDER_YXZ)
543 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
544 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
545 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
546 CASE ( DATA_ORDER_XZY )
547 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
548 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
549 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
550 CASE ( DATA_ORDER_YZX )
551 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
552 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
553 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
556 ALLOCATE ( intermediate_grid )
557 ALLOCATE ( intermediate_grid%parents( max_parents ) )
558 ALLOCATE ( intermediate_grid%nests( max_nests ) )
560 NULLIFY( intermediate_grid%sibling )
562 NULLIFY( intermediate_grid%nests(i)%ptr )
564 NULLIFY (intermediate_grid%next)
565 NULLIFY (intermediate_grid%same_level)
566 NULLIFY (intermediate_grid%i_start)
567 NULLIFY (intermediate_grid%j_start)
568 NULLIFY (intermediate_grid%i_end)
569 NULLIFY (intermediate_grid%j_end)
570 intermediate_grid%id = id ! these must be the same. Other parts of code depend on it (see gen_comms.c)
571 intermediate_grid%num_nests = 0
572 intermediate_grid%num_siblings = 0
573 intermediate_grid%num_parents = 1
574 intermediate_grid%max_tiles = 0
575 intermediate_grid%num_tiles_spec = 0
576 CALL find_grid_by_id ( id, head_grid, nest_grid )
578 nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
579 intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
580 intermediate_grid%num_parents = 1
582 c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
583 c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
585 intermediate_grid%sm31x = c_sm1x
586 intermediate_grid%em31x = c_em1x
587 intermediate_grid%sm32x = c_sm2x
588 intermediate_grid%em32x = c_em2x
589 intermediate_grid%sm33x = c_sm3x
590 intermediate_grid%em33x = c_em3x
591 intermediate_grid%sm31y = c_sm1y
592 intermediate_grid%em31y = c_em1y
593 intermediate_grid%sm32y = c_sm2y
594 intermediate_grid%em32y = c_em2y
595 intermediate_grid%sm33y = c_sm3y
596 intermediate_grid%em33y = c_em3y
598 #if defined(SGIALTIX) && (! defined(MOVE_NESTS) )
599 ! allocate space for the intermediate domain
600 CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., & ! use same id as nest
601 c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
602 c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
603 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
604 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
606 intermediate_grid%sd31 = c_sd1
607 intermediate_grid%ed31 = c_ed1
608 intermediate_grid%sp31 = c_sp1
609 intermediate_grid%ep31 = c_ep1
610 intermediate_grid%sm31 = c_sm1
611 intermediate_grid%em31 = c_em1
612 intermediate_grid%sd32 = c_sd2
613 intermediate_grid%ed32 = c_ed2
614 intermediate_grid%sp32 = c_sp2
615 intermediate_grid%ep32 = c_ep2
616 intermediate_grid%sm32 = c_sm2
617 intermediate_grid%em32 = c_em2
618 intermediate_grid%sd33 = c_sd3
619 intermediate_grid%ed33 = c_ed3
620 intermediate_grid%sp33 = c_sp3
621 intermediate_grid%ep33 = c_ep3
622 intermediate_grid%sm33 = c_sm3
623 intermediate_grid%em33 = c_em3
625 CALL med_add_config_info_to_grid ( intermediate_grid )
627 intermediate_grid%dx = parent%dx
628 intermediate_grid%dy = parent%dy
629 intermediate_grid%dt = parent%dt
633 END SUBROUTINE patch_domain_rsl_lite
635 SUBROUTINE compute_memory_dims_rsl_lite ( &
636 id , maxhalowidth , &
638 ids, ide, jds, jde, kds, kde, &
639 ims, ime, jms, jme, kms, kme, &
640 imsx, imex, jmsx, jmex, kmsx, kmex, &
641 imsy, imey, jmsy, jmey, kmsy, kmey, &
642 ips, ipe, jps, jpe, kps, kpe, &
643 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
644 ipsy, ipey, jpsy, jpey, kpsy, kpey )
648 INTEGER, INTENT(IN) :: id , maxhalowidth
649 INTEGER, INTENT(IN) :: shw, bdx, bdy
650 INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
651 INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
652 INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
653 INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
654 INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
655 INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
656 INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
658 INTEGER Px, Py, P, i, j, k, ierr
660 #if ( ! NMM_CORE == 1 )
668 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
669 maxhalowidth, maxhalowidth, ierr )
670 IF ( Px .EQ. mytask_x ) THEN
672 IF ( ips .EQ. -1 ) ips = i
675 IF ( ierr .NE. 0 ) THEN
676 CALL tfp_message(__FILE__,__LINE__)
678 ! handle setting the memory dimensions where there are no X elements assigned to this proc
679 IF (ips .EQ. -1 ) THEN
687 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
688 maxhalowidth, maxhalowidth, ierr )
689 IF ( Py .EQ. mytask_y ) THEN
691 IF ( jps .EQ. -1 ) jps = j
694 IF ( ierr .NE. 0 ) THEN
695 CALL tfp_message(__FILE__,__LINE__)
697 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
698 IF (jps .EQ. -1 ) THEN
703 !begin: wig; 12-Mar-2008
704 ! This appears redundant with the conditionals above, but we get cases with only
705 ! one of the directions being set to "missing" when turning off extra processors.
706 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
707 IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
713 !end: wig; 12-Mar-2008
716 ! description of transpose decomposition strategy for RSL LITE. 20061231jm
718 ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
719 ! XY corresponds to the dimension of the processor mesh, lower-case xyz
720 ! corresponds to grid dimension.
724 ! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
727 ! +------------------+ <- this edge is costly; see below
729 ! The aim is to avoid all-to-all communication over the whole
730 ! communicator. Instead, when possible, use a transpose scheme that requires
731 ! all-to-all within dimensional communicators; that is, communicators
732 ! defined for the processes in a row or column of the processor mesh. Note,
733 ! however, it is not possible to create a ring of transposes between
734 ! xy-yz-xz decompositions without at least one of the edges in the ring
735 ! being fully all-to-all (in other words, one of the transpose edges must
736 ! rotate and not just transpose a plane of the model grid within the
737 ! processor mesh). The issue is then, where should we put this costly edge
738 ! in the transpose scheme we choose? To avoid being completely arbitrary,
739 ! we chose a scheme most natural for models that use parallel spectral
740 ! transforms, where the costly edge is the one that goes from the xz to
741 ! the xy decomposition. (May be implemented as just a two step transpose
744 ! Additional notational convention, below. The 'x' or 'y' appended to the
745 ! dimension start or end variable refers to which grid dimension is all
746 ! on-processor in the given decomposition. That is ipsx and ipex are the
747 ! start and end for the i-dimension in the zy decomposition where x is
748 ! on-processor. ('z' is assumed for xy decomposition and not appended to
749 ! the ips, ipe, etc. variable names).
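! Illustrative sketch of the three decompositions (hypothetical sizes): on a 4 x 2
! process mesh (ntasks_x = 4, ntasks_y = 2) for a 100 x 80 x 35 (i,j,k) grid,
!   - the primary XY decomposition keeps z on-processor: ips:ipe spans ~100/4 columns,
!     jps:jpe ~80/2 rows, kps:kpe = 1:35 ;
!   - the "x" (ZY) decomposition keeps i on-processor: ipsx:ipex = 1:100, with
!     kpsx:kpex ~35/4 levels and jpsx:jpex ~80/2 rows ;
!   - the "y" (ZX) decomposition keeps j on-processor: jpsy:jpey = 1:80, with
!     kpsy:kpey as above and ipsy:ipey ~100/2 columns.
! The all-to-alls that move data between these use local_communicator_x and
! local_communicator_y built in wrf_dm_initialize.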
758 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
759 1, maxhalowidth, ierr )
760 IF ( Px .EQ. mytask_x ) THEN
762 IF ( kpsx .EQ. -1 ) kpsx = k
765 IF ( ierr .NE. 0 ) THEN
766 CALL tfp_message(__FILE__,__LINE__)
769 ! handle the case where no levels are assigned to this process: the loop above made
770 ! no iterations. Do the same for I and J. Need to handle memory alloc below.
771 IF (kpsx .EQ. -1 ) THEN
780 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
781 1, maxhalowidth, ierr )
782 IF ( Py .EQ. mytask_y ) THEN
784 IF ( jpsx .EQ. -1 ) jpsx = j
787 IF ( ierr .NE. 0 ) THEN
788 CALL tfp_message(__FILE__,__LINE__)
790 IF (jpsx .EQ. -1 ) THEN
795 !begin: wig; 12-Mar-2008
796 ! This appears redundant with the conditionals above, but we get cases with only
797 ! one of the directions being set to "missing" when turning off extra processors.
798 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
799 IF (ipex .EQ. -1 .or. jpex .EQ. -1) THEN
805 !end: wig; 12-Mar-2008
807 ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
809 kpsy = kpsx ! same as above
810 kpey = kpex ! same as above
816 CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
817 maxhalowidth, 1, ierr ) ! x and y for proc mesh reversed
818 IF ( Py .EQ. mytask_y ) THEN
820 IF ( ipsy .EQ. -1 ) ipsy = i
823 IF ( ierr .NE. 0 ) THEN
824 CALL tfp_message(__FILE__,__LINE__)
826 IF (ipsy .EQ. -1 ) THEN
834 ! In the case of the NMM core, the domain only ever runs from ids..ide-1 and jds..jde-1, so
835 ! adjust the decomposition to reflect this. 20051020 JM
840 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
841 maxhalowidth, maxhalowidth , ierr )
842 IF ( Px .EQ. mytask_x ) THEN
844 IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
845 IF ( ips .EQ. -1 ) ips = i
848 IF ( ierr .NE. 0 ) THEN
849 CALL tfp_message(__FILE__,__LINE__)
855 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
856 maxhalowidth , maxhalowidth , ierr )
857 IF ( Py .EQ. mytask_y ) THEN
859 IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
860 IF ( jps .EQ. -1 ) jps = j
863 IF ( ierr .NE. 0 ) THEN
864 CALL tfp_message(__FILE__,__LINE__)
868 ! extend the patch dimensions out shw along edges of domain
869 IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
870 IF ( mytask_x .EQ. 0 ) THEN
874 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
878 IF ( mytask_y .EQ. 0 ) THEN
882 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
886 ENDIF !wig; 11-Mar-2008
898 ! handle setting the memory dimensions where there are no levels assigned to this proc
899 IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
903 IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
908 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
912 ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
913 ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
919 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
920 IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
928 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
932 jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
933 jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
939 ! handle setting the memory dimensions where there are no X elements assigned to this proc
940 IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
948 END SUBROUTINE compute_memory_dims_rsl_lite
950 ! internal, used below for switching the argument to MPI calls
951 ! if reals are being autopromoted to doubles in the build of WRF
952 INTEGER function getrealmpitype()
956 INTEGER rtypesize, dtypesize, ierr
957 CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
958 CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
959 IF ( RWORDSIZE .EQ. rtypesize ) THEN
960 getrealmpitype = MPI_REAL
961 ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
962 getrealmpitype = MPI_DOUBLE_PRECISION
964 CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
967 ! required dummy initialization for function that is never called
971 END FUNCTION getrealmpitype
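! Illustrative note: when WRF is built with default REALs promoted to 8 bytes (so that
! RWORDSIZE equals the size of MPI_DOUBLE_PRECISION), getrealmpitype() returns
! MPI_DOUBLE_PRECISION and the reductions below remain type-correct, e.g.
!   CALL mpi_allreduce( inval, retval, 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )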
973 REAL FUNCTION wrf_dm_max_real ( inval )
979 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )
980 wrf_dm_max_real = retval
983 wrf_dm_max_real = inval
985 END FUNCTION wrf_dm_max_real
987 REAL FUNCTION wrf_dm_min_real ( inval )
993 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, local_communicator, ierr )
994 wrf_dm_min_real = retval
997 wrf_dm_min_real = inval
999 END FUNCTION wrf_dm_min_real
1001 SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
1009 CALL mpi_allreduce ( inval, retval , n, getrealmpitype(), MPI_MIN, local_communicator, ierr )
1011 retval(1:n) = inval(1:n)
1013 END SUBROUTINE wrf_dm_min_reals
1015 REAL FUNCTION wrf_dm_sum_real ( inval )
1021 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, local_communicator, ierr )
1022 wrf_dm_sum_real = retval
1025 wrf_dm_sum_real = inval
1027 END FUNCTION wrf_dm_sum_real
1029 SUBROUTINE wrf_dm_sum_reals (inval, retval)
1031 REAL, INTENT(IN) :: inval(:)
1032 REAL, INTENT(OUT) :: retval(:)
1036 CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, ierr )
1040 END SUBROUTINE wrf_dm_sum_reals
1042 INTEGER FUNCTION wrf_dm_sum_integer ( inval )
1046 INTEGER inval, retval
1048 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, local_communicator, ierr )
1049 wrf_dm_sum_integer = retval
1052 wrf_dm_sum_integer = inval
1054 END FUNCTION wrf_dm_sum_integer
1056 SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
1060 REAL val, val_all( ntasks )
1061 INTEGER idex, jdex, ierr
1063 INTEGER dex_all (2,ntasks)
1066 dex(1) = idex ; dex(2) = jdex
1067 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
1068 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
1070 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1072 IF ( val_all(i) .GT. val ) THEN
1080 INTEGER idex, jdex, ierr
1082 END SUBROUTINE wrf_dm_maxval_real
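! Illustrative usage (hypothetical caller variables): through the wrf_dm_maxval
! interface each task passes its local maximum and that maximum's (i,j); on return
! every task holds the global maximum and the indices at which it occurs, e.g.
!   wmax = MAXVAL( w(its:ite,jts:jte) )   ! plus the local idex, jdex of that value
!   CALL wrf_dm_maxval( wmax, idex, jdex )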
1084 #ifndef PROMOTE_FLOAT
1085 SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
1089 DOUBLE PRECISION val, val_all( ntasks )
1090 INTEGER idex, jdex, ierr
1092 INTEGER dex_all (2,ntasks)
1095 dex(1) = idex ; dex(2) = jdex
1096 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
1097 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
1099 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1101 IF ( val_all(i) .GT. val ) THEN
1108 DOUBLE PRECISION val
1109 INTEGER idex, jdex, ierr
1111 END SUBROUTINE wrf_dm_maxval_doubleprecision
1114 SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
1118 INTEGER val, val_all( ntasks )
1119 INTEGER idex, jdex, ierr
1121 INTEGER dex_all (2,ntasks)
1124 dex(1) = idex ; dex(2) = jdex
1125 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
1126 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
1128 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1130 IF ( val_all(i) .GT. val ) THEN
1140 END SUBROUTINE wrf_dm_maxval_integer
1142 ! For HWRF some additional computation is required. This is gopal's doing
1144 SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
1146 REAL val, val_all( ntasks )
1147 INTEGER idex, jdex, ierr
1149 INTEGER dex_all (2,ntasks)
1151 ! Collective operation. Each processor calls passing a local value and its index; on return
1152 ! all processors are passed back the minimum of all values passed and its index.
1159 CALL wrf_get_dm_communicator ( comm )
1160 dex(1) = idex ; dex(2) = jdex
1161 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1162 CALL mpi_allgather ( val, 1, MPI_REAL, val_all , 1, MPI_REAL, comm, ierr )
1164 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1166 IF ( val_all(i) .LT. val ) THEN
1173 END SUBROUTINE wrf_dm_minval_real
1175 #ifndef PROMOTE_FLOAT
1176 SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
1178 DOUBLE PRECISION val, val_all( ntasks )
1179 INTEGER idex, jdex, ierr
1181 INTEGER dex_all (2,ntasks)
1183 ! Collective operation. Each processor calls passing a local value and its index; on return
1184 ! all processors are passed back the minimum of all values passed and its index.
1191 CALL wrf_get_dm_communicator ( comm )
1192 dex(1) = idex ; dex(2) = jdex
1193 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1194 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
1196 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1198 IF ( val_all(i) .LT. val ) THEN
1205 END SUBROUTINE wrf_dm_minval_doubleprecision
1208 SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
1210 INTEGER val, val_all( ntasks )
1211 INTEGER idex, jdex, ierr
1213 INTEGER dex_all (2,ntasks)
1215 ! Collective operation. Each processor calls passing a local value and its index; on return
1216 ! all processors are passed back the minimum of all values passed and its index.
1223 CALL wrf_get_dm_communicator ( comm )
1224 dex(1) = idex ; dex(2) = jdex
1225 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1226 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
1228 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1230 IF ( val_all(i) .LT. val ) THEN
1237 END SUBROUTINE wrf_dm_minval_integer ! End of gopal's doing
1239 SUBROUTINE split_communicator
1244 INTEGER mpi_comm_here, mpi_comm_local, comdup, mytask, ntasks, ierr, io_status
1245 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1246 INTEGER thread_support_provided, thread_support_requested
1249 INTEGER, ALLOCATABLE :: icolor(:)
1250 INTEGER tasks_per_split
1251 NAMELIST /namelist_split/ tasks_per_split
1253 CALL MPI_INITIALIZED( mpi_inited, ierr )
1254 IF ( .NOT. mpi_inited ) THEN
1255 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1256 thread_support_requested = MPI_THREAD_FUNNELED
1257 CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
1258 IF ( thread_support_provided .lt. thread_support_requested ) THEN
1259 CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
1262 CALL mpi_init ( ierr )
1264 CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
1267 CALL wrf_get_dm_communicator( mpi_comm_here )
1269 CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
1270 CALL mpi_comm_size ( mpi_comm_here, ntasks, ierr ) ;
1272 CALL mpi_comm_split( MPI_COMM_WORLD , 4 , 1 , mpi_comm_local, ierr )
1273 CALL wrf_set_dm_communicator( mpi_comm_local )
1275 CALL wrf_get_dm_communicator( mpi_comm_here )
1278 IF ( mytask .EQ. 0 ) THEN
1279 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1280 tasks_per_split = ntasks
1281 READ ( 27 , NML = namelist_split, IOSTAT=io_status )
1284 CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1285 IF ( io_status .NE. 0 ) THEN
1286 RETURN ! just ignore and return
1288 CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1289 IF ( tasks_per_split .GT. ntasks .OR. tasks_per_split .LE. 0 ) RETURN
1290 IF ( mod( ntasks, tasks_per_split ) .NE. 0 ) THEN
1291 CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
1294 ALLOCATE( icolor(ntasks) )
1296 DO WHILE ( j .LT. ntasks / tasks_per_split )
1297 DO i = 1, tasks_per_split
1298 icolor( i + j * tasks_per_split ) = j
1303 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1304 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
1305 CALL wrf_set_dm_communicator( mpi_comm_local )
1307 DEALLOCATE( icolor )
1309 END SUBROUTINE split_communicator
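! Illustrative usage (assuming the optional namelist_split group in namelist.input):
!
!   &namelist_split
!     tasks_per_split = 8,
!   /
!
! splits a 24-task run into three 8-task sub-communicators, each of which then goes
! through the rest of the DM setup independently. If the group is absent, the READ
! fails, the broadcast io_status is nonzero everywhere, and the routine returns with
! the communicator left unsplit.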
1311 SUBROUTINE init_module_dm
1314 INTEGER mpi_comm_local, ierr, mytask, nproc
1317 CALL mpi_initialized( mpi_inited, ierr )
1318 IF ( .NOT. mpi_inited ) THEN
1319 ! If MPI has not been initialized then initialize it and
1320 ! make comm_world the communicator
1321 ! Otherwise, something else (e.g. quilt-io) has already
1322 ! initialized MPI, so just grab the communicator that
1323 ! should already be stored and use that.
1324 CALL mpi_init ( ierr )
1326 CALL wrf_set_dm_communicator ( MPI_COMM_WORLD )
1328 CALL wrf_get_dm_communicator( mpi_comm_local )
1330 END SUBROUTINE init_module_dm
1333 SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
1334 USE module_domain, ONLY : domain
1336 TYPE (domain), INTENT(INOUT) :: parent, nest
1337 INTEGER, INTENT(IN) :: dx,dy
1339 END SUBROUTINE wrf_dm_move_nest
1341 !------------------------------------------------------------------------------
1342 SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
1345 mp_local_cobmask, errf )
1347 !------------------------------------------------------------------------------
1348 ! PURPOSE: Do MPI allgatherv operation across processors to get the
1349 ! errors at each observation point on all processors.
1351 !------------------------------------------------------------------------------
1353 INTEGER, INTENT(IN) :: nsta ! Observation index.
1354 INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
1355 INTEGER, INTENT(IN) :: niobf ! Number of observations.
1356 LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
1357 LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
1358 LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
1359 REAL, INTENT(INOUT) :: errf(nerrf, niobf)
1364 ! Local declarations
1365 integer i, n, nlocal_dot, nlocal_crs
1366 REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
1367 REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
1368 REAL SFP_BUFFER(NIOBF) ! Buffer for holding Surface pressure
1369 INTEGER N_BUFFER(NIOBF)
1370 REAL FULL_BUFFER(NIOBF)
1371 INTEGER IFULL_BUFFER(NIOBF)
1372 INTEGER IDISPLACEMENT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1373 INTEGER ICOUNT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1375 INTEGER :: MPI_COMM_COMP ! MPI group communicator
1376 INTEGER :: NPROCS ! Number of processors
1377 INTEGER :: IERR ! Error code from MPI routines
1379 ! Get communicator for MPI operations.
1380 CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
1382 ! Get the number of processors.
1383 CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
1388 IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
1389 NLOCAL_DOT = NLOCAL_DOT + 1
1390 UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
1391 SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
1392 QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
1393 N_BUFFER(NLOCAL_DOT) = N
1396 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1397 ICOUNT,1,MPI_INTEGER, &
1401 IDISPLACEMENT(1) = 0
1403 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1405 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1406 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1407 MPI_INTEGER, MPI_COMM_COMP, IERR)
1409 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1410 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1411 MPI_REAL, MPI_COMM_COMP, IERR)
1413 ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1415 ! SURF PRESS AT U-POINTS
1416 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1417 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1418 MPI_REAL, MPI_COMM_COMP, IERR)
1420 ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1423 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
1424 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1425 MPI_REAL, MPI_COMM_COMP, IERR)
1427 ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1433 IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
1434 NLOCAL_DOT = NLOCAL_DOT + 1
1435 UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
1436 SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
1437 N_BUFFER(NLOCAL_DOT) = N
1440 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1441 ICOUNT,1,MPI_INTEGER, &
1445 IDISPLACEMENT(1) = 0
1447 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1449 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1450 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1451 MPI_INTEGER, MPI_COMM_COMP, IERR)
1453 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1454 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1455 MPI_REAL, MPI_COMM_COMP, IERR)
1457 ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1459 ! SURF PRESS AT V-POINTS
1460 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1461 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1462 MPI_REAL, MPI_COMM_COMP, IERR)
1464 ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1467 ! DO THE CROSS FIELDS, T AND Q
1470 IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
1471 NLOCAL_CRS = NLOCAL_CRS + 1
1472 UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
1473 QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
1474 SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
1475 N_BUFFER(NLOCAL_CRS) = N
1478 CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
1479 ICOUNT,1,MPI_INTEGER, &
1481 IDISPLACEMENT(1) = 0
1483 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1485 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
1486 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1487 MPI_INTEGER, MPI_COMM_COMP, IERR)
1489 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
1490 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1491 MPI_REAL, MPI_COMM_COMP, IERR)
1494 ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1497 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
1498 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1499 MPI_REAL, MPI_COMM_COMP, IERR)
1501 ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1503 ! SURF PRESS AT MASS POINTS
1504 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
1505 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1506 MPI_REAL, MPI_COMM_COMP, IERR)
1508 ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1511 END SUBROUTINE get_full_obs_vector
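! Illustrative note on the gather pattern above: each field follows the usual
! MPI_ALLGATHERV recipe -- allgather the per-task counts into ICOUNT, form the
! displacements as a running sum (e.g. counts 3,0,2 on three tasks give
! displacements 0,3,3), then allgatherv the observation indices (N_BUFFER) and the
! values so every task can scatter the results back into ERRF by global index.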
1515 SUBROUTINE wrf_dm_maxtile_real ( val , tile)
1517 REAL val, val_all( ntasks )
1522 ! Collective operation. Each processor calls passing a local value and its index; on return
1523 ! all processors are passed back the maximum of all values passed and its tile number.
1530 CALL wrf_get_dm_communicator ( comm )
1531 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
1535 IF ( val_all(i) .GT. val ) THEN
1541 END SUBROUTINE wrf_dm_maxtile_real
1544 SUBROUTINE wrf_dm_mintile_real ( val , tile)
1546 REAL val, val_all( ntasks )
1551 ! Collective operation. Each processor calls passing a local value and its index; on return
1552 ! all processors are passed back the minimum of all values passed and its tile number.
1559 CALL wrf_get_dm_communicator ( comm )
1560 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
1564 IF ( val_all(i) .LT. val ) THEN
1570 END SUBROUTINE wrf_dm_mintile_real
1573 SUBROUTINE wrf_dm_mintile_double ( val , tile)
1575 DOUBLE PRECISION val, val_all( ntasks )
1580 ! Collective operation. Each processor calls passing a local value and its index; on return
1581 ! all processors are passed back the minimum of all values passed and its tile number.
1588 CALL wrf_get_dm_communicator ( comm )
1589 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
1593 IF ( val_all(i) .LT. val ) THEN
1599 END SUBROUTINE wrf_dm_mintile_double
1602 SUBROUTINE wrf_dm_tile_val_int ( val , tile)
1604 INTEGER val, val_all( ntasks )
1609 ! Collective operation. Get value from input tile.
1616 CALL wrf_get_dm_communicator ( comm )
1617 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
1620 END SUBROUTINE wrf_dm_tile_val_int
1622 SUBROUTINE wrf_get_hostname ( str )
1626 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
1631 END SUBROUTINE wrf_get_hostname
1633 SUBROUTINE wrf_get_hostid ( hostid )
1636 INTEGER i, sz, n, cs
1637 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
1640 END SUBROUTINE wrf_get_hostid
1642 END MODULE module_dm
1644 !=========================================================================
1645 ! wrf_dm_patch_domain has to be outside the module because it is called
1646 ! by a routine in module_domain but depends on module_domain
1648 SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
1649 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1650 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1651 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1652 sp1x , ep1x , sm1x , em1x , &
1653 sp2x , ep2x , sm2x , em2x , &
1654 sp3x , ep3x , sm3x , em3x , &
1655 sp1y , ep1y , sm1y , em1y , &
1656 sp2y , ep2y , sm2y , em2y , &
1657 sp3y , ep3y , sm3y , em3y , &
1659 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
1663 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
1664 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
1665 sm1 , em1 , sm2 , em2 , sm3 , em3
1666 INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
1667 sm1x , em1x , sm2x , em2x , sm3x , em3x
1668 INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
1669 sm1y , em1y , sm2y , em2y , sm3y , em3y
1670 INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
1672 TYPE(domain), POINTER :: parent
1673 TYPE(domain), POINTER :: grid_ptr
1675 ! this is necessary because we cannot pass parent directly into
1676 ! wrf_dm_patch_domain because creating the correct interface definitions
1677 ! would generate a circular USE reference between module_domain and module_dm;
1678 ! see the comment of this date in module_domain for more information. JM 20020416
1681 grid_ptr => head_grid
1682 CALL find_grid_by_id( parent_id , grid_ptr , parent )
1684 CALL patch_domain_rsl_lite ( id , parent, parent_id , &
1685 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1686 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1687 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1688 sp1x , ep1x , sm1x , em1x , &
1689 sp2x , ep2x , sm2x , em2x , &
1690 sp3x , ep3x , sm3x , em3x , &
1691 sp1y , ep1y , sm1y , em1y , &
1692 sp2y , ep2y , sm2y , em2y , &
1693 sp3y , ep3y , sm3y , em3y , &
1697 END SUBROUTINE wrf_dm_patch_domain
1699 SUBROUTINE wrf_termio_dup
1701 INTEGER mytask, ntasks
1705 CALL mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr )
1706 CALL mpi_comm_rank(MPI_COMM_WORLD, mytask, ierr )
1707 write(0,*)'starting wrf task ',mytask,' of ',ntasks
1708 CALL rsl_error_dup1( mytask )
1713 END SUBROUTINE wrf_termio_dup
1715 SUBROUTINE wrf_get_myproc( myproc )
1716 USE module_dm , ONLY : mytask
1721 END SUBROUTINE wrf_get_myproc
1723 SUBROUTINE wrf_get_nproc( nproc )
1724 USE module_dm , ONLY : ntasks
1729 END SUBROUTINE wrf_get_nproc
1731 SUBROUTINE wrf_get_nprocx( nprocx )
1732 USE module_dm , ONLY : ntasks_x
1737 END SUBROUTINE wrf_get_nprocx
1739 SUBROUTINE wrf_get_nprocy( nprocy )
1740 USE module_dm , ONLY : ntasks_y
1745 END SUBROUTINE wrf_get_nprocy
1747 SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
1748 USE module_dm , ONLY : local_communicator
1757 CHARACTER*1 BUF(size)
1760 CALL BYTE_BCAST ( buf , size, local_communicator )
1763 END SUBROUTINE wrf_dm_bcast_bytes
1765 SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
1769 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
1774 INTEGER ibuf(256),i,n
1777 ! The root task is required to have the correct value of N1; other tasks
1778 ! might not have the correct value.
1779 CALL wrf_dm_bcast_integer( n , 1 )
1780 IF (n .GT. 256) n = 256
1783 ibuf(I) = ichar(buf(I:I))
1785 CALL wrf_dm_bcast_integer( ibuf, n )
1788 buf(i:i) = char(ibuf(i))
1793 END SUBROUTINE wrf_dm_bcast_string
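! Illustrative usage (hypothetical variable names): broadcasting a file name read only
! on the monitor task so that every task ends up with the same value:
!
!   IF ( wrf_dm_on_monitor() ) THEN
!     READ(iun,'(A)') fname        ! only task 0 has the real value
!     n = LEN_TRIM(fname)
!   ENDIF
!   CALL wrf_dm_bcast_string( fname, n )   ! n and up to 256 characters are
!                                          ! broadcast from task 0 to all tasks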
1795 SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
1799 CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
1801 END SUBROUTINE wrf_dm_bcast_integer
1803 SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
1806 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
1807 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
1808 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
1809 ! since we are not indexing the globbuf and Field arrays, it does not matter.
1811 CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
1813 END SUBROUTINE wrf_dm_bcast_double
1815 SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
1819 CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
1821 END SUBROUTINE wrf_dm_bcast_real
1823 SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
1827 CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
1829 END SUBROUTINE wrf_dm_bcast_logical
1831 SUBROUTINE write_68( grid, v , s , &
1832 ids, ide, jds, jde, kds, kde, &
1833 ims, ime, jms, jme, kms, kme, &
1834 its, ite, jts, jte, kts, kte )
1835 USE module_domain, ONLY : domain
1837 TYPE(domain) , INTENT (INOUT) :: grid
1839 INTEGER ids, ide, jds, jde, kds, kde, &
1840 ims, ime, jms, jme, kms, kme, &
1841 its, ite, jts, jte, kts, kte
1842 REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
1846 logical, external :: wrf_dm_on_monitor
1847 real globbuf( ids:ide, kds:kde, jds:jde )
1848 character*3 ord, stag
1850 if ( kds == kde ) then
1853 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1854 ids, ide, jds, jde, kds, kde, &
1855 ims, ime, jms, jme, kms, kme, &
1856 its, ite, jts, jte, kts, kte )
1861 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1862 ids, ide, kds, kde, jds, jde, &
1863 ims, ime, kms, kme, jms, jme, &
1864 its, ite, kts, kte, jts, jte )
1868 if ( wrf_dm_on_monitor() ) THEN
1869 WRITE(68,*) ide-ids+1, jde-jds+1 , s
1872 WRITE(68,*) globbuf(i,1,j)
1880 SUBROUTINE wrf_abort
1885 CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
1889 END SUBROUTINE wrf_abort
1891 SUBROUTINE wrf_dm_shutdown
1895 CALL MPI_FINALIZE( ierr )
1898 END SUBROUTINE wrf_dm_shutdown
1900 LOGICAL FUNCTION wrf_dm_on_monitor()
1905 INTEGER tsk, ierr, mpi_comm_local
1906 CALL wrf_get_dm_communicator( mpi_comm_local )
1907 CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
1908 wrf_dm_on_monitor = tsk .EQ. 0
1910 wrf_dm_on_monitor = .TRUE.
1913 END FUNCTION wrf_dm_on_monitor
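! Illustrative usage: restrict I/O or logging to the monitor task, e.g.
!
!   IF ( wrf_dm_on_monitor() ) THEN
!     CALL wrf_message( 'only MPI task 0 prints this' )
!   ENDIF
!
! In the non-DM build the function is always .TRUE., so the same code works serially.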
1915 INTEGER FUNCTION wrf_dm_monitor_rank()
1918 wrf_dm_monitor_rank = 0
1920 END FUNCTION wrf_dm_monitor_rank
1922 SUBROUTINE wrf_get_dm_communicator ( communicator )
1923 USE module_dm , ONLY : local_communicator
1925 INTEGER , INTENT(OUT) :: communicator
1926 communicator = local_communicator
1928 END SUBROUTINE wrf_get_dm_communicator
1930 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
1931 USE module_dm , ONLY : local_iocommunicator
1933 INTEGER , INTENT(OUT) :: iocommunicator
1934 iocommunicator = local_iocommunicator
1936 END SUBROUTINE wrf_get_dm_iocommunicator
1938 SUBROUTINE wrf_set_dm_communicator ( communicator )
1939 USE module_dm , ONLY : local_communicator
1941 INTEGER , INTENT(IN) :: communicator
1942 local_communicator = communicator
1944 END SUBROUTINE wrf_set_dm_communicator
1946 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
1947 USE module_dm , ONLY : local_iocommunicator
1949 INTEGER , INTENT(IN) :: iocommunicator
1950 local_iocommunicator = iocommunicator
1952 END SUBROUTINE wrf_set_dm_iocommunicator
1955 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1957 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
1958 DS1,DE1,DS2,DE2,DS3,DE3,&
1959 MS1,ME1,MS2,ME2,MS3,ME3,&
1960 PS1,PE1,PS2,PE2,PS3,PE3 )
1962 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1963 MS1,ME1,MS2,ME2,MS3,ME3,&
1964 PS1,PE1,PS2,PE2,PS3,PE3
1965 CHARACTER *(*) stagger,ordering
1970 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
1971 DS1,DE1,DS2,DE2,DS3,DE3,&
1972 MS1,ME1,MS2,ME2,MS3,ME3,&
1973 PS1,PE1,PS2,PE2,PS3,PE3 )
1976 END SUBROUTINE wrf_patch_to_global_real
1978 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
1979 DS1,DE1,DS2,DE2,DS3,DE3,&
1980 MS1,ME1,MS2,ME2,MS3,ME3,&
1981 PS1,PE1,PS2,PE2,PS3,PE3 )
1983 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1984 MS1,ME1,MS2,ME2,MS3,ME3,&
1985 PS1,PE1,PS2,PE2,PS3,PE3
1986 CHARACTER *(*) stagger,ordering
1988 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
1989 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
1990 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
1991 ! since we are not indexing the globbuf and Field arrays, it does not matter.
1995 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
1996 DS1,DE1,DS2,DE2,DS3,DE3,&
1997 MS1,ME1,MS2,ME2,MS3,ME3,&
1998 PS1,PE1,PS2,PE2,PS3,PE3 )
2001 END SUBROUTINE wrf_patch_to_global_double
2004 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
2005 DS1,DE1,DS2,DE2,DS3,DE3,&
2006 MS1,ME1,MS2,ME2,MS3,ME3,&
2007 PS1,PE1,PS2,PE2,PS3,PE3 )
2009 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2010 MS1,ME1,MS2,ME2,MS3,ME3,&
2011 PS1,PE1,PS2,PE2,PS3,PE3
2012 CHARACTER *(*) stagger,ordering
2017 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
2018 DS1,DE1,DS2,DE2,DS3,DE3,&
2019 MS1,ME1,MS2,ME2,MS3,ME3,&
2020 PS1,PE1,PS2,PE2,PS3,PE3 )
2023 END SUBROUTINE wrf_patch_to_global_integer
2026 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
2027 DS1,DE1,DS2,DE2,DS3,DE3,&
2028 MS1,ME1,MS2,ME2,MS3,ME3,&
2029 PS1,PE1,PS2,PE2,PS3,PE3 )
2031 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2032 MS1,ME1,MS2,ME2,MS3,ME3,&
2033 PS1,PE1,PS2,PE2,PS3,PE3
2034 CHARACTER *(*) stagger,ordering
2039 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
2040 DS1,DE1,DS2,DE2,DS3,DE3,&
2041 MS1,ME1,MS2,ME2,MS3,ME3,&
2042 PS1,PE1,PS2,PE2,PS3,PE3 )
2045 END SUBROUTINE wrf_patch_to_global_logical
2048 # define FRSTELEM (1)
2053 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
2054 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2055 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2056 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2057 USE module_driver_constants
2059 USE module_wrf_error
2062 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2063 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2064 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2065 CHARACTER *(*) stagger,ordering
2066 INTEGER domdesc,typesize,ierr
2070 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2071 MS1,ME1,MS2,ME2,MS3,ME3,&
2072 PS1,PE1,PS2,PE2,PS3,PE3
2073 INTEGER ids,ide,jds,jde,kds,kde,&
2074 ims,ime,jms,jme,kms,kme,&
2075 ips,ipe,jps,jpe,kps,kpe
2076 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2078 INTEGER i, j, k, ndim
2079 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2080 ! allocated further down, after the D indices are potentially recalculated for staggering
2081 REAL, ALLOCATABLE :: tmpbuf( : )
2082 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2084 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2085 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2086 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2088 SELECT CASE ( TRIM(ordering) )
2092 ndim = 3 ! where appropriate
2095 SELECT CASE ( TRIM(ordering) )
2097 ! the non-staggered variables come in at one-less than
2098 ! domain dimensions, but code wants full domain spec, so
2099 ! adjust if not staggered
2100 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2101 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2102 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2104 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2105 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2106 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2108 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2109 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2110 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2112 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2113 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2114 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
2118 ! moved to here to be after the potential recalculations of D dims
2119 IF ( wrf_dm_on_monitor() ) THEN
2120 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2122 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2124 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
2126 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2127 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2128 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2130 IF ( typesize .EQ. RWORDSIZE ) THEN
2131 CALL just_patch_r ( buf , locbuf , size(locbuf), &
2132 PS1, PE1, PS2, PE2, PS3, PE3 , &
2133 MS1, ME1, MS2, ME2, MS3, ME3 )
2134 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2135 CALL just_patch_i ( buf , locbuf , size(locbuf), &
2136 PS1, PE1, PS2, PE2, PS3, PE3 , &
2137 MS1, ME1, MS2, ME2, MS3, ME3 )
2138 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2139 CALL just_patch_d ( buf , locbuf , size(locbuf), &
2140 PS1, PE1, PS2, PE2, PS3, PE3 , &
2141 MS1, ME1, MS2, ME2, MS3, ME3 )
2142 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2143 CALL just_patch_l ( buf , locbuf , size(locbuf), &
2144 PS1, PE1, PS2, PE2, PS3, PE3 , &
2145 MS1, ME1, MS2, ME2, MS3, ME3 )
2148 ! defined in external/io_quilt
2149 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2153 CALL collect_on_comm0 ( local_communicator , typesize , &
2154 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
2155 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
2157 ndim = len(TRIM(ordering))
2159 IF ( wrf_at_debug_level(500) ) THEN
2163 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
2165 IF ( typesize .EQ. RWORDSIZE ) THEN
2166 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
2167 DS1, DE1, DS2, DE2, DS3, DE3 , &
2169 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2170 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
2171 DS1, DE1, DS2, DE2, DS3, DE3 , &
2173 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2174 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
2175 DS1, DE1, DS2, DE2, DS3, DE3 , &
2177 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2178 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
2179 DS1, DE1, DS2, DE2, DS3, DE3 , &
2185 IF ( wrf_at_debug_level(500) ) THEN
2186 CALL end_timing('wrf_patch_to_global_generic')
2188 DEALLOCATE( tmpbuf )
2191 END SUBROUTINE wrf_patch_to_global_generic
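! Editor's summary (added): the generic gather above proceeds in three steps --
! (1) each task packs the patch window of the memory-dimensioned field into the
! contiguous locbuf via just_patch_*, (2) collect_on_comm0 gathers the per-task
! patch extents (Patch -> Gpatch) and then the packed patch data onto the
! monitor task, and (3) the monitor reassembles the global array with
! patch_2_outbuf_*, placing each task's segment according to Gpatch.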
2193 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
2194 PS1,PE1,PS2,PE2,PS3,PE3, &
2195 MS1,ME1,MS2,ME2,MS3,ME3 )
2198 INTEGER , INTENT(IN) :: noutbuf
2199 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2200 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2201 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2202 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
2204 INTEGER :: i,j,k,n , icurs
2209 outbuf( icurs ) = inbuf( i, j, k )
2215 END SUBROUTINE just_patch_i
2217 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
2218 PS1,PE1,PS2,PE2,PS3,PE3, &
2219 MS1,ME1,MS2,ME2,MS3,ME3 )
2222 INTEGER , INTENT(IN) :: noutbuf
2223 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2224 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2225 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2226 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2228 INTEGER :: i,j,k , icurs
2233 outbuf( icurs ) = inbuf( i, j, k )
2239 END SUBROUTINE just_patch_r
2241 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
2242 PS1,PE1,PS2,PE2,PS3,PE3, &
2243 MS1,ME1,MS2,ME2,MS3,ME3 )
2246 INTEGER , INTENT(IN) :: noutbuf
2247 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2248 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2249 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2250 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2252 INTEGER :: i,j,k,n , icurs
2257 outbuf( icurs ) = inbuf( i, j, k )
2263 END SUBROUTINE just_patch_d
2265 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
2266 PS1,PE1,PS2,PE2,PS3,PE3, &
2267 MS1,ME1,MS2,ME2,MS3,ME3 )
2270 INTEGER , INTENT(IN) :: noutbuf
2271 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2272 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2273 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2274 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2276 INTEGER :: i,j,k,n , icurs
2281 outbuf( icurs ) = inbuf( i, j, k )
2287 END SUBROUTINE just_patch_l
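! Editor's note (added): the just_patch_* routines above all follow the same
! pattern -- traverse the patch region (PS3:PE3, PS2:PE2, PS1:PE1) of the
! memory-dimensioned inbuf with the first dimension varying fastest, appending
! each element to outbuf; this matches the unpack order used by the
! patch_2_outbuf_* and all_sub_* routines below.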
2290 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
2291 DS1,DE1,DS2,DE2,DS3,DE3, &
2295 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2296 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2297 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2299 INTEGER :: i,j,k,n , icurs
2302 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2303 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2304 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2305 outbuf( i, j, k ) = inbuf( icurs )
2313 END SUBROUTINE patch_2_outbuf_r
2315 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
2316 DS1,DE1,DS2,DE2,DS3,DE3,&
2320 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2321 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2322 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2324 INTEGER :: i,j,k,n , icurs
2327 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2328 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2329 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2330 outbuf( i, j, k ) = inbuf( icurs )
2337 END SUBROUTINE patch_2_outbuf_i
2339 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
2340 DS1,DE1,DS2,DE2,DS3,DE3,&
2344 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2345 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2346 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2348 INTEGER :: i,j,k,n , icurs
2351 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2352 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2353 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2354 outbuf( i, j, k ) = inbuf( icurs )
2361 END SUBROUTINE patch_2_outbuf_d
2363 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
2364 DS1,DE1,DS2,DE2,DS3,DE3,&
2368 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2369 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2370 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2372 INTEGER :: i,j,k,n , icurs
2375 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2376 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2377 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2378 outbuf( i, j, k ) = inbuf( icurs )
2385 END SUBROUTINE patch_2_outbuf_l
2387 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2389 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
2390 DS1,DE1,DS2,DE2,DS3,DE3,&
2391 MS1,ME1,MS2,ME2,MS3,ME3,&
2392 PS1,PE1,PS2,PE2,PS3,PE3 )
2394 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2395 MS1,ME1,MS2,ME2,MS3,ME3,&
2396 PS1,PE1,PS2,PE2,PS3,PE3
2397 CHARACTER *(*) stagger,ordering
2402 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
2403 DS1,DE1,DS2,DE2,DS3,DE3,&
2404 MS1,ME1,MS2,ME2,MS3,ME3,&
2405 PS1,PE1,PS2,PE2,PS3,PE3 )
2407 END SUBROUTINE wrf_global_to_patch_real
2409 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
2410 DS1,DE1,DS2,DE2,DS3,DE3,&
2411 MS1,ME1,MS2,ME2,MS3,ME3,&
2412 PS1,PE1,PS2,PE2,PS3,PE3 )
2414 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2415 MS1,ME1,MS2,ME2,MS3,ME3,&
2416 PS1,PE1,PS2,PE2,PS3,PE3
2417 CHARACTER *(*) stagger,ordering
2419 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2420 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2421 ! for having this separate routine is so that we pass the correct MPI type to mpi_scatterv;
2422 ! since we do not index the globbuf and Field arrays here, it does not otherwise matter
2426 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
2427 DS1,DE1,DS2,DE2,DS3,DE3,&
2428 MS1,ME1,MS2,ME2,MS3,ME3,&
2429 PS1,PE1,PS2,PE2,PS3,PE3 )
2431 END SUBROUTINE wrf_global_to_patch_double
2434 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
2435 DS1,DE1,DS2,DE2,DS3,DE3,&
2436 MS1,ME1,MS2,ME2,MS3,ME3,&
2437 PS1,PE1,PS2,PE2,PS3,PE3 )
2439 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2440 MS1,ME1,MS2,ME2,MS3,ME3,&
2441 PS1,PE1,PS2,PE2,PS3,PE3
2442 CHARACTER *(*) stagger,ordering
2447 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
2448 DS1,DE1,DS2,DE2,DS3,DE3,&
2449 MS1,ME1,MS2,ME2,MS3,ME3,&
2450 PS1,PE1,PS2,PE2,PS3,PE3 )
2452 END SUBROUTINE wrf_global_to_patch_integer
2454 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
2455 DS1,DE1,DS2,DE2,DS3,DE3,&
2456 MS1,ME1,MS2,ME2,MS3,ME3,&
2457 PS1,PE1,PS2,PE2,PS3,PE3 )
2459 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2460 MS1,ME1,MS2,ME2,MS3,ME3,&
2461 PS1,PE1,PS2,PE2,PS3,PE3
2462 CHARACTER *(*) stagger,ordering
2467 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
2468 DS1,DE1,DS2,DE2,DS3,DE3,&
2469 MS1,ME1,MS2,ME2,MS3,ME3,&
2470 PS1,PE1,PS2,PE2,PS3,PE3 )
2472 END SUBROUTINE wrf_global_to_patch_logical
2474 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
2475 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2476 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2477 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2479 USE module_driver_constants
2481 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2482 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2483 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2484 CHARACTER *(*) stagger,ordering
2485 INTEGER domdesc,typesize,ierr
2489 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2490 MS1,ME1,MS2,ME2,MS3,ME3,&
2491 PS1,PE1,PS2,PE2,PS3,PE3
2492 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2494 INTEGER i,j,k,ord,ord2d,ndim
2495 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2496 REAL, ALLOCATABLE :: tmpbuf( : )
2497 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2499 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2500 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2501 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2503 SELECT CASE ( TRIM(ordering) )
2507 ndim = 3 ! where appropriate
2510 SELECT CASE ( TRIM(ordering) )
2512 ! the non-staggered variables come in at one-less than
2513 ! domain dimensions, but code wants full domain spec, so
2514 ! adjust if not staggered
2515 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2516 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2517 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2519 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2520 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2521 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2523 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2524 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2525 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2527 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2528 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2529 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
2533 ! moved to here to be after the potential recalculations of D dims
2534 IF ( wrf_dm_on_monitor() ) THEN
2535 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2537 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2539 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
2541 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2542 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2543 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2545 ! defined in external/io_quilt
2546 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2549 ndim = len(TRIM(ordering))
2551 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
2552 IF ( typesize .EQ. RWORDSIZE ) THEN
2553 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
2554 DS1, DE1, DS2, DE2, DS3, DE3 , &
2555 MS1, ME1, MS2, ME2, MS3, ME3 , &
2557 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2558 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
2559 DS1, DE1, DS2, DE2, DS3, DE3 , &
2561 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2562 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
2563 DS1, DE1, DS2, DE2, DS3, DE3 , &
2565 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2566 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
2567 DS1, DE1, DS2, DE2, DS3, DE3 , &
2572 CALL dist_on_comm0 ( local_communicator , typesize , &
2573 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
2574 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
2576 IF ( typesize .EQ. RWORDSIZE ) THEN
2577 CALL all_sub_r ( locbuf , buf , &
2578 PS1, PE1, PS2, PE2, PS3, PE3 , &
2579 MS1, ME1, MS2, ME2, MS3, ME3 )
2581 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2582 CALL all_sub_i ( locbuf , buf , &
2583 PS1, PE1, PS2, PE2, PS3, PE3 , &
2584 MS1, ME1, MS2, ME2, MS3, ME3 )
2585 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2586 CALL all_sub_d ( locbuf , buf , &
2587 PS1, PE1, PS2, PE2, PS3, PE3 , &
2588 MS1, ME1, MS2, ME2, MS3, ME3 )
2589 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2590 CALL all_sub_l ( locbuf , buf , &
2591 PS1, PE1, PS2, PE2, PS3, PE3 , &
2592 MS1, ME1, MS2, ME2, MS3, ME3 )
2596 DEALLOCATE ( tmpbuf )
2599 END SUBROUTINE wrf_global_to_patch_generic
2601 SUBROUTINE all_sub_i ( inbuf , outbuf, &
2602 PS1,PE1,PS2,PE2,PS3,PE3, &
2603 MS1,ME1,MS2,ME2,MS3,ME3 )
2606 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2607 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2608 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2609 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2611 INTEGER :: i,j,k,n , icurs
2616 outbuf( i, j, k ) = inbuf ( icurs )
2622 END SUBROUTINE all_sub_i
2624 SUBROUTINE all_sub_r ( inbuf , outbuf, &
2625 PS1,PE1,PS2,PE2,PS3,PE3, &
2626 MS1,ME1,MS2,ME2,MS3,ME3 )
2629 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2630 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2631 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2632 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2634 INTEGER :: i,j,k,n , icurs
2639 outbuf( i, j, k ) = inbuf ( icurs )
2646 END SUBROUTINE all_sub_r
2648 SUBROUTINE all_sub_d ( inbuf , outbuf, &
2649 PS1,PE1,PS2,PE2,PS3,PE3, &
2650 MS1,ME1,MS2,ME2,MS3,ME3 )
2653 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2654 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2655 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2656 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2658 INTEGER :: i,j,k,n , icurs
2663 outbuf( i, j, k ) = inbuf ( icurs )
2669 END SUBROUTINE all_sub_d
2671 SUBROUTINE all_sub_l ( inbuf , outbuf, &
2672 PS1,PE1,PS2,PE2,PS3,PE3, &
2673 MS1,ME1,MS2,ME2,MS3,ME3 )
2676 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2677 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2678 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2679 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2681 INTEGER :: i,j,k,n , icurs
2686 outbuf( i, j, k ) = inbuf ( icurs )
2692 END SUBROUTINE all_sub_l
2694 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
2695 DS1,DE1,DS2,DE2,DS3,DE3, &
2696 MS1, ME1, MS2, ME2, MS3, ME3 , &
2700 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2701 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2702 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2703 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2705 INTEGER :: i,j,k,n , icurs
2709 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2710 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2711 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2712 outbuf( icurs ) = inbuf( i,j,k )
2719 END SUBROUTINE outbuf_2_patch_r
2721 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
2722 DS1,DE1,DS2,DE2,DS3,DE3,&
2726 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
2727 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2728 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2730 INTEGER :: i,j,k,n , icurs
2733 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2734 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2735 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2736 outbuf( icurs ) = inbuf( i,j,k )
2743 END SUBROUTINE outbuf_2_patch_i
2745 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
2746 DS1,DE1,DS2,DE2,DS3,DE3,&
2750 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
2751 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2752 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2754 INTEGER :: i,j,k,n , icurs
2757 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2758 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2759 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2760 outbuf( icurs ) = inbuf( i,j,k )
2767 END SUBROUTINE outbuf_2_patch_d
2769 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
2770 DS1,DE1,DS2,DE2,DS3,DE3,&
2774 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2775 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2776 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2778 INTEGER :: i,j,k,n , icurs
2781 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2782 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2783 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2784 outbuf( icurs ) = inbuf( i,j,k )
2791 END SUBROUTINE outbuf_2_patch_l
2795 !------------------------------------------------------------------
2797 #if ( EM_CORE == 1 )
2799 !------------------------------------------------------------------
2801 SUBROUTINE force_domain_em_part2 ( grid, ngrid, config_flags &
2803 #include "dummy_new_args.inc"
2806 USE module_state_description
2807 USE module_domain, ONLY : domain, get_ijk_from_grid
2808 USE module_configure, ONLY : grid_config_rec_type
2812 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2813 TYPE(domain), POINTER :: ngrid
2814 #include <dummy_new_decl.inc>
2816 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2817 TYPE (grid_config_rec_type) :: config_flags
2819 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2820 cims, cime, cjms, cjme, ckms, ckme, &
2821 cips, cipe, cjps, cjpe, ckps, ckpe
2822 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2823 nims, nime, njms, njme, nkms, nkme, &
2824 nips, nipe, njps, njpe, nkps, nkpe
2825 INTEGER :: ids, ide, jds, jde, kds, kde, &
2826 ims, ime, jms, jme, kms, kme, &
2827 ips, ipe, jps, jpe, kps, kpe
2829 CALL get_ijk_from_grid ( grid , &
2830 cids, cide, cjds, cjde, ckds, ckde, &
2831 cims, cime, cjms, cjme, ckms, ckme, &
2832 cips, cipe, cjps, cjpe, ckps, ckpe )
2833 CALL get_ijk_from_grid ( ngrid , &
2834 nids, nide, njds, njde, nkds, nkde, &
2835 nims, nime, njms, njme, nkms, nkme, &
2836 nips, nipe, njps, njpe, nkps, nkpe )
2838 nlev = ckde - ckds + 1
2840 #include "nest_interpdown_unpack.inc"
2842 CALL get_ijk_from_grid ( grid , &
2843 ids, ide, jds, jde, kds, kde, &
2844 ims, ime, jms, jme, kms, kme, &
2845 ips, ipe, jps, jpe, kps, kpe )
2847 #include "HALO_FORCE_DOWN.inc"
2849 ! code here to interpolate the data into the nested domain
2850 # include "nest_forcedown_interp.inc"
2853 END SUBROUTINE force_domain_em_part2
2855 !------------------------------------------------------------------
2857 SUBROUTINE interp_domain_em_part1 ( grid, intermediate_grid, ngrid, config_flags &
2859 #include "dummy_new_args.inc"
2862 USE module_state_description
2863 USE module_domain, ONLY : domain, get_ijk_from_grid
2864 USE module_configure, ONLY : grid_config_rec_type
2869 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2870 TYPE(domain), POINTER :: intermediate_grid
2871 TYPE(domain), POINTER :: ngrid
2872 #include <dummy_new_decl.inc>
2874 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2875 INTEGER iparstrt,jparstrt,sw
2876 TYPE (grid_config_rec_type) :: config_flags
2878 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2879 cims, cime, cjms, cjme, ckms, ckme, &
2880 cips, cipe, cjps, cjpe, ckps, ckpe
2881 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
2882 iims, iime, ijms, ijme, ikms, ikme, &
2883 iips, iipe, ijps, ijpe, ikps, ikpe
2884 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2885 nims, nime, njms, njme, nkms, nkme, &
2886 nips, nipe, njps, njpe, nkps, nkpe
2888 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
2889 INTEGER thisdomain_max_halo_width
2890 INTEGER local_comm, myproc, nproc
2892 CALL wrf_get_dm_communicator ( local_comm )
2893 CALL wrf_get_myproc( myproc )
2894 CALL wrf_get_nproc( nproc )
2896 CALL get_ijk_from_grid ( grid , &
2897 cids, cide, cjds, cjde, ckds, ckde, &
2898 cims, cime, cjms, cjme, ckms, ckme, &
2899 cips, cipe, cjps, cjpe, ckps, ckpe )
2900 CALL get_ijk_from_grid ( intermediate_grid , &
2901 iids, iide, ijds, ijde, ikds, ikde, &
2902 iims, iime, ijms, ijme, ikms, ikme, &
2903 iips, iipe, ijps, ijpe, ikps, ikpe )
2904 CALL get_ijk_from_grid ( ngrid , &
2905 nids, nide, njds, njde, nkds, nkde, &
2906 nims, nime, njms, njme, nkms, nkme, &
2907 nips, nipe, njps, njpe, nkps, nkpe )
2909 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
2910 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2911 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2912 CALL nl_get_shw ( intermediate_grid%id, sw )
2913 icoord = iparstrt - sw
2914 jcoord = jparstrt - sw
2915 idim_cd = iide - iids + 1
2916 jdim_cd = ijde - ijds + 1
2918 nlev = ckde - ckds + 1
2920 ! get max_halo_width for the parent. It may be smaller if the parent is the MOAD (mother of all domains)
2921 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
2923 #include "nest_interpdown_pack.inc"
2925 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
2928 END SUBROUTINE interp_domain_em_part1
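! Editor's summary (added): parent-to-nest interpolation is split in two.  In
! part1 the parent/intermediate grid is dereferenced: the nest's location in the
! parent is computed (icoord/jcoord from i/j_parent_start minus shw), the forcing
! data are packed (nest_interpdown_pack.inc), and rsl_lite_bcast_msgs delivers
! them to the tasks owning the nest.  In part2, below, the data are unpacked on
! the nest side, halos are refreshed (HALO_INTERP_DOWN.inc), and the actual
! interpolation is done by nest_interpdown_interp.inc.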
2930 !------------------------------------------------------------------
2932 SUBROUTINE interp_domain_em_part2 ( grid, ngrid, config_flags &
2934 #include "dummy_new_args.inc"
2937 USE module_state_description
2938 USE module_domain, ONLY : domain, get_ijk_from_grid
2939 USE module_configure, ONLY : grid_config_rec_type
2943 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2944 TYPE(domain), POINTER :: ngrid
2945 #include <dummy_new_decl.inc>
2947 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2948 TYPE (grid_config_rec_type) :: config_flags
2950 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2951 cims, cime, cjms, cjme, ckms, ckme, &
2952 cips, cipe, cjps, cjpe, ckps, ckpe
2953 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2954 nims, nime, njms, njme, nkms, nkme, &
2955 nips, nipe, njps, njpe, nkps, nkpe
2956 INTEGER :: ids, ide, jds, jde, kds, kde, &
2957 ims, ime, jms, jme, kms, kme, &
2958 ips, ipe, jps, jpe, kps, kpe
2961 INTEGER thisdomain_max_halo_width
2963 CALL get_ijk_from_grid ( grid , &
2964 cids, cide, cjds, cjde, ckds, ckde, &
2965 cims, cime, cjms, cjme, ckms, ckme, &
2966 cips, cipe, cjps, cjpe, ckps, ckpe )
2967 CALL get_ijk_from_grid ( ngrid , &
2968 nids, nide, njds, njde, nkds, nkde, &
2969 nims, nime, njms, njme, nkms, nkme, &
2970 nips, nipe, njps, njpe, nkps, nkpe )
2972 nlev = ckde - ckds + 1
2974 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
2976 #include "nest_interpdown_unpack.inc"
2978 CALL get_ijk_from_grid ( grid , &
2979 ids, ide, jds, jde, kds, kde, &
2980 ims, ime, jms, jme, kms, kme, &
2981 ips, ipe, jps, jpe, kps, kpe )
2983 #include "HALO_INTERP_DOWN.inc"
2985 # include "nest_interpdown_interp.inc"
2988 END SUBROUTINE interp_domain_em_part2
2990 !------------------------------------------------------------------
2992 SUBROUTINE feedback_nest_prep ( grid, config_flags &
2994 #include "dummy_new_args.inc"
2997 USE module_state_description
2998 USE module_domain, ONLY : domain, get_ijk_from_grid
2999 USE module_configure, ONLY : grid_config_rec_type
3003 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3004    TYPE (grid_config_rec_type)            :: config_flags ! configuration flags; has vertical dim of
3005                                                            ! soil temp, moisture, etc., and vertical dim
3006                                                            ! of soil categories
3007 #include <dummy_new_decl.inc>
3009 INTEGER :: ids, ide, jds, jde, kds, kde, &
3010 ims, ime, jms, jme, kms, kme, &
3011 ips, ipe, jps, jpe, kps, kpe
3013 INTEGER :: idum1, idum2
3016 CALL get_ijk_from_grid ( grid , &
3017 ids, ide, jds, jde, kds, kde, &
3018 ims, ime, jms, jme, kms, kme, &
3019 ips, ipe, jps, jpe, kps, kpe )
3022 #include "HALO_INTERP_UP.inc"
3025 END SUBROUTINE feedback_nest_prep
3027 !------------------------------------------------------------------
3029 SUBROUTINE feedback_domain_em_part1 ( grid, ngrid, config_flags &
3031 #include "dummy_new_args.inc"
3034 USE module_state_description
3035 USE module_domain, ONLY : domain, get_ijk_from_grid
3036 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
3040 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3041 TYPE(domain), POINTER :: ngrid
3042 #include <dummy_new_decl.inc>
3044 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3045 TYPE(domain), POINTER :: xgrid
3046 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3048 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3049 cims, cime, cjms, cjme, ckms, ckme, &
3050 cips, cipe, cjps, cjpe, ckps, ckpe
3051 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3052 nims, nime, njms, njme, nkms, nkme, &
3053 nips, nipe, njps, njpe, nkps, nkpe
3054 INTEGER local_comm, myproc, nproc, idum1, idum2
3055 INTEGER thisdomain_max_halo_width
3058 SUBROUTINE feedback_nest_prep ( grid, config_flags &
3060 #include "dummy_new_args.inc"
3063 USE module_state_description
3064 USE module_domain, ONLY : domain
3065 USE module_configure, ONLY : grid_config_rec_type
3067 TYPE (grid_config_rec_type) :: config_flags
3068 TYPE(domain), TARGET :: grid
3069 #include <dummy_new_decl.inc>
3070 END SUBROUTINE feedback_nest_prep
3074 CALL wrf_get_dm_communicator ( local_comm )
3075 CALL wrf_get_myproc( myproc )
3076 CALL wrf_get_nproc( nproc )
3080 CALL get_ijk_from_grid ( grid , &
3081 cids, cide, cjds, cjde, ckds, ckde, &
3082 cims, cime, cjms, cjme, ckms, ckme, &
3083 cips, cipe, cjps, cjpe, ckps, ckpe )
3085 CALL get_ijk_from_grid ( ngrid , &
3086 nids, nide, njds, njde, nkds, nkde, &
3087 nims, nime, njms, njme, nkms, nkme, &
3088 nips, nipe, njps, njpe, nkps, nkpe )
3090 nlev = ckde - ckds + 1
3092 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
3093 jps_save = ngrid%j_parent_start
3094 ipe_save = ngrid%i_parent_start + (nide-nids+1) / ngrid%parent_grid_ratio - 1
3095 jpe_save = ngrid%j_parent_start + (njde-njds+1) / ngrid%parent_grid_ratio - 1
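! Editor's example (added, illustrative numbers only): with i_parent_start = 10,
! a nest of 31 staggered points in i (nide-nids+1 = 31) and parent_grid_ratio = 3,
! ipe_save = 10 + 31/3 - 1 = 19, so the nest footprint covers parent columns
! 10 through 19 (ten parent cells, each refined into three nest cells).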
3097 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
3098 ! in a separate routine because the HALOs need the data to be dereferenced from the
3099 ! grid data structure and, in this routine, the dereferenced fields are related to
3100 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
3101 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
3102 ! to point to intermediate domain.
3104 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3105 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3109 CALL feedback_nest_prep ( grid, nconfig_flags &
3111 #include "actual_new_args.inc"
3115 ! put things back so grid is intermediate grid
3118 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3120 ! "interp" (basically copy) ngrid onto intermediate grid
3122 #include "nest_feedbackup_interp.inc"
3125 END SUBROUTINE feedback_domain_em_part1
3127 !------------------------------------------------------------------
3129 SUBROUTINE feedback_domain_em_part2 ( grid, intermediate_grid, ngrid , config_flags &
3131 #include "dummy_new_args.inc"
3134 USE module_state_description
3135 USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
3136 USE module_configure, ONLY : grid_config_rec_type, model_config_rec
3142 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3143 TYPE(domain), POINTER :: intermediate_grid
3144 TYPE(domain), POINTER :: ngrid
3146 #include <dummy_new_decl.inc>
3148 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3149 TYPE (grid_config_rec_type) :: config_flags
3151 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3152 cims, cime, cjms, cjme, ckms, ckme, &
3153 cips, cipe, cjps, cjpe, ckps, ckpe
3154 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3155 nims, nime, njms, njme, nkms, nkme, &
3156 nips, nipe, njps, njpe, nkps, nkpe
3157 INTEGER :: ids, ide, jds, jde, kds, kde, &
3158 ims, ime, jms, jme, kms, kme, &
3159 ips, ipe, jps, jpe, kps, kpe
3160 INTEGER icoord, jcoord, idim_cd, jdim_cd
3161 INTEGER local_comm, myproc, nproc
3162 INTEGER iparstrt, jparstrt, sw, thisdomain_max_halo_width
3165 character*256 :: timestr
3168 LOGICAL, EXTERNAL :: cd_feedback_mask
3170 ! On entry to this routine,
3171 ! "grid" refers to the parent domain
3172 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
3173 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
3174 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
3176 ! The way the c and n dimensions are set, below, looks backwards, but from the point
3177 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
3178 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
3179 ! represents the intermediate domain. This backwards appearance should be fixed in the gen_comms.c
3180 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
3181 ! sign that says "DIP" than to fix the dip, at this point it was easier just to write this comment. JM
3185 CALL domain_clock_get( grid, current_timestr=timestr )
3187 CALL get_ijk_from_grid ( intermediate_grid , &
3188 cids, cide, cjds, cjde, ckds, ckde, &
3189 cims, cime, cjms, cjme, ckms, ckme, &
3190 cips, cipe, cjps, cjpe, ckps, ckpe )
3191 CALL get_ijk_from_grid ( grid , &
3192 nids, nide, njds, njde, nkds, nkde, &
3193 nims, nime, njms, njme, nkms, nkme, &
3194 nips, nipe, njps, njpe, nkps, nkpe )
3196 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3197 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3198 CALL nl_get_shw ( intermediate_grid%id, sw )
3199 icoord = iparstrt - sw
3200 jcoord = jparstrt - sw
3201 idim_cd = cide - cids + 1
3202 jdim_cd = cjde - cjds + 1
3204 nlev = ckde - ckds + 1
3206 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
3208 #include "nest_feedbackup_pack.inc"
3210 CALL wrf_get_dm_communicator ( local_comm )
3211 CALL wrf_get_myproc( myproc )
3212 CALL wrf_get_nproc( nproc )
3214 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
3216 #define NEST_INFLUENCE(A,B) A = B
3217 #include "nest_feedbackup_unpack.inc"
3219 ! smooth coarse grid
3220 CALL get_ijk_from_grid ( ngrid, &
3221 nids, nide, njds, njde, nkds, nkde, &
3222 nims, nime, njms, njme, nkms, nkme, &
3223 nips, nipe, njps, njpe, nkps, nkpe )
3224 CALL get_ijk_from_grid ( grid , &
3225 ids, ide, jds, jde, kds, kde, &
3226 ims, ime, jms, jme, kms, kme, &
3227 ips, ipe, jps, jpe, kps, kpe )
3229 #include "HALO_INTERP_UP.inc"
3231 CALL get_ijk_from_grid ( grid , &
3232 cids, cide, cjds, cjde, ckds, ckde, &
3233 cims, cime, cjms, cjme, ckms, ckme, &
3234 cips, cipe, cjps, cjpe, ckps, ckpe )
3236 #include "nest_feedbackup_smooth.inc"
3239 END SUBROUTINE feedback_domain_em_part2
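! Editor's summary (added): feedback (nest-to-parent) mirrors the downward path.
! part1 copies the nest solution onto the intermediate grid
! (nest_feedbackup_interp.inc); part2 packs it, rsl_lite_merge_msgs combines the
! messages on the tasks that own the parent, and the unpack applies them through
! the NEST_INFLUENCE macro -- a straight overwrite (A = B) in this EM path,
! whereas the NMM version further below blends 50/50 using nest_influence = 0.5.
! HALO_INTERP_UP.inc and nest_feedbackup_smooth.inc then smooth the parent field.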
3242 #if ( NMM_CORE == 1 && NMM_NEST == 1 )
3243 !==============================================================================
3244 ! NMM nesting infrastructure extended from EM core. This is gopal's doing.
3245 !==============================================================================
3247 SUBROUTINE interp_domain_nmm_part1 ( grid, intermediate_grid, ngrid, config_flags &
3249 #include "dummy_args.inc"
3252 USE module_state_description
3253 USE module_domain, ONLY : domain, get_ijk_from_grid
3254 USE module_configure, ONLY : grid_config_rec_type
3259 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3260 TYPE(domain), POINTER :: intermediate_grid
3261 TYPE(domain), POINTER :: ngrid
3262 #include <dummy_decl.inc>
3264 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3265 INTEGER iparstrt,jparstrt,sw
3266 TYPE (grid_config_rec_type) :: config_flags
3268 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3269 cims, cime, cjms, cjme, ckms, ckme, &
3270 cips, cipe, cjps, cjpe, ckps, ckpe
3271 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
3272 iims, iime, ijms, ijme, ikms, ikme, &
3273 iips, iipe, ijps, ijpe, ikps, ikpe
3274 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3275 nims, nime, njms, njme, nkms, nkme, &
3276 nips, nipe, njps, njpe, nkps, nkpe
3278 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
3279 INTEGER local_comm, myproc, nproc
3280 INTEGER thisdomain_max_halo_width
3282 CALL wrf_get_dm_communicator ( local_comm )
3283 CALL wrf_get_myproc( myproc )
3284 CALL wrf_get_nproc( nproc )
3287 #include <scalar_derefs.inc>
3289 CALL get_ijk_from_grid ( grid , &
3290 cids, cide, cjds, cjde, ckds, ckde, &
3291 cims, cime, cjms, cjme, ckms, ckme, &
3292 cips, cipe, cjps, cjpe, ckps, ckpe )
3293 CALL get_ijk_from_grid ( intermediate_grid , &
3294 iids, iide, ijds, ijde, ikds, ikde, &
3295 iims, iime, ijms, ijme, ikms, ikme, &
3296 iips, iipe, ijps, ijpe, ikps, ikpe )
3297 CALL get_ijk_from_grid ( ngrid , &
3298 nids, nide, njds, njde, nkds, nkde, &
3299 nims, nime, njms, njme, nkms, nkme, &
3300 nips, nipe, njps, njpe, nkps, nkpe )
3302 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
3303 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3304 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3305 CALL nl_get_shw ( intermediate_grid%id, sw )
3306 icoord = iparstrt - sw
3307 jcoord = jparstrt - sw
3308 idim_cd = iide - iids + 1
3309 jdim_cd = ijde - ijds + 1
3311 nlev = ckde - ckds + 1
3313 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
3314 #include "nest_interpdown_pack.inc"
3316 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
3319 #include <scalar_derefs.inc>
3321 END SUBROUTINE interp_domain_nmm_part1
3323 !------------------------------------------------------------------
3325 SUBROUTINE interp_domain_nmm_part2 ( grid, ngrid, config_flags &
3327 #include "dummy_args.inc"
3330 USE module_state_description
3331 USE module_domain, ONLY : domain, get_ijk_from_grid
3332 USE module_configure, ONLY : grid_config_rec_type
3336 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3337 TYPE(domain), POINTER :: ngrid
3338 #include <dummy_decl.inc>
3340 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3341 TYPE (grid_config_rec_type) :: config_flags
3343 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3344 cims, cime, cjms, cjme, ckms, ckme, &
3345 cips, cipe, cjps, cjpe, ckps, ckpe
3346 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3347 nims, nime, njms, njme, nkms, nkme, &
3348 nips, nipe, njps, njpe, nkps, nkpe
3349 INTEGER :: ids, ide, jds, jde, kds, kde, &
3350 ims, ime, jms, jme, kms, kme, &
3351 ips, ipe, jps, jpe, kps, kpe
3356 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3357 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3358 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3359 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3361 #include "deref_kludge.h"
3364 #include <scalar_derefs.inc>
3365 CALL get_ijk_from_grid ( grid , &
3366 cids, cide, cjds, cjde, ckds, ckde, &
3367 cims, cime, cjms, cjme, ckms, ckme, &
3368 cips, cipe, cjps, cjpe, ckps, ckpe )
3369 CALL get_ijk_from_grid ( ngrid , &
3370 nids, nide, njds, njde, nkds, nkde, &
3371 nims, nime, njms, njme, nkms, nkme, &
3372 nips, nipe, njps, njpe, nkps, nkpe )
3374 nlev = ckde - ckds + 1
3376 #include "nest_interpdown_unpack.inc"
3378 CALL get_ijk_from_grid ( grid , &
3379 ids, ide, jds, jde, kds, kde, &
3380 ims, ime, jms, jme, kms, kme, &
3381 ips, ipe, jps, jpe, kps, kpe )
3383 #include "HALO_NMM_INTERP_DOWN1.inc"
3385 #include "nest_interpdown_interp.inc"
3388 #include <scalar_derefs.inc>
3391 END SUBROUTINE interp_domain_nmm_part2
3393 !------------------------------------------------------------------
3395 SUBROUTINE force_domain_nmm_part1 ( grid, intermediate_grid, config_flags &
3397 #include "dummy_args.inc"
3400 USE module_state_description
3401 USE module_domain, ONLY : domain, get_ijk_from_grid
3402 USE module_configure, ONLY : grid_config_rec_type
3406 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3407 TYPE(domain), POINTER :: intermediate_grid
3408 #include <dummy_decl.inc>
3410 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3411 TYPE (grid_config_rec_type) :: config_flags
3413 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3414 cims, cime, cjms, cjme, ckms, ckme, &
3415 cips, cipe, cjps, cjpe, ckps, ckpe
3416 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3417 nims, nime, njms, njme, nkms, nkme, &
3418 nips, nipe, njps, njpe, nkps, nkpe
3420 #include <scalar_derefs.inc>
3422 CALL get_ijk_from_grid ( grid , &
3423 cids, cide, cjds, cjde, ckds, ckde, &
3424 cims, cime, cjms, cjme, ckms, ckme, &
3425 cips, cipe, cjps, cjpe, ckps, ckpe )
3427 CALL get_ijk_from_grid ( intermediate_grid , &
3428 nids, nide, njds, njde, nkds, nkde, &
3429 nims, nime, njms, njme, nkms, nkme, &
3430 nips, nipe, njps, njpe, nkps, nkpe )
3432 nlev = ckde - ckds + 1
3434 #include "nest_forcedown_pack.inc"
3436 ! WRITE(0,*)'I have completed PACKING of BCs data successfully'
3439 #include <scalar_derefs.inc>
3441 END SUBROUTINE force_domain_nmm_part1
3443 !==============================================================================================
3445 SUBROUTINE force_domain_nmm_part2 ( grid, ngrid, config_flags &
3447 #include "dummy_args.inc"
3450 USE module_state_description
3451 USE module_domain, ONLY : domain, get_ijk_from_grid
3452 USE module_configure, ONLY : grid_config_rec_type
3456 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3457 TYPE(domain), POINTER :: ngrid
3458 #include <dummy_decl.inc>
3460 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3461 TYPE (grid_config_rec_type) :: config_flags
3463 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3464 cims, cime, cjms, cjme, ckms, ckme, &
3465 cips, cipe, cjps, cjpe, ckps, ckpe
3466 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3467 nims, nime, njms, njme, nkms, nkme, &
3468 nips, nipe, njps, njpe, nkps, nkpe
3469 INTEGER :: ids, ide, jds, jde, kds, kde, &
3470 ims, ime, jms, jme, kms, kme, &
3471 ips, ipe, jps, jpe, kps, kpe
3475 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3476 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3477 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3478 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3480 #include "deref_kludge.h"
3483 #include <scalar_derefs.inc>
3485 CALL get_ijk_from_grid ( grid , &
3486 cids, cide, cjds, cjde, ckds, ckde, &
3487 cims, cime, cjms, cjme, ckms, ckme, &
3488 cips, cipe, cjps, cjpe, ckps, ckpe )
3489 CALL get_ijk_from_grid ( ngrid , &
3490 nids, nide, njds, njde, nkds, nkde, &
3491 nims, nime, njms, njme, nkms, nkme, &
3492 nips, nipe, njps, njpe, nkps, nkpe )
3494 nlev = ckde - ckds + 1
3496 #include "nest_interpdown_unpack.inc"
3498 CALL get_ijk_from_grid ( grid , &
3499 ids, ide, jds, jde, kds, kde, &
3500 ims, ime, jms, jme, kms, kme, &
3501 ips, ipe, jps, jpe, kps, kpe )
3503 #include "HALO_NMM_FORCE_DOWN1.inc"
3505 ! code here to interpolate the data into the nested domain
3506 #include "nest_forcedown_interp.inc"
3509 #include <scalar_derefs.inc>
3512 END SUBROUTINE force_domain_nmm_part2
3514 !================================================================================
3516 ! This routine exists only to call a halo on a domain (the nest); it
3517 ! gets called from feedback_domain_nmm_part1, below. This is needed
3518 ! because the halo code expects the fields being exchanged to have
3519 ! been dereferenced from the grid data structure, but in feedback_domain_nmm_part1
3520 ! the grid data structure points to the coarse domain, not the nest.
3521 ! And we want the halo exchange on the nest, so that the code in
3522 ! em_nest_feedbackup_interp.inc will work correctly on multiple processors. JM 20040308
3525 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3527 #include "dummy_args.inc"
3530 USE module_state_description
3531 USE module_domain, ONLY : domain, get_ijk_from_grid
3532 USE module_configure, ONLY : grid_config_rec_type
3536 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3537    TYPE (grid_config_rec_type)            :: config_flags ! configuration flags; has vertical dim of
3538                                                            ! soil temp, moisture, etc., and vertical dim
3539                                                            ! of soil categories
3540 #include <dummy_decl.inc>
3542 INTEGER :: ids, ide, jds, jde, kds, kde, &
3543 ims, ime, jms, jme, kms, kme, &
3544 ips, ipe, jps, jpe, kps, kpe
3546 INTEGER :: idum1, idum2
3550 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3551 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3552 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3553 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3555 #include "deref_kludge.h"
3558 #include <scalar_derefs.inc>
3560 CALL get_ijk_from_grid ( grid , &
3561 ids, ide, jds, jde, kds, kde, &
3562 ims, ime, jms, jme, kms, kme, &
3563 ips, ipe, jps, jpe, kps, kpe )
3566 #include "HALO_NMM_WEIGHTS.inc"
3570 #include <scalar_derefs.inc>
3572 END SUBROUTINE feedback_nest_prep_nmm
3574 !------------------------------------------------------------------
3576 SUBROUTINE feedback_domain_nmm_part1 ( grid, ngrid, config_flags &
3578 #include "dummy_args.inc"
3581 USE module_state_description
3582 USE module_domain, ONLY : domain, get_ijk_from_grid
3583 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
3587 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3588 TYPE(domain), POINTER :: ngrid
3589 #include <dummy_decl.inc>
3591 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3592 TYPE(domain), POINTER :: xgrid
3593 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3595 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3596 cims, cime, cjms, cjme, ckms, ckme, &
3597 cips, cipe, cjps, cjpe, ckps, ckpe
3598 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3599 nims, nime, njms, njme, nkms, nkme, &
3600 nips, nipe, njps, njpe, nkps, nkpe
3601 INTEGER local_comm, myproc, nproc, idum1, idum2
3604 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3605 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3606 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3607 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3611 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3613 #include "dummy_args.inc"
3616 USE module_state_description
3617 USE module_domain, ONLY : domain
3618 USE module_configure, ONLY : grid_config_rec_type
3620 TYPE (grid_config_rec_type) :: config_flags
3621 TYPE(domain), TARGET :: grid
3622 #include <dummy_decl.inc>
3623 END SUBROUTINE feedback_nest_prep_nmm
3627 #include <scalar_derefs.inc>
3629 CALL wrf_get_dm_communicator ( local_comm )
3630 CALL wrf_get_myproc( myproc )
3631 CALL wrf_get_nproc( nproc )
3636 CALL get_ijk_from_grid ( grid , &
3637 cids, cide, cjds, cjde, ckds, ckde, &
3638 cims, cime, cjms, cjme, ckms, ckme, &
3639 cips, cipe, cjps, cjpe, ckps, ckpe )
3641 CALL get_ijk_from_grid ( ngrid , &
3642 nids, nide, njds, njde, nkds, nkde, &
3643 nims, nime, njms, njme, nkms, nkme, &
3644 nips, nipe, njps, njpe, nkps, nkpe )
3646 nlev = ckde - ckds + 1
3648 ips_save = ngrid%i_parent_start ! +1 not used in ipe_save & jpe_save
3649 jps_save = ngrid%j_parent_start ! because of one extra namelist point
3650 ipe_save = ngrid%i_parent_start + (nide-nids) / ngrid%parent_grid_ratio
3651 jpe_save = ngrid%j_parent_start + (njde-njds) / ngrid%parent_grid_ratio
3653 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
3654 ! in a separate routine because the HALOs need the data to be dereferenced from the
3655 ! grid data structure and, in this routine, the dereferenced fields are related to
3656 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
3657 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
3658 ! to point to intermediate domain.
3660 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3661 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3664 #include "deref_kludge.h"
3665 CALL feedback_nest_prep_nmm ( grid, config_flags &
3667 #include "actual_args.inc"
3671 ! put things back so grid is intermediate grid
3674 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3676 ! "interp" (basically copy) ngrid onto intermediate grid
3678 #include "nest_feedbackup_interp.inc"
3681 #include <scalar_derefs.inc>
3683 END SUBROUTINE feedback_domain_nmm_part1
3685 !------------------------------------------------------------------
3687 SUBROUTINE feedback_domain_nmm_part2 ( grid, intermediate_grid, ngrid , config_flags &
3689 #include "dummy_args.inc"
3692 USE module_state_description
3693 USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
3694 USE module_configure, ONLY : grid_config_rec_type
3700 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3701 TYPE(domain), POINTER :: intermediate_grid
3702 TYPE(domain), POINTER :: ngrid
3704 #include <dummy_decl.inc>
3706 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3707 TYPE (grid_config_rec_type) :: config_flags
3709 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3710 cims, cime, cjms, cjme, ckms, ckme, &
3711 cips, cipe, cjps, cjpe, ckps, ckpe
3712 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3713 nims, nime, njms, njme, nkms, nkme, &
3714 nips, nipe, njps, njpe, nkps, nkpe
3715 INTEGER :: ids, ide, jds, jde, kds, kde, &
3716 ims, ime, jms, jme, kms, kme, &
3717 ips, ipe, jps, jpe, kps, kpe
3718 INTEGER icoord, jcoord, idim_cd, jdim_cd
3719 INTEGER local_comm, myproc, nproc
3720 INTEGER iparstrt, jparstrt, sw
3721 INTEGER thisdomain_max_halo_width
3723 character*256 :: timestr
3727 LOGICAL, EXTERNAL :: cd_feedback_mask
3728 LOGICAL, EXTERNAL :: cd_feedback_mask_v
3731 #include <scalar_derefs.inc>
3733 ! On entry to this routine,
3734 ! "grid" refers to the parent domain
3735 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
3736 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
3737 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
3739 ! The way the c and n dimensions are set, below, looks backwards, but from the point
3740 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
3741 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
3742 ! represents the intermediate domain. This backwards appearance should be fixed in the gen_comms.c
3743 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
3744 ! sign that says "DIP" than to fix the dip, at this point it was easier just to write this comment. JM
3747 nest_influence = 0.5
3748 #define NEST_INFLUENCE(A,B) A = nest_influence*(B) + (1.0-nest_influence)*(A)
3751 CALL domain_clock_get( grid, current_timestr=timestr )
3753 CALL get_ijk_from_grid ( intermediate_grid , &
3754 cids, cide, cjds, cjde, ckds, ckde, &
3755 cims, cime, cjms, cjme, ckms, ckme, &
3756 cips, cipe, cjps, cjpe, ckps, ckpe )
3757 CALL get_ijk_from_grid ( grid , &
3758 nids, nide, njds, njde, nkds, nkde, &
3759 nims, nime, njms, njme, nkms, nkme, &
3760 nips, nipe, njps, njpe, nkps, nkpe )
3762 nide = nide - 1 !dusan
3763 njde = njde - 1 !dusan
3765 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3766 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3767 CALL nl_get_shw ( intermediate_grid%id, sw )
3768 icoord = iparstrt - sw
3769 jcoord = jparstrt - sw
3770 idim_cd = cide - cids + 1
3771 jdim_cd = cjde - cjds + 1
3773 nlev = ckde - ckds + 1
3775 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
3776 #include "nest_feedbackup_pack.inc"
3778 CALL wrf_get_dm_communicator ( local_comm )
3779 CALL wrf_get_myproc( myproc )
3780 CALL wrf_get_nproc( nproc )
3782 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
3784 #include "nest_feedbackup_unpack.inc"
3787 ! smooth coarse grid
3789 CALL get_ijk_from_grid ( ngrid, &
3790 nids, nide, njds, njde, nkds, nkde, &
3791 nims, nime, njms, njme, nkms, nkme, &
3792 nips, nipe, njps, njpe, nkps, nkpe )
3793 CALL get_ijk_from_grid ( grid , &
3794 ids, ide, jds, jde, kds, kde, &
3795 ims, ime, jms, jme, kms, kme, &
3796 ips, ipe, jps, jpe, kps, kpe )
3798 #include "HALO_INTERP_UP.inc"
3800 CALL get_ijk_from_grid ( grid , &
3801 cids, cide, cjds, cjde, ckds, ckde, &
3802 cims, cime, cjms, cjme, ckms, ckme, &
3803 cips, cipe, cjps, cjpe, ckps, ckpe )
3805 #include "nest_feedbackup_smooth.inc"
3808 #include <scalar_derefs.inc>
3810 END SUBROUTINE feedback_domain_nmm_part2
3812 !=================================================================================
3813 ! End of gopal's doing
3814 !=================================================================================
3817 !------------------------------------------------------------------
3819 SUBROUTINE wrf_gatherv_real (Field, field_ofst, &
3820 my_count , & ! sendcount
3821 globbuf, glob_ofst , & ! recvbuf
3822 counts , & ! recvcounts
3825 communicator , & ! communicator
3827 USE module_dm, ONLY : getrealmpitype
3829 INTEGER field_ofst, glob_ofst
3830 INTEGER my_count, communicator, root, ierr
3831 INTEGER , DIMENSION(*) :: counts, displs
3832 REAL, DIMENSION(*) :: Field, globbuf
3836 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3837 my_count , & ! sendcount
3838 getrealmpitype() , & ! sendtype
3839 globbuf( glob_ofst ) , & ! recvbuf
3840 counts , & ! recvcounts
3842 getrealmpitype() , & ! recvtype
3844 communicator , & ! communicator
3848 END SUBROUTINE wrf_gatherv_real
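! Editor's sketch (added; not part of the original source): a minimal caller of
! wrf_gatherv_real that gathers one REAL from every task onto task 0.  The
! subroutine name is hypothetical, and the trailing dummy arguments of
! wrf_gatherv_real (displs, root, communicator, ierr) are assumed from the
! declarations and the mpi_gatherv call above.
      SUBROUTINE example_gather_one_per_task ( myval, allvals, ntasks_loc, communicator )
         IMPLICIT NONE
         REAL, INTENT(IN)    :: myval                       ! this task's contribution
         INTEGER, INTENT(IN) :: ntasks_loc, communicator    ! number of tasks, MPI communicator
         REAL, INTENT(OUT)   :: allvals( ntasks_loc )       ! gathered values, valid on root only
         REAL                :: sendbuf(1)
         INTEGER             :: counts( ntasks_loc ), displs( ntasks_loc ), n, ierr
         sendbuf(1) = myval
         DO n = 1, ntasks_loc
            counts(n) = 1                                   ! every task contributes one element
            displs(n) = n - 1                               ! packed contiguously in rank order
         END DO
         CALL wrf_gatherv_real ( sendbuf, 1,         &      ! send buffer and 1-based offset into it
                                 1,                  &      ! sendcount
                                 allvals, 1,         &      ! receive buffer and offset (root only)
                                 counts,             &      ! recvcounts
                                 displs,             &      ! displacements
                                 0,                  &      ! root task
                                 communicator, ierr )
      END SUBROUTINE example_gather_one_per_task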
3850 SUBROUTINE wrf_gatherv_double (Field, field_ofst, &
3851 my_count , & ! sendcount
3852 globbuf, glob_ofst , & ! recvbuf
3853 counts , & ! recvcounts
3856 communicator , & ! communicator
3860 INTEGER field_ofst, glob_ofst
3861 INTEGER my_count, communicator, root, ierr
3862 INTEGER , DIMENSION(*) :: counts, displs
3863 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3864 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3865 ! for having this separate routine is so we pass the correct MPI type to mpi_gatherv
3866 ! if we were not indexing the globbuf and Field arrays it would not even matter
3867 REAL, DIMENSION(*) :: Field, globbuf
3871 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3872 my_count , & ! sendcount
3873 MPI_DOUBLE_PRECISION , & ! sendtype
3874 globbuf( glob_ofst ) , & ! recvbuf
3875 counts , & ! recvcounts
3877 MPI_DOUBLE_PRECISION , & ! recvtype
3879 communicator , & ! communicator
3883 END SUBROUTINE wrf_gatherv_double
3885 SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
3886 my_count , & ! sendcount
3887 globbuf, glob_ofst , & ! recvbuf
3888 counts , & ! recvcounts
3891 communicator , & ! communicator
3894 INTEGER field_ofst, glob_ofst
3895 INTEGER my_count, communicator, root, ierr
3896 INTEGER , DIMENSION(*) :: counts, displs
3897 INTEGER, DIMENSION(*) :: Field, globbuf
3901 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3902 my_count , & ! sendcount
3903 MPI_INTEGER , & ! sendtype
3904 globbuf( glob_ofst ) , & ! recvbuf
3905 counts , & ! recvcounts
3907 MPI_INTEGER , & ! recvtype
3909 communicator , & ! communicator
3913 END SUBROUTINE wrf_gatherv_integer
3916 SUBROUTINE wrf_scatterv_real ( &
3917 globbuf, glob_ofst , & ! recvbuf
3918 counts , & ! recvcounts
3919 Field, field_ofst, &
3920 my_count , & ! sendcount
3923 communicator , & ! communicator
3925 USE module_dm, ONLY : getrealmpitype
3927 INTEGER field_ofst, glob_ofst
3928 INTEGER my_count, communicator, root, ierr
3929 INTEGER , DIMENSION(*) :: counts, displs
3930 REAL, DIMENSION(*) :: Field, globbuf
3934 CALL mpi_scatterv( &
3935 globbuf( glob_ofst ) , & ! recvbuf
3936 counts , & ! recvcounts
3938 getrealmpitype() , & ! recvtype
3939 Field( field_ofst ), & ! sendbuf
3940 my_count , & ! sendcount
3941 getrealmpitype() , & ! sendtype
3943 communicator , & ! communicator
3947 END SUBROUTINE wrf_scatterv_real
3949 SUBROUTINE wrf_scatterv_double ( &
3950 globbuf, glob_ofst , & ! recvbuf
3951 counts , & ! recvcounts
3952 Field, field_ofst, &
3953 my_count , & ! sendcount
3956 communicator , & ! communicator
3960 INTEGER field_ofst, glob_ofst
3961 INTEGER my_count, communicator, root, ierr
3962 INTEGER , DIMENSION(*) :: counts, displs
3963 REAL, DIMENSION(*) :: Field, globbuf
3966 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
3967 ! to double precision by the compiler when WRF is compiled for 8 byte reals. Only reason
3968 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv
3969 ! if we were not indexing the globbuf and Field arrays it would not even matter
3971 CALL mpi_scatterv( &
3972 globbuf( glob_ofst ) , & ! recvbuf
3973 counts , & ! recvcounts
3975 MPI_DOUBLE_PRECISION , & ! recvtype
3976 Field( field_ofst ), & ! sendbuf
3977 my_count , & ! sendcount
3978 MPI_DOUBLE_PRECISION , & ! sendtype
3980 communicator , & ! communicator
3984 END SUBROUTINE wrf_scatterv_double
3986 SUBROUTINE wrf_scatterv_integer ( &
3987 globbuf, glob_ofst , & ! recvbuf
3988 counts , & ! recvcounts
3989 Field, field_ofst, &
3990 my_count , & ! sendcount
3993 communicator , & ! communicator
3996 INTEGER field_ofst, glob_ofst
3997 INTEGER my_count, communicator, root, ierr
3998 INTEGER , DIMENSION(*) :: counts, displs
3999 INTEGER, DIMENSION(*) :: Field, globbuf
4003 CALL mpi_scatterv( &
4004 globbuf( glob_ofst ) , & ! recvbuf
4005 counts , & ! recvcounts
4007 MPI_INTEGER , & ! recvtype
4008 Field( field_ofst ), & ! sendbuf
4009 my_count , & ! sendcount
4010 MPI_INTEGER , & ! sendtype
4012 communicator , & ! communicator
4016 END SUBROUTINE wrf_scatterv_integer
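! Editor's note (added): the wrf_scatterv_* wrappers are the inverse of the
! wrf_gatherv_* wrappers above: on the root task, counts(n) elements of globbuf,
! beginning displs(n) elements past globbuf(glob_ofst), are sent to task n, and
! every task receives my_count elements into Field starting at Field(field_ofst).
! As with MPI_Scatterv itself, counts and displs need only be meaningful on root.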
4017 ! end new stuff 20070124
4019 SUBROUTINE wrf_dm_define_comms ( grid )
4020 USE module_domain, ONLY : domain
4022 TYPE(domain) , INTENT (INOUT) :: grid
4024 END SUBROUTINE wrf_dm_define_comms
4026 SUBROUTINE tfp_message( fname, lno )
4031 WRITE(mess,*)'tfp_message: ',trim(fname),lno
4032 CALL wrf_message(mess)
4033 # ifdef ALLOW_OVERDECOMP
4034 CALL task_for_point_message ! defined in RSL_LITE/task_for_point.c
4036 CALL wrf_error_fatal(mess)
4039 END SUBROUTINE tfp_message
4041 SUBROUTINE set_dm_debug
4044 dm_debug_flag = .TRUE.
4045 END SUBROUTINE set_dm_debug
4046 SUBROUTINE reset_dm_debug
4049 dm_debug_flag = .FALSE.
4050 END SUBROUTINE reset_dm_debug
4051 SUBROUTINE get_dm_debug ( arg )
4056 END SUBROUTINE get_dm_debug