USE module_driver_constants
#if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
INTEGER, PARAMETER :: max_halo_width = 6
INTEGER, PARAMETER :: max_halo_width = 6 ! 5
INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
LOGICAL :: dm_debug_flag = .FALSE.

INTERFACE wrf_dm_maxval
#if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision

INTERFACE wrf_dm_minval ! gopal's doing
#if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
IF ( MOD( P, M ) .EQ. 0 ) THEN
IF ( ABS(M-N) .LT. MINI &
     .AND. M .GE. PROCMIN_M &
     .AND. N .GE. PROCMIN_N &
IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
CALL wrf_message ( TRIM ( wrf_err_message ) )
WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
CALL wrf_message ( TRIM ( wrf_err_message ) )
WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
CALL wrf_message ( TRIM ( wrf_err_message ) )
WRITE( wrf_err_message , * )' P ', P
CALL wrf_message ( TRIM ( wrf_err_message ) )
WRITE( wrf_err_message , * )' MINM ', MINM
CALL wrf_message ( TRIM ( wrf_err_message ) )
WRITE( wrf_err_message , * )' MINN ', MINN
CALL wrf_message ( TRIM ( wrf_err_message ) )
CALL wrf_error_fatal ( 'module_dm: mpaspect' )
END SUBROUTINE MPASPECT
SUBROUTINE compute_mesh( ntasks , ntasks_x, ntasks_y )
INTEGER, INTENT(IN) :: ntasks
INTEGER, INTENT(OUT) :: ntasks_x, ntasks_y
CALL nl_get_nproc_x ( 1, ntasks_x )
CALL nl_get_nproc_y ( 1, ntasks_y )
! check whether the user has specified the mesh in the namelist
IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
! if only ntasks_x is specified then make it a 1-d decomposition in i
IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
ntasks_y = ntasks / ntasks_x
! if only ntasks_y is specified then make it a 1-d decomposition in j
ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
ntasks_x = ntasks / ntasks_y
! make sure the user knows what they're doing
IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist does not equal ',ntasks
CALL wrf_error_fatal ( wrf_err_message )
! When neither is specified, work out the mesh with MPASPECT.
! Pass minimums of 1 for each dimension so that the number of procs
! in the i-dimension (ntasks_x) comes out equal to or smaller than in j.
CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
END SUBROUTINE compute_mesh
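
! Illustrative sketch (added for exposition; not part of the original WRF
! source). The subroutine name demo_compute_mesh is hypothetical and assumes
! the surrounding module context; it shows the mesh choices described above.
SUBROUTINE demo_compute_mesh
IMPLICIT NONE
INTEGER :: nx, ny
! With 12 tasks and no minimum constraints, MPASPECT chooses the
! near-square factorization with the smaller count in the i-dimension:
CALL mpaspect( 12, nx, ny, 1, 1 )   ! expected: nx = 3, ny = 4
! If the namelist instead sets nproc_x = 4 and leaves nproc_y = -1,
! compute_mesh derives ntasks_y = 12 / 4 = 3, a user-directed 4 x 3 mesh.
END SUBROUTINE demo_compute_mesh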
SUBROUTINE wrf_dm_initialize
INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
INTEGER, DIMENSION(2) :: dims, coords
LOGICAL, DIMENSION(2) :: isperiodic
LOGICAL :: reorder_mesh
CALL wrf_get_dm_communicator ( local_comm )
CALL mpi_comm_size( local_comm, ntasks, ierr )
CALL nl_get_reorder_mesh( 1, reorder_mesh )
CALL compute_mesh( ntasks, ntasks_x, ntasks_y )
WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
CALL wrf_message( wrf_err_message )
CALL mpi_comm_rank( local_comm, mytask, ierr )
! extra code to reorder the communicator 20051212jm
IF ( reorder_mesh ) THEN
ALLOCATE (ranks(ntasks))
CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
CALL mpi_comm_group ( local_comm2, group, ierr )
ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
new_local_comm = local_comm
! end extra code to reorder the communicator 20051212jm
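! Worked example of the reordering above (added for exposition): with
! ntasks_x = 3 and ntasks_y = 2, old rank p maps to mod(p,3)*2 + p/3, so
! the included ranks are listed in the order 0, 2, 4, 1, 3, 5; tasks that
! were consecutive in the original rank order become consecutive down a
! column of the mesh rather than across a row.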
dims(1) = ntasks_y ! rows
dims(2) = ntasks_x ! columns
isperiodic(1) = .false.
isperiodic(2) = .false.
CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
dims(1) = ntasks_y ! rows
dims(2) = ntasks_x ! columns
isperiodic(1) = .true.
isperiodic(2) = .true.
CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
! write(0,*)'periodic coords ',mytask, coords
CALL mpi_comm_rank( local_communicator, mytask, ierr )
CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
! write(0,*)'non periodic coords ',mytask, coords
mytask_x = coords(2) ! col task (x)
mytask_y = coords(1) ! row task (y)
CALL nl_set_nproc_x ( 1, ntasks_x )
CALL nl_set_nproc_y ( 1, ntasks_y )
! 20061228 set up subcommunicators for processors in X, Y coords of mesh
! note that local_comm_x has all the processors in a row (X=0:nproc_x-1);
! in other words, local_comm_x has all the processes with the same rank in Y
CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
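! Worked example of the two splits above (added for exposition): on a
! 3 x 2 mesh (ntasks_x = 3, ntasks_y = 2), the color mytask_y gathers the
! three tasks of each row into one local_communicator_x, and the color
! mytask_x gathers the two tasks of each column into one
! local_communicator_y; the key argument mytask preserves the original
! rank ordering within each subcommunicator.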
CALL wrf_set_dm_communicator ( local_communicator )
END SUBROUTINE wrf_dm_initialize

SUBROUTINE get_dm_max_halo_width( id, width )
INTEGER, INTENT(IN) :: id
INTEGER, INTENT(OUT) :: width
IF ( id .EQ. 1 ) THEN ! this is the coarse domain
width = max_halo_width
width = max_halo_width + 3
END SUBROUTINE get_dm_max_halo_width
SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
   sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
   sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
   sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
   sp1x , ep1x , sm1x , em1x , &
   sp2x , ep2x , sm2x , em2x , &
   sp3x , ep3x , sm3x , em3x , &
   sp1y , ep1y , sm1y , em1y , &
   sp2y , ep2y , sm2y , em2y , &
   sp3y , ep3y , sm3y , em3y , &
USE module_domain, ONLY : domain, head_grid, find_grid_by_id
INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
   sm1 , em1 , sm2 , em2 , sm3 , em3
INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
   sm1x , em1x , sm2x , em2x , sm3x , em3x
INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
   sm1y , em1y , sm2y , em2y , sm3y , em3y
INTEGER, INTENT(IN) :: id, parent_id
TYPE(domain),POINTER :: parent

INTEGER :: ids, ide, jds, jde, kds, kde
INTEGER :: ims, ime, jms, jme, kms, kme
INTEGER :: ips, ipe, jps, jpe, kps, kpe
INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
   c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
   c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
   c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
INTEGER :: idim , jdim , kdim , rem , a, b
INTEGER :: i, j, ni, nj, Px, Py, P
INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
INTEGER :: idim_cd, jdim_cd, ierr
TYPE(domain), POINTER :: intermediate_grid
TYPE(domain), POINTER :: nest_grid
CHARACTER*256 :: mess
INTEGER parent_max_halo_width
INTEGER thisdomain_max_halo_width
SELECT CASE ( model_data_order )
! need to finish other cases
CASE ( DATA_ORDER_ZXY )
ids = sd2 ; ide = ed2
jds = sd3 ; jde = ed3
kds = sd1 ; kde = ed1
CASE ( DATA_ORDER_XYZ )
ids = sd1 ; ide = ed1
jds = sd2 ; jde = ed2
kds = sd3 ; kde = ed3
CASE ( DATA_ORDER_XZY )
ids = sd1 ; ide = ed1
jds = sd3 ; jde = ed3
kds = sd2 ; kde = ed2
CASE ( DATA_ORDER_YXZ)
ids = sd2 ; ide = ed2
jds = sd1 ; jde = ed1
kds = sd3 ; kde = ed3
CALL nl_get_max_dom( 1 , max_dom )
CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
IF ( id .GT. 1 ) THEN
CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
   ids, ide, jds, jde, kds, kde, &
   ims, ime, jms, jme, kms, kme, &
   imsx, imex, jmsx, jmex, kmsx, kmex, &
   imsy, imey, jmsy, jmey, kmsy, kmey, &
   ips, ipe, jps, jpe, kps, kpe, &
   ipsx, ipex, jpsx, jpex, kpsx, kpex, &
   ipsy, ipey, jpsy, jpey, kpsy, kpey )
! Ensure that every parent domain point has a full set of nested points under it,
! even at the borders, by making the number of nest points a multiple of
! the nesting ratio. Note that this matters mostly for the intermediate domain,
! which is the subject of the scatter/gather communications with the parent.
IF ( id .GT. 1 ) THEN
CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
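! Worked example of the rounding above (added for exposition): with
! parent_grid_ratio = 3 and ime = 101, mod(101,3) = 2, so ime becomes
! 101 + 3 - 2 = 102, the next multiple of the nesting ratio.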
SELECT CASE ( model_data_order )
CASE ( DATA_ORDER_ZXY )
sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
CASE ( DATA_ORDER_ZYX )
sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
CASE ( DATA_ORDER_XYZ )
sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
CASE ( DATA_ORDER_YXZ)
sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
CASE ( DATA_ORDER_XZY )
sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
CASE ( DATA_ORDER_YZX )
sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
WRITE(wrf_err_message,*)'*************************************'
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'Parent domain'
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'*************************************'
CALL wrf_message( TRIM(wrf_err_message) )
IF ( id .GT. 1 ) THEN
CALL nl_get_shw( id, shw )
CALL nl_get_i_parent_start( id , i_parent_start )
CALL nl_get_j_parent_start( id , j_parent_start )
CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
SELECT CASE ( model_data_order )
CASE ( DATA_ORDER_ZXY )
c_kds = sd1 ; c_kde = ed1
CASE ( DATA_ORDER_ZYX )
c_kds = sd1 ; c_kde = ed1
CASE ( DATA_ORDER_XYZ )
c_kds = sd3 ; c_kde = ed3
CASE ( DATA_ORDER_YXZ)
c_kds = sd3 ; c_kde = ed3
CASE ( DATA_ORDER_XZY )
c_kds = sd2 ; c_kde = ed2
CASE ( DATA_ORDER_YZX )
c_kds = sd2 ; c_kde = ed2
idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
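! Worked example of the intermediate-domain sizing above (added for
! exposition): for a nest of i-extent idim = 99 with parent_grid_ratio = 3
! and stencil half-width shw = 2, idim_cd = 99/3 + 1 + 2*2 + 1 = 39 coarse
! points, and the intermediate domain starts shw = 2 coarse points before
! i_parent_start.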
! We want the intermediate domain to be decomposed the same as the
! underlying nest. So try this:
nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1
ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1
CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
IF ( Px .EQ. mytask_x ) THEN
IF ( c_ips .EQ. -1 ) c_ips = i
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
IF (c_ips .EQ. -1 ) THEN
ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1
nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1
CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
IF ( Py .EQ. mytask_y ) THEN
IF ( c_jps .EQ. -1 ) c_jps = j
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
IF (c_jps .EQ. -1 ) THEN
IF ( c_ips <= c_ipe ) THEN
! extend the patch dimensions out shw along edges of domain
IF ( mytask_x .EQ. 0 ) THEN
IF ( mytask_x .EQ. ntasks_x-1 ) THEN
c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
IF ( c_jps <= c_jpe ) THEN
! extend the patch dimensions out shw along edges of domain
IF ( mytask_y .EQ. 0 ) THEN
IF ( mytask_y .EQ. ntasks_y-1 ) THEN
c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdy ) - 1
c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdy ) + 1
WRITE(wrf_err_message,*)'*************************************'
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'Nesting domain'
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'INTERMEDIATE domain'
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
CALL wrf_message( TRIM(wrf_err_message) )
WRITE(wrf_err_message,*)'*************************************'
CALL wrf_message( TRIM(wrf_err_message) )
SELECT CASE ( model_data_order )
CASE ( DATA_ORDER_ZXY )
c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
CASE ( DATA_ORDER_ZYX )
c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
CASE ( DATA_ORDER_XYZ )
c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
CASE ( DATA_ORDER_YXZ)
c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
CASE ( DATA_ORDER_XZY )
c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
CASE ( DATA_ORDER_YZX )
c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
ALLOCATE ( intermediate_grid )
ALLOCATE ( intermediate_grid%parents( max_parents ) )
ALLOCATE ( intermediate_grid%nests( max_nests ) )
intermediate_grid%allocated=.false.
NULLIFY( intermediate_grid%sibling )
NULLIFY( intermediate_grid%nests(i)%ptr )
NULLIFY (intermediate_grid%next)
NULLIFY (intermediate_grid%same_level)
NULLIFY (intermediate_grid%i_start)
NULLIFY (intermediate_grid%j_start)
NULLIFY (intermediate_grid%i_end)
NULLIFY (intermediate_grid%j_end)
intermediate_grid%id = id ! these must be the same; other parts of the code depend on it (see gen_comms.c)
intermediate_grid%num_nests = 0
intermediate_grid%num_siblings = 0
intermediate_grid%num_parents = 1
intermediate_grid%max_tiles = 0
intermediate_grid%num_tiles_spec = 0
CALL find_grid_by_id ( id, head_grid, nest_grid )
nest_grid%intermediate_grid => intermediate_grid ! the nest grid now has a pointer to the intermediate grid
intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers the nest its parent
intermediate_grid%num_parents = 1
591 SELECT CASE ( model_data_order )
592 CASE ( DATA_ORDER_ZXY )
593 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd33
594 intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd33
595 CASE ( DATA_ORDER_ZYX )
596 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd32
597 intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd32
598 CASE ( DATA_ORDER_XYZ )
599 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd32
600 intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd32
601 CASE ( DATA_ORDER_YXZ)
602 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd31
603 intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%sd31
604 CASE ( DATA_ORDER_XZY )
605 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd33
606 intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%sd33
607 CASE ( DATA_ORDER_YZX )
608 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd31
609 intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%sd31
611 intermediate_grid%nids = ids
612 intermediate_grid%nide = ide
613 intermediate_grid%njds = jds
614 intermediate_grid%njde = jde
c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
intermediate_grid%sm31x = c_sm1x
intermediate_grid%em31x = c_em1x
intermediate_grid%sm32x = c_sm2x
intermediate_grid%em32x = c_em2x
intermediate_grid%sm33x = c_sm3x
intermediate_grid%em33x = c_em3x
intermediate_grid%sm31y = c_sm1y
intermediate_grid%em31y = c_em1y
intermediate_grid%sm32y = c_sm2y
intermediate_grid%em32y = c_em2y
intermediate_grid%sm33y = c_sm3y
intermediate_grid%em33y = c_em3y
#if defined(SGIALTIX) && (! defined(MOVE_NESTS) )
! allocate space for the intermediate domain
CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., & ! use same id as nest
   c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
   c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
   c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &
   c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y, &
   c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-transpose
   c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-transpose
intermediate_grid%sd31 = c_sd1
intermediate_grid%ed31 = c_ed1
intermediate_grid%sp31 = c_sp1
intermediate_grid%ep31 = c_ep1
intermediate_grid%sm31 = c_sm1
intermediate_grid%em31 = c_em1
intermediate_grid%sd32 = c_sd2
intermediate_grid%ed32 = c_ed2
intermediate_grid%sp32 = c_sp2
intermediate_grid%ep32 = c_ep2
intermediate_grid%sm32 = c_sm2
intermediate_grid%em32 = c_em2
intermediate_grid%sd33 = c_sd3
intermediate_grid%ed33 = c_ed3
intermediate_grid%sp33 = c_sp3
intermediate_grid%ep33 = c_ep3
intermediate_grid%sm33 = c_sm3
intermediate_grid%em33 = c_em3
CALL med_add_config_info_to_grid ( intermediate_grid )
intermediate_grid%dx = parent%dx
intermediate_grid%dy = parent%dy
intermediate_grid%dt = parent%dt
END SUBROUTINE patch_domain_rsl_lite
SUBROUTINE compute_memory_dims_rsl_lite ( &
   id , maxhalowidth , &
   ids, ide, jds, jde, kds, kde, &
   ims, ime, jms, jme, kms, kme, &
   imsx, imex, jmsx, jmex, kmsx, kmex, &
   imsy, imey, jmsy, jmey, kmsy, kmey, &
   ips, ipe, jps, jpe, kps, kpe, &
   ipsx, ipex, jpsx, jpex, kpsx, kpex, &
   ipsy, ipey, jpsy, jpey, kpsy, kpey )
INTEGER, INTENT(IN) :: id , maxhalowidth
INTEGER, INTENT(IN) :: shw, bdx, bdy
INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
INTEGER Px, Py, P, i, j, k, ierr
#if ( ! NMM_CORE == 1 )
CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
IF ( Px .EQ. mytask_x ) THEN
IF ( ips .EQ. -1 ) ips = i
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
! handle setting the memory dimensions where there are no X elements assigned to this proc
IF (ips .EQ. -1 ) THEN
CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
IF ( Py .EQ. mytask_y ) THEN
IF ( jps .EQ. -1 ) jps = j
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
! handle setting the memory dimensions where there are no Y elements assigned to this proc
IF (jps .EQ. -1 ) THEN
!begin: wig; 12-Mar-2008
! This appears redundant with the conditionals above, but we get cases with only
! one of the directions being set to "missing" when turning off extra processors.
! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
!end: wig; 12-Mar-2008
! Description of the transpose decomposition strategy for RSL LITE. 20061231jm
!
! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
! XY corresponds to the dimensions of the processor mesh; lower-case xyz
! corresponds to grid dimensions.
!
! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
! +------------------+ <- this edge is costly; see below
!
! The aim is to avoid all-to-all communication over the whole
! communicator. Instead, when possible, use a transpose scheme that requires
! all-to-all only within dimensional communicators; that is, communicators
! defined for the processes in a row or column of the processor mesh. Note,
! however, it is not possible to create a ring of transposes between
! xy-yz-xz decompositions without at least one of the edges in the ring
! being fully all-to-all (in other words, one of the transpose edges must
! rotate and not just transpose a plane of the model grid within the
! processor mesh). The issue is then: where should we put this costly edge
! in the transpose scheme we choose? To avoid being completely arbitrary,
! we chose a scheme most natural for models that use parallel spectral
! transforms, where the costly edge is the one that goes from the xz to
! the xy decomposition. (May be implemented as just a two-step transpose.)
!
! Additional notational convention, below. The 'x' or 'y' appended to a
! dimension start or end variable refers to which grid dimension is all
! on-processor in the given decomposition. That is, ipsx and ipex are the
! start and end for the i-dimension in the zy decomposition, where x is
! on-processor. ('z' is assumed for the xy decomposition and not appended to
! the ips, ipe, etc. variable names.)
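!
! Worked example of the naming convention above (added for exposition): on
! a 2 x 3 mesh with a 100 x 80 x 30 (i,j,k) grid, the standard XxYy
! decomposition gives each task roughly ips:ipe = 50 i-points and
! jps:jpe = 27 j-points, with kps:kpe = 1:30 all on-processor; in the
! x-on-processor (XzYy) decomposition, ipsx:ipex = 1:100 while k is split
! over the X processor dimension (kpsx:kpex of roughly 15 levels) and j is
! split over Y as before.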
CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
IF ( Px .EQ. mytask_x ) THEN
IF ( kpsx .EQ. -1 ) kpsx = k
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
! Handle the case where no levels are assigned to this process: the loop
! above did no iterations. Do the same for I and J. Memory allocation for
! these cases is handled below.
IF (kpsx .EQ. -1 ) THEN
CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
IF ( Py .EQ. mytask_y ) THEN
IF ( jpsx .EQ. -1 ) jpsx = j
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
IF (jpsx .EQ. -1 ) THEN
!begin: wig; 12-Mar-2008
! This appears redundant with the conditionals above, but we get cases with only
! one of the directions being set to "missing" when turning off extra processors.
! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
IF (ipex .EQ. -1 .or. jpex .EQ. -1) THEN
!end: wig; 12-Mar-2008
! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
kpsy = kpsx ! same as above
kpey = kpex ! same as above
CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
   1, 1, ierr ) ! x and y for proc mesh reversed
IF ( Py .EQ. mytask_y ) THEN
IF ( ipsy .EQ. -1 ) ipsy = i
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
IF (ipsy .EQ. -1 ) THEN
! In case of NMM CORE, the domain only ever runs from ids..ide-1 and jds..jde-1, so
! adjust the decomposition to reflect that. 20051020 JM
CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
IF ( Px .EQ. mytask_x ) THEN
IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
IF ( ips .EQ. -1 ) ips = i
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
IF ( Py .EQ. mytask_y ) THEN
IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
IF ( jps .EQ. -1 ) jps = j
IF ( ierr .NE. 0 ) THEN
CALL tfp_message(__FILE__,__LINE__)
! extend the patch dimensions out shw along edges of domain
IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
IF ( mytask_x .EQ. 0 ) THEN
IF ( mytask_x .EQ. ntasks_x-1 ) THEN
IF ( mytask_y .EQ. 0 ) THEN
IF ( mytask_y .EQ. ntasks_y-1 ) THEN
ENDIF !wig; 11-Mar-2008
! handle setting the memory dimensions where there are no levels assigned to this proc
IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
! handle setting the memory dimensions where there are no Y elements assigned to this proc
IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
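! Worked example of the memory-bound formulas above (added for
! exposition): with ips = 50, shw = 2, maxhalowidth = 6, ids = 1 and
! bdx = 4, ims = max(50-6, 1-4) - 1 = 43; the patch is padded by the
! larger of the stencil and halo widths, clipped at the domain start minus
! the boundary width, then extended by one more point.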
! handle setting the memory dimensions where there are no X elements assigned to this proc
IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
END SUBROUTINE compute_memory_dims_rsl_lite

! internal, used below for switching the argument to MPI calls
! if reals are being autopromoted to doubles in the build of WRF
INTEGER FUNCTION getrealmpitype()
INTEGER rtypesize, dtypesize, ierr
CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
IF ( RWORDSIZE .EQ. rtypesize ) THEN
getrealmpitype = MPI_REAL
ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
getrealmpitype = MPI_DOUBLE_PRECISION
CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
! required dummy initialization for function that is never called
END FUNCTION getrealmpitype
REAL FUNCTION wrf_dm_max_real ( inval )
CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )
wrf_dm_max_real = retval
wrf_dm_max_real = inval
END FUNCTION wrf_dm_max_real

REAL FUNCTION wrf_dm_min_real ( inval )
CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, local_communicator, ierr )
wrf_dm_min_real = retval
wrf_dm_min_real = inval
END FUNCTION wrf_dm_min_real

SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
CALL mpi_allreduce ( inval, retval , n, getrealmpitype(), MPI_MIN, local_communicator, ierr )
retval(1:n) = inval(1:n)
END SUBROUTINE wrf_dm_min_reals

REAL FUNCTION wrf_dm_sum_real ( inval )
CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, local_communicator, ierr )
wrf_dm_sum_real = retval
wrf_dm_sum_real = inval
END FUNCTION wrf_dm_sum_real

SUBROUTINE wrf_dm_sum_reals (inval, retval)
REAL, INTENT(IN) :: inval(:)
REAL, INTENT(OUT) :: retval(:)
CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, ierr )
END SUBROUTINE wrf_dm_sum_reals

INTEGER FUNCTION wrf_dm_sum_integer ( inval )
INTEGER inval, retval
CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, local_communicator, ierr )
wrf_dm_sum_integer = retval
wrf_dm_sum_integer = inval
END FUNCTION wrf_dm_sum_integer

INTEGER FUNCTION wrf_dm_bxor_integer ( inval )
INTEGER inval, retval
CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_BXOR, local_communicator, ierr )
wrf_dm_bxor_integer = retval
wrf_dm_bxor_integer = inval
END FUNCTION wrf_dm_bxor_integer
SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
REAL val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex_all (2,ntasks)
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
idex = dex_all(1,1) ; jdex = dex_all(2,1)
IF ( val_all(i) .GT. val ) THEN
INTEGER idex, jdex, ierr
END SUBROUTINE wrf_dm_maxval_real

#ifndef PROMOTE_FLOAT
SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
DOUBLE PRECISION val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex_all (2,ntasks)
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
idex = dex_all(1,1) ; jdex = dex_all(2,1)
IF ( val_all(i) .GT. val ) THEN
DOUBLE PRECISION val
INTEGER idex, jdex, ierr
END SUBROUTINE wrf_dm_maxval_doubleprecision

SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
INTEGER val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex_all (2,ntasks)
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
idex = dex_all(1,1) ; jdex = dex_all(2,1)
IF ( val_all(i) .GT. val ) THEN
END SUBROUTINE wrf_dm_maxval_integer
! For HWRF some additional computation is required. This is gopal's doing.

SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
REAL val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex_all (2,ntasks)
! Collective operation. Each processor calls passing a local value and its index; on return
! all processors are passed back the minimum of all values passed and its index.
CALL wrf_get_dm_communicator ( comm )
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
idex = dex_all(1,1) ; jdex = dex_all(2,1)
IF ( val_all(i) .LT. val ) THEN
END SUBROUTINE wrf_dm_minval_real
#ifndef PROMOTE_FLOAT
SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
DOUBLE PRECISION val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex_all (2,ntasks)
! Collective operation. Each processor calls passing a local value and its index; on return
! all processors are passed back the minimum of all values passed and its index.
CALL wrf_get_dm_communicator ( comm )
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
idex = dex_all(1,1) ; jdex = dex_all(2,1)
IF ( val_all(i) .LT. val ) THEN
END SUBROUTINE wrf_dm_minval_doubleprecision
SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
INTEGER val, val_all( ntasks )
INTEGER idex, jdex, ierr
INTEGER dex_all (2,ntasks)
! Collective operation. Each processor calls passing a local value and its index; on return
! all processors are passed back the minimum of all values passed and its index.
CALL wrf_get_dm_communicator ( comm )
dex(1) = idex ; dex(2) = jdex
CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
idex = dex_all(1,1) ; jdex = dex_all(2,1)
IF ( val_all(i) .LT. val ) THEN
END SUBROUTINE wrf_dm_minval_integer ! End of gopal's doing
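
! Usage sketch for the reductions above (added for exposition; the
! variable names are illustrative). Each rank passes its local extreme
! and the (i,j) where it occurred; all ranks return with the global
! extreme and the winning indices:
!    localmin = MINVAL( field(ips:ipe, jps:jpe) )
!    iloc = ... ; jloc = ...              ! indices of the local minimum
!    CALL wrf_dm_minval_real( localmin, iloc, jloc )
!    ! localmin, iloc, jloc now describe the global minimum on every rank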
SUBROUTINE split_communicator
INTEGER mpi_comm_here, mpi_comm_local, comdup, mytask, ntasks, ierr, io_status
# if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
INTEGER thread_support_provided, thread_support_requested
INTEGER, ALLOCATABLE :: icolor(:)
INTEGER tasks_per_split
NAMELIST /namelist_split/ tasks_per_split
CALL MPI_INITIALIZED( mpi_inited, ierr )
IF ( .NOT. mpi_inited ) THEN
# if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
thread_support_requested = MPI_THREAD_FUNNELED
CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
IF ( thread_support_provided .lt. thread_support_requested ) THEN
CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
CALL mpi_init ( ierr )
mpi_comm_here = MPI_COMM_WORLD
CALL atm_cmp_start( mpi_comm_here ) ! atmospheric side of HWRF coupler will split MPI_COMM_WORLD and return communicator as argument
CALL wrf_set_dm_communicator( mpi_comm_here )
CALL wrf_get_dm_communicator( mpi_comm_here )
CALL wrf_termio_dup( mpi_comm_here )
CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr )
CALL mpi_comm_size ( mpi_comm_here, ntasks, ierr )
IF ( mytask .EQ. 0 ) THEN
OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
tasks_per_split = ntasks
READ ( 27 , NML = namelist_split, IOSTAT=io_status )
CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
IF ( io_status .NE. 0 ) THEN
RETURN ! just ignore and return
CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
IF ( tasks_per_split .GT. ntasks .OR. tasks_per_split .LE. 0 ) RETURN
IF ( mod( ntasks, tasks_per_split ) .NE. 0 ) THEN
CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
ALLOCATE( icolor(ntasks) )
DO WHILE ( j .LT. ntasks / tasks_per_split )
DO i = 1, tasks_per_split
icolor( i + j * tasks_per_split ) = j
CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
CALL wrf_set_dm_communicator( mpi_comm_local )
DEALLOCATE( icolor )
END SUBROUTINE split_communicator
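
! Worked example of the coloring above (added for exposition): with
! ntasks = 8 and tasks_per_split = 4, icolor becomes (0,0,0,0,1,1,1,1),
! so MPI_Comm_split yields two 4-task communicators; ranks 0-3 form one
! WRF instance and ranks 4-7 another.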
SUBROUTINE init_module_dm
INTEGER mpi_comm_local, mpi_comm_here, ierr, mytask, nproc
CALL mpi_initialized( mpi_inited, ierr )
IF ( .NOT. mpi_inited ) THEN
! If MPI has not been initialized then initialize it and
! make comm_world the communicator.
! Otherwise, something else (e.g. split_communicator) has already
! initialized MPI, so just grab the communicator that
! should already be stored and use that.
CALL mpi_init ( ierr )
mpi_comm_here = MPI_COMM_WORLD
CALL wrf_set_dm_communicator ( mpi_comm_here )
CALL wrf_get_dm_communicator( mpi_comm_local )
CALL wrf_termio_dup( mpi_comm_local )
END SUBROUTINE init_module_dm

SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
USE module_domain, ONLY : domain
TYPE (domain), INTENT(INOUT) :: parent, nest
INTEGER, INTENT(IN) :: dx,dy
END SUBROUTINE wrf_dm_move_nest
!------------------------------------------------------------------------------
SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
   mp_local_cobmask, errf )
!------------------------------------------------------------------------------
! PURPOSE: Do an MPI allgatherv operation across processors to get the
! errors at each observation point on all processors.
!------------------------------------------------------------------------------
INTEGER, INTENT(IN) :: nsta ! Observation index.
INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
INTEGER, INTENT(IN) :: niobf ! Number of observations.
LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
REAL, INTENT(INOUT) :: errf(nerrf, niobf)
! Local declarations
integer i, n, nlocal_dot, nlocal_crs
REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
REAL SFP_BUFFER(NIOBF) ! Buffer for holding surface pressure
REAL PBL_BUFFER(NIOBF) ! Buffer for holding (real) KPBL index
INTEGER N_BUFFER(NIOBF)
REAL FULL_BUFFER(NIOBF)
INTEGER IFULL_BUFFER(NIOBF)
INTEGER IDISPLACEMENT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
INTEGER ICOUNT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
INTEGER :: MPI_COMM_COMP ! MPI group communicator
INTEGER :: NPROCS ! Number of processors
INTEGER :: IERR ! Error code from MPI routines
! Get communicator for MPI operations.
CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
! Get the number of processors.
CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
NLOCAL_DOT = NLOCAL_DOT + 1
UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
N_BUFFER(NLOCAL_DOT) = N
CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
   ICOUNT,1,MPI_INTEGER, &
IDISPLACEMENT(1) = 0
IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
   IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_INTEGER, MPI_COMM_COMP, IERR)
CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
! SURF PRESS AT U-POINTS
CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
NLOCAL_DOT = NLOCAL_DOT + 1
UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
N_BUFFER(NLOCAL_DOT) = N
CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
   ICOUNT,1,MPI_INTEGER, &
IDISPLACEMENT(1) = 0
IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
   IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_INTEGER, MPI_COMM_COMP, IERR)
CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
! SURF PRESS AT V-POINTS
CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
! DO THE CROSS FIELDS, T AND Q
IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
NLOCAL_CRS = NLOCAL_CRS + 1
UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
PBL_BUFFER(NLOCAL_CRS) = ERRF(5,N) ! KPBL
SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
N_BUFFER(NLOCAL_CRS) = N
CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
   ICOUNT,1,MPI_INTEGER, &
IDISPLACEMENT(1) = 0
IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
   IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_INTEGER, MPI_COMM_COMP, IERR)
CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
CALL MPI_ALLGATHERV( PBL_BUFFER, NLOCAL_CRS, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(5,IFULL_BUFFER(N)) = FULL_BUFFER(N)
! SURF PRESS AT MASS POINTS
CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
   FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
   MPI_REAL, MPI_COMM_COMP, IERR)
ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
END SUBROUTINE get_full_obs_vector
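
! Worked example of the allgatherv bookkeeping in get_full_obs_vector
! above (added for exposition): if three tasks hold 3, 5 and 2
! locally-owned observations, MPI_ALLGATHER fills ICOUNT = (3,5,2) and
! the running sum gives IDISPLACEMENT = (0,3,8), so each task's
! contribution lands in its own slice of FULL_BUFFER and IFULL_BUFFER
! maps every entry back to its global observation index.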
SUBROUTINE wrf_dm_maxtile_real ( val , tile)
REAL val, val_all( ntasks )
! Collective operation. Each processor calls passing a local value; on return
! all processors are passed back the maximum of all values passed and the tile number that holds it.
CALL wrf_get_dm_communicator ( comm )
CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
IF ( val_all(i) .GT. val ) THEN
END SUBROUTINE wrf_dm_maxtile_real

SUBROUTINE wrf_dm_mintile_real ( val , tile)
REAL val, val_all( ntasks )
! Collective operation. Each processor calls passing a local value; on return
! all processors are passed back the minimum of all values passed and the tile number that holds it.
CALL wrf_get_dm_communicator ( comm )
CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
IF ( val_all(i) .LT. val ) THEN
END SUBROUTINE wrf_dm_mintile_real

SUBROUTINE wrf_dm_mintile_double ( val , tile)
DOUBLE PRECISION val, val_all( ntasks )
! Collective operation. Each processor calls passing a local value; on return
! all processors are passed back the minimum of all values passed and the tile number that holds it.
CALL wrf_get_dm_communicator ( comm )
CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
IF ( val_all(i) .LT. val ) THEN
END SUBROUTINE wrf_dm_mintile_double

SUBROUTINE wrf_dm_tile_val_int ( val , tile)
INTEGER val, val_all( ntasks )
! Collective operation. Get the value from the input tile.
CALL wrf_get_dm_communicator ( comm )
CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
END SUBROUTINE wrf_dm_tile_val_int
SUBROUTINE wrf_get_hostname ( str )
CALL rsl_lite_get_hostname( tmp, 512, n, cs )
END SUBROUTINE wrf_get_hostname

SUBROUTINE wrf_get_hostid ( hostid )
INTEGER i, sz, n, cs
CALL rsl_lite_get_hostname( tmp, 512, n, cs )
END SUBROUTINE wrf_get_hostid

END MODULE module_dm
!=========================================================================
! wrf_dm_patch_domain has to be outside the module because it is called
! by a routine in module_domain but itself depends on module_domain

SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
   sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
   sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
   sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
   sp1x , ep1x , sm1x , em1x , &
   sp2x , ep2x , sm2x , em2x , &
   sp3x , ep3x , sm3x , em3x , &
   sp1y , ep1y , sm1y , em1y , &
   sp2y , ep2y , sm2y , em2y , &
   sp3y , ep3y , sm3y , em3y , &
USE module_domain, ONLY : domain, head_grid, find_grid_by_id
USE module_dm, ONLY : patch_domain_rsl_lite
INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
   sm1 , em1 , sm2 , em2 , sm3 , em3
INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
   sm1x , em1x , sm2x , em2x , sm3x , em3x
INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
   sm1y , em1y , sm2y , em2y , sm3y , em3y
INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
TYPE(domain), POINTER :: parent
TYPE(domain), POINTER :: grid_ptr
! This indirection is necessary because we cannot pass parent directly into
! wrf_dm_patch_domain: creating the correct interface definitions
! would generate a circular USE reference between module_domain and module_dm.
! See the comment of this date in module_domain for more information. JM 20020416
grid_ptr => head_grid
CALL find_grid_by_id( parent_id , grid_ptr , parent )
CALL patch_domain_rsl_lite ( id , parent, parent_id , &
   sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
   sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
   sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
   sp1x , ep1x , sm1x , em1x , &
   sp2x , ep2x , sm2x , em2x , &
   sp3x , ep3x , sm3x , em3x , &
   sp1y , ep1y , sm1y , em1y , &
   sp2y , ep2y , sm2y , em2y , &
   sp3y , ep3y , sm3y , em3y , &
END SUBROUTINE wrf_dm_patch_domain
SUBROUTINE wrf_termio_dup( comm )
INTEGER, INTENT(IN) :: comm
INTEGER mytask, ntasks
CALL mpi_comm_size(comm, ntasks, ierr )
CALL mpi_comm_rank(comm, mytask, ierr )
write(0,*)'starting wrf task ',mytask,' of ',ntasks
CALL rsl_error_dup1( mytask )
END SUBROUTINE wrf_termio_dup

SUBROUTINE wrf_get_myproc( myproc )
USE module_dm , ONLY : mytask
END SUBROUTINE wrf_get_myproc

SUBROUTINE wrf_get_nproc( nproc )
USE module_dm , ONLY : ntasks
END SUBROUTINE wrf_get_nproc

SUBROUTINE wrf_get_nprocx( nprocx )
USE module_dm , ONLY : ntasks_x
END SUBROUTINE wrf_get_nprocx

SUBROUTINE wrf_get_nprocy( nprocy )
USE module_dm , ONLY : ntasks_y
END SUBROUTINE wrf_get_nprocy

SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
USE module_dm , ONLY : local_communicator
CHARACTER*1 BUF(size)
CALL BYTE_BCAST ( buf , size, local_communicator )
END SUBROUTINE wrf_dm_bcast_bytes
SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
INTEGER ibuf(256),i,n
! The root task is required to have the correct value of N1; other tasks
! might not have the correct value.
CALL wrf_dm_bcast_integer( n , 1 )
IF (n .GT. 256) n = 256
ibuf(I) = ichar(buf(I:I))
CALL wrf_dm_bcast_integer( ibuf, n )
buf(i:i) = char(ibuf(i))
END SUBROUTINE wrf_dm_bcast_string
SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
END SUBROUTINE wrf_dm_bcast_integer

SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
! since we are not indexing the globbuf and Field arrays, it does not matter.
CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
END SUBROUTINE wrf_dm_bcast_double

SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
END SUBROUTINE wrf_dm_bcast_real

SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
END SUBROUTINE wrf_dm_bcast_logical
SUBROUTINE write_68( grid, v , s , &
   ids, ide, jds, jde, kds, kde, &
   ims, ime, jms, jme, kms, kme, &
   its, ite, jts, jte, kts, kte )
USE module_domain, ONLY : domain
TYPE(domain) , INTENT (INOUT) :: grid
INTEGER ids, ide, jds, jde, kds, kde, &
   ims, ime, jms, jme, kms, kme, &
   its, ite, jts, jte, kts, kte
REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
logical, external :: wrf_dm_on_monitor
real globbuf( ids:ide, kds:kde, jds:jde )
character*3 ord, stag
if ( kds == kde ) then
CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
   ids, ide, jds, jde, kds, kde, &
   ims, ime, jms, jme, kms, kme, &
   its, ite, jts, jte, kts, kte )
CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
   ids, ide, kds, kde, jds, jde, &
   ims, ime, kms, kme, jms, jme, &
   its, ite, kts, kte, jts, jte )
if ( wrf_dm_on_monitor() ) THEN
WRITE(68,*) ide-ids+1, jde-jds+1 , s
WRITE(68,*) globbuf(i,1,j)

SUBROUTINE wrf_abort
CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
END SUBROUTINE wrf_abort
SUBROUTINE wrf_dm_shutdown
CALL MPI_FINALIZE( ierr )
END SUBROUTINE wrf_dm_shutdown

LOGICAL FUNCTION wrf_dm_on_monitor()
INTEGER tsk, ierr, mpi_comm_local
CALL wrf_get_dm_communicator( mpi_comm_local )
CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
wrf_dm_on_monitor = tsk .EQ. 0
wrf_dm_on_monitor = .TRUE.
END FUNCTION wrf_dm_on_monitor

SUBROUTINE rsl_comm_iter_init(shw,ps,pe)
INTEGER iter, plus_send_start, plus_recv_start, &
   minus_send_start, minus_recv_start
COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
   minus_send_start, minus_recv_start
minus_send_start = ps
minus_recv_start = ps-1
plus_send_start = pe
plus_recv_start = pe+1
END SUBROUTINE rsl_comm_iter_init
1985 LOGICAL FUNCTION rsl_comm_iter ( id , is_intermediate, &
1986 shw , xy , ds, de_in, ps, pe, nds,nde, &
1987 sendbeg_m, sendw_m, sendbeg_p, sendw_p, &
1988 recvbeg_m, recvw_m, recvbeg_p, recvw_p )
1989 USE module_dm, ONLY : ntasks_x, ntasks_y, mytask_x, mytask_y
1991 INTEGER, INTENT(IN) :: id,shw,xy,ds,de_in,ps,pe,nds,nde
1992 LOGICAL, INTENT(IN) :: is_intermediate ! treated differently, coarse but with same decomp as nest
1993 INTEGER, INTENT(OUT) :: sendbeg_m, sendw_m, sendbeg_p, sendw_p
1994 INTEGER, INTENT(OUT) :: recvbeg_m, recvw_m, recvbeg_p, recvw_p
1995 INTEGER k, kn, ni, nj, de, Px, Py, nt, me, lb, ub, ierr
1997 INTEGER iter, plus_send_start, plus_recv_start, &
1998 minus_send_start, minus_recv_start
1999 INTEGER parent_grid_ratio, parent_start
2000 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
2001 minus_send_start, minus_recv_start
2003 #if (NMM_CORE == 1 )
2004 ! In case of NMM CORE, the domain only ever runs from ids..ide-1 and jds..jde-1 so
2005 ! adjust decomposition to reflect. 20081206 JM
2011 IF ( xy .EQ. 1 ) THEN ! X/I axis
2014 IF ( is_intermediate ) THEN
2015 CALL nl_get_i_parent_start(id,parent_start)
2016 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2021 IF ( is_intermediate ) THEN
2022 CALL nl_get_j_parent_start(id,parent_start)
2023 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2033 IF ( me .GT. 0 ) THEN
2034 lb = minus_send_start
2038 IF ( is_intermediate ) THEN
2039 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2040 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2042 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2044 IF ( Px .NE. me+(iter-1) ) THEN
2047 minus_send_start = minus_send_start+1
2048 sendw_m = sendw_m + 1
2054 IF ( me .GT. 0 ) THEN
2055 ub = minus_recv_start
2057 DO k = minus_recv_start,ps-shw,-1
2059 IF ( is_intermediate ) THEN
2060 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2061 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2063 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2065 IF ( Px .NE. me-iter ) THEN
2068 minus_recv_start = minus_recv_start-1
2069 recvw_m = recvw_m + 1
2076 IF ( me .LT. nt-1 ) THEN
2077 ub = plus_send_start
2078 sendbeg_p = pe - ub + 1
2079 DO k = ub,pe-shw+1,-1
2081 IF ( is_intermediate ) THEN
2082          kn =  ( k - parent_start ) * parent_grid_ratio + 1 + 1
2083 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2085 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2087 IF ( Px .NE. me-(iter-1) ) THEN
2090 plus_send_start = plus_send_start - 1
2091 sendw_p = sendw_p + 1
2097 IF ( me .LT. nt-1 ) THEN
2098 lb = plus_recv_start
2102 IF ( is_intermediate ) THEN
2103          kn =  ( k - parent_start ) * parent_grid_ratio + 1 + 1
2104 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2106 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2108 IF ( Px .NE. me+iter ) THEN
2111 plus_recv_start = plus_recv_start + 1
2112 recvw_p = recvw_p + 1
2116 if ( iter .eq. 1 ) then
2121 sendw_m = 0 ; sendw_p = 0 ; recvw_m = 0 ; recvw_p = 0
2122     sendbeg_m = 1 ; if ( me .GT. 0 ) sendw_m = shw
2123     sendbeg_p = 1 ; if ( me .LT. nt-1 ) sendw_p = shw
2124     recvbeg_m = 1 ; if ( me .GT. 0 ) recvw_m = shw
2125     recvbeg_p = 1 ; if ( me .LT. nt-1 ) recvw_p = shw
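! On the first iteration the send and receive widths default to the full halo
! width shw on each side that has a neighbor (me > 0 on the minus side,
! me < nt-1 on the plus side); tasks on the mesh boundary skip the missing
! side. The task_for_point searches above handle the cases where point
! ownership must be computed explicitly, e.g. for intermediate domains.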
2127 ! write(0,*)'shw ', shw , ' xy ',xy
2128 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2129 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2130 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2138 rsl_comm_iter = went
2139 END FUNCTION rsl_comm_iter
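! rsl_comm_iter is meant to be called repeatedly, once per communication
! iteration; its LOGICAL result (went) records whether that iteration still
! had an exchange to perform, so callers can loop on it. A hedged sketch of a
! driver loop for the X/I axis (xy = 1); the pack/exchange/unpack step is a
! placeholder, not a real RSL_LITE entry point:
!
!   CALL rsl_comm_iter_init ( shw, ips, ipe )
!   DO WHILE ( rsl_comm_iter ( id, .FALSE., shw, 1, ids, ide, ips, ipe,   &
!                              nds, nde,                                  &
!                              sendbeg_m, sendw_m, sendbeg_p, sendw_p,    &
!                              recvbeg_m, recvw_m, recvbeg_p, recvw_p ) )
!     ! ... pack, exchange, and unpack using the offsets and widths above ...
!   END DO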
2141 INTEGER FUNCTION wrf_dm_monitor_rank()
2143 wrf_dm_monitor_rank = 0
2145 END FUNCTION wrf_dm_monitor_rank
2147 SUBROUTINE wrf_get_dm_communicator ( communicator )
2148 USE module_dm , ONLY : local_communicator
2150 INTEGER , INTENT(OUT) :: communicator
2151 communicator = local_communicator
2153 END SUBROUTINE wrf_get_dm_communicator
2155 SUBROUTINE wrf_get_dm_communicator_x ( communicator )
2156 USE module_dm , ONLY : local_communicator_x
2158 INTEGER , INTENT(OUT) :: communicator
2159 communicator = local_communicator_x
2161 END SUBROUTINE wrf_get_dm_communicator_x
2163 SUBROUTINE wrf_get_dm_communicator_y ( communicator )
2164 USE module_dm , ONLY : local_communicator_y
2166 INTEGER , INTENT(OUT) :: communicator
2167 communicator = local_communicator_y
2169 END SUBROUTINE wrf_get_dm_communicator_y
2171 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
2172 USE module_dm , ONLY : local_iocommunicator
2174 INTEGER , INTENT(OUT) :: iocommunicator
2175 iocommunicator = local_iocommunicator
2177 END SUBROUTINE wrf_get_dm_iocommunicator
2179 SUBROUTINE wrf_set_dm_communicator ( communicator )
2180 USE module_dm , ONLY : local_communicator
2182 INTEGER , INTENT(IN) :: communicator
2183 local_communicator = communicator
2185 END SUBROUTINE wrf_set_dm_communicator
2187 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
2188 USE module_dm , ONLY : local_iocommunicator
2190 INTEGER , INTENT(IN) :: iocommunicator
2191 local_iocommunicator = iocommunicator
2193 END SUBROUTINE wrf_set_dm_iocommunicator
2195 SUBROUTINE wrf_get_dm_ntasks_x ( retval )
2196 USE module_dm , ONLY : ntasks_x
2198    INTEGER , INTENT(OUT) :: retval
   retval = ntasks_x
2201 END SUBROUTINE wrf_get_dm_ntasks_x
2203 SUBROUTINE wrf_get_dm_ntasks_y ( retval )
2204 USE module_dm , ONLY : ntasks_y
2206    INTEGER , INTENT(OUT) :: retval
   retval = ntasks_y
2209 END SUBROUTINE wrf_get_dm_ntasks_y
2212 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2214 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
2215 DS1,DE1,DS2,DE2,DS3,DE3,&
2216 MS1,ME1,MS2,ME2,MS3,ME3,&
2217 PS1,PE1,PS2,PE2,PS3,PE3 )
2219 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2220 MS1,ME1,MS2,ME2,MS3,ME3,&
2221 PS1,PE1,PS2,PE2,PS3,PE3
2222 CHARACTER *(*) stagger,ordering
2227 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
2228 DS1,DE1,DS2,DE2,DS3,DE3,&
2229 MS1,ME1,MS2,ME2,MS3,ME3,&
2230 PS1,PE1,PS2,PE2,PS3,PE3 )
2233 END SUBROUTINE wrf_patch_to_global_real
2235 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
2236 DS1,DE1,DS2,DE2,DS3,DE3,&
2237 MS1,ME1,MS2,ME2,MS3,ME3,&
2238 PS1,PE1,PS2,PE2,PS3,PE3 )
2240 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2241 MS1,ME1,MS2,ME2,MS3,ME3,&
2242 PS1,PE1,PS2,PE2,PS3,PE3
2243 CHARACTER *(*) stagger,ordering
2245 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2246 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2247 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
2248 ! since we are not indexing the globbuf and Field arrays, it does not matter
2252 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
2253 DS1,DE1,DS2,DE2,DS3,DE3,&
2254 MS1,ME1,MS2,ME2,MS3,ME3,&
2255 PS1,PE1,PS2,PE2,PS3,PE3 )
2258 END SUBROUTINE wrf_patch_to_global_double
2261 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
2262 DS1,DE1,DS2,DE2,DS3,DE3,&
2263 MS1,ME1,MS2,ME2,MS3,ME3,&
2264 PS1,PE1,PS2,PE2,PS3,PE3 )
2266 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2267 MS1,ME1,MS2,ME2,MS3,ME3,&
2268 PS1,PE1,PS2,PE2,PS3,PE3
2269 CHARACTER *(*) stagger,ordering
2274 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
2275 DS1,DE1,DS2,DE2,DS3,DE3,&
2276 MS1,ME1,MS2,ME2,MS3,ME3,&
2277 PS1,PE1,PS2,PE2,PS3,PE3 )
2280 END SUBROUTINE wrf_patch_to_global_integer
2283 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
2284 DS1,DE1,DS2,DE2,DS3,DE3,&
2285 MS1,ME1,MS2,ME2,MS3,ME3,&
2286 PS1,PE1,PS2,PE2,PS3,PE3 )
2288 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2289 MS1,ME1,MS2,ME2,MS3,ME3,&
2290 PS1,PE1,PS2,PE2,PS3,PE3
2291 CHARACTER *(*) stagger,ordering
2296 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
2297 DS1,DE1,DS2,DE2,DS3,DE3,&
2298 MS1,ME1,MS2,ME2,MS3,ME3,&
2299 PS1,PE1,PS2,PE2,PS3,PE3 )
2302 END SUBROUTINE wrf_patch_to_global_logical
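! All four typed wrappers above funnel into wrf_patch_to_global_generic and
! exist only to pin the word size (RWORDSIZE, DWORDSIZE, IWORDSIZE, LWORDSIZE)
! passed to the generic gather. A hedged usage sketch for a 2-D real field;
! the array names are illustrative, and the stagger/ordering strings are
! chosen so that no dimension adjustment occurs:
!
!   REAL :: psfc      ( ims:ime , jms:jme )   ! local patch, memory dims
!   REAL :: psfc_glob ( ids:ide , jds:jde )   ! full domain, valid on monitor
!   CALL wrf_patch_to_global_real ( psfc, psfc_glob, grid%domdesc, 'xy', 'xy', &
!                                   ids, ide, jds, jde, 1, 1,                  &
!                                   ims, ime, jms, jme, 1, 1,                  &
!                                   ips, ipe, jps, jpe, 1, 1 )
!
! Only the monitor task holds valid data in psfc_glob on return.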
2305 # define FRSTELEM (1)
2310 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
2311 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2312 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2313 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2314 USE module_driver_constants
2316 USE module_wrf_error, ONLY : wrf_at_debug_level
2317 USE module_dm, ONLY : local_communicator, ntasks
2320 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2321 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2322 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2323 CHARACTER *(*) stagger,ordering
2324 INTEGER domdesc,typesize,ierr
2328 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2329 MS1,ME1,MS2,ME2,MS3,ME3,&
2330 PS1,PE1,PS2,PE2,PS3,PE3
2331 INTEGER ids,ide,jds,jde,kds,kde,&
2332 ims,ime,jms,jme,kms,kme,&
2333 ips,ipe,jps,jpe,kps,kpe
2334 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2336 INTEGER i, j, k, ndim
2337 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2338 ! allocated further down, after the D indices are potentially recalculated for staggering
2339 REAL, ALLOCATABLE :: tmpbuf( : )
2340 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2342 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2343 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2344 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2346 SELECT CASE ( TRIM(ordering) )
2350 ndim = 3 ! where appropriate
2353 SELECT CASE ( TRIM(ordering) )
2355 ! the non-staggered variables come in at one less than the
2356 ! domain dimensions, but the code wants the full domain spec, so
2357 ! adjust upward if not staggered
2358 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2359 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2360 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2362 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2363 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2364 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2366 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2367 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2368 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2370 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2371 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2372 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
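! Worked example of the adjustment above: an unstaggered field in the first
! ('xy'-leading) ordering is dimensioned one short of the full domain in i and
! j, so when the stagger string contains neither 'x' nor 'y', DE1 and DE2 are
! each bumped by one to recover the full domain extents that the gather code
! below expects.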
2376 ! moved to here to be after the potential recalculations of D dims
2377 IF ( wrf_dm_on_monitor() ) THEN
2378 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2380 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2382 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
2384 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2385 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2386 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2388 IF ( typesize .EQ. RWORDSIZE ) THEN
2389 CALL just_patch_r ( buf , locbuf , size(locbuf), &
2390 PS1, PE1, PS2, PE2, PS3, PE3 , &
2391 MS1, ME1, MS2, ME2, MS3, ME3 )
2392 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2393 CALL just_patch_i ( buf , locbuf , size(locbuf), &
2394 PS1, PE1, PS2, PE2, PS3, PE3 , &
2395 MS1, ME1, MS2, ME2, MS3, ME3 )
2396 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2397 CALL just_patch_d ( buf , locbuf , size(locbuf), &
2398 PS1, PE1, PS2, PE2, PS3, PE3 , &
2399 MS1, ME1, MS2, ME2, MS3, ME3 )
2400 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2401 CALL just_patch_l ( buf , locbuf , size(locbuf), &
2402 PS1, PE1, PS2, PE2, PS3, PE3 , &
2403 MS1, ME1, MS2, ME2, MS3, ME3 )
2406 ! defined in external/io_quilt
2407 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2411 CALL collect_on_comm0 ( local_communicator , typesize , &
2412 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
2413 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
2415 ndim = len(TRIM(ordering))
2417 IF ( wrf_at_debug_level(500) ) THEN
2421 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
2423 IF ( typesize .EQ. RWORDSIZE ) THEN
2424 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
2425 DS1, DE1, DS2, DE2, DS3, DE3 , &
2427 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2428 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
2429 DS1, DE1, DS2, DE2, DS3, DE3 , &
2431 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2432 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
2433 DS1, DE1, DS2, DE2, DS3, DE3 , &
2435 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2436 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
2437 DS1, DE1, DS2, DE2, DS3, DE3 , &
2443 IF ( wrf_at_debug_level(500) ) THEN
2444 CALL end_timing('wrf_patch_to_global_generic')
2446 DEALLOCATE( tmpbuf )
2449 END SUBROUTINE wrf_patch_to_global_generic
2451 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
2452 PS1,PE1,PS2,PE2,PS3,PE3, &
2453 MS1,ME1,MS2,ME2,MS3,ME3 )
2455 INTEGER , INTENT(IN) :: noutbuf
2456 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2457 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2458 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2459 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
2461 INTEGER :: i,j,k,n , icurs
2466 outbuf( icurs ) = inbuf( i, j, k )
2472 END SUBROUTINE just_patch_i
2474 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
2475 PS1,PE1,PS2,PE2,PS3,PE3, &
2476 MS1,ME1,MS2,ME2,MS3,ME3 )
2478 INTEGER , INTENT(IN) :: noutbuf
2479 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2480 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2481 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2482 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2484 INTEGER :: i,j,k , icurs
2489 outbuf( icurs ) = inbuf( i, j, k )
2495 END SUBROUTINE just_patch_r
2497 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
2498 PS1,PE1,PS2,PE2,PS3,PE3, &
2499 MS1,ME1,MS2,ME2,MS3,ME3 )
2501 INTEGER , INTENT(IN) :: noutbuf
2502 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2503 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2504 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2505 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2507 INTEGER :: i,j,k,n , icurs
2512 outbuf( icurs ) = inbuf( i, j, k )
2518 END SUBROUTINE just_patch_d
2520 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
2521 PS1,PE1,PS2,PE2,PS3,PE3, &
2522 MS1,ME1,MS2,ME2,MS3,ME3 )
2524 INTEGER , INTENT(IN) :: noutbuf
2525 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2526 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2527 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2528 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2530 INTEGER :: i,j,k,n , icurs
2535 outbuf( icurs ) = inbuf( i, j, k )
2541 END SUBROUTINE just_patch_l
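! The just_patch_{i,r,d,l} routines above differ only in the declared type:
! each copies the patch portion of a memory-dimensioned array into a
! contiguous 1-D buffer for collect_on_comm0, with the first index varying
! fastest. A hedged equivalent of the copy loops in Fortran array syntax,
! where n = (PE1-PS1+1)*(PE2-PS2+1)*(PE3-PS3+1):
!
!   outbuf(1:n) = RESHAPE( inbuf(PS1:PE1,PS2:PE2,PS3:PE3), (/ n /) )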
2544 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
2545 DS1,DE1,DS2,DE2,DS3,DE3, &
2547 USE module_dm, ONLY : ntasks
2549 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2550 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2551 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2553 INTEGER :: i,j,k,n , icurs
2556 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2557 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2558 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2559 outbuf( i, j, k ) = inbuf( icurs )
2567 END SUBROUTINE patch_2_outbuf_r
2569 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
2570 DS1,DE1,DS2,DE2,DS3,DE3,&
2572 USE module_dm, ONLY : ntasks
2574 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2575 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2576 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2578 INTEGER :: i,j,k,n , icurs
2581 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2582 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2583 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2584 outbuf( i, j, k ) = inbuf( icurs )
2591 END SUBROUTINE patch_2_outbuf_i
2593 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
2594 DS1,DE1,DS2,DE2,DS3,DE3,&
2596 USE module_dm, ONLY : ntasks
2598 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2599 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2600 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2602 INTEGER :: i,j,k,n , icurs
2605 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2606 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2607 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2608 outbuf( i, j, k ) = inbuf( icurs )
2615 END SUBROUTINE patch_2_outbuf_d
2617 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
2618 DS1,DE1,DS2,DE2,DS3,DE3,&
2620 USE module_dm, ONLY : ntasks
2622 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2623 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2624 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2626 INTEGER :: i,j,k,n , icurs
2629 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2630 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2631 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2632 outbuf( i, j, k ) = inbuf( icurs )
2639 END SUBROUTINE patch_2_outbuf_l
2641 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2643 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
2644 DS1,DE1,DS2,DE2,DS3,DE3,&
2645 MS1,ME1,MS2,ME2,MS3,ME3,&
2646 PS1,PE1,PS2,PE2,PS3,PE3 )
2648 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2649 MS1,ME1,MS2,ME2,MS3,ME3,&
2650 PS1,PE1,PS2,PE2,PS3,PE3
2651 CHARACTER *(*) stagger,ordering
2656 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
2657 DS1,DE1,DS2,DE2,DS3,DE3,&
2658 MS1,ME1,MS2,ME2,MS3,ME3,&
2659 PS1,PE1,PS2,PE2,PS3,PE3 )
2661 END SUBROUTINE wrf_global_to_patch_real
2663 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
2664 DS1,DE1,DS2,DE2,DS3,DE3,&
2665 MS1,ME1,MS2,ME2,MS3,ME3,&
2666 PS1,PE1,PS2,PE2,PS3,PE3 )
2668 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2669 MS1,ME1,MS2,ME2,MS3,ME3,&
2670 PS1,PE1,PS2,PE2,PS3,PE3
2671 CHARACTER *(*) stagger,ordering
2673 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2674 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2675 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
2676 ! since we are not indexing the globbuf and Field arrays, it does not matter
2680 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
2681 DS1,DE1,DS2,DE2,DS3,DE3,&
2682 MS1,ME1,MS2,ME2,MS3,ME3,&
2683 PS1,PE1,PS2,PE2,PS3,PE3 )
2685 END SUBROUTINE wrf_global_to_patch_double
2688 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
2689 DS1,DE1,DS2,DE2,DS3,DE3,&
2690 MS1,ME1,MS2,ME2,MS3,ME3,&
2691 PS1,PE1,PS2,PE2,PS3,PE3 )
2693 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2694 MS1,ME1,MS2,ME2,MS3,ME3,&
2695 PS1,PE1,PS2,PE2,PS3,PE3
2696 CHARACTER *(*) stagger,ordering
2701 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
2702 DS1,DE1,DS2,DE2,DS3,DE3,&
2703 MS1,ME1,MS2,ME2,MS3,ME3,&
2704 PS1,PE1,PS2,PE2,PS3,PE3 )
2706 END SUBROUTINE wrf_global_to_patch_integer
2708 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
2709 DS1,DE1,DS2,DE2,DS3,DE3,&
2710 MS1,ME1,MS2,ME2,MS3,ME3,&
2711 PS1,PE1,PS2,PE2,PS3,PE3 )
2713 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2714 MS1,ME1,MS2,ME2,MS3,ME3,&
2715 PS1,PE1,PS2,PE2,PS3,PE3
2716 CHARACTER *(*) stagger,ordering
2721 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
2722 DS1,DE1,DS2,DE2,DS3,DE3,&
2723 MS1,ME1,MS2,ME2,MS3,ME3,&
2724 PS1,PE1,PS2,PE2,PS3,PE3 )
2726 END SUBROUTINE wrf_global_to_patch_logical
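! These typed wrappers mirror the PATCH TO GLOBAL set and funnel into
! wrf_global_to_patch_generic, which scatters a monitor-resident global array
! back onto every task's patch. A hedged sketch, reversing the earlier gather
! example (names illustrative, stagger/ordering chosen so no dimension
! adjustment occurs):
!
!   CALL wrf_global_to_patch_real ( psfc_glob, psfc, grid%domdesc, 'xy', 'xy', &
!                                   ids, ide, jds, jde, 1, 1,                  &
!                                   ims, ime, jms, jme, 1, 1,                  &
!                                   ips, ipe, jps, jpe, 1, 1 )
!
! Only the monitor's psfc_glob must be valid on entry; each task's patch is
! valid on return.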
2728 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
2729 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2730 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2731 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2732 USE module_dm, ONLY : local_communicator, ntasks
2733 USE module_driver_constants
2735 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2736 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2737 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2738 CHARACTER *(*) stagger,ordering
2739 INTEGER domdesc,typesize,ierr
2743 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2744 MS1,ME1,MS2,ME2,MS3,ME3,&
2745 PS1,PE1,PS2,PE2,PS3,PE3
2746 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2748 INTEGER i,j,k,ord,ord2d,ndim
2749 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2750 REAL, ALLOCATABLE :: tmpbuf( : )
2751 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2753 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2754 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2755 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2757 SELECT CASE ( TRIM(ordering) )
2761 ndim = 3 ! where appropriate
2764 SELECT CASE ( TRIM(ordering) )
2766 ! the non-staggered variables come in at one less than the
2767 ! domain dimensions, but the code wants the full domain spec, so
2768 ! adjust upward if not staggered
2769 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2770 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2771 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2773 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2774 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2775 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2777 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2778 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2779 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2781 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2782 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2783 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
2787 ! moved to here to be after the potential recalculations of D dims
2788 IF ( wrf_dm_on_monitor() ) THEN
2789 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2791 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2793 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
2795 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2796 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2797 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2799 ! defined in external/io_quilt
2800 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2803 ndim = len(TRIM(ordering))
2805 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
2806 IF ( typesize .EQ. RWORDSIZE ) THEN
2807 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
2808 DS1, DE1, DS2, DE2, DS3, DE3 , &
2809 MS1, ME1, MS2, ME2, MS3, ME3 , &
2811 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2812 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
2813 DS1, DE1, DS2, DE2, DS3, DE3 , &
2815 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2816 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
2817 DS1, DE1, DS2, DE2, DS3, DE3 , &
2819 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2820 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
2821 DS1, DE1, DS2, DE2, DS3, DE3 , &
2826 CALL dist_on_comm0 ( local_communicator , typesize , &
2827 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
2828 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
2830 IF ( typesize .EQ. RWORDSIZE ) THEN
2831 CALL all_sub_r ( locbuf , buf , &
2832 PS1, PE1, PS2, PE2, PS3, PE3 , &
2833 MS1, ME1, MS2, ME2, MS3, ME3 )
2835 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2836 CALL all_sub_i ( locbuf , buf , &
2837 PS1, PE1, PS2, PE2, PS3, PE3 , &
2838 MS1, ME1, MS2, ME2, MS3, ME3 )
2839 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2840 CALL all_sub_d ( locbuf , buf , &
2841 PS1, PE1, PS2, PE2, PS3, PE3 , &
2842 MS1, ME1, MS2, ME2, MS3, ME3 )
2843 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2844 CALL all_sub_l ( locbuf , buf , &
2845 PS1, PE1, PS2, PE2, PS3, PE3 , &
2846 MS1, ME1, MS2, ME2, MS3, ME3 )
2850 DEALLOCATE ( tmpbuf )
2853 END SUBROUTINE wrf_global_to_patch_generic
2855 SUBROUTINE all_sub_i ( inbuf , outbuf, &
2856 PS1,PE1,PS2,PE2,PS3,PE3, &
2857 MS1,ME1,MS2,ME2,MS3,ME3 )
2859 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2860 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2861 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2862 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2864 INTEGER :: i,j,k,n , icurs
2869 outbuf( i, j, k ) = inbuf ( icurs )
2875 END SUBROUTINE all_sub_i
2877 SUBROUTINE all_sub_r ( inbuf , outbuf, &
2878 PS1,PE1,PS2,PE2,PS3,PE3, &
2879 MS1,ME1,MS2,ME2,MS3,ME3 )
2881 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2882 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2883 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2884 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2886 INTEGER :: i,j,k,n , icurs
2891 outbuf( i, j, k ) = inbuf ( icurs )
2898 END SUBROUTINE all_sub_r
2900 SUBROUTINE all_sub_d ( inbuf , outbuf, &
2901 PS1,PE1,PS2,PE2,PS3,PE3, &
2902 MS1,ME1,MS2,ME2,MS3,ME3 )
2904 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2905 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2906 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2907 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2909 INTEGER :: i,j,k,n , icurs
2914 outbuf( i, j, k ) = inbuf ( icurs )
2920 END SUBROUTINE all_sub_d
2922 SUBROUTINE all_sub_l ( inbuf , outbuf, &
2923 PS1,PE1,PS2,PE2,PS3,PE3, &
2924 MS1,ME1,MS2,ME2,MS3,ME3 )
2926 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2927 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2928 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2929 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2931 INTEGER :: i,j,k,n , icurs
2936 outbuf( i, j, k ) = inbuf ( icurs )
2942 END SUBROUTINE all_sub_l
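! The all_sub_{i,r,d,l} routines above are the inverse of just_patch_*: they
! unpack the contiguous buffer delivered by dist_on_comm0 back into the patch
! portion of a memory-dimensioned array, again with the first index varying
! fastest.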
2944 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
2945 DS1,DE1,DS2,DE2,DS3,DE3, &
2946 MS1, ME1, MS2, ME2, MS3, ME3 , &
2948 USE module_dm, ONLY : ntasks
2950 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2951 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2952 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2953 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2955 INTEGER :: i,j,k,n , icurs
2959 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2960 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2961 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2962 outbuf( icurs ) = inbuf( i,j,k )
2969 END SUBROUTINE outbuf_2_patch_r
2971 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
2972 DS1,DE1,DS2,DE2,DS3,DE3,&
2974 USE module_dm, ONLY : ntasks
2976 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
2977 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2978 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2980 INTEGER :: i,j,k,n , icurs
2983 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2984 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2985 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2986 outbuf( icurs ) = inbuf( i,j,k )
2993 END SUBROUTINE outbuf_2_patch_i
2995 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
2996 DS1,DE1,DS2,DE2,DS3,DE3,&
2998 USE module_dm, ONLY : ntasks
3000 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
3001 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3002 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3004 INTEGER :: i,j,k,n , icurs
3007 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3008 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3009 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3010 outbuf( icurs ) = inbuf( i,j,k )
3017 END SUBROUTINE outbuf_2_patch_d
3019 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
3020 DS1,DE1,DS2,DE2,DS3,DE3,&
3022 USE module_dm, ONLY : ntasks
3024 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
3025 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3026 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3028 INTEGER :: i,j,k,n , icurs
3031 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3032 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3033 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3034 outbuf( icurs ) = inbuf( i,j,k )
3041 END SUBROUTINE outbuf_2_patch_l
3045 !------------------------------------------------------------------
3047 #if ( EM_CORE == 1 && DA_CORE != 1 )
3049 !------------------------------------------------------------------
3051 SUBROUTINE force_domain_em_part2 ( grid, ngrid, config_flags &
3053 #include "dummy_new_args.inc"
3056 USE module_state_description
3057 USE module_domain, ONLY : domain, get_ijk_from_grid
3058 USE module_configure, ONLY : grid_config_rec_type
3059 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, local_communicator, mytask
3060 USE module_comm_nesting_dm, ONLY : halo_force_down_sub
3063 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3064 TYPE(domain), POINTER :: ngrid
3065 #include <dummy_new_decl.inc>
3067 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3068 TYPE (grid_config_rec_type) :: config_flags
3070 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3071 cims, cime, cjms, cjme, ckms, ckme, &
3072 cips, cipe, cjps, cjpe, ckps, ckpe
3073 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3074 nims, nime, njms, njme, nkms, nkme, &
3075 nips, nipe, njps, njpe, nkps, nkpe
3076 INTEGER :: ids, ide, jds, jde, kds, kde, &
3077 ims, ime, jms, jme, kms, kme, &
3078 ips, ipe, jps, jpe, kps, kpe
3079 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7,itrace
3080 REAL dummy_xs, dummy_xe, dummy_ys, dummy_ye
3082 CALL get_ijk_from_grid ( grid , &
3083 cids, cide, cjds, cjde, ckds, ckde, &
3084 cims, cime, cjms, cjme, ckms, ckme, &
3085 cips, cipe, cjps, cjpe, ckps, ckpe )
3086 CALL get_ijk_from_grid ( ngrid , &
3087 nids, nide, njds, njde, nkds, nkde, &
3088 nims, nime, njms, njme, nkms, nkme, &
3089 nips, nipe, njps, njpe, nkps, nkpe )
3091 nlev = ckde - ckds + 1
3093 #include "nest_interpdown_unpack.inc"
3095 CALL get_ijk_from_grid ( grid , &
3096 ids, ide, jds, jde, kds, kde, &
3097 ims, ime, jms, jme, kms, kme, &
3098 ips, ipe, jps, jpe, kps, kpe )
3100 #include "HALO_FORCE_DOWN.inc"
3102 ! code here to interpolate the data into the nested domain
3103 # include "nest_forcedown_interp.inc"
3106 END SUBROUTINE force_domain_em_part2
3108 !------------------------------------------------------------------
3110 SUBROUTINE interp_domain_em_part1 ( grid, intermediate_grid, ngrid, config_flags &
3112 #include "dummy_new_args.inc"
3115 USE module_state_description
3116 USE module_domain, ONLY : domain, get_ijk_from_grid
3117 USE module_configure, ONLY : grid_config_rec_type
3118 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
3119 mytask, get_dm_max_halo_width
3123 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3124 TYPE(domain), POINTER :: intermediate_grid
3125 TYPE(domain), POINTER :: ngrid
3126 #include <dummy_new_decl.inc>
3128 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3129 INTEGER iparstrt,jparstrt,sw
3130 TYPE (grid_config_rec_type) :: config_flags
3132 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3133 cims, cime, cjms, cjme, ckms, ckme, &
3134 cips, cipe, cjps, cjpe, ckps, ckpe
3135 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
3136 iims, iime, ijms, ijme, ikms, ikme, &
3137 iips, iipe, ijps, ijpe, ikps, ikpe
3138 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3139 nims, nime, njms, njme, nkms, nkme, &
3140 nips, nipe, njps, njpe, nkps, nkpe
3142 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3144 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
3145 INTEGER thisdomain_max_halo_width
3146 INTEGER local_comm, myproc, nproc
3148 CALL wrf_get_dm_communicator ( local_comm )
3149 CALL wrf_get_myproc( myproc )
3150 CALL wrf_get_nproc( nproc )
3152 CALL get_ijk_from_grid ( grid , &
3153 cids, cide, cjds, cjde, ckds, ckde, &
3154 cims, cime, cjms, cjme, ckms, ckme, &
3155 cips, cipe, cjps, cjpe, ckps, ckpe )
3156 CALL get_ijk_from_grid ( intermediate_grid , &
3157 iids, iide, ijds, ijde, ikds, ikde, &
3158 iims, iime, ijms, ijme, ikms, ikme, &
3159 iips, iipe, ijps, ijpe, ikps, ikpe )
3160 CALL get_ijk_from_grid ( ngrid , &
3161 nids, nide, njds, njde, nkds, nkde, &
3162 nims, nime, njms, njme, nkms, nkme, &
3163 nips, nipe, njps, njpe, nkps, nkpe )
3165 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
3166 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3167 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3168 CALL nl_get_shw ( intermediate_grid%id, sw )
3169 icoord = iparstrt - sw
3170 jcoord = jparstrt - sw
3171 idim_cd = iide - iids + 1
3172 jdim_cd = ijde - ijds + 1
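! Illustrative arithmetic for the mapping above (hypothetical values): with
! i_parent_start = 10 and stencil half-width sw = 2, icoord = 8; the
! intermediate domain is registered sw points before the nest's parent start,
! presumably so interpolation stencils near the nest edge have parent data to
! draw on.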
3174 nlev = ckde - ckds + 1
3176 ! get max_halo_width for the parent. It may be smaller if the parent is the MOAD (mother of all domains)
3177 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
3179 #include "nest_interpdown_pack.inc"
3181 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
3184 END SUBROUTINE interp_domain_em_part1
3186 !------------------------------------------------------------------
3188 SUBROUTINE interp_domain_em_part2 ( grid, ngrid, config_flags &
3190 #include "dummy_new_args.inc"
3193 USE module_state_description
3194 USE module_domain, ONLY : domain, get_ijk_from_grid
3195 USE module_configure, ONLY : grid_config_rec_type
3196 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
3197 mytask, get_dm_max_halo_width
3198 USE module_comm_nesting_dm, ONLY : halo_interp_down_sub
3201 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3202 TYPE(domain), POINTER :: ngrid
3203 #include <dummy_new_decl.inc>
3205 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3206 TYPE (grid_config_rec_type) :: config_flags
3208 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3209 cims, cime, cjms, cjme, ckms, ckme, &
3210 cips, cipe, cjps, cjpe, ckps, ckpe
3211 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3212 nims, nime, njms, njme, nkms, nkme, &
3213 nips, nipe, njps, njpe, nkps, nkpe
3214 INTEGER :: ids, ide, jds, jde, kds, kde, &
3215 ims, ime, jms, jme, kms, kme, &
3216 ips, ipe, jps, jpe, kps, kpe
3218 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3222 INTEGER thisdomain_max_halo_width
3224 CALL get_ijk_from_grid ( grid , &
3225 cids, cide, cjds, cjde, ckds, ckde, &
3226 cims, cime, cjms, cjme, ckms, ckme, &
3227 cips, cipe, cjps, cjpe, ckps, ckpe )
3228 CALL get_ijk_from_grid ( ngrid , &
3229 nids, nide, njds, njde, nkds, nkde, &
3230 nims, nime, njms, njme, nkms, nkme, &
3231 nips, nipe, njps, njpe, nkps, nkpe )
3233 nlev = ckde - ckds + 1
3235 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
3237 #include "nest_interpdown_unpack.inc"
3239 CALL get_ijk_from_grid ( grid , &
3240 ids, ide, jds, jde, kds, kde, &
3241 ims, ime, jms, jme, kms, kme, &
3242 ips, ipe, jps, jpe, kps, kpe )
3244 #include "HALO_INTERP_DOWN.inc"
3246 # include "nest_interpdown_interp.inc"
3249 END SUBROUTINE interp_domain_em_part2
3251 !------------------------------------------------------------------
3253 SUBROUTINE feedback_nest_prep ( grid, config_flags &
3255 #include "dummy_new_args.inc"
3258 USE module_state_description
3259 USE module_domain, ONLY : domain, get_ijk_from_grid
3260 USE module_configure, ONLY : grid_config_rec_type
3261 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask
3262 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
3265 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3266    TYPE (grid_config_rec_type) :: config_flags   ! configuration flags; carries the vertical dims of
3267                                                   ! soil temp, moisture, etc., and of
3268                                                   ! soil categories
3269 #include <dummy_new_decl.inc>
3271 INTEGER :: ids, ide, jds, jde, kds, kde, &
3272 ims, ime, jms, jme, kms, kme, &
3273 ips, ipe, jps, jpe, kps, kpe
3275 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3277 INTEGER :: idum1, idum2
3280 CALL get_ijk_from_grid ( grid , &
3281 ids, ide, jds, jde, kds, kde, &
3282 ims, ime, jms, jme, kms, kme, &
3283 ips, ipe, jps, jpe, kps, kpe )
3286 #include "HALO_INTERP_UP.inc"
3289 END SUBROUTINE feedback_nest_prep
3291 !------------------------------------------------------------------
3293 SUBROUTINE feedback_domain_em_part1 ( grid, ngrid, config_flags &
3295 #include "dummy_new_args.inc"
3298 USE module_state_description
3299 USE module_domain, ONLY : domain, get_ijk_from_grid
3300 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
3301 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3302 ipe_save, jpe_save, ips_save, jps_save
3306 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3307 TYPE(domain), POINTER :: ngrid
3308 #include <dummy_new_decl.inc>
3310 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3311 TYPE(domain), POINTER :: xgrid
3312 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3314 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3315 cims, cime, cjms, cjme, ckms, ckme, &
3316 cips, cipe, cjps, cjpe, ckps, ckpe
3317 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3318 nims, nime, njms, njme, nkms, nkme, &
3319 nips, nipe, njps, njpe, nkps, nkpe
3321 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3323 INTEGER local_comm, myproc, nproc, idum1, idum2
3324 INTEGER thisdomain_max_halo_width
3327 SUBROUTINE feedback_nest_prep ( grid, config_flags &
3329 #include "dummy_new_args.inc"
3332 USE module_state_description
3333 USE module_domain, ONLY : domain
3334 USE module_configure, ONLY : grid_config_rec_type
3336 TYPE (grid_config_rec_type) :: config_flags
3337 TYPE(domain), TARGET :: grid
3338 #include <dummy_new_decl.inc>
3339 END SUBROUTINE feedback_nest_prep
3343 CALL wrf_get_dm_communicator ( local_comm )
3344 CALL wrf_get_myproc( myproc )
3345 CALL wrf_get_nproc( nproc )
3349 CALL get_ijk_from_grid ( grid , &
3350 cids, cide, cjds, cjde, ckds, ckde, &
3351 cims, cime, cjms, cjme, ckms, ckme, &
3352 cips, cipe, cjps, cjpe, ckps, ckpe )
3354 CALL get_ijk_from_grid ( ngrid , &
3355 nids, nide, njds, njde, nkds, nkde, &
3356 nims, nime, njms, njme, nkms, nkme, &
3357 nips, nipe, njps, njpe, nkps, nkpe )
3359 nlev = ckde - ckds + 1
3361 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
3362 jps_save = ngrid%j_parent_start
3363 ipe_save = ngrid%i_parent_start + (nide-nids+1) / ngrid%parent_grid_ratio - 1
3364 jpe_save = ngrid%j_parent_start + (njde-njds+1) / ngrid%parent_grid_ratio - 1
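! Illustrative arithmetic (hypothetical values): with i_parent_start = 10,
! nide-nids+1 = 91 nest points, and parent_grid_ratio = 3, integer division
! gives ipe_save = 10 + 91/3 - 1 = 39, i.e. the nest spans parent columns
! 10..39 for the purposes of feedback_domain_em_part2 below.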
3366 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
3367 ! in a separate routine because the HALOs need the data to be dereferenced from the
3368 ! grid data structure and, in this routine, the dereferenced fields are related to
3369 ! the intermediate domain, not the nest itself. Save the current grid pointer to the
3370 ! intermediate domain, switch grid to point to ngrid, invoke feedback_nest_prep, then
3371 ! restore grid to point to the intermediate domain.
3373 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3374 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3378 CALL feedback_nest_prep ( grid, nconfig_flags &
3380 #include "actual_new_args.inc"
3384 ! put things back so grid is intermediate grid
3387 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3389 ! "interp" (basically copy) ngrid onto intermediate grid
3391 #include "nest_feedbackup_interp.inc"
3394 END SUBROUTINE feedback_domain_em_part1
3396 !------------------------------------------------------------------
3398 SUBROUTINE feedback_domain_em_part2 ( grid, intermediate_grid, ngrid , config_flags &
3400 #include "dummy_new_args.inc"
3403 USE module_state_description
3404 USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
3405 USE module_configure, ONLY : grid_config_rec_type, model_config_rec
3406 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3407 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3408 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
3413 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3414 TYPE(domain), POINTER :: intermediate_grid
3415 TYPE(domain), POINTER :: ngrid
3417 #include <dummy_new_decl.inc>
3419 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3420 TYPE (grid_config_rec_type) :: config_flags
3422 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3423 cims, cime, cjms, cjme, ckms, ckme, &
3424 cips, cipe, cjps, cjpe, ckps, ckpe
3425 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3426 nims, nime, njms, njme, nkms, nkme, &
3427 nips, nipe, njps, njpe, nkps, nkpe
3428 INTEGER :: ids, ide, jds, jde, kds, kde, &
3429 ims, ime, jms, jme, kms, kme, &
3430 ips, ipe, jps, jpe, kps, kpe
3432 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3434 INTEGER icoord, jcoord, idim_cd, jdim_cd
3435 INTEGER local_comm, myproc, nproc
3436 INTEGER iparstrt, jparstrt, sw, thisdomain_max_halo_width
3439 character*256 :: timestr
3442 LOGICAL, EXTERNAL :: cd_feedback_mask
3444 ! On entry to this routine,
3445 !  "grid" refers to the parent domain
3446 !  "intermediate_grid" refers to the local copy of the parent domain that overlies this patch of the nest
3447 !  "ngrid" refers to the nest, which is only needed for smoothing on the parent because
3448 !  the nest feedback data has already been transferred during em_nest_feedbackup_interp
3450 ! The way the c and n dimensions are set below looks backwards, but from the point
3451 ! of view of the RSL routine rsl_lite_to_parent_info(), a call to which is included by
3452 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
3453 ! represents the intermediate domain. This backwardness should be fixed in the gen_comms.c
3454 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
3455 ! sign that says "DIP" than to fix the dip, at this point it was easier just to write this comment. JM
3459 CALL domain_clock_get( grid, current_timestr=timestr )
3461 CALL get_ijk_from_grid ( intermediate_grid , &
3462 cids, cide, cjds, cjde, ckds, ckde, &
3463 cims, cime, cjms, cjme, ckms, ckme, &
3464 cips, cipe, cjps, cjpe, ckps, ckpe )
3465 CALL get_ijk_from_grid ( grid , &
3466 nids, nide, njds, njde, nkds, nkde, &
3467 nims, nime, njms, njme, nkms, nkme, &
3468 nips, nipe, njps, njpe, nkps, nkpe )
3470 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3471 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3472 CALL nl_get_shw ( intermediate_grid%id, sw )
3473 icoord = iparstrt - sw
3474 jcoord = jparstrt - sw
3475 idim_cd = cide - cids + 1
3476 jdim_cd = cjde - cjds + 1
3478 nlev = ckde - ckds + 1
3480 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
3482 #include "nest_feedbackup_pack.inc"
3484 CALL wrf_get_dm_communicator ( local_comm )
3485 CALL wrf_get_myproc( myproc )
3486 CALL wrf_get_nproc( nproc )
3488 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
3490 #define NEST_INFLUENCE(A,B) A = B
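! In the EM-core feedback path NEST_INFLUENCE is a straight overwrite: a
! point fed back from the nest simply replaces the parent value during
! nest_feedbackup_unpack. Compare the NMM feedback path later in this file,
! which redefines the macro as a weighted blend controlled by nest_influence.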
3491 #include "nest_feedbackup_unpack.inc"
3493 ! smooth coarse grid
3494 CALL get_ijk_from_grid ( ngrid, &
3495 nids, nide, njds, njde, nkds, nkde, &
3496 nims, nime, njms, njme, nkms, nkme, &
3497 nips, nipe, njps, njpe, nkps, nkpe )
3498 CALL get_ijk_from_grid ( grid , &
3499 ids, ide, jds, jde, kds, kde, &
3500 ims, ime, jms, jme, kms, kme, &
3501 ips, ipe, jps, jpe, kps, kpe )
3503 #include "HALO_INTERP_UP.inc"
3505 CALL get_ijk_from_grid ( grid , &
3506 cids, cide, cjds, cjde, ckds, ckde, &
3507 cims, cime, cjms, cjme, ckms, ckme, &
3508 cips, cipe, cjps, cjpe, ckps, ckpe )
3510 #include "nest_feedbackup_smooth.inc"
3513 END SUBROUTINE feedback_domain_em_part2
3516 #if ( NMM_CORE == 1 && NMM_NEST == 1 )
3517 !==============================================================================
3518 ! NMM nesting infrastructure extended from EM core. This is gopal's doing.
3519 !==============================================================================
3521 SUBROUTINE interp_domain_nmm_part1 ( grid, intermediate_grid, ngrid, config_flags &
3523 #include "dummy_new_args.inc"
3526 USE module_state_description
3527 USE module_domain, ONLY : domain, get_ijk_from_grid
3528 USE module_configure, ONLY : grid_config_rec_type
3529 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3530 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3534 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3535 TYPE(domain), POINTER :: intermediate_grid
3536 TYPE(domain), POINTER :: ngrid
3537 #include <dummy_new_decl.inc>
3539 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3540 INTEGER iparstrt,jparstrt,sw
3541 TYPE (grid_config_rec_type) :: config_flags
3543 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3544 cims, cime, cjms, cjme, ckms, ckme, &
3545 cips, cipe, cjps, cjpe, ckps, ckpe
3546 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
3547 iims, iime, ijms, ijme, ikms, ikme, &
3548 iips, iipe, ijps, ijpe, ikps, ikpe
3549 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3550 nims, nime, njms, njme, nkms, nkme, &
3551 nips, nipe, njps, njpe, nkps, nkpe
3553 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3555 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
3556 INTEGER local_comm, myproc, nproc
3557 INTEGER thisdomain_max_halo_width
3559 CALL wrf_get_dm_communicator ( local_comm )
3560 CALL wrf_get_myproc( myproc )
3561 CALL wrf_get_nproc( nproc )
3564 !#include <scalar_derefs.inc>
3566 CALL get_ijk_from_grid ( grid , &
3567 cids, cide, cjds, cjde, ckds, ckde, &
3568 cims, cime, cjms, cjme, ckms, ckme, &
3569 cips, cipe, cjps, cjpe, ckps, ckpe )
3570 CALL get_ijk_from_grid ( intermediate_grid , &
3571 iids, iide, ijds, ijde, ikds, ikde, &
3572 iims, iime, ijms, ijme, ikms, ikme, &
3573 iips, iipe, ijps, ijpe, ikps, ikpe )
3574 CALL get_ijk_from_grid ( ngrid , &
3575 nids, nide, njds, njde, nkds, nkde, &
3576 nims, nime, njms, njme, nkms, nkme, &
3577 nips, nipe, njps, njpe, nkps, nkpe )
3579 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
3580 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3581 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3582 CALL nl_get_shw ( intermediate_grid%id, sw )
3583 icoord = iparstrt - sw
3584 jcoord = jparstrt - sw
3585 idim_cd = iide - iids + 1
3586 jdim_cd = ijde - ijds + 1
3588 nlev = ckde - ckds + 1
3590 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
3591 #include "nest_interpdown_pack.inc"
3593 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
3596 !#include <scalar_derefs.inc>
3598 END SUBROUTINE interp_domain_nmm_part1
3600 !------------------------------------------------------------------
3602 SUBROUTINE interp_domain_nmm_part2 ( grid, ngrid, config_flags &
3604 #include "dummy_new_args.inc"
3607 USE module_state_description
3608 USE module_domain, ONLY : domain, get_ijk_from_grid
3609 USE module_configure, ONLY : grid_config_rec_type
3610 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3611 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3612 USE module_comm_nesting_dm, ONLY : halo_interp_down_sub
3615 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3616 TYPE(domain), POINTER :: ngrid
3617 #include <dummy_new_decl.inc>
3619 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3620 TYPE (grid_config_rec_type) :: config_flags
3622 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3623 cims, cime, cjms, cjme, ckms, ckme, &
3624 cips, cipe, cjps, cjpe, ckps, ckpe
3625 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3626 nims, nime, njms, njme, nkms, nkme, &
3627 nips, nipe, njps, njpe, nkps, nkpe
3628 INTEGER :: ids, ide, jds, jde, kds, kde, &
3629 ims, ime, jms, jme, kms, kme, &
3630 ips, ipe, jps, jpe, kps, kpe
3632 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3637 !#ifdef DEREF_KLUDGE
3638 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3639 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3640 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3641 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3643 #include "deref_kludge.h"
3646 !#include <scalar_derefs.inc>
3647 CALL get_ijk_from_grid ( grid , &
3648 cids, cide, cjds, cjde, ckds, ckde, &
3649 cims, cime, cjms, cjme, ckms, ckme, &
3650 cips, cipe, cjps, cjpe, ckps, ckpe )
3651 CALL get_ijk_from_grid ( ngrid , &
3652 nids, nide, njds, njde, nkds, nkde, &
3653 nims, nime, njms, njme, nkms, nkme, &
3654 nips, nipe, njps, njpe, nkps, nkpe )
3656 nlev = ckde - ckds + 1
3658 #include "nest_interpdown_unpack.inc"
3660 CALL get_ijk_from_grid ( grid , &
3661 ids, ide, jds, jde, kds, kde, &
3662 ims, ime, jms, jme, kms, kme, &
3663 ips, ipe, jps, jpe, kps, kpe )
3665 #include "HALO_INTERP_DOWN.inc"
3667 #include "nest_interpdown_interp.inc"
3670 !#include <scalar_derefs.inc>
3673 END SUBROUTINE interp_domain_nmm_part2
3675 !------------------------------------------------------------------
3677 SUBROUTINE force_domain_nmm_part1 ( grid, intermediate_grid, config_flags &
3679 #include "dummy_new_args.inc"
3682 USE module_state_description
3683 USE module_domain, ONLY : domain, get_ijk_from_grid
3684 USE module_configure, ONLY : grid_config_rec_type
3685 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3686 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3689 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3690 TYPE(domain), POINTER :: intermediate_grid
3691 #include <dummy_new_decl.inc>
3693 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3694 TYPE (grid_config_rec_type) :: config_flags
3696 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3697 cims, cime, cjms, cjme, ckms, ckme, &
3698 cips, cipe, cjps, cjpe, ckps, ckpe
3699 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3700 nims, nime, njms, njme, nkms, nkme, &
3701 nips, nipe, njps, njpe, nkps, nkpe
3703 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3706 !#include <scalar_derefs.inc>
3708 CALL get_ijk_from_grid ( grid , &
3709 cids, cide, cjds, cjde, ckds, ckde, &
3710 cims, cime, cjms, cjme, ckms, ckme, &
3711 cips, cipe, cjps, cjpe, ckps, ckpe )
3713 CALL get_ijk_from_grid ( intermediate_grid , &
3714 nids, nide, njds, njde, nkds, nkde, &
3715 nims, nime, njms, njme, nkms, nkme, &
3716 nips, nipe, njps, njpe, nkps, nkpe )
3718 nlev = ckde - ckds + 1
3720 #include "nest_forcedown_pack.inc"
3722 ! WRITE(0,*)'I have completed PACKING of BCs data successfully'
3725 !#include <scalar_derefs.inc>
3727 END SUBROUTINE force_domain_nmm_part1
3729 !==============================================================================================
3731 SUBROUTINE force_domain_nmm_part2 ( grid, ngrid, config_flags &
3733 #include "dummy_new_args.inc"
3736 USE module_state_description
3737 USE module_domain, ONLY : domain, get_ijk_from_grid
3738 USE module_configure, ONLY : grid_config_rec_type
3739 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3740 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3741 USE module_comm_dm, ONLY : HALO_NMM_FORCE_DOWN1_sub
3744 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3745 TYPE(domain), POINTER :: ngrid
3746 #include <dummy_new_decl.inc>
3748 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3749 TYPE (grid_config_rec_type) :: config_flags
3751 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3752 cims, cime, cjms, cjme, ckms, ckme, &
3753 cips, cipe, cjps, cjpe, ckps, ckpe
3754 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3755 nims, nime, njms, njme, nkms, nkme, &
3756 nips, nipe, njps, njpe, nkps, nkpe
3757 INTEGER :: ids, ide, jds, jde, kds, kde, &
3758 ims, ime, jms, jme, kms, kme, &
3759 ips, ipe, jps, jpe, kps, kpe
3761 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3762 REAL dummy_xs, dummy_xe, dummy_ys, dummy_ye
3766 !#ifdef DEREF_KLUDGE
3767 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3768 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3769 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3770 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3772 #include "deref_kludge.h"
3775 !#include <scalar_derefs.inc>
3777 CALL get_ijk_from_grid ( grid , &
3778 cids, cide, cjds, cjde, ckds, ckde, &
3779 cims, cime, cjms, cjme, ckms, ckme, &
3780 cips, cipe, cjps, cjpe, ckps, ckpe )
3781 CALL get_ijk_from_grid ( ngrid , &
3782 nids, nide, njds, njde, nkds, nkde, &
3783 nims, nime, njms, njme, nkms, nkme, &
3784 nips, nipe, njps, njpe, nkps, nkpe )
3786 nlev = ckde - ckds + 1
3788 #include "nest_interpdown_unpack.inc"
3790 CALL get_ijk_from_grid ( grid , &
3791 ids, ide, jds, jde, kds, kde, &
3792 ims, ime, jms, jme, kms, kme, &
3793 ips, ipe, jps, jpe, kps, kpe )
3795 #include "HALO_NMM_FORCE_DOWN1.inc"
3797 ! code here to interpolate the data into the nested domain
3798 #include "nest_forcedown_interp.inc"
3801 !#include <scalar_derefs.inc>
3804 END SUBROUTINE force_domain_nmm_part2
3806 !================================================================================
3808 ! This routine exists only to invoke a halo exchange on a domain (the nest).
3809 ! It gets called from feedback_domain_nmm_part1, below. This is needed
3810 ! because the halo code expects the fields being exchanged to have
3811 ! been dereferenced from the grid data structure, but in feedback_domain_nmm_part1
3812 ! the grid data structure points to the coarse domain, not the nest.
3813 ! And we want the halo exchange on the nest, so that the code in
3814 ! em_nest_feedbackup_interp.inc will work correctly in multi-processor runs. JM 20040308
3817 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3819 #include "dummy_new_args.inc"
3822 USE module_state_description
3823 USE module_domain, ONLY : domain, get_ijk_from_grid
3824 USE module_configure, ONLY : grid_config_rec_type
3825 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3826 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3827 USE module_comm_dm, ONLY : HALO_NMM_WEIGHTS_sub
3830 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3831    TYPE (grid_config_rec_type) :: config_flags   ! configuration flags; carries the vertical dims of
3832                                                   ! soil temp, moisture, etc., and of
3833                                                   ! soil categories
3834 #include <dummy_new_decl.inc>
3836 INTEGER :: ids, ide, jds, jde, kds, kde, &
3837 ims, ime, jms, jme, kms, kme, &
3838 ips, ipe, jps, jpe, kps, kpe
3840 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3842 INTEGER :: idum1, idum2
3845 !#ifdef DEREF_KLUDGE
3846 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3847 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3848 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3849 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3851 #include "deref_kludge.h"
3854 !#include <scalar_derefs.inc>
3856 CALL get_ijk_from_grid ( grid , &
3857 ids, ide, jds, jde, kds, kde, &
3858 ims, ime, jms, jme, kms, kme, &
3859 ips, ipe, jps, jpe, kps, kpe )
3862 #include "HALO_NMM_WEIGHTS.inc"
3866 !#include <scalar_derefs.inc>
3868 END SUBROUTINE feedback_nest_prep_nmm
3870 !------------------------------------------------------------------
3872 SUBROUTINE feedback_domain_nmm_part1 ( grid, ngrid, config_flags &
3874 #include "dummy_new_args.inc"
3877 USE module_state_description
3878 USE module_domain, ONLY : domain, get_ijk_from_grid
3879 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
3880 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3881 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3884 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3885 TYPE(domain), POINTER :: ngrid
3886 #include <dummy_new_decl.inc>
3888 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3889 TYPE(domain), POINTER :: xgrid
3890 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3892 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3893 cims, cime, cjms, cjme, ckms, ckme, &
3894 cips, cipe, cjps, cjpe, ckps, ckpe
3895 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3896 nims, nime, njms, njme, nkms, nkme, &
3897 nips, nipe, njps, njpe, nkps, nkpe
3899 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3901 INTEGER local_comm, myproc, nproc, idum1, idum2
3903 !#ifdef DEREF_KLUDGE
3904 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3905 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3906 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3907 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3911 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3913 #include "dummy_new_args.inc"
3916 USE module_state_description
3917 USE module_domain, ONLY : domain
3918 USE module_configure, ONLY : grid_config_rec_type
3920 TYPE (grid_config_rec_type) :: config_flags
3921 TYPE(domain), TARGET :: grid
3922 #include <dummy_new_decl.inc>
3923 END SUBROUTINE feedback_nest_prep_nmm
3927 !#include <scalar_derefs.inc>
3929 CALL wrf_get_dm_communicator ( local_comm )
3930 CALL wrf_get_myproc( myproc )
3931 CALL wrf_get_nproc( nproc )
3936 CALL get_ijk_from_grid ( grid , &
3937 cids, cide, cjds, cjde, ckds, ckde, &
3938 cims, cime, cjms, cjme, ckms, ckme, &
3939 cips, cipe, cjps, cjpe, ckps, ckpe )
3941 CALL get_ijk_from_grid ( ngrid , &
3942 nids, nide, njds, njde, nkds, nkde, &
3943 nims, nime, njms, njme, nkms, nkme, &
3944 nips, nipe, njps, njpe, nkps, nkpe )
3946 nlev = ckde - ckds + 1
3948   ips_save = ngrid%i_parent_start   ! the +1 is not used in ipe_save and jpe_save
3949   jps_save = ngrid%j_parent_start   ! because of the one extra namelist point
3950 ipe_save = ngrid%i_parent_start + (nide-nids) / ngrid%parent_grid_ratio
3951 jpe_save = ngrid%j_parent_start + (njde-njds) / ngrid%parent_grid_ratio
3953 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
3954 ! in a separate routine because the HALOs need the data to be dereferenced from the
3955 ! grid data structure and, in this routine, the dereferenced fields are related to
3956 ! the intermediate domain, not the nest itself. Save the current grid pointer to the
3957 ! intermediate domain, switch grid to point to ngrid, invoke feedback_nest_prep, then
3958 ! restore grid to point to the intermediate domain.
3960 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3961 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3964 #include "deref_kludge.h"
3965 CALL feedback_nest_prep_nmm ( grid, config_flags &
3967 #include "actual_new_args.inc"
3971 ! put things back so grid is intermediate grid
3974 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3976 ! "interp" (basically copy) ngrid onto intermediate grid
3978 #include "nest_feedbackup_interp.inc"
3981 !#include <scalar_derefs.inc>
3983 END SUBROUTINE feedback_domain_nmm_part1
!------------------------------------------------------------------

SUBROUTINE feedback_domain_nmm_part2 ( grid, intermediate_grid, ngrid , config_flags &
!
#include "dummy_new_args.inc"
!
                 )
   USE module_state_description
   USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
   USE module_configure, ONLY : grid_config_rec_type
   USE module_dm, ONLY : get_dm_max_halo_width, ips_save, ipe_save, &
                         jps_save, jpe_save, ntasks, mytask, ntasks_x, ntasks_y, &
                         local_communicator, itrace
   USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
   IMPLICIT NONE

   TYPE(domain), POINTER :: grid          ! name of the grid being dereferenced (must be "grid")
   TYPE(domain), POINTER :: intermediate_grid
   TYPE(domain), POINTER :: ngrid

#include <dummy_new_decl.inc>

   INTEGER nlev, msize
   INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
   TYPE (grid_config_rec_type) :: config_flags

   INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
              cims, cime, cjms, cjme, ckms, ckme, &
              cips, cipe, cjps, cjpe, ckps, ckpe
   INTEGER :: nids, nide, njds, njde, nkds, nkde, &
              nims, nime, njms, njme, nkms, nkme, &
              nips, nipe, njps, njpe, nkps, nkpe
   INTEGER :: ids, ide, jds, jde, kds, kde, &
              ims, ime, jms, jme, kms, kme, &
              ips, ipe, jps, jpe, kps, kpe

   INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7

   INTEGER icoord, jcoord, idim_cd, jdim_cd
   INTEGER local_comm, myproc, nproc
   INTEGER iparstrt, jparstrt, sw
   INTEGER thisdomain_max_halo_width

   character*256 :: timestr
   REAL nest_influence

   LOGICAL, EXTERNAL :: cd_feedback_mask
   LOGICAL, EXTERNAL :: cd_feedback_mask_v
!#include <scalar_derefs.inc>

! On entry to this routine,
!   "grid" refers to the parent domain
!   "intermediate_grid" refers to the local copy of the parent domain that overlies this patch of the nest
!   "ngrid" refers to the nest, which is only needed for smoothing on the parent because
!   the nest feedback data has already been transferred during em_nest_feedbackup_interp

! The way the "c" and "n" dimensions are set below looks backwards, but from the point
! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is generated by
! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
! represents the intermediate domain. This backwardness should be fixed in the gen_comms.c
! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
! sign that says "DIP" than to fix the dip, at this point it was easier just to write this comment. JM
   nest_influence = 0.5
#define NEST_INFLUENCE(A,B) A = nest_influence*(B) + (1.0-nest_influence)*(A)
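! Expanded (editor's note): with nest_influence = 0.5 the macro averages the
! two grids, e.g. NEST_INFLUENCE(pfld(i,j), nfld(i,j)) becomes
!    pfld(i,j) = 0.5*nfld(i,j) + 0.5*pfld(i,j)
! where pfld/nfld are hypothetical parent/nest fields, for illustration only.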
   CALL domain_clock_get( grid, current_timestr=timestr )

   CALL get_ijk_from_grid ( intermediate_grid , &
                            cids, cide, cjds, cjde, ckds, ckde, &
                            cims, cime, cjms, cjme, ckms, ckme, &
                            cips, cipe, cjps, cjpe, ckps, ckpe )
   CALL get_ijk_from_grid ( grid , &
                            nids, nide, njds, njde, nkds, nkde, &
                            nims, nime, njms, njme, nkms, nkme, &
                            nips, nipe, njps, njpe, nkps, nkpe )

   nide = nide - 1   !dusan
   njde = njde - 1   !dusan

   CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
   CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
   CALL nl_get_shw            ( intermediate_grid%id, sw )
   icoord = iparstrt - sw
   jcoord = jparstrt - sw
   idim_cd = cide - cids + 1
   jdim_cd = cjde - cjds + 1
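! For example (editor's note, illustrative numbers only): with
! i_parent_start = 10 and stencil half-width shw = 2, icoord = 8; the
! intermediate domain begins two parent-grid points before the nest, leaving
! room for the halo when the feedback is packed.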
   nlev = ckde - ckds + 1

   CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
#include "nest_feedbackup_pack.inc"

   CALL wrf_get_dm_communicator ( local_comm )
   CALL wrf_get_myproc( myproc )
   CALL wrf_get_nproc( nproc )

   CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )

#include "nest_feedbackup_unpack.inc"

! smooth coarse grid

   CALL get_ijk_from_grid ( ngrid, &
                            nids, nide, njds, njde, nkds, nkde, &
                            nims, nime, njms, njme, nkms, nkme, &
                            nips, nipe, njps, njpe, nkps, nkpe )
   CALL get_ijk_from_grid ( grid , &
                            ids, ide, jds, jde, kds, kde, &
                            ims, ime, jms, jme, kms, kme, &
                            ips, ipe, jps, jpe, kps, kpe )

#include "HALO_INTERP_UP.inc"

   CALL get_ijk_from_grid ( grid , &
                            cids, cide, cjds, cjde, ckds, ckde, &
                            cims, cime, cjms, cjme, ckms, ckme, &
                            cips, cipe, cjps, cjpe, ckps, ckpe )

#include "nest_feedbackup_smooth.inc"

!#include <scalar_derefs.inc>

   RETURN
END SUBROUTINE feedback_domain_nmm_part2
!=================================================================================
! End of gopal's doing
!=================================================================================

!------------------------------------------------------------------
SUBROUTINE wrf_gatherv_real (Field, field_ofst,    &
                             my_count ,            & ! sendcount
                             globbuf, glob_ofst ,  & ! recvbuf
                             counts ,              & ! recvcounts
                             displs ,              & ! displs
                             root ,                & ! root
                             communicator ,        & ! communicator
                             ierr )
   USE module_dm, ONLY : getrealmpitype
   IMPLICIT NONE
   INTEGER field_ofst, glob_ofst
   INTEGER my_count, communicator, root, ierr
   INTEGER , DIMENSION(*) :: counts, displs
   REAL, DIMENSION(*) :: Field, globbuf
   INCLUDE 'mpif.h'

   CALL mpi_gatherv( Field( field_ofst ),   & ! sendbuf
                     my_count ,             & ! sendcount
                     getrealmpitype() ,     & ! sendtype
                     globbuf( glob_ofst ) , & ! recvbuf
                     counts ,               & ! recvcounts
                     displs ,               & ! displs
                     getrealmpitype() ,     & ! recvtype
                     root ,                 & ! root
                     communicator ,         & ! communicator
                     ierr )

END SUBROUTINE wrf_gatherv_real
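! Illustrative usage sketch (editor's addition, not part of WRF): gather
! equal-length slabs of "nloc" reals from every task onto task 0. The routine
! name and its setup are hypothetical, for demonstration only.
SUBROUTINE example_gather_slabs ( nloc, field, glob, nproc, mycomm )
   IMPLICIT NONE
   INTEGER nloc, nproc, mycomm
   REAL field(nloc)              ! this task's slab
   REAL glob(nloc*nproc)         ! assembled vector, valid on root afterwards
   INTEGER counts(nproc), displs(nproc), p, ierr
   DO p = 1, nproc
      counts(p) = nloc           ! every task contributes the same count here
      displs(p) = (p-1)*nloc     ! slabs land contiguously in glob
   ENDDO
   CALL wrf_gatherv_real ( field, 1, nloc, glob, 1, counts, displs, 0, mycomm, ierr )
END SUBROUTINE example_gather_slabs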
SUBROUTINE wrf_gatherv_double (Field, field_ofst,  &
                             my_count ,            & ! sendcount
                             globbuf, glob_ofst ,  & ! recvbuf
                             counts ,              & ! recvcounts
                             displs ,              & ! displs
                             root ,                & ! root
                             communicator ,        & ! communicator
                             ierr )
   IMPLICIT NONE
   INTEGER field_ofst, glob_ofst
   INTEGER my_count, communicator, root, ierr
   INTEGER , DIMENSION(*) :: counts, displs
! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
! to double precision by the compiler when WRF is compiled for 8-byte reals. The only
! reason for having this separate routine is so we pass the correct MPI type to
! mpi_gatherv; if we were not indexing the globbuf and Field arrays it would not even matter.
   REAL, DIMENSION(*) :: Field, globbuf
   INCLUDE 'mpif.h'

   CALL mpi_gatherv( Field( field_ofst ),   & ! sendbuf
                     my_count ,             & ! sendcount
                     MPI_DOUBLE_PRECISION , & ! sendtype
                     globbuf( glob_ofst ) , & ! recvbuf
                     counts ,               & ! recvcounts
                     displs ,               & ! displs
                     MPI_DOUBLE_PRECISION , & ! recvtype
                     root ,                 & ! root
                     communicator ,         & ! communicator
                     ierr )

END SUBROUTINE wrf_gatherv_double
SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
                             my_count ,            & ! sendcount
                             globbuf, glob_ofst ,  & ! recvbuf
                             counts ,              & ! recvcounts
                             displs ,              & ! displs
                             root ,                & ! root
                             communicator ,        & ! communicator
                             ierr )
   IMPLICIT NONE
   INTEGER field_ofst, glob_ofst
   INTEGER my_count, communicator, root, ierr
   INTEGER , DIMENSION(*) :: counts, displs
   INTEGER, DIMENSION(*) :: Field, globbuf
   INCLUDE 'mpif.h'

   CALL mpi_gatherv( Field( field_ofst ),   & ! sendbuf
                     my_count ,             & ! sendcount
                     MPI_INTEGER ,          & ! sendtype
                     globbuf( glob_ofst ) , & ! recvbuf
                     counts ,               & ! recvcounts
                     displs ,               & ! displs
                     MPI_INTEGER ,          & ! recvtype
                     root ,                 & ! root
                     communicator ,         & ! communicator
                     ierr )

END SUBROUTINE wrf_gatherv_integer
SUBROUTINE wrf_scatterv_real (                     &
                             globbuf, glob_ofst ,  & ! sendbuf (significant on root)
                             counts ,              & ! sendcounts
                             Field, field_ofst,    &
                             my_count ,            & ! recvcount
                             displs ,              & ! displs
                             root ,                & ! root
                             communicator ,        & ! communicator
                             ierr )
   USE module_dm, ONLY : getrealmpitype
   IMPLICIT NONE
   INTEGER field_ofst, glob_ofst
   INTEGER my_count, communicator, root, ierr
   INTEGER , DIMENSION(*) :: counts, displs
   REAL, DIMENSION(*) :: Field, globbuf
   INCLUDE 'mpif.h'

   CALL mpi_scatterv(                       &
                     globbuf( glob_ofst ) , & ! sendbuf
                     counts ,               & ! sendcounts
                     displs ,               & ! displs
                     getrealmpitype() ,     & ! sendtype
                     Field( field_ofst ),   & ! recvbuf
                     my_count ,             & ! recvcount
                     getrealmpitype() ,     & ! recvtype
                     root ,                 & ! root
                     communicator ,         & ! communicator
                     ierr )

END SUBROUTINE wrf_scatterv_real
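! Illustrative sketch (editor's addition): the inverse of the gather example
! above. Task 0 scatters contiguous nloc-real slabs of glob back out to each
! task's field. Names are hypothetical.
SUBROUTINE example_scatter_slabs ( nloc, glob, field, nproc, mycomm )
   IMPLICIT NONE
   INTEGER nloc, nproc, mycomm
   REAL glob(nloc*nproc)         ! significant on root only
   REAL field(nloc)              ! this task's slab on return
   INTEGER counts(nproc), displs(nproc), p, ierr
   DO p = 1, nproc
      counts(p) = nloc
      displs(p) = (p-1)*nloc
   ENDDO
   CALL wrf_scatterv_real ( glob, 1, counts, field, 1, nloc, displs, 0, mycomm, ierr )
END SUBROUTINE example_scatter_slabs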
SUBROUTINE wrf_scatterv_double (                   &
                             globbuf, glob_ofst ,  & ! sendbuf (significant on root)
                             counts ,              & ! sendcounts
                             Field, field_ofst,    &
                             my_count ,            & ! recvcount
                             displs ,              & ! displs
                             root ,                & ! root
                             communicator ,        & ! communicator
                             ierr )
   IMPLICIT NONE
   INTEGER field_ofst, glob_ofst
   INTEGER my_count, communicator, root, ierr
   INTEGER , DIMENSION(*) :: counts, displs
! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
! to double precision by the compiler when WRF is compiled for 8-byte reals. The only
! reason for having this separate routine is so we pass the correct MPI type to
! mpi_scatterv; if we were not indexing the globbuf and Field arrays it would not even matter.
   REAL, DIMENSION(*) :: Field, globbuf
   INCLUDE 'mpif.h'

   CALL mpi_scatterv(                       &
                     globbuf( glob_ofst ) , & ! sendbuf
                     counts ,               & ! sendcounts
                     displs ,               & ! displs
                     MPI_DOUBLE_PRECISION , & ! sendtype
                     Field( field_ofst ),   & ! recvbuf
                     my_count ,             & ! recvcount
                     MPI_DOUBLE_PRECISION , & ! recvtype
                     root ,                 & ! root
                     communicator ,         & ! communicator
                     ierr )

END SUBROUTINE wrf_scatterv_double
SUBROUTINE wrf_scatterv_integer (                  &
                             globbuf, glob_ofst ,  & ! sendbuf (significant on root)
                             counts ,              & ! sendcounts
                             Field, field_ofst,    &
                             my_count ,            & ! recvcount
                             displs ,              & ! displs
                             root ,                & ! root
                             communicator ,        & ! communicator
                             ierr )
   IMPLICIT NONE
   INTEGER field_ofst, glob_ofst
   INTEGER my_count, communicator, root, ierr
   INTEGER , DIMENSION(*) :: counts, displs
   INTEGER, DIMENSION(*) :: Field, globbuf
   INCLUDE 'mpif.h'

   CALL mpi_scatterv(                       &
                     globbuf( glob_ofst ) , & ! sendbuf
                     counts ,               & ! sendcounts
                     displs ,               & ! displs
                     MPI_INTEGER ,          & ! sendtype
                     Field( field_ofst ),   & ! recvbuf
                     my_count ,             & ! recvcount
                     MPI_INTEGER ,          & ! recvtype
                     root ,                 & ! root
                     communicator ,         & ! communicator
                     ierr )

END SUBROUTINE wrf_scatterv_integer
! end new stuff 20070124

SUBROUTINE wrf_dm_gatherv ( v, elemsize , km_s, km_e, wordsz )
   IMPLICIT NONE
   INTEGER elemsize, km_s, km_e, wordsz
   REAL v(*)
   IF ( wordsz .EQ. DWORDSIZE ) THEN
      CALL wrf_dm_gatherv_double(v, elemsize , km_s, km_e)
   ELSE
      CALL wrf_dm_gatherv_single(v, elemsize , km_s, km_e)
   ENDIF
END SUBROUTINE wrf_dm_gatherv
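! For example (editor's note), a caller holding a default-real buffer selects
! the flavor at run time with the RWORDSIZE macro:
!    CALL wrf_dm_gatherv ( buf, elemsize, km_s, km_e, RWORDSIZE )
! which dispatches to the _double version when WRF is built with 8-byte reals.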
SUBROUTINE wrf_dm_gatherv_double ( v, elemsize , km_s, km_e )
   IMPLICIT NONE
   INTEGER elemsize, km_s, km_e
   REAL*8 v(*)
#ifndef STUBMPI
# ifndef USE_MPI_IN_PLACE
   REAL*8 v_local((km_e-km_s+1)*elemsize)
# endif
   INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
   INTEGER send_type, myproc, nproc, local_comm, ierr, i
   INCLUDE 'mpif.h'
   send_type = MPI_DOUBLE_PRECISION
   CALL wrf_get_dm_communicator ( local_comm )
   CALL wrf_get_nproc( nproc )
   CALL wrf_get_myproc( myproc )
   ALLOCATE( recvcounts(nproc), displs(nproc) )
! each task contributes (km_e-km_s+1)*elemsize words ...
   i = (km_e-km_s+1)*elemsize
   CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr)
! ... placed at word offset km_s*elemsize of the assembled vector
   i = (km_s)*elemsize
   CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr)
# ifdef USE_MPI_IN_PLACE
   CALL mpi_allgatherv( MPI_IN_PLACE,             &
# else
   DO i = 1,elemsize*(km_e-km_s+1)
      v_local(i) = v(i+elemsize*km_s-1)
   ENDDO
   CALL mpi_allgatherv( v_local,                  &
# endif
                        (km_e-km_s+1)*elemsize,   &
                        send_type,                &
                        v,                        &
                        recvcounts,               &
                        displs,                   &
                        send_type,                &
                        local_comm,               &
                        ierr )
   DEALLOCATE(recvcounts)
   DEALLOCATE(displs)
#endif
END SUBROUTINE wrf_dm_gatherv_double
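! Worked example (editor's note): with nproc=3, elemsize=4, and levels
! km_s:km_e = 0:1 | 2:3 | 4:5 on tasks 0|1|2, the two allgathers above give
!    recvcounts = (/ 8, 8, 8 /)  and  displs = (/ 0, 8, 16 /)
! so each task's slab lands at word offset km_s*elemsize of the full vector v.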
SUBROUTINE wrf_dm_gatherv_single ( v, elemsize , km_s, km_e )
   IMPLICIT NONE
   INTEGER elemsize, km_s, km_e
   REAL*4 v(*)
#ifndef STUBMPI
# ifndef USE_MPI_IN_PLACE
   REAL*4 v_local((km_e-km_s+1)*elemsize)
# endif
   INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
   INTEGER send_type, myproc, nproc, local_comm, ierr, i
   INCLUDE 'mpif.h'
   send_type = MPI_REAL
   CALL wrf_get_dm_communicator ( local_comm )
   CALL wrf_get_nproc( nproc )
   CALL wrf_get_myproc( myproc )
   ALLOCATE( recvcounts(nproc), displs(nproc) )
! each task contributes (km_e-km_s+1)*elemsize words ...
   i = (km_e-km_s+1)*elemsize
   CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr)
! ... placed at word offset km_s*elemsize of the assembled vector
   i = (km_s)*elemsize
   CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr)
# ifdef USE_MPI_IN_PLACE
   CALL mpi_allgatherv( MPI_IN_PLACE,             &
# else
   DO i = 1,elemsize*(km_e-km_s+1)
      v_local(i) = v(i+elemsize*km_s-1)
   ENDDO
   CALL mpi_allgatherv( v_local,                  &
# endif
                        (km_e-km_s+1)*elemsize,   &
                        send_type,                &
                        v,                        &
                        recvcounts,               &
                        displs,                   &
                        send_type,                &
                        local_comm,               &
                        ierr )
   DEALLOCATE(recvcounts)
   DEALLOCATE(displs)
#endif
END SUBROUTINE wrf_dm_gatherv_single
SUBROUTINE wrf_dm_decomp1d( nt, km_s, km_e )
   IMPLICIT NONE
   INTEGER, INTENT(IN)  :: nt
   INTEGER, INTENT(OUT) :: km_s, km_e
   INTEGER nn, nnp, na, nb
   INTEGER myproc, nproc

   CALL wrf_get_myproc(myproc)
   CALL wrf_get_nproc(nproc)
   nn = nt / nproc                       ! min number done by this task
   nnp = nn
   IF ( myproc .LT. MOD( nt, nproc ) ) nnp = nnp + 1   ! distribute remainder

   na = MIN( myproc, MOD(nt,nproc) )     ! number of blocks with remainder that precede this one
   nb = MAX( 0, myproc - na )            ! number of blocks without a remainder that precede this one
   km_s = na * ( nn+1 ) + nb * nn        ! starting iteration for this task
   km_e = km_s + nnp - 1                 ! ending iteration for this task
END SUBROUTINE wrf_dm_decomp1d
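! Worked example (editor's note): nt=10 over nproc=4 gives nn=2 with a
! remainder of 2, so the first two tasks take one extra iteration:
!    task 0 -> km_s:km_e = 0:2,  task 1 -> 3:5,  task 2 -> 6:7,  task 3 -> 8:9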
! no-op stub under RSL_LITE; kept to satisfy callers
SUBROUTINE wrf_dm_define_comms ( grid )
   USE module_domain, ONLY : domain
   IMPLICIT NONE
   TYPE(domain) , INTENT (INOUT) :: grid
   RETURN
END SUBROUTINE wrf_dm_define_comms
SUBROUTINE tfp_message( fname, lno )
   IMPLICIT NONE
   CHARACTER*(*) fname
   INTEGER lno
   CHARACTER*1024 mess
   WRITE(mess,*)'tfp_message: ',TRIM(fname),lno
   CALL wrf_message(mess)
# ifdef ALLOW_OVERDECOMP
   CALL task_for_point_message   ! defined in RSL_LITE/task_for_point.c
# else
   CALL wrf_error_fatal(mess)
# endif
END SUBROUTINE tfp_message
SUBROUTINE set_dm_debug
   USE module_dm, ONLY : dm_debug_flag
   IMPLICIT NONE
   dm_debug_flag = .TRUE.
END SUBROUTINE set_dm_debug
SUBROUTINE reset_dm_debug
   USE module_dm, ONLY : dm_debug_flag
   IMPLICIT NONE
   dm_debug_flag = .FALSE.
END SUBROUTINE reset_dm_debug
SUBROUTINE get_dm_debug ( arg )
   USE module_dm, ONLY : dm_debug_flag
   IMPLICIT NONE
   LOGICAL arg
   arg = dm_debug_flag
END SUBROUTINE get_dm_debug