7 USE module_driver_constants
11 #if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
12 INTEGER, PARAMETER :: max_halo_width = 6
14 INTEGER, PARAMETER :: max_halo_width = 6 ! 5
17 INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
19 INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
20 INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
21 INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
22 LOGICAL :: dm_debug_flag = .FALSE.
24 INTERFACE wrf_dm_maxval
25 #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
26 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
28 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
32 INTERFACE wrf_dm_minval ! gopal's doing
33 #if ( defined(PROMOTE_FLOAT) || ( RWORDSIZE == DWORDSIZE ) )
34 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
36 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
43 SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
45 INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
50 IF ( MOD( P, M ) .EQ. 0 ) THEN
52 IF ( ABS(M-N) .LT. MINI &
53 .AND. M .GE. PROCMIN_M &
54 .AND. N .GE. PROCMIN_N &
62 IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
63 WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
64 CALL wrf_message ( TRIM ( wrf_err_message ) )
65 WRITE(0,*)' PROCMIN_M ', PROCMIN_M
66 WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
67 CALL wrf_message ( TRIM ( wrf_err_message ) )
68 WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
69 CALL wrf_message ( TRIM ( wrf_err_message ) )
70 WRITE( wrf_err_message , * )' P ', P
71 CALL wrf_message ( TRIM ( wrf_err_message ) )
72 WRITE( wrf_err_message , * )' MINM ', MINM
73 CALL wrf_message ( TRIM ( wrf_err_message ) )
74 WRITE( wrf_err_message , * )' MINN ', MINN
75 CALL wrf_message ( TRIM ( wrf_err_message ) )
76 CALL wrf_error_fatal ( 'module_dm: mpaspect' )
79 END SUBROUTINE MPASPECT
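! Illustrative behaviour (worked by hand, assuming the elided loop tries every candidate
! factor M of P): MPASPECT( 12, MINM, MINN, 1, 1 ) returns MINM = 3, MINN = 4, the most
! nearly square factorization with MINM <= MINN, while MPASPECT( 7, MINM, MINN, 2, 2 )
! finds no factor pair with both factors >= 2 and so reaches the
! 'UNABLE TO GENERATE PROCESSOR MESH' abort above.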
81 SUBROUTINE compute_mesh( ntasks , ntasks_x, ntasks_y )
83 INTEGER, INTENT(IN) :: ntasks
84 INTEGER, INTENT(OUT) :: ntasks_x, ntasks_y
85 CALL nl_get_nproc_x ( 1, ntasks_x )
86 CALL nl_get_nproc_y ( 1, ntasks_y )
87    ! check whether the user has specified the decomposition in the namelist
88 IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
89      ! if only nproc_x is specified in the namelist, derive ntasks_y from it
90 IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
91 ntasks_y = ntasks / ntasks_x
92      ! if only nproc_y is specified in the namelist, derive ntasks_x from it
93 ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
94 ntasks_x = ntasks / ntasks_y
96 ! make sure user knows what they're doing
97 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
98 WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
99 CALL wrf_error_fatal ( wrf_err_message )
102   ! When neither is specified, work out the mesh with MPASPECT.
103   ! The minimum-size arguments (1,1) let MPASPECT pick the most nearly square
104   ! factorization, with the i-dim count (ntasks_x) equal or smaller.
105 CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
107 END SUBROUTINE compute_mesh
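! Illustrative namelist behaviour for compute_mesh (hypothetical 16-task run):
!   nproc_x =  2, nproc_y = -1  ->  ntasks_x = 2, ntasks_y = 8  (derived as ntasks/nproc_x)
!   nproc_x = -1, nproc_y = -1  ->  MPASPECT picks ntasks_x = 4, ntasks_y = 4
!   nproc_x =  3, nproc_y =  5  ->  fatal error above, since 3*5 /= 16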
109 SUBROUTINE wrf_dm_initialize
113 INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
114 INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
116 INTEGER, DIMENSION(2) :: dims, coords
117 LOGICAL, DIMENSION(2) :: isperiodic
118 LOGICAL :: reorder_mesh
120 CALL wrf_get_dm_communicator ( local_comm )
121 CALL mpi_comm_size( local_comm, ntasks, ierr )
122 CALL nl_get_reorder_mesh( 1, reorder_mesh )
123 CALL compute_mesh( ntasks, ntasks_x, ntasks_y )
124 WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
125 CALL wrf_message( wrf_err_message )
127 CALL mpi_comm_rank( local_comm, mytask, ierr )
128 ! extra code to reorder the communicator 20051212jm
129 IF ( reorder_mesh ) THEN
130 ALLOCATE (ranks(ntasks))
131 CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
132 CALL mpi_comm_group ( local_comm2, group, ierr )
135 ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
137 CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
139 CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
141 new_local_comm = local_comm
143 ! end extra code to reorder the communicator 20051212jm
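! For illustration (hypothetical 6-task case, assuming the elided loop sets ranks(p1)
! for p = p1-1, p1 = 1..ntasks): with ntasks_x = 3 and ntasks_y = 2 the formula above,
! mod(p,ntasks_x)*ntasks_y + p/ntasks_x, fills ranks = (/ 0, 2, 4, 1, 3, 5 /), so the
! reordered communicator lists the original ranks with a stride of ntasks_y.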
144 dims(1) = ntasks_y ! rows
145 dims(2) = ntasks_x ! columns
146 isperiodic(1) = .false.
147 isperiodic(2) = .false.
148 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
149 dims(1) = ntasks_y ! rows
150 dims(2) = ntasks_x ! columns
151 isperiodic(1) = .true.
152 isperiodic(2) = .true.
153 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
155 CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
156 CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
157 ! write(0,*)'periodic coords ',mytask, coords
159 CALL mpi_comm_rank( local_communicator, mytask, ierr )
160 CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
161 ! write(0,*)'non periodic coords ',mytask, coords
162 mytask_x = coords(2) ! col task (x)
163 mytask_y = coords(1) ! row task (y)
164 CALL nl_set_nproc_x ( 1, ntasks_x )
165 CALL nl_set_nproc_y ( 1, ntasks_y )
167   ! 20061228 set up subcommunicators for processors in the X and Y coords of the mesh
168   ! note that local_communicator_x holds all the processors in a row (X = 0:nproc_x-1);
169   ! in other words, all the processes that share the same Y coordinate (mytask_y)
170 CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
171 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
172 CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
173 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
174 CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
175 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
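! For illustration: on a 3 x 2 mesh (ntasks_x = 3, ntasks_y = 2) the first split above
! uses mytask_y as the color, so the three tasks of each mesh row share one
! local_communicator_x; the second uses mytask_x, so the two tasks of each mesh column
! share one local_communicator_y. The key argument (mytask) keeps ranks in mesh order.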
177 CALL wrf_set_dm_communicator ( local_communicator )
188 END SUBROUTINE wrf_dm_initialize
190 SUBROUTINE get_dm_max_halo_width( id, width )
192 INTEGER, INTENT(IN) :: id
193 INTEGER, INTENT(OUT) :: width
194 IF ( id .EQ. 1 ) THEN ! this is coarse domain
195 width = max_halo_width
197 width = max_halo_width + 3
200 END SUBROUTINE get_dm_max_halo_width
202 SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
203 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
204 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
205 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
206 sp1x , ep1x , sm1x , em1x , &
207 sp2x , ep2x , sm2x , em2x , &
208 sp3x , ep3x , sm3x , em3x , &
209 sp1y , ep1y , sm1y , em1y , &
210 sp2y , ep2y , sm2y , em2y , &
211 sp3y , ep3y , sm3y , em3y , &
214 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
217 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
218 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
219 sm1 , em1 , sm2 , em2 , sm3 , em3
220 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
221 sm1x , em1x , sm2x , em2x , sm3x , em3x
222 INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
223 sm1y , em1y , sm2y , em2y , sm3y , em3y
224 INTEGER, INTENT(IN) :: id, parent_id
225 TYPE(domain),POINTER :: parent
228 INTEGER :: ids, ide, jds, jde, kds, kde
229 INTEGER :: ims, ime, jms, jme, kms, kme
230 INTEGER :: ips, ipe, jps, jpe, kps, kpe
231 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
232 INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
233 INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
234 INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
236 INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
237 INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
238 c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
239 INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
240 c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
241 INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
242 c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
244 INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
245 INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
246 INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
248 INTEGER :: idim , jdim , kdim , rem , a, b
249 INTEGER :: i, j, ni, nj, Px, Py, P
251 INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
253 INTEGER :: idim_cd, jdim_cd, ierr
256 TYPE(domain), POINTER :: intermediate_grid
257 TYPE(domain), POINTER :: nest_grid
258 CHARACTER*256 :: mess
260 INTEGER parent_max_halo_width
261 INTEGER thisdomain_max_halo_width
263 SELECT CASE ( model_data_order )
264 ! need to finish other cases
265 CASE ( DATA_ORDER_ZXY )
266 ids = sd2 ; ide = ed2
267 jds = sd3 ; jde = ed3
268 kds = sd1 ; kde = ed1
269 CASE ( DATA_ORDER_XYZ )
270 ids = sd1 ; ide = ed1
271 jds = sd2 ; jde = ed2
272 kds = sd3 ; kde = ed3
273 CASE ( DATA_ORDER_XZY )
274 ids = sd1 ; ide = ed1
275 jds = sd3 ; jde = ed3
276 kds = sd2 ; kde = ed2
277 CASE ( DATA_ORDER_YXZ)
278 ids = sd2 ; ide = ed2
279 jds = sd1 ; jde = ed1
280 kds = sd3 ; kde = ed3
283 CALL nl_get_max_dom( 1 , max_dom )
285 CALL get_dm_max_halo_width( id , thisdomain_max_halo_width )
286 IF ( id .GT. 1 ) THEN
287 CALL get_dm_max_halo_width( parent%id , parent_max_halo_width )
290 CALL compute_memory_dims_rsl_lite ( id, thisdomain_max_halo_width, 0 , bdx, bdy, &
291 ids, ide, jds, jde, kds, kde, &
292 ims, ime, jms, jme, kms, kme, &
293 imsx, imex, jmsx, jmex, kmsx, kmex, &
294 imsy, imey, jmsy, jmey, kmsy, kmey, &
295 ips, ipe, jps, jpe, kps, kpe, &
296 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
297 ipsy, ipey, jpsy, jpey, kpsy, kpey )
299 ! ensure that every parent domain point has a full set of nested points under it,
300 ! even at the borders. Do this by making sure the number of nest points is a multiple of
301 ! the nesting ratio. Note that this matters mostly for the intermediate domain, which
302 ! is the subject of the scatter/gather comms with the parent
304 IF ( id .GT. 1 ) THEN
305 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
306 if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
307 if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
310 SELECT CASE ( model_data_order )
311 CASE ( DATA_ORDER_ZXY )
312 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
313 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
314 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
315 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
316 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
317 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
318 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
319 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
320 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
321 CASE ( DATA_ORDER_ZYX )
322 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
323 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
324 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
325 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
326 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
327 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
328 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
329 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
330 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
331 CASE ( DATA_ORDER_XYZ )
332 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
333 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
334 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
335 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
336 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
337 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
338 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
339 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
340 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
341 CASE ( DATA_ORDER_YXZ)
342 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
343 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
344 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
345 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
346 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
347 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
348 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
349 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
350 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
351 CASE ( DATA_ORDER_XZY )
352 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
353 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
354 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
355 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
356 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
357 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
358 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
359 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
360 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
361 CASE ( DATA_ORDER_YZX )
362 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
363 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
364 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
365 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
366 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
367 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
368 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
369 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
370 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
374 WRITE(wrf_err_message,*)'*************************************'
375 CALL wrf_message( TRIM(wrf_err_message) )
376 WRITE(wrf_err_message,*)'Parent domain'
377 CALL wrf_message( TRIM(wrf_err_message) )
378 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
379 CALL wrf_message( TRIM(wrf_err_message) )
380 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
381 CALL wrf_message( TRIM(wrf_err_message) )
382 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
383 CALL wrf_message( TRIM(wrf_err_message) )
384 WRITE(wrf_err_message,*)'*************************************'
385 CALL wrf_message( TRIM(wrf_err_message) )
388 IF ( id .GT. 1 ) THEN
390 CALL nl_get_shw( id, shw )
391 CALL nl_get_i_parent_start( id , i_parent_start )
392 CALL nl_get_j_parent_start( id , j_parent_start )
393 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
395 SELECT CASE ( model_data_order )
396 CASE ( DATA_ORDER_ZXY )
400 c_kds = sd1 ; c_kde = ed1
401 CASE ( DATA_ORDER_ZYX )
405 c_kds = sd1 ; c_kde = ed1
406 CASE ( DATA_ORDER_XYZ )
410 c_kds = sd3 ; c_kde = ed3
411 CASE ( DATA_ORDER_YXZ)
415 c_kds = sd3 ; c_kde = ed3
416 CASE ( DATA_ORDER_XZY )
420 c_kds = sd2 ; c_kde = ed2
421 CASE ( DATA_ORDER_YZX )
425 c_kds = sd2 ; c_kde = ed2
428 idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
429 jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
431 c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
432 c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
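! Worked example (hypothetical sizes, taking idim as the nest's i extent): with
! idim = 100, parent_grid_ratio = 3 and shw = 2, the intermediate domain gets
! idim_cd = 100/3 + 1 + 2*2 + 1 = 39 coarse points, and with i_parent_start = 10
! it spans c_ids = 10 - 2 = 8 to c_ide = 8 + 39 - 1 = 46 in parent-grid index space.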
434 ! we want the intermediate domain to be decomposed the same
435 ! as the underlying nest. So try this:
438 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
441 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
442 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
444 IF ( Px .EQ. mytask_x ) THEN
446 IF ( c_ips .EQ. -1 ) c_ips = i
449 IF ( ierr .NE. 0 ) THEN
450 CALL tfp_message(__FILE__,__LINE__)
452 IF (c_ips .EQ. -1 ) THEN
458 ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
461 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
462 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
466 IF ( Py .EQ. mytask_y ) THEN
468 IF ( c_jps .EQ. -1 ) c_jps = j
471 IF ( ierr .NE. 0 ) THEN
472 CALL tfp_message(__FILE__,__LINE__)
474 IF (c_jps .EQ. -1 ) THEN
479 IF ( c_ips <= c_ipe ) THEN
480 ! extend the patch dimensions out shw along edges of domain
481 IF ( mytask_x .EQ. 0 ) THEN
484 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
487 c_ims = max( c_ips - max(shw,thisdomain_max_halo_width), c_ids - bdx ) - 1
488 c_ime = min( c_ipe + max(shw,thisdomain_max_halo_width), c_ide + bdx ) + 1
496 IF ( c_jps <= c_jpe ) THEN
497 ! extend the patch dimensions out shw along edges of domain
498 IF ( mytask_y .EQ. 0 ) THEN
501 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
504            c_jms = max( c_jps - max(shw,thisdomain_max_halo_width), c_jds - bdy ) - 1
505            c_jme = min( c_jpe + max(shw,thisdomain_max_halo_width), c_jde + bdy ) + 1
516 WRITE(wrf_err_message,*)'*************************************'
517 CALL wrf_message( TRIM(wrf_err_message) )
518 WRITE(wrf_err_message,*)'Nesting domain'
519 CALL wrf_message( TRIM(wrf_err_message) )
520 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
521 CALL wrf_message( TRIM(wrf_err_message) )
522 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
523 CALL wrf_message( TRIM(wrf_err_message) )
524 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
525 CALL wrf_message( TRIM(wrf_err_message) )
526 WRITE(wrf_err_message,*)'INTERMEDIATE domain'
527 CALL wrf_message( TRIM(wrf_err_message) )
528 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
529 CALL wrf_message( TRIM(wrf_err_message) )
530 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
531 CALL wrf_message( TRIM(wrf_err_message) )
532 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
533 CALL wrf_message( TRIM(wrf_err_message) )
534 WRITE(wrf_err_message,*)'*************************************'
535 CALL wrf_message( TRIM(wrf_err_message) )
537 SELECT CASE ( model_data_order )
538 CASE ( DATA_ORDER_ZXY )
539 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
540 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
541 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
542 CASE ( DATA_ORDER_ZYX )
543 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
544 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
545 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
546 CASE ( DATA_ORDER_XYZ )
547 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
548 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
549 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
550 CASE ( DATA_ORDER_YXZ)
551 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
552 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
553 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
554 CASE ( DATA_ORDER_XZY )
555 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
556 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
557 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
558 CASE ( DATA_ORDER_YZX )
559 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
560 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
561 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
564 ALLOCATE ( intermediate_grid )
565 ALLOCATE ( intermediate_grid%parents( max_parents ) )
566 ALLOCATE ( intermediate_grid%nests( max_nests ) )
567 intermediate_grid%allocated=.false.
568 NULLIFY( intermediate_grid%sibling )
570 NULLIFY( intermediate_grid%nests(i)%ptr )
572 NULLIFY (intermediate_grid%next)
573 NULLIFY (intermediate_grid%same_level)
574 NULLIFY (intermediate_grid%i_start)
575 NULLIFY (intermediate_grid%j_start)
576 NULLIFY (intermediate_grid%i_end)
577 NULLIFY (intermediate_grid%j_end)
578 intermediate_grid%id = id ! these must be the same. Other parts of code depend on it (see gen_comms.c)
579 intermediate_grid%num_nests = 0
580 intermediate_grid%num_siblings = 0
581 intermediate_grid%num_parents = 1
582 intermediate_grid%max_tiles = 0
583 intermediate_grid%num_tiles_spec = 0
584 CALL find_grid_by_id ( id, head_grid, nest_grid )
586 nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
587 intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
588 intermediate_grid%num_parents = 1
590 intermediate_grid%is_intermediate = .TRUE.
591 SELECT CASE ( model_data_order )
592 CASE ( DATA_ORDER_ZXY )
593 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd33
594            intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%ed33
595 CASE ( DATA_ORDER_ZYX )
596 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd32
597            intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%ed32
598 CASE ( DATA_ORDER_XYZ )
599 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd32
600            intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%ed32
601 CASE ( DATA_ORDER_YXZ)
602 intermediate_grid%nids = nest_grid%sd32 ; intermediate_grid%njds = nest_grid%sd31
603            intermediate_grid%nide = nest_grid%ed32 ; intermediate_grid%njde = nest_grid%ed31
604 CASE ( DATA_ORDER_XZY )
605 intermediate_grid%nids = nest_grid%sd31 ; intermediate_grid%njds = nest_grid%sd33
606            intermediate_grid%nide = nest_grid%ed31 ; intermediate_grid%njde = nest_grid%ed33
607 CASE ( DATA_ORDER_YZX )
608 intermediate_grid%nids = nest_grid%sd33 ; intermediate_grid%njds = nest_grid%sd31
609            intermediate_grid%nide = nest_grid%ed33 ; intermediate_grid%njde = nest_grid%ed31
611 intermediate_grid%nids = ids
612 intermediate_grid%nide = ide
613 intermediate_grid%njds = jds
614 intermediate_grid%njde = jde
616 c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
617 c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
619 intermediate_grid%sm31x = c_sm1x
620 intermediate_grid%em31x = c_em1x
621 intermediate_grid%sm32x = c_sm2x
622 intermediate_grid%em32x = c_em2x
623 intermediate_grid%sm33x = c_sm3x
624 intermediate_grid%em33x = c_em3x
625 intermediate_grid%sm31y = c_sm1y
626 intermediate_grid%em31y = c_em1y
627 intermediate_grid%sm32y = c_sm2y
628 intermediate_grid%em32y = c_em2y
629 intermediate_grid%sm33y = c_sm3y
630 intermediate_grid%em33y = c_em3y
632 #if defined(SGIALTIX) && (! defined(MOVE_NESTS) )
633 ! allocate space for the intermediate domain
634 CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., & ! use same id as nest
635 c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
636 c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
637 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, &
638 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y, &
639 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
640 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
642 intermediate_grid%sd31 = c_sd1
643 intermediate_grid%ed31 = c_ed1
644 intermediate_grid%sp31 = c_sp1
645 intermediate_grid%ep31 = c_ep1
646 intermediate_grid%sm31 = c_sm1
647 intermediate_grid%em31 = c_em1
648 intermediate_grid%sd32 = c_sd2
649 intermediate_grid%ed32 = c_ed2
650 intermediate_grid%sp32 = c_sp2
651 intermediate_grid%ep32 = c_ep2
652 intermediate_grid%sm32 = c_sm2
653 intermediate_grid%em32 = c_em2
654 intermediate_grid%sd33 = c_sd3
655 intermediate_grid%ed33 = c_ed3
656 intermediate_grid%sp33 = c_sp3
657 intermediate_grid%ep33 = c_ep3
658 intermediate_grid%sm33 = c_sm3
659 intermediate_grid%em33 = c_em3
661 CALL med_add_config_info_to_grid ( intermediate_grid )
663 intermediate_grid%dx = parent%dx
664 intermediate_grid%dy = parent%dy
665 intermediate_grid%dt = parent%dt
669 END SUBROUTINE patch_domain_rsl_lite
671 SUBROUTINE compute_memory_dims_rsl_lite ( &
672 id , maxhalowidth , &
674 ids, ide, jds, jde, kds, kde, &
675 ims, ime, jms, jme, kms, kme, &
676 imsx, imex, jmsx, jmex, kmsx, kmex, &
677 imsy, imey, jmsy, jmey, kmsy, kmey, &
678 ips, ipe, jps, jpe, kps, kpe, &
679 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
680 ipsy, ipey, jpsy, jpey, kpsy, kpey )
683 INTEGER, INTENT(IN) :: id , maxhalowidth
684 INTEGER, INTENT(IN) :: shw, bdx, bdy
685 INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
686 INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
687 INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
688 INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
689 INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
690 INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
691 INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
693 INTEGER Px, Py, P, i, j, k, ierr
695 #if ( ! NMM_CORE == 1 )
703 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
705 IF ( Px .EQ. mytask_x ) THEN
707 IF ( ips .EQ. -1 ) ips = i
710 IF ( ierr .NE. 0 ) THEN
711 CALL tfp_message(__FILE__,__LINE__)
713 ! handle setting the memory dimensions where there are no X elements assigned to this proc
714 IF (ips .EQ. -1 ) THEN
722 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py, &
724 IF ( Py .EQ. mytask_y ) THEN
726 IF ( jps .EQ. -1 ) jps = j
729 IF ( ierr .NE. 0 ) THEN
730 CALL tfp_message(__FILE__,__LINE__)
732 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
733 IF (jps .EQ. -1 ) THEN
738 !begin: wig; 12-Mar-2008
739 ! This appears redundant with the conditionals above, but we get cases with only
740 ! one of the directions being set to "missing" when turning off extra processors.
741 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
742 IF (ipe .EQ. -1 .or. jpe .EQ. -1) THEN
748 !end: wig; 12-Mar-2008
751 ! description of transpose decomposition strategy for RSL LITE. 20061231jm
753 ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
754 ! XY corresponds to the dimension of the processor mesh, lower-case xyz
755 ! corresponds to grid dimension.
759 ! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
762 ! +------------------+ <- this edge is costly; see below
764 ! The aim is to avoid all-to-all communication over whole
765 ! communicator. Instead, when possible, use a transpose scheme that requires
766 ! all-to-all within dimensional communicators; that is, communicators
767 ! defined for the processes in a row or column of the processor mesh. Note,
768 ! however, it is not possible to create a ring of transposes between
769 ! xy-yz-xz decompositions without at least one of the edges in the ring
770 ! being fully all-to-all (in other words, one of the transpose edges must
771 ! rotate and not just transpose a plane of the model grid within the
772 ! processor mesh). The issue is then, where should we put this costly edge
773 ! in the transpose scheme we chose? To avoid being completely arbitrary,
774 ! we chose a scheme most natural for models that use parallel spectral
775 ! transforms, where the costly edge is the one that goes from the xz to
776 ! the xy decomposition. (May be implemented as just a two step transpose
779 ! Additional notational convention, below. The 'x' or 'y' appended to the
780 ! dimension start or end variable refers to which grid dimension is all
781 ! on-processor in the given decomposition. That is ipsx and ipex are the
782 ! start and end for the i-dimension in the zy decomposition where x is
783 ! on-processor. ('z' is assumed for xy decomposition and not appended to
784 ! the ips, ipe, etc. variable names).
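! Concrete illustration (hypothetical sizes, assuming an even split): on a 2 x 2 processor
! mesh with a 100(i) x 80(j) x 40(k) grid, the default XxYy decomposition gives each task
! roughly 50 x 40 columns with all 40 levels; in the 'x' decomposition below (kpsx, jpsx, ...)
! k is split over the X processor dimension (~20 levels) and j over Y (~40 points) with the
! full i range on-processor; in the 'y' decomposition i is split over Y (~50 points), k keeps
! the same split over X, and the full j range is on-processor.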
793 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
795 IF ( Px .EQ. mytask_x ) THEN
797 IF ( kpsx .EQ. -1 ) kpsx = k
800 IF ( ierr .NE. 0 ) THEN
801 CALL tfp_message(__FILE__,__LINE__)
804 ! handle the case where no levels are assigned to this process, i.e. the loop
805 ! above did no iterations. Do the same for I and J. Need to handle memory alloc below.
806 IF (kpsx .EQ. -1 ) THEN
815 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py, &
817 IF ( Py .EQ. mytask_y ) THEN
819 IF ( jpsx .EQ. -1 ) jpsx = j
822 IF ( ierr .NE. 0 ) THEN
823 CALL tfp_message(__FILE__,__LINE__)
825 IF (jpsx .EQ. -1 ) THEN
830 !begin: wig; 12-Mar-2008
831 ! This appears redundant with the conditionals above, but we get cases with only
832 ! one of the directions being set to "missing" when turning off extra processors.
833 ! This may break the handling of setting only one of nproc_x or nproc_y via the namelist.
834 IF (ipex .EQ. -1 .or. jpex .EQ. -1) THEN
840 !end: wig; 12-Mar-2008
842 ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
844 kpsy = kpsx ! same as above
845 kpey = kpex ! same as above
851 CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px, &
852 1, 1, ierr ) ! x and y for proc mesh reversed
853 IF ( Py .EQ. mytask_y ) THEN
855 IF ( ipsy .EQ. -1 ) ipsy = i
858 IF ( ierr .NE. 0 ) THEN
859 CALL tfp_message(__FILE__,__LINE__)
861 IF (ipsy .EQ. -1 ) THEN
869 ! In the case of the NMM core, the domain only ever runs from ids..ide-1 and jds..jde-1, so
870 ! adjust the decomposition to reflect this. 20051020 JM
875 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
877 IF ( Px .EQ. mytask_x ) THEN
879 IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
880 IF ( ips .EQ. -1 ) ips = i
883 IF ( ierr .NE. 0 ) THEN
884 CALL tfp_message(__FILE__,__LINE__)
890 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py, &
892 IF ( Py .EQ. mytask_y ) THEN
894 IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
895 IF ( jps .EQ. -1 ) jps = j
898 IF ( ierr .NE. 0 ) THEN
899 CALL tfp_message(__FILE__,__LINE__)
903 ! extend the patch dimensions out shw along edges of domain
904 IF ( ips < ipe .and. jps < jpe ) THEN !wig; 11-Mar-2008
905 IF ( mytask_x .EQ. 0 ) THEN
909 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
913 IF ( mytask_y .EQ. 0 ) THEN
917 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
921 ENDIF !wig; 11-Mar-2008
933 ! handle setting the memory dimensions where there are no levels assigned to this proc
934 IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
938 IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
943 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
947 ims = max( ips - max(shw,maxhalowidth), ids - bdx ) - 1
948 ime = min( ipe + max(shw,maxhalowidth), ide + bdx ) + 1
954 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
955 IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
963 IF ( (jps .EQ. 0 .AND. jpe .EQ. -1) .OR. (ips .EQ. 0 .AND. ipe .EQ. -1) ) THEN
967 jms = max( jps - max(shw,maxhalowidth), jds - bdy ) - 1
968 jme = min( jpe + max(shw,maxhalowidth), jde + bdy ) + 1
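! Worked example (hypothetical values): with jps = 31, jpe = 60, shw = 2, maxhalowidth = 6,
! bdy = 5 and a domain jds = 1, jde = 200, the memory dimensions become
! jms = max(31-6, 1-5) - 1 = 24 and jme = min(60+6, 200+5) + 1 = 67.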
974 ! handle setting the memory dimensions where there are no X elements assigned to this proc
975 IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
983 END SUBROUTINE compute_memory_dims_rsl_lite
985 ! internal, used below to select the correct MPI type argument for MPI calls
986 ! when reals are being autopromoted to doubles in the WRF build
987 INTEGER function getrealmpitype()
991 INTEGER rtypesize, dtypesize, ierr
992 CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
993 CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
994 IF ( RWORDSIZE .EQ. rtypesize ) THEN
995 getrealmpitype = MPI_REAL
996 ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
997 getrealmpitype = MPI_DOUBLE_PRECISION
999 CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
1002 ! required dummy initialization for a function that is never called
1006 END FUNCTION getrealmpitype
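! For example, in a default 4-byte-real build RWORDSIZE matches the size MPI reports for
! MPI_REAL, so getrealmpitype() returns MPI_REAL; in a build where reals are promoted to
! 8 bytes it matches MPI_DOUBLE_PRECISION and that type is returned instead.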
1008 REAL FUNCTION wrf_dm_max_real ( inval )
1014 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )
1015 wrf_dm_max_real = retval
1018 wrf_dm_max_real = inval
1020 END FUNCTION wrf_dm_max_real
1022 REAL FUNCTION wrf_dm_min_real ( inval )
1028 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, local_communicator, ierr )
1029 wrf_dm_min_real = retval
1032 wrf_dm_min_real = inval
1034 END FUNCTION wrf_dm_min_real
1036 SUBROUTINE wrf_dm_min_reals ( inval, retval, n )
1044 CALL mpi_allreduce ( inval, retval , n, getrealmpitype(), MPI_MIN, local_communicator, ierr )
1046 retval(1:n) = inval(1:n)
1048 END SUBROUTINE wrf_dm_min_reals
1050 REAL FUNCTION wrf_dm_sum_real ( inval )
1056 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, local_communicator, ierr )
1057 wrf_dm_sum_real = retval
1060 wrf_dm_sum_real = inval
1062 END FUNCTION wrf_dm_sum_real
1064 SUBROUTINE wrf_dm_sum_reals (inval, retval)
1066 REAL, INTENT(IN) :: inval(:)
1067 REAL, INTENT(OUT) :: retval(:)
1071 CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, ierr )
1075 END SUBROUTINE wrf_dm_sum_reals
1077 INTEGER FUNCTION wrf_dm_sum_integer ( inval )
1081 INTEGER inval, retval
1083 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, local_communicator, ierr )
1084 wrf_dm_sum_integer = retval
1087 wrf_dm_sum_integer = inval
1089 END FUNCTION wrf_dm_sum_integer
1091 SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
1095 REAL val, val_all( ntasks )
1096 INTEGER idex, jdex, ierr
1098 INTEGER dex_all (2,ntasks)
1101 dex(1) = idex ; dex(2) = jdex
1102 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
1103 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
1105 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1107 IF ( val_all(i) .GT. val ) THEN
1115 INTEGER idex, jdex, ierr
1117 END SUBROUTINE wrf_dm_maxval_real
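! Illustrative usage sketch (hypothetical caller; the local names wmax, i_loc, j_loc, k_loc
! are invented for the example):
!    wmax = w(i_loc,k_loc,j_loc) ; idex = i_loc ; jdex = j_loc
!    CALL wrf_dm_maxval ( wmax, idex, jdex )
! On return every task holds the global maximum and the (i,j) index reported by its owner.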
1119 #ifndef PROMOTE_FLOAT
1120 SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
1124 DOUBLE PRECISION val, val_all( ntasks )
1125 INTEGER idex, jdex, ierr
1127 INTEGER dex_all (2,ntasks)
1130 dex(1) = idex ; dex(2) = jdex
1131 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
1132 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
1134 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1136 IF ( val_all(i) .GT. val ) THEN
1143 DOUBLE PRECISION val
1144 INTEGER idex, jdex, ierr
1146 END SUBROUTINE wrf_dm_maxval_doubleprecision
1149 SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
1153 INTEGER val, val_all( ntasks )
1154 INTEGER idex, jdex, ierr
1156 INTEGER dex_all (2,ntasks)
1159 dex(1) = idex ; dex(2) = jdex
1160 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
1161 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
1163 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1165 IF ( val_all(i) .GT. val ) THEN
1175 END SUBROUTINE wrf_dm_maxval_integer
1177 ! For HWRF some additional computation is required. This is gopal's doing
1179 SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
1181 REAL val, val_all( ntasks )
1182 INTEGER idex, jdex, ierr
1184 INTEGER dex_all (2,ntasks)
1186 ! Collective operation. Each processor calls passing a local value and its index; on return
1187 ! all processors are passed back the minimum of all values passed and its index.
1194 CALL wrf_get_dm_communicator ( comm )
1195 dex(1) = idex ; dex(2) = jdex
1196 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1197 CALL mpi_allgather ( val, 1, MPI_REAL, val_all , 1, MPI_REAL, comm, ierr )
1199 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1201 IF ( val_all(i) .LT. val ) THEN
1208 END SUBROUTINE wrf_dm_minval_real
1210 #ifndef PROMOTE_FLOAT
1211 SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
1213 DOUBLE PRECISION val, val_all( ntasks )
1214 INTEGER idex, jdex, ierr
1216 INTEGER dex_all (2,ntasks)
1218 ! Collective operation. Each processor calls passing a local value and its index; on return
1219 ! all processors are passed back the minimum of all values passed and its index.
1226 CALL wrf_get_dm_communicator ( comm )
1227 dex(1) = idex ; dex(2) = jdex
1228 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1229 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
1231 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1233 IF ( val_all(i) .LT. val ) THEN
1240 END SUBROUTINE wrf_dm_minval_doubleprecision
1243 SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
1245 INTEGER val, val_all( ntasks )
1246 INTEGER idex, jdex, ierr
1248 INTEGER dex_all (2,ntasks)
1250 ! Collective operation. Each processor calls passing a local value and its index; on return
1251 ! all processors are passed back the minimum of all values passed and its index.
1258 CALL wrf_get_dm_communicator ( comm )
1259 dex(1) = idex ; dex(2) = jdex
1260 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1261 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
1263 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1265 IF ( val_all(i) .LT. val ) THEN
1272 END SUBROUTINE wrf_dm_minval_integer ! End of gopal's doing
1274 SUBROUTINE split_communicator
1279 INTEGER mpi_comm_here, mpi_comm_local, comdup, mytask, ntasks, ierr, io_status
1280 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1281 INTEGER thread_support_provided, thread_support_requested
1284 INTEGER, ALLOCATABLE :: icolor(:)
1285 INTEGER tasks_per_split
1286 NAMELIST /namelist_split/ tasks_per_split
1288 CALL MPI_INITIALIZED( mpi_inited, ierr )
1289 IF ( .NOT. mpi_inited ) THEN
1290 # if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
1291 thread_support_requested = MPI_THREAD_FUNNELED
1292 CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
1293 IF ( thread_support_provided .lt. thread_support_requested ) THEN
1294 CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
1297 CALL mpi_init ( ierr )
1299 mpi_comm_here = MPI_COMM_WORLD
1301 CALL atm_cmp_start( mpi_comm_here ) ! atmospheric side of HWRF coupler will split MPI_COMM_WORLD and return communicator as argument
1303 CALL wrf_set_dm_communicator( mpi_comm_here )
1305 CALL wrf_get_dm_communicator( mpi_comm_here )
1306 CALL wrf_termio_dup( mpi_comm_here )
1308 CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
1309 CALL mpi_comm_size ( mpi_comm_here, ntasks, ierr ) ;
1311 IF ( mytask .EQ. 0 ) THEN
1312 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1313 tasks_per_split = ntasks
1314 READ ( 27 , NML = namelist_split, IOSTAT=io_status )
1317 CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1318 IF ( io_status .NE. 0 ) THEN
1319 RETURN ! just ignore and return
1321 CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1322 IF ( tasks_per_split .GT. ntasks .OR. tasks_per_split .LE. 0 ) RETURN
1323 IF ( mod( ntasks, tasks_per_split ) .NE. 0 ) THEN
1324 CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
1327 ALLOCATE( icolor(ntasks) )
1329 DO WHILE ( j .LT. ntasks / tasks_per_split )
1330 DO i = 1, tasks_per_split
1331 icolor( i + j * tasks_per_split ) = j
1336 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1337 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
1338 CALL wrf_set_dm_communicator( mpi_comm_local )
1340 DEALLOCATE( icolor )
1342 END SUBROUTINE split_communicator
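! Example namelist.input fragment read by split_communicator above (hypothetical value):
!    &namelist_split
!     tasks_per_split = 64
!    /
! With 256 total tasks this would color the tasks into four 64-task groups, and each group's
! mpi_comm_local becomes its own WRF dm communicator. If the namelist record is absent the
! READ sets a nonzero io_status and the routine simply returns.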
1344 SUBROUTINE init_module_dm
1347 INTEGER mpi_comm_local, mpi_comm_here, ierr, mytask, nproc
1350 CALL mpi_initialized( mpi_inited, ierr )
1351 IF ( .NOT. mpi_inited ) THEN
1352 ! If MPI has not been initialized then initialize it and
1353 ! make comm_world the communicator
1354 ! Otherwise, something else (e.g. split_communicator) has already
1355 ! initialized MPI, so just grab the communicator that
1356 ! should already be stored and use that.
1357 CALL mpi_init ( ierr )
1358 mpi_comm_here = MPI_COMM_WORLD
1359 CALL wrf_set_dm_communicator ( mpi_comm_here )
1361 CALL wrf_get_dm_communicator( mpi_comm_local )
1362 CALL wrf_termio_dup( mpi_comm_local )
1364 END SUBROUTINE init_module_dm
1367 SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
1368 USE module_domain, ONLY : domain
1370 TYPE (domain), INTENT(INOUT) :: parent, nest
1371 INTEGER, INTENT(IN) :: dx,dy
1373 END SUBROUTINE wrf_dm_move_nest
1375 !------------------------------------------------------------------------------
1376 SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
1379 mp_local_cobmask, errf )
1381 !------------------------------------------------------------------------------
1382 ! PURPOSE: Do MPI allgatherv operation across processors to get the
1383 ! errors at each observation point on all processors.
1385 !------------------------------------------------------------------------------
1387 INTEGER, INTENT(IN) :: nsta ! Number of observation stations.
1388 INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
1389 INTEGER, INTENT(IN) :: niobf ! Number of observations.
1390 LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
1391 LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
1392 LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
1393 REAL, INTENT(INOUT) :: errf(nerrf, niobf)
1398 ! Local declarations
1399 integer i, n, nlocal_dot, nlocal_crs
1400 REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
1401 REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
1402 REAL SFP_BUFFER(NIOBF) ! Buffer for holding Surface pressure
1403 REAL PBL_BUFFER(NIOBF) ! Buffer for holding (real) KPBL index
1404 INTEGER N_BUFFER(NIOBF)
1405 REAL FULL_BUFFER(NIOBF)
1406 INTEGER IFULL_BUFFER(NIOBF)
1407 INTEGER IDISPLACEMENT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1408 INTEGER ICOUNT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1410 INTEGER :: MPI_COMM_COMP ! MPI group communicator
1411 INTEGER :: NPROCS ! Number of processors
1412 INTEGER :: IERR ! Error code from MPI routines
1414 ! Get communicator for MPI operations.
1415 CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
1417 ! Get the number of processors in the communicator.
1418 CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
1423 IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
1424 NLOCAL_DOT = NLOCAL_DOT + 1
1425 UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
1426 SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
1427 QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
1428 N_BUFFER(NLOCAL_DOT) = N
1431 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1432 ICOUNT,1,MPI_INTEGER, &
1436 IDISPLACEMENT(1) = 0
1438 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1440 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1441 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1442 MPI_INTEGER, MPI_COMM_COMP, IERR)
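! For illustration (hypothetical counts): if three tasks own 2, 5 and 3 dot-point obs,
! the allgather above yields ICOUNT = (/ 2, 5, 3 /) and the displacement loop gives
! IDISPLACEMENT = (/ 0, 2, 7 /), so each task's N_BUFFER entries land contiguously in
! IFULL_BUFFER (and likewise for the MPI_ALLGATHERV calls on the value buffers below).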
1444 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1445 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1446 MPI_REAL, MPI_COMM_COMP, IERR)
1448 ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1450 ! SURF PRESS AT U-POINTS
1451 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1452 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1453 MPI_REAL, MPI_COMM_COMP, IERR)
1455 ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1458 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
1459 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1460 MPI_REAL, MPI_COMM_COMP, IERR)
1462 ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1468 IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
1469 NLOCAL_DOT = NLOCAL_DOT + 1
1470 UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
1471 SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
1472 N_BUFFER(NLOCAL_DOT) = N
1475 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1476 ICOUNT,1,MPI_INTEGER, &
1480 IDISPLACEMENT(1) = 0
1482 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1484 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1485 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1486 MPI_INTEGER, MPI_COMM_COMP, IERR)
1488 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1489 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1490 MPI_REAL, MPI_COMM_COMP, IERR)
1492 ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1494 ! SURF PRESS AT V-POINTS
1495 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1496 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1497 MPI_REAL, MPI_COMM_COMP, IERR)
1499 ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1502 ! DO THE CROSS FIELDS, T AND Q
1505 IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
1506 NLOCAL_CRS = NLOCAL_CRS + 1
1507 UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
1508 QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
1509 PBL_BUFFER(NLOCAL_CRS) = ERRF(5,N) ! KPBL
1510 SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
1511 N_BUFFER(NLOCAL_CRS) = N
1514 CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
1515 ICOUNT,1,MPI_INTEGER, &
1517 IDISPLACEMENT(1) = 0
1519 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1521 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
1522 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1523 MPI_INTEGER, MPI_COMM_COMP, IERR)
1525 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
1526 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1527 MPI_REAL, MPI_COMM_COMP, IERR)
1530 ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1533 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
1534 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1535 MPI_REAL, MPI_COMM_COMP, IERR)
1537 ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1540 CALL MPI_ALLGATHERV( PBL_BUFFER, NLOCAL_CRS, MPI_REAL, &
1541 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1542 MPI_REAL, MPI_COMM_COMP, IERR)
1544 ERRF(5,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1546 ! SURF PRESS AT MASS POINTS
1547 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
1548 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1549 MPI_REAL, MPI_COMM_COMP, IERR)
1551 ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1554 END SUBROUTINE get_full_obs_vector
1558 SUBROUTINE wrf_dm_maxtile_real ( val , tile)
1560 REAL val, val_all( ntasks )
1565 ! Collective operation. Each processor calls passing a local value and its index; on return
1566 ! all processors are passed back the maximum of all values passed and its tile number.
1573 CALL wrf_get_dm_communicator ( comm )
1574 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
1578 IF ( val_all(i) .GT. val ) THEN
1584 END SUBROUTINE wrf_dm_maxtile_real
1587 SUBROUTINE wrf_dm_mintile_real ( val , tile)
1589 REAL val, val_all( ntasks )
1594 ! Collective operation. Each processor calls passing a local value and its index; on return
1595 ! all processors are passed back the minimum of all values passed and its tile number.
1602 CALL wrf_get_dm_communicator ( comm )
1603 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), comm, ierr )
1607 IF ( val_all(i) .LT. val ) THEN
1613 END SUBROUTINE wrf_dm_mintile_real
1616 SUBROUTINE wrf_dm_mintile_double ( val , tile)
1618 DOUBLE PRECISION val, val_all( ntasks )
1623 ! Collective operation. Each processor calls passing a local value and its index; on return
1624 ! all processors are passed back the minimum of all values passed and its tile number.
1631 CALL wrf_get_dm_communicator ( comm )
1632 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
1636 IF ( val_all(i) .LT. val ) THEN
1642 END SUBROUTINE wrf_dm_mintile_double
1645 SUBROUTINE wrf_dm_tile_val_int ( val , tile)
1647 INTEGER val, val_all( ntasks )
1652 ! Collective operation. Get value from input tile.
1659 CALL wrf_get_dm_communicator ( comm )
1660 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
1663 END SUBROUTINE wrf_dm_tile_val_int
1665 SUBROUTINE wrf_get_hostname ( str )
1669 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
1674 END SUBROUTINE wrf_get_hostname
1676 SUBROUTINE wrf_get_hostid ( hostid )
1679 INTEGER i, sz, n, cs
1680 CALL rsl_lite_get_hostname( tmp, 512, n, cs )
1683 END SUBROUTINE wrf_get_hostid
1685 END MODULE module_dm
1687 !=========================================================================
1688 ! wrf_dm_patch_domain has to be outside the module because it is called by a
1689 ! routine in module_domain but itself depends on module_domain (circular USE)
1691 SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
1692 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1693 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1694 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1695 sp1x , ep1x , sm1x , em1x , &
1696 sp2x , ep2x , sm2x , em2x , &
1697 sp3x , ep3x , sm3x , em3x , &
1698 sp1y , ep1y , sm1y , em1y , &
1699 sp2y , ep2y , sm2y , em2y , &
1700 sp3y , ep3y , sm3y , em3y , &
1702 USE module_domain, ONLY : domain, head_grid, find_grid_by_id
1703 USE module_dm, ONLY : patch_domain_rsl_lite
1706 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
1707 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
1708 sm1 , em1 , sm2 , em2 , sm3 , em3
1709 INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
1710 sm1x , em1x , sm2x , em2x , sm3x , em3x
1711 INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
1712 sm1y , em1y , sm2y , em2y , sm3y , em3y
1713 INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
1715 TYPE(domain), POINTER :: parent
1716 TYPE(domain), POINTER :: grid_ptr
1718 ! this is necessary because we cannot pass parent directly into
1719 ! wrf_dm_patch_domain: creating the correct interface definitions
1720 ! would generate a circular USE reference between module_domain and module_dm.
1721 ! See the comment of this date in module_domain for more information. JM 20020416
1724 grid_ptr => head_grid
1725 CALL find_grid_by_id( parent_id , grid_ptr , parent )
1727 CALL patch_domain_rsl_lite ( id , parent, parent_id , &
1728 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1729 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1730 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1731 sp1x , ep1x , sm1x , em1x , &
1732 sp2x , ep2x , sm2x , em2x , &
1733 sp3x , ep3x , sm3x , em3x , &
1734 sp1y , ep1y , sm1y , em1y , &
1735 sp2y , ep2y , sm2y , em2y , &
1736 sp3y , ep3y , sm3y , em3y , &
1740 END SUBROUTINE wrf_dm_patch_domain
1742 SUBROUTINE wrf_termio_dup( comm )
1744 INTEGER, INTENT(IN) :: comm
1745 INTEGER mytask, ntasks
1749 CALL mpi_comm_size(comm, ntasks, ierr )
1750 CALL mpi_comm_rank(comm, mytask, ierr )
1751 write(0,*)'starting wrf task ',mytask,' of ',ntasks
1752 CALL rsl_error_dup1( mytask )
1757 END SUBROUTINE wrf_termio_dup
1759 SUBROUTINE wrf_get_myproc( myproc )
1760 USE module_dm , ONLY : mytask
1765 END SUBROUTINE wrf_get_myproc
1767 SUBROUTINE wrf_get_nproc( nproc )
1768 USE module_dm , ONLY : ntasks
1773 END SUBROUTINE wrf_get_nproc
1775 SUBROUTINE wrf_get_nprocx( nprocx )
1776 USE module_dm , ONLY : ntasks_x
1781 END SUBROUTINE wrf_get_nprocx
1783 SUBROUTINE wrf_get_nprocy( nprocy )
1784 USE module_dm , ONLY : ntasks_y
1789 END SUBROUTINE wrf_get_nprocy
1791 SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
1792 USE module_dm , ONLY : local_communicator
1801 CHARACTER*1 BUF(size)
1804 CALL BYTE_BCAST ( buf , size, local_communicator )
1807 END SUBROUTINE wrf_dm_bcast_bytes
1809 SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
1813 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
1818 INTEGER ibuf(256),i,n
1821 ! Root task is required to have the correct value of N1, other tasks
1822 ! might not have the correct value.
1823 CALL wrf_dm_bcast_integer( n , 1 )
1824 IF (n .GT. 256) n = 256
1827 ibuf(I) = ichar(buf(I:I))
1829 CALL wrf_dm_bcast_integer( ibuf, n )
1832 buf(i:i) = char(ibuf(i))
1837 END SUBROUTINE wrf_dm_bcast_string
1839 SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
1843 CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
1845 END SUBROUTINE wrf_dm_bcast_integer
1847 SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
1850 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
1851 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
1852 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
1853 ! since we are not indexing the globbuf and Field arrays it does not matter
1855 CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
1857 END SUBROUTINE wrf_dm_bcast_double
1859 SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
1863 CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
1865 END SUBROUTINE wrf_dm_bcast_real
1867 SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
1871 CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
1873 END SUBROUTINE wrf_dm_bcast_logical
1875 SUBROUTINE write_68( grid, v , s , &
1876 ids, ide, jds, jde, kds, kde, &
1877 ims, ime, jms, jme, kms, kme, &
1878 its, ite, jts, jte, kts, kte )
1879 USE module_domain, ONLY : domain
1881 TYPE(domain) , INTENT (INOUT) :: grid
1883 INTEGER ids, ide, jds, jde, kds, kde, &
1884 ims, ime, jms, jme, kms, kme, &
1885 its, ite, jts, jte, kts, kte
1886 REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
1890 logical, external :: wrf_dm_on_monitor
1891 real globbuf( ids:ide, kds:kde, jds:jde )
1892 character*3 ord, stag
1894 if ( kds == kde ) then
1897 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1898 ids, ide, jds, jde, kds, kde, &
1899 ims, ime, jms, jme, kms, kme, &
1900 its, ite, jts, jte, kts, kte )
1905 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1906 ids, ide, kds, kde, jds, jde, &
1907 ims, ime, kms, kme, jms, jme, &
1908 its, ite, kts, kte, jts, jte )
1912 if ( wrf_dm_on_monitor() ) THEN
1913 WRITE(68,*) ide-ids+1, jde-jds+1 , s
1916 WRITE(68,*) globbuf(i,1,j)
1924 SUBROUTINE wrf_abort
1929 CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
1933 END SUBROUTINE wrf_abort
1935 SUBROUTINE wrf_dm_shutdown
1939 CALL MPI_FINALIZE( ierr )
1942 END SUBROUTINE wrf_dm_shutdown
1944 LOGICAL FUNCTION wrf_dm_on_monitor()
1948 INTEGER tsk, ierr, mpi_comm_local
1949 CALL wrf_get_dm_communicator( mpi_comm_local )
1950 CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
1951 wrf_dm_on_monitor = tsk .EQ. 0
1953 wrf_dm_on_monitor = .TRUE.
1956 END FUNCTION wrf_dm_on_monitor
1958 SUBROUTINE rsl_comm_iter_init(shw,ps,pe)
1960 INTEGER iter, plus_send_start, plus_recv_start, &
1961 minus_send_start, minus_recv_start
1962 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
1963 minus_send_start, minus_recv_start
1965 minus_send_start = ps
1966 minus_recv_start = ps-1
1967 plus_send_start = pe
1968 plus_recv_start = pe+1
1969 END SUBROUTINE rsl_comm_iter_init
1971 LOGICAL FUNCTION rsl_comm_iter ( id , is_intermediate, &
1972 shw , xy , ds, de_in, ps, pe, nds,nde, &
1973 sendbeg_m, sendw_m, sendbeg_p, sendw_p, &
1974 recvbeg_m, recvw_m, recvbeg_p, recvw_p )
1975 USE module_dm, ONLY : ntasks_x, ntasks_y, mytask_x, mytask_y
1977 INTEGER, INTENT(IN) :: id,shw,xy,ds,de_in,ps,pe,nds,nde
1978 LOGICAL, INTENT(IN) :: is_intermediate ! treated differently, coarse but with same decomp as nest
1979 INTEGER, INTENT(OUT) :: sendbeg_m, sendw_m, sendbeg_p, sendw_p
1980 INTEGER, INTENT(OUT) :: recvbeg_m, recvw_m, recvbeg_p, recvw_p
1981 INTEGER k, kn, ni, nj, de, Px, Py, nt, me, lb, ub, ierr
1983 INTEGER iter, plus_send_start, plus_recv_start, &
1984 minus_send_start, minus_recv_start
1985 INTEGER parent_grid_ratio, parent_start
1986 COMMON /rcii/ iter, plus_send_start, plus_recv_start, &
1987 minus_send_start, minus_recv_start
1989 #if (NMM_CORE == 1 )
1990 ! In the case of the NMM core, the domain only ever runs from ids..ide-1 and jds..jde-1, so
1991 ! adjust the decomposition to reflect this. 20081206 JM
1997 IF ( xy .EQ. 1 ) THEN ! X/I axis
2000 IF ( is_intermediate ) THEN
2001 CALL nl_get_i_parent_start(id,parent_start)
2002 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2007 IF ( is_intermediate ) THEN
2008 CALL nl_get_j_parent_start(id,parent_start)
2009 CALL nl_get_parent_grid_ratio(id,parent_grid_ratio)
2019 IF ( me .GT. 0 ) THEN
2020 lb = minus_send_start
2024 IF ( is_intermediate ) THEN
2025 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2026 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2028 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2030 IF ( Px .NE. me+(iter-1) ) THEN
2033 minus_send_start = minus_send_start+1
2034 sendw_m = sendw_m + 1
2040 IF ( me .GT. 0 ) THEN
2041 ub = minus_recv_start
2043 DO k = minus_recv_start,ps-shw,-1
2045 IF ( is_intermediate ) THEN
2046 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2047 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2049 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2051 IF ( Px .NE. me-iter ) THEN
2054 minus_recv_start = minus_recv_start-1
2055 recvw_m = recvw_m + 1
2062 IF ( me .LT. nt-1 ) THEN
2063 ub = plus_send_start
2064 sendbeg_p = pe - ub + 1
2065 DO k = ub,pe-shw+1,-1
2067 IF ( is_intermediate ) THEN
2068 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2069 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2071 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2073 IF ( Px .NE. me-(iter-1) ) THEN
2076 plus_send_start = plus_send_start - 1
2077 sendw_p = sendw_p + 1
2083 IF ( me .LT. nt-1 ) THEN
2084 lb = plus_recv_start
2088 IF ( is_intermediate ) THEN
2089 kn = ( k - parent_start ) * parent_grid_ratio + 1 + 1 ;
2090 CALL task_for_point (kn,1,nds,nde,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2092 CALL task_for_point (k,1,ds,de,1,1,nt,1,Px,Py,1,1,ierr) ! assume same alg. for x and y and just use x
2094 IF ( Px .NE. me+iter ) THEN
2097 plus_recv_start = plus_recv_start + 1
2098 recvw_p = recvw_p + 1
2102 if ( iter .eq. 1 ) then
2107 sendw_m = 0 ; sendw_p = 0 ; recvw_m = 0 ; recvw_p = 0
2108 sendbeg_m = 1 ; if ( me .GT. 0 ) sendw_m = shw ;
2109 sendbeg_p = 1 ; if ( me .LT. nt-1 ) sendw_p = shw
2110 recvbeg_m = 1 ; if ( me .GT. 0 ) recvw_m = shw ;
2111 recvbeg_p = 1 ; if ( me .LT. nt-1 ) recvw_p = shw ;
2113 ! write(0,*)'shw ', shw , ' xy ',xy
2114 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2115 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2116 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2119 ! write(0,*)'shw ', shw , ' xy ',xy
2120 ! write(0,*)' ds, de, ps, pe, nds,nde ',ds, de, ps, pe, nds,nde
2121 ! write(0,*)'sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p '
2122 ! write(0,*)sendbeg_m, sendw_m, sendbeg_p, sendw_p, recvbeg_m, recvw_m, recvbeg_p, recvw_p
2124 rsl_comm_iter = went
2125 END FUNCTION rsl_comm_iter
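! A hedged sketch of how the iterator above is typically driven by the generated
! communication code (argument names are illustrative; the actual call sites live in the
! registry-generated include files, not here).  Each pass returns the strip of rows or
! columns to pack and the strip to receive; the loop ends when rsl_comm_iter returns
! .FALSE. because nothing remains to send or receive within the halo width shw.
!   CALL rsl_comm_iter_init ( shw, jps, jpe )
!   DO WHILE ( rsl_comm_iter ( grid%id, is_intermediate, shw, 0,        &
!                              jds, jde, jps, jpe, njds, njde,          &
!                              sendbeg_m, sendw_m, sendbeg_p, sendw_p,  &
!                              recvbeg_m, recvw_m, recvbeg_p, recvw_p ) )
!     ! pack sendw_m / sendw_p rows, exchange with the minus/plus neighbors,
!     ! then unpack recvw_m / recvw_p rows
!   ENDDO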
2127 INTEGER FUNCTION wrf_dm_monitor_rank()
2129 wrf_dm_monitor_rank = 0
2131 END FUNCTION wrf_dm_monitor_rank
2133 SUBROUTINE wrf_get_dm_communicator ( communicator )
2134 USE module_dm , ONLY : local_communicator
2136 INTEGER , INTENT(OUT) :: communicator
2137 communicator = local_communicator
2139 END SUBROUTINE wrf_get_dm_communicator
2141 SUBROUTINE wrf_get_dm_communicator_x ( communicator )
2142 USE module_dm , ONLY : local_communicator_x
2144 INTEGER , INTENT(OUT) :: communicator
2145 communicator = local_communicator_x
2147 END SUBROUTINE wrf_get_dm_communicator_x
2149 SUBROUTINE wrf_get_dm_communicator_y ( communicator )
2150 USE module_dm , ONLY : local_communicator_y
2152 INTEGER , INTENT(OUT) :: communicator
2153 communicator = local_communicator_y
2155 END SUBROUTINE wrf_get_dm_communicator_y
2157 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
2158 USE module_dm , ONLY : local_iocommunicator
2160 INTEGER , INTENT(OUT) :: iocommunicator
2161 iocommunicator = local_iocommunicator
2163 END SUBROUTINE wrf_get_dm_iocommunicator
2165 SUBROUTINE wrf_set_dm_communicator ( communicator )
2166 USE module_dm , ONLY : local_communicator
2168 INTEGER , INTENT(IN) :: communicator
2169 local_communicator = communicator
2171 END SUBROUTINE wrf_set_dm_communicator
2173 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
2174 USE module_dm , ONLY : local_iocommunicator
2176 INTEGER , INTENT(IN) :: iocommunicator
2177 local_iocommunicator = iocommunicator
2179 END SUBROUTINE wrf_set_dm_iocommunicator
2181 SUBROUTINE wrf_get_dm_ntasks_x ( retval )
2182 USE module_dm , ONLY : ntasks_x
2184 INTEGER , INTENT(OUT) :: retval
2187 END SUBROUTINE wrf_get_dm_ntasks_x
2189 SUBROUTINE wrf_get_dm_ntasks_y ( retval )
2190 USE module_dm , ONLY : ntasks_y
2192 INTEGER , INTENT(OUT) :: retval
2195 END SUBROUTINE wrf_get_dm_ntasks_y
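! A minimal usage sketch (hypothetical helper, not part of the WRF source): code that
! needs to talk to MPI directly first obtains WRF's model communicator through the
! accessor above and then queries it with ordinary MPI calls.
SUBROUTINE example_report_my_task ( tsk )
  IMPLICIT NONE
  INTEGER, INTENT(OUT) :: tsk
  INTEGER comm, ierr
  CALL wrf_get_dm_communicator ( comm )    ! WRF's model communicator
  CALL mpi_comm_rank ( comm, tsk, ierr )   ! this task's rank within it
END SUBROUTINE example_report_my_task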
2198 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2200 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
2201 DS1,DE1,DS2,DE2,DS3,DE3,&
2202 MS1,ME1,MS2,ME2,MS3,ME3,&
2203 PS1,PE1,PS2,PE2,PS3,PE3 )
2205 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2206 MS1,ME1,MS2,ME2,MS3,ME3,&
2207 PS1,PE1,PS2,PE2,PS3,PE3
2208 CHARACTER *(*) stagger,ordering
2213 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
2214 DS1,DE1,DS2,DE2,DS3,DE3,&
2215 MS1,ME1,MS2,ME2,MS3,ME3,&
2216 PS1,PE1,PS2,PE2,PS3,PE3 )
2219 END SUBROUTINE wrf_patch_to_global_real
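! A hedged usage sketch for the routine above (names such as field, globbuf, stag and
! ord are illustrative).  The three index triplets are, in the field's storage order,
! the domain (d), memory (m) and patch (p) dimensions; on return the assembled field is
! present in globbuf on the monitor task only, not on the other tasks.
!   CALL wrf_patch_to_global_real ( field, globbuf, grid%domdesc, stag, ord, &
!                                   ids, ide, kds, kde, jds, jde,            &
!                                   ims, ime, kms, kme, jms, jme,            &
!                                   ips, ipe, kps, kpe, jps, jpe )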
2221 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
2222 DS1,DE1,DS2,DE2,DS3,DE3,&
2223 MS1,ME1,MS2,ME2,MS3,ME3,&
2224 PS1,PE1,PS2,PE2,PS3,PE3 )
2226 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2227 MS1,ME1,MS2,ME2,MS3,ME3,&
2228 PS1,PE1,PS2,PE2,PS3,PE3
2229 CHARACTER *(*) stagger,ordering
2231 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2232 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2233 ! for having this separate routine is so that we pass the correct MPI type to mpi_scatterv;
2234 ! since we are not indexing the globbuf and Field arrays, the declared type does not matter.
2238 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
2239 DS1,DE1,DS2,DE2,DS3,DE3,&
2240 MS1,ME1,MS2,ME2,MS3,ME3,&
2241 PS1,PE1,PS2,PE2,PS3,PE3 )
2244 END SUBROUTINE wrf_patch_to_global_double
2247 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
2248 DS1,DE1,DS2,DE2,DS3,DE3,&
2249 MS1,ME1,MS2,ME2,MS3,ME3,&
2250 PS1,PE1,PS2,PE2,PS3,PE3 )
2252 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2253 MS1,ME1,MS2,ME2,MS3,ME3,&
2254 PS1,PE1,PS2,PE2,PS3,PE3
2255 CHARACTER *(*) stagger,ordering
2260 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
2261 DS1,DE1,DS2,DE2,DS3,DE3,&
2262 MS1,ME1,MS2,ME2,MS3,ME3,&
2263 PS1,PE1,PS2,PE2,PS3,PE3 )
2266 END SUBROUTINE wrf_patch_to_global_integer
2269 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
2270 DS1,DE1,DS2,DE2,DS3,DE3,&
2271 MS1,ME1,MS2,ME2,MS3,ME3,&
2272 PS1,PE1,PS2,PE2,PS3,PE3 )
2274 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2275 MS1,ME1,MS2,ME2,MS3,ME3,&
2276 PS1,PE1,PS2,PE2,PS3,PE3
2277 CHARACTER *(*) stagger,ordering
2282 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
2283 DS1,DE1,DS2,DE2,DS3,DE3,&
2284 MS1,ME1,MS2,ME2,MS3,ME3,&
2285 PS1,PE1,PS2,PE2,PS3,PE3 )
2288 END SUBROUTINE wrf_patch_to_global_logical
2291 # define FRSTELEM (1)
2296 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
2297 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2298 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2299 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2300 USE module_driver_constants
2302 USE module_wrf_error, ONLY : wrf_at_debug_level
2303 USE module_dm, ONLY : local_communicator, ntasks
2306 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2307 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2308 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2309 CHARACTER *(*) stagger,ordering
2310 INTEGER domdesc,typesize,ierr
2314 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2315 MS1,ME1,MS2,ME2,MS3,ME3,&
2316 PS1,PE1,PS2,PE2,PS3,PE3
2317 INTEGER ids,ide,jds,jde,kds,kde,&
2318 ims,ime,jms,jme,kms,kme,&
2319 ips,ipe,jps,jpe,kps,kpe
2320 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2322 INTEGER i, j, k, ndim
2323 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2324 ! allocated further down, after the D indices are potentially recalculated for staggering
2325 REAL, ALLOCATABLE :: tmpbuf( : )
2326 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2328 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2329 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2330 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2332 SELECT CASE ( TRIM(ordering) )
2336 ndim = 3 ! where appropriate
2339 SELECT CASE ( TRIM(ordering) )
2341 ! the non-staggered variables come in at one-less than
2342 ! domain dimensions, but code wants full domain spec, so
2343 ! adjust if not staggered
2344 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2345 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2346 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2348 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2349 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2350 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2352 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2353 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2354 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2356 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2357 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2358 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
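! For example, an unstaggered field passed in XYZ order arrives with DE1 = ide-1 and
! DE2 = jde-1 (and DE3 = kde-1 in 3D); the adjustments above bump each unstaggered end
! index back up to the full domain value, so the buffers below are always sized to the
! full (staggered) domain extents regardless of the field's staggering.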
2362 ! moved to here to be after the potential recalculations of D dims
2363 IF ( wrf_dm_on_monitor() ) THEN
2364 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2366 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2368 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
2370 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2371 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2372 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2374 IF ( typesize .EQ. RWORDSIZE ) THEN
2375 CALL just_patch_r ( buf , locbuf , size(locbuf), &
2376 PS1, PE1, PS2, PE2, PS3, PE3 , &
2377 MS1, ME1, MS2, ME2, MS3, ME3 )
2378 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2379 CALL just_patch_i ( buf , locbuf , size(locbuf), &
2380 PS1, PE1, PS2, PE2, PS3, PE3 , &
2381 MS1, ME1, MS2, ME2, MS3, ME3 )
2382 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2383 CALL just_patch_d ( buf , locbuf , size(locbuf), &
2384 PS1, PE1, PS2, PE2, PS3, PE3 , &
2385 MS1, ME1, MS2, ME2, MS3, ME3 )
2386 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2387 CALL just_patch_l ( buf , locbuf , size(locbuf), &
2388 PS1, PE1, PS2, PE2, PS3, PE3 , &
2389 MS1, ME1, MS2, ME2, MS3, ME3 )
2392 ! defined in external/io_quilt
2393 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2397 CALL collect_on_comm0 ( local_communicator , typesize , &
2398 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
2399 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
2401 ndim = len(TRIM(ordering))
2403 IF ( wrf_at_debug_level(500) ) THEN
2407 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
2409 IF ( typesize .EQ. RWORDSIZE ) THEN
2410 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
2411 DS1, DE1, DS2, DE2, DS3, DE3 , &
2413 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2414 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
2415 DS1, DE1, DS2, DE2, DS3, DE3 , &
2417 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2418 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
2419 DS1, DE1, DS2, DE2, DS3, DE3 , &
2421 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2422 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
2423 DS1, DE1, DS2, DE2, DS3, DE3 , &
2429 IF ( wrf_at_debug_level(500) ) THEN
2430 CALL end_timing('wrf_patch_to_global_generic')
2432 DEALLOCATE( tmpbuf )
2435 END SUBROUTINE wrf_patch_to_global_generic
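! The helpers below implement the two halves of the gather above: the just_patch_*
! routines copy one task's patch out of its memory-dimensioned array into a contiguous
! buffer, and the patch_2_outbuf_* routines (run on the monitor) place each task's
! contribution into the global array at the location recorded in Gpatch.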
2437 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
2438 PS1,PE1,PS2,PE2,PS3,PE3, &
2439 MS1,ME1,MS2,ME2,MS3,ME3 )
2441 INTEGER , INTENT(IN) :: noutbuf
2442 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2443 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2444 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2445 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
2447 INTEGER :: i,j,k,n , icurs
2452 outbuf( icurs ) = inbuf( i, j, k )
2458 END SUBROUTINE just_patch_i
2460 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
2461 PS1,PE1,PS2,PE2,PS3,PE3, &
2462 MS1,ME1,MS2,ME2,MS3,ME3 )
2464 INTEGER , INTENT(IN) :: noutbuf
2465 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2466 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2467 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2468 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2470 INTEGER :: i,j,k , icurs
2475 outbuf( icurs ) = inbuf( i, j, k )
2481 END SUBROUTINE just_patch_r
2483 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
2484 PS1,PE1,PS2,PE2,PS3,PE3, &
2485 MS1,ME1,MS2,ME2,MS3,ME3 )
2487 INTEGER , INTENT(IN) :: noutbuf
2488 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2489 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2490 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2491 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2493 INTEGER :: i,j,k,n , icurs
2498 outbuf( icurs ) = inbuf( i, j, k )
2504 END SUBROUTINE just_patch_d
2506 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
2507 PS1,PE1,PS2,PE2,PS3,PE3, &
2508 MS1,ME1,MS2,ME2,MS3,ME3 )
2510 INTEGER , INTENT(IN) :: noutbuf
2511 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
2512 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2513 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2514 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
2516 INTEGER :: i,j,k,n , icurs
2521 outbuf( icurs ) = inbuf( i, j, k )
2527 END SUBROUTINE just_patch_l
2530 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
2531 DS1,DE1,DS2,DE2,DS3,DE3, &
2533 USE module_dm, ONLY : ntasks
2535 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2536 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2537 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2539 INTEGER :: i,j,k,n , icurs
2542 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2543 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2544 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2545 outbuf( i, j, k ) = inbuf( icurs )
2553 END SUBROUTINE patch_2_outbuf_r
2555 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
2556 DS1,DE1,DS2,DE2,DS3,DE3,&
2558 USE module_dm, ONLY : ntasks
2560 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2561 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2562 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2564 INTEGER :: i,j,k,n , icurs
2567 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2568 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2569 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2570 outbuf( i, j, k ) = inbuf( icurs )
2577 END SUBROUTINE patch_2_outbuf_i
2579 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
2580 DS1,DE1,DS2,DE2,DS3,DE3,&
2582 USE module_dm, ONLY : ntasks
2584 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2585 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2586 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2588 INTEGER :: i,j,k,n , icurs
2591 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2592 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2593 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2594 outbuf( i, j, k ) = inbuf( icurs )
2601 END SUBROUTINE patch_2_outbuf_d
2603 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
2604 DS1,DE1,DS2,DE2,DS3,DE3,&
2606 USE module_dm, ONLY : ntasks
2608 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2609 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2610 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2612 INTEGER :: i,j,k,n , icurs
2615 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2616 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2617 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2618 outbuf( i, j, k ) = inbuf( icurs )
2625 END SUBROUTINE patch_2_outbuf_l
2627 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2629 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
2630 DS1,DE1,DS2,DE2,DS3,DE3,&
2631 MS1,ME1,MS2,ME2,MS3,ME3,&
2632 PS1,PE1,PS2,PE2,PS3,PE3 )
2634 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2635 MS1,ME1,MS2,ME2,MS3,ME3,&
2636 PS1,PE1,PS2,PE2,PS3,PE3
2637 CHARACTER *(*) stagger,ordering
2642 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
2643 DS1,DE1,DS2,DE2,DS3,DE3,&
2644 MS1,ME1,MS2,ME2,MS3,ME3,&
2645 PS1,PE1,PS2,PE2,PS3,PE3 )
2647 END SUBROUTINE wrf_global_to_patch_real
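! A hedged usage sketch for the routine above (field and globbuf are illustrative
! names).  It is the inverse of wrf_patch_to_global_real: globbuf need only be valid on
! the monitor task, and on return every task's patch of field has been filled from it.
!   CALL wrf_global_to_patch_real ( globbuf, field, grid%domdesc, stag, ord, &
!                                   ids, ide, kds, kde, jds, jde,            &
!                                   ims, ime, kms, kme, jms, jme,            &
!                                   ips, ipe, kps, kpe, jps, jpe )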
2649 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
2650 DS1,DE1,DS2,DE2,DS3,DE3,&
2651 MS1,ME1,MS2,ME2,MS3,ME3,&
2652 PS1,PE1,PS2,PE2,PS3,PE3 )
2654 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2655 MS1,ME1,MS2,ME2,MS3,ME3,&
2656 PS1,PE1,PS2,PE2,PS3,PE3
2657 CHARACTER *(*) stagger,ordering
2659 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2660 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2661 ! for having this separate routine is so that we pass the correct MPI type to mpi_scatterv;
2662 ! since we are not indexing the globbuf and Field arrays, the declared type does not matter.
2666 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
2667 DS1,DE1,DS2,DE2,DS3,DE3,&
2668 MS1,ME1,MS2,ME2,MS3,ME3,&
2669 PS1,PE1,PS2,PE2,PS3,PE3 )
2671 END SUBROUTINE wrf_global_to_patch_double
2674 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
2675 DS1,DE1,DS2,DE2,DS3,DE3,&
2676 MS1,ME1,MS2,ME2,MS3,ME3,&
2677 PS1,PE1,PS2,PE2,PS3,PE3 )
2679 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2680 MS1,ME1,MS2,ME2,MS3,ME3,&
2681 PS1,PE1,PS2,PE2,PS3,PE3
2682 CHARACTER *(*) stagger,ordering
2687 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
2688 DS1,DE1,DS2,DE2,DS3,DE3,&
2689 MS1,ME1,MS2,ME2,MS3,ME3,&
2690 PS1,PE1,PS2,PE2,PS3,PE3 )
2692 END SUBROUTINE wrf_global_to_patch_integer
2694 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
2695 DS1,DE1,DS2,DE2,DS3,DE3,&
2696 MS1,ME1,MS2,ME2,MS3,ME3,&
2697 PS1,PE1,PS2,PE2,PS3,PE3 )
2699 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2700 MS1,ME1,MS2,ME2,MS3,ME3,&
2701 PS1,PE1,PS2,PE2,PS3,PE3
2702 CHARACTER *(*) stagger,ordering
2707 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
2708 DS1,DE1,DS2,DE2,DS3,DE3,&
2709 MS1,ME1,MS2,ME2,MS3,ME3,&
2710 PS1,PE1,PS2,PE2,PS3,PE3 )
2712 END SUBROUTINE wrf_global_to_patch_logical
2714 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
2715 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2716 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2717 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2718 USE module_dm, ONLY : local_communicator, ntasks
2719 USE module_driver_constants
2721 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2722 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2723 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2724 CHARACTER *(*) stagger,ordering
2725 INTEGER domdesc,typesize,ierr
2729 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2730 MS1,ME1,MS2,ME2,MS3,ME3,&
2731 PS1,PE1,PS2,PE2,PS3,PE3
2732 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2734 INTEGER i,j,k,ord,ord2d,ndim
2735 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2736 REAL, ALLOCATABLE :: tmpbuf( : )
2737 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2739 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2740 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2741 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2743 SELECT CASE ( TRIM(ordering) )
2747 ndim = 3 ! where appropriate
2750 SELECT CASE ( TRIM(ordering) )
2752 ! the non-staggered variables come in at one-less than
2753 ! domain dimensions, but code wants full domain spec, so
2754 ! adjust if not staggered
2755 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2756 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2757 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2759 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2760 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2761 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2763 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2764 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2765 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2767 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2768 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2769 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
2773 ! moved to here to be after the potential recalculations of D dims
2774 IF ( wrf_dm_on_monitor() ) THEN
2775 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2777 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2779 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
2781 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2782 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2783 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2785 ! defined in external/io_quilt
2786 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2789 ndim = len(TRIM(ordering))
2791 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
2792 IF ( typesize .EQ. RWORDSIZE ) THEN
2793 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
2794 DS1, DE1, DS2, DE2, DS3, DE3 , &
2795 MS1, ME1, MS2, ME2, MS3, ME3 , &
2797 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2798 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
2799 DS1, DE1, DS2, DE2, DS3, DE3 , &
2801 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2802 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
2803 DS1, DE1, DS2, DE2, DS3, DE3 , &
2805 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2806 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
2807 DS1, DE1, DS2, DE2, DS3, DE3 , &
2812 CALL dist_on_comm0 ( local_communicator , typesize , &
2813 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
2814 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
2816 IF ( typesize .EQ. RWORDSIZE ) THEN
2817 CALL all_sub_r ( locbuf , buf , &
2818 PS1, PE1, PS2, PE2, PS3, PE3 , &
2819 MS1, ME1, MS2, ME2, MS3, ME3 )
2821 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2822 CALL all_sub_i ( locbuf , buf , &
2823 PS1, PE1, PS2, PE2, PS3, PE3 , &
2824 MS1, ME1, MS2, ME2, MS3, ME3 )
2825 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2826 CALL all_sub_d ( locbuf , buf , &
2827 PS1, PE1, PS2, PE2, PS3, PE3 , &
2828 MS1, ME1, MS2, ME2, MS3, ME3 )
2829 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2830 CALL all_sub_l ( locbuf , buf , &
2831 PS1, PE1, PS2, PE2, PS3, PE3 , &
2832 MS1, ME1, MS2, ME2, MS3, ME3 )
2836 DEALLOCATE ( tmpbuf )
2839 END SUBROUTINE wrf_global_to_patch_generic
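! The helpers below implement the two halves of the scatter above: the outbuf_2_patch_*
! routines (run on the monitor) lay the global array out patch-by-patch using Gpatch so
! that dist_on_comm0 can distribute it, and the all_sub_* routines copy each task's
! received patch into its memory-dimensioned output array.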
2841 SUBROUTINE all_sub_i ( inbuf , outbuf, &
2842 PS1,PE1,PS2,PE2,PS3,PE3, &
2843 MS1,ME1,MS2,ME2,MS3,ME3 )
2845 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2846 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2847 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2848 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2850 INTEGER :: i,j,k,n , icurs
2855 outbuf( i, j, k ) = inbuf ( icurs )
2861 END SUBROUTINE all_sub_i
2863 SUBROUTINE all_sub_r ( inbuf , outbuf, &
2864 PS1,PE1,PS2,PE2,PS3,PE3, &
2865 MS1,ME1,MS2,ME2,MS3,ME3 )
2867 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2868 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2869 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2870 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2872 INTEGER :: i,j,k,n , icurs
2877 outbuf( i, j, k ) = inbuf ( icurs )
2884 END SUBROUTINE all_sub_r
2886 SUBROUTINE all_sub_d ( inbuf , outbuf, &
2887 PS1,PE1,PS2,PE2,PS3,PE3, &
2888 MS1,ME1,MS2,ME2,MS3,ME3 )
2890 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2891 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2892 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2893 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2895 INTEGER :: i,j,k,n , icurs
2900 outbuf( i, j, k ) = inbuf ( icurs )
2906 END SUBROUTINE all_sub_d
2908 SUBROUTINE all_sub_l ( inbuf , outbuf, &
2909 PS1,PE1,PS2,PE2,PS3,PE3, &
2910 MS1,ME1,MS2,ME2,MS3,ME3 )
2912 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2913 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2914 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2915 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2917 INTEGER :: i,j,k,n , icurs
2922 outbuf( i, j, k ) = inbuf ( icurs )
2928 END SUBROUTINE all_sub_l
2930 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
2931 DS1,DE1,DS2,DE2,DS3,DE3, &
2932 MS1, ME1, MS2, ME2, MS3, ME3 , &
2934 USE module_dm, ONLY : ntasks
2936 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2937 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2938 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2939 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2941 INTEGER :: i,j,k,n , icurs
2945 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2946 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2947 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2948 outbuf( icurs ) = inbuf( i,j,k )
2955 END SUBROUTINE outbuf_2_patch_r
2957 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
2958 DS1,DE1,DS2,DE2,DS3,DE3,&
2960 USE module_dm, ONLY : ntasks
2962 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
2963 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2964 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2966 INTEGER :: i,j,k,n , icurs
2969 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2970 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2971 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2972 outbuf( icurs ) = inbuf( i,j,k )
2979 END SUBROUTINE outbuf_2_patch_i
2981 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
2982 DS1,DE1,DS2,DE2,DS3,DE3,&
2984 USE module_dm, ONLY : ntasks
2986 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
2987 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2988 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2990 INTEGER :: i,j,k,n , icurs
2993 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2994 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2995 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2996 outbuf( icurs ) = inbuf( i,j,k )
3003 END SUBROUTINE outbuf_2_patch_d
3005 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
3006 DS1,DE1,DS2,DE2,DS3,DE3,&
3008 USE module_dm, ONLY : ntasks
3010 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
3011 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
3012 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
3014 INTEGER :: i,j,k,n , icurs
3017 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
3018 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
3019 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
3020 outbuf( icurs ) = inbuf( i,j,k )
3027 END SUBROUTINE outbuf_2_patch_l
3031 !------------------------------------------------------------------
3033 #if ( EM_CORE == 1 && DA_CORE != 1 )
3035 !------------------------------------------------------------------
3037 SUBROUTINE force_domain_em_part2 ( grid, ngrid, config_flags &
3039 #include "dummy_new_args.inc"
3042 USE module_state_description
3043 USE module_domain, ONLY : domain, get_ijk_from_grid
3044 USE module_configure, ONLY : grid_config_rec_type
3045 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, local_communicator, mytask
3046 USE module_comm_nesting_dm, ONLY : halo_force_down_sub
3049 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3050 TYPE(domain), POINTER :: ngrid
3051 #include <dummy_new_decl.inc>
3053 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3054 TYPE (grid_config_rec_type) :: config_flags
3056 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3057 cims, cime, cjms, cjme, ckms, ckme, &
3058 cips, cipe, cjps, cjpe, ckps, ckpe
3059 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3060 nims, nime, njms, njme, nkms, nkme, &
3061 nips, nipe, njps, njpe, nkps, nkpe
3062 INTEGER :: ids, ide, jds, jde, kds, kde, &
3063 ims, ime, jms, jme, kms, kme, &
3064 ips, ipe, jps, jpe, kps, kpe
3065 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7,itrace
3066 REAL dummy_xs, dummy_xe, dummy_ys, dummy_ye
3068 CALL get_ijk_from_grid ( grid , &
3069 cids, cide, cjds, cjde, ckds, ckde, &
3070 cims, cime, cjms, cjme, ckms, ckme, &
3071 cips, cipe, cjps, cjpe, ckps, ckpe )
3072 CALL get_ijk_from_grid ( ngrid , &
3073 nids, nide, njds, njde, nkds, nkde, &
3074 nims, nime, njms, njme, nkms, nkme, &
3075 nips, nipe, njps, njpe, nkps, nkpe )
3077 nlev = ckde - ckds + 1
3079 #include "nest_interpdown_unpack.inc"
3081 CALL get_ijk_from_grid ( grid , &
3082 ids, ide, jds, jde, kds, kde, &
3083 ims, ime, jms, jme, kms, kme, &
3084 ips, ipe, jps, jpe, kps, kpe )
3086 #include "HALO_FORCE_DOWN.inc"
3088 ! code here to interpolate the data into the nested domain
3089 # include "nest_forcedown_interp.inc"
3092 END SUBROUTINE force_domain_em_part2
3094 !------------------------------------------------------------------
3096 SUBROUTINE interp_domain_em_part1 ( grid, intermediate_grid, ngrid, config_flags &
3098 #include "dummy_new_args.inc"
3101 USE module_state_description
3102 USE module_domain, ONLY : domain, get_ijk_from_grid
3103 USE module_configure, ONLY : grid_config_rec_type
3104 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
3105 mytask, get_dm_max_halo_width
3109 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3110 TYPE(domain), POINTER :: intermediate_grid
3111 TYPE(domain), POINTER :: ngrid
3112 #include <dummy_new_decl.inc>
3114 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3115 INTEGER iparstrt,jparstrt,sw
3116 TYPE (grid_config_rec_type) :: config_flags
3118 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3119 cims, cime, cjms, cjme, ckms, ckme, &
3120 cips, cipe, cjps, cjpe, ckps, ckpe
3121 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
3122 iims, iime, ijms, ijme, ikms, ikme, &
3123 iips, iipe, ijps, ijpe, ikps, ikpe
3124 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3125 nims, nime, njms, njme, nkms, nkme, &
3126 nips, nipe, njps, njpe, nkps, nkpe
3128 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3130 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
3131 INTEGER thisdomain_max_halo_width
3132 INTEGER local_comm, myproc, nproc
3134 CALL wrf_get_dm_communicator ( local_comm )
3135 CALL wrf_get_myproc( myproc )
3136 CALL wrf_get_nproc( nproc )
3138 CALL get_ijk_from_grid ( grid , &
3139 cids, cide, cjds, cjde, ckds, ckde, &
3140 cims, cime, cjms, cjme, ckms, ckme, &
3141 cips, cipe, cjps, cjpe, ckps, ckpe )
3142 CALL get_ijk_from_grid ( intermediate_grid , &
3143 iids, iide, ijds, ijde, ikds, ikde, &
3144 iims, iime, ijms, ijme, ikms, ikme, &
3145 iips, iipe, ijps, ijpe, ikps, ikpe )
3146 CALL get_ijk_from_grid ( ngrid , &
3147 nids, nide, njds, njde, nkds, nkde, &
3148 nims, nime, njms, njme, nkms, nkme, &
3149 nips, nipe, njps, njpe, nkps, nkpe )
3151 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
3152 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3153 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3154 CALL nl_get_shw ( intermediate_grid%id, sw )
3155 icoord = iparstrt - sw
3156 jcoord = jparstrt - sw
3157 idim_cd = iide - iids + 1
3158 jdim_cd = ijde - ijds + 1
3160 nlev = ckde - ckds + 1
3162 ! get max_halo_width for the parent. It may be smaller if the parent is the MOAD (mother of all domains)
3163 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
3165 #include "nest_interpdown_pack.inc"
3167 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
3170 END SUBROUTINE interp_domain_em_part1
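! interp_domain_em_part1 runs against the parent and intermediate grids: it packs the
! parent data covering the nest (nest_interpdown_pack.inc) and broadcasts the resulting
! messages.  The matching unpack, halo exchange and the actual interpolation onto the
! nest are done by interp_domain_em_part2 below.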
3172 !------------------------------------------------------------------
3174 SUBROUTINE interp_domain_em_part2 ( grid, ngrid, config_flags &
3176 #include "dummy_new_args.inc"
3179 USE module_state_description
3180 USE module_domain, ONLY : domain, get_ijk_from_grid
3181 USE module_configure, ONLY : grid_config_rec_type
3182 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, &
3183 mytask, get_dm_max_halo_width
3184 USE module_comm_nesting_dm, ONLY : halo_interp_down_sub
3187 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3188 TYPE(domain), POINTER :: ngrid
3189 #include <dummy_new_decl.inc>
3191 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3192 TYPE (grid_config_rec_type) :: config_flags
3194 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3195 cims, cime, cjms, cjme, ckms, ckme, &
3196 cips, cipe, cjps, cjpe, ckps, ckpe
3197 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3198 nims, nime, njms, njme, nkms, nkme, &
3199 nips, nipe, njps, njpe, nkps, nkpe
3200 INTEGER :: ids, ide, jds, jde, kds, kde, &
3201 ims, ime, jms, jme, kms, kme, &
3202 ips, ipe, jps, jpe, kps, kpe
3204 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3208 INTEGER thisdomain_max_halo_width
3210 CALL get_ijk_from_grid ( grid , &
3211 cids, cide, cjds, cjde, ckds, ckde, &
3212 cims, cime, cjms, cjme, ckms, ckme, &
3213 cips, cipe, cjps, cjpe, ckps, ckpe )
3214 CALL get_ijk_from_grid ( ngrid , &
3215 nids, nide, njds, njde, nkds, nkde, &
3216 nims, nime, njms, njme, nkms, nkme, &
3217 nips, nipe, njps, njpe, nkps, nkpe )
3219 nlev = ckde - ckds + 1
3221 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
3223 #include "nest_interpdown_unpack.inc"
3225 CALL get_ijk_from_grid ( grid , &
3226 ids, ide, jds, jde, kds, kde, &
3227 ims, ime, jms, jme, kms, kme, &
3228 ips, ipe, jps, jpe, kps, kpe )
3230 #include "HALO_INTERP_DOWN.inc"
3232 # include "nest_interpdown_interp.inc"
3235 END SUBROUTINE interp_domain_em_part2
3237 !------------------------------------------------------------------
3239 SUBROUTINE feedback_nest_prep ( grid, config_flags &
3241 #include "dummy_new_args.inc"
3244 USE module_state_description
3245 USE module_domain, ONLY : domain, get_ijk_from_grid
3246 USE module_configure, ONLY : grid_config_rec_type
3247 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask
3248 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
3251 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3252 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; carries the vertical
3253 ! dimensions of soil temperature, moisture, etc.,
3254 ! and of the soil categories
3255 #include <dummy_new_decl.inc>
3257 INTEGER :: ids, ide, jds, jde, kds, kde, &
3258 ims, ime, jms, jme, kms, kme, &
3259 ips, ipe, jps, jpe, kps, kpe
3261 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3263 INTEGER :: idum1, idum2
3266 CALL get_ijk_from_grid ( grid , &
3267 ids, ide, jds, jde, kds, kde, &
3268 ims, ime, jms, jme, kms, kme, &
3269 ips, ipe, jps, jpe, kps, kpe )
3272 #include "HALO_INTERP_UP.inc"
3275 END SUBROUTINE feedback_nest_prep
3277 !------------------------------------------------------------------
3279 SUBROUTINE feedback_domain_em_part1 ( grid, ngrid, config_flags &
3281 #include "dummy_new_args.inc"
3284 USE module_state_description
3285 USE module_domain, ONLY : domain, get_ijk_from_grid
3286 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
3287 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3288 ipe_save, jpe_save, ips_save, jps_save
3292 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3293 TYPE(domain), POINTER :: ngrid
3294 #include <dummy_new_decl.inc>
3296 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3297 TYPE(domain), POINTER :: xgrid
3298 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3300 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3301 cims, cime, cjms, cjme, ckms, ckme, &
3302 cips, cipe, cjps, cjpe, ckps, ckpe
3303 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3304 nims, nime, njms, njme, nkms, nkme, &
3305 nips, nipe, njps, njpe, nkps, nkpe
3307 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3309 INTEGER local_comm, myproc, nproc, idum1, idum2
3310 INTEGER thisdomain_max_halo_width
3313 SUBROUTINE feedback_nest_prep ( grid, config_flags &
3315 #include "dummy_new_args.inc"
3318 USE module_state_description
3319 USE module_domain, ONLY : domain
3320 USE module_configure, ONLY : grid_config_rec_type
3322 TYPE (grid_config_rec_type) :: config_flags
3323 TYPE(domain), TARGET :: grid
3324 #include <dummy_new_decl.inc>
3325 END SUBROUTINE feedback_nest_prep
3329 CALL wrf_get_dm_communicator ( local_comm )
3330 CALL wrf_get_myproc( myproc )
3331 CALL wrf_get_nproc( nproc )
3335 CALL get_ijk_from_grid ( grid , &
3336 cids, cide, cjds, cjde, ckds, ckde, &
3337 cims, cime, cjms, cjme, ckms, ckme, &
3338 cips, cipe, cjps, cjpe, ckps, ckpe )
3340 CALL get_ijk_from_grid ( ngrid , &
3341 nids, nide, njds, njde, nkds, nkde, &
3342 nims, nime, njms, njme, nkms, nkme, &
3343 nips, nipe, njps, njpe, nkps, nkpe )
3345 nlev = ckde - ckds + 1
3347 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
3348 jps_save = ngrid%j_parent_start
3349 ipe_save = ngrid%i_parent_start + (nide-nids+1) / ngrid%parent_grid_ratio - 1
3350 jpe_save = ngrid%j_parent_start + (njde-njds+1) / ngrid%parent_grid_ratio - 1
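! For example (illustrative numbers only): a nest with i_parent_start = 10 that is
! 91 staggered points wide with parent_grid_ratio = 3 covers 91/3 = 30 coarse cells,
! so ipe_save = 10 + 30 - 1 = 39 on the parent grid.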
3352 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
3353 ! in a separate routine because the HALOs need the data to be dereferenced from the
3354 ! grid data structure and, in this routine, the dereferenced fields are related to
3355 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
3356 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
3357 ! to point to intermediate domain.
3359 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3360 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3364 CALL feedback_nest_prep ( grid, nconfig_flags &
3366 #include "actual_new_args.inc"
3370 ! put things back so grid is intermediate grid
3373 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3375 ! "interp" (basically copy) ngrid onto intermediate grid
3377 #include "nest_feedbackup_interp.inc"
3380 END SUBROUTINE feedback_domain_em_part1
3382 !------------------------------------------------------------------
3384 SUBROUTINE feedback_domain_em_part2 ( grid, intermediate_grid, ngrid , config_flags &
3386 #include "dummy_new_args.inc"
3389 USE module_state_description
3390 USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
3391 USE module_configure, ONLY : grid_config_rec_type, model_config_rec
3392 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3393 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3394 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
3399 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3400 TYPE(domain), POINTER :: intermediate_grid
3401 TYPE(domain), POINTER :: ngrid
3403 #include <dummy_new_decl.inc>
3405 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3406 TYPE (grid_config_rec_type) :: config_flags
3408 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3409 cims, cime, cjms, cjme, ckms, ckme, &
3410 cips, cipe, cjps, cjpe, ckps, ckpe
3411 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3412 nims, nime, njms, njme, nkms, nkme, &
3413 nips, nipe, njps, njpe, nkps, nkpe
3414 INTEGER :: ids, ide, jds, jde, kds, kde, &
3415 ims, ime, jms, jme, kms, kme, &
3416 ips, ipe, jps, jpe, kps, kpe
3418 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3420 INTEGER icoord, jcoord, idim_cd, jdim_cd
3421 INTEGER local_comm, myproc, nproc
3422 INTEGER iparstrt, jparstrt, sw, thisdomain_max_halo_width
3425 character*256 :: timestr
3428 LOGICAL, EXTERNAL :: cd_feedback_mask
3430 ! On entry to this routine,
3431 ! "grid" refers to the parent domain
3432 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
3433 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
3434 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
3436 ! The way the c and n dimensions are set below looks backwards, but from the point
3437 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
3438 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
3439 ! represents the intermediate domain. This backwards appearance should be fixed in the gen_comms.c
3440 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
3441 ! sign that says "DIP" than fix the dip, at this point it was easier just to write this comment. JM
3445 CALL domain_clock_get( grid, current_timestr=timestr )
3447 CALL get_ijk_from_grid ( intermediate_grid , &
3448 cids, cide, cjds, cjde, ckds, ckde, &
3449 cims, cime, cjms, cjme, ckms, ckme, &
3450 cips, cipe, cjps, cjpe, ckps, ckpe )
3451 CALL get_ijk_from_grid ( grid , &
3452 nids, nide, njds, njde, nkds, nkde, &
3453 nims, nime, njms, njme, nkms, nkme, &
3454 nips, nipe, njps, njpe, nkps, nkpe )
3456 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3457 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3458 CALL nl_get_shw ( intermediate_grid%id, sw )
3459 icoord = iparstrt - sw
3460 jcoord = jparstrt - sw
3461 idim_cd = cide - cids + 1
3462 jdim_cd = cjde - cjds + 1
3464 nlev = ckde - ckds + 1
3466 CALL get_dm_max_halo_width ( grid%id , thisdomain_max_halo_width )
3468 #include "nest_feedbackup_pack.inc"
3470 CALL wrf_get_dm_communicator ( local_comm )
3471 CALL wrf_get_myproc( myproc )
3472 CALL wrf_get_nproc( nproc )
3474 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
3476 #define NEST_INFLUENCE(A,B) A = B
3477 #include "nest_feedbackup_unpack.inc"
3479 ! smooth coarse grid
3480 CALL get_ijk_from_grid ( ngrid, &
3481 nids, nide, njds, njde, nkds, nkde, &
3482 nims, nime, njms, njme, nkms, nkme, &
3483 nips, nipe, njps, njpe, nkps, nkpe )
3484 CALL get_ijk_from_grid ( grid , &
3485 ids, ide, jds, jde, kds, kde, &
3486 ims, ime, jms, jme, kms, kme, &
3487 ips, ipe, jps, jpe, kps, kpe )
3489 #include "HALO_INTERP_UP.inc"
3491 CALL get_ijk_from_grid ( grid , &
3492 cids, cide, cjds, cjde, ckds, ckde, &
3493 cims, cime, cjms, cjme, ckms, ckme, &
3494 cips, cipe, cjps, cjpe, ckps, ckpe )
3496 #include "nest_feedbackup_smooth.inc"
3499 END SUBROUTINE feedback_domain_em_part2
3502 #if ( NMM_CORE == 1 && NMM_NEST == 1 )
3503 !==============================================================================
3504 ! NMM nesting infrastructure extended from EM core. This is gopal's doing.
3505 !==============================================================================
3507 SUBROUTINE interp_domain_nmm_part1 ( grid, intermediate_grid, ngrid, config_flags &
3509 #include "dummy_new_args.inc"
3512 USE module_state_description
3513 USE module_domain, ONLY : domain, get_ijk_from_grid
3514 USE module_configure, ONLY : grid_config_rec_type
3515 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3516 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3520 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3521 TYPE(domain), POINTER :: intermediate_grid
3522 TYPE(domain), POINTER :: ngrid
3523 #include <dummy_new_decl.inc>
3525 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3526 INTEGER iparstrt,jparstrt,sw
3527 TYPE (grid_config_rec_type) :: config_flags
3529 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3530 cims, cime, cjms, cjme, ckms, ckme, &
3531 cips, cipe, cjps, cjpe, ckps, ckpe
3532 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
3533 iims, iime, ijms, ijme, ikms, ikme, &
3534 iips, iipe, ijps, ijpe, ikps, ikpe
3535 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3536 nims, nime, njms, njme, nkms, nkme, &
3537 nips, nipe, njps, njpe, nkps, nkpe
3539 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3541 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
3542 INTEGER local_comm, myproc, nproc
3543 INTEGER thisdomain_max_halo_width
3545 CALL wrf_get_dm_communicator ( local_comm )
3546 CALL wrf_get_myproc( myproc )
3547 CALL wrf_get_nproc( nproc )
3550 !#include <scalar_derefs.inc>
3552 CALL get_ijk_from_grid ( grid , &
3553 cids, cide, cjds, cjde, ckds, ckde, &
3554 cims, cime, cjms, cjme, ckms, ckme, &
3555 cips, cipe, cjps, cjpe, ckps, ckpe )
3556 CALL get_ijk_from_grid ( intermediate_grid , &
3557 iids, iide, ijds, ijde, ikds, ikde, &
3558 iims, iime, ijms, ijme, ikms, ikme, &
3559 iips, iipe, ijps, ijpe, ikps, ikpe )
3560 CALL get_ijk_from_grid ( ngrid , &
3561 nids, nide, njds, njde, nkds, nkde, &
3562 nims, nime, njms, njme, nkms, nkme, &
3563 nips, nipe, njps, njpe, nkps, nkpe )
3565 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
3566 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3567 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3568 CALL nl_get_shw ( intermediate_grid%id, sw )
3569 icoord = iparstrt - sw
3570 jcoord = jparstrt - sw
3571 idim_cd = iide - iids + 1
3572 jdim_cd = ijde - ijds + 1
3574 nlev = ckde - ckds + 1
3576 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
3577 #include "nest_interpdown_pack.inc"
3579 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
3582 !#include <scalar_derefs.inc>
3584 END SUBROUTINE interp_domain_nmm_part1
3586 !------------------------------------------------------------------
3588 SUBROUTINE interp_domain_nmm_part2 ( grid, ngrid, config_flags &
3590 #include "dummy_new_args.inc"
3593 USE module_state_description
3594 USE module_domain, ONLY : domain, get_ijk_from_grid
3595 USE module_configure, ONLY : grid_config_rec_type
3596 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3597 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3598 USE module_comm_nesting_dm, ONLY : halo_interp_down_sub
3601 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3602 TYPE(domain), POINTER :: ngrid
3603 #include <dummy_new_decl.inc>
3605 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3606 TYPE (grid_config_rec_type) :: config_flags
3608 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3609 cims, cime, cjms, cjme, ckms, ckme, &
3610 cips, cipe, cjps, cjpe, ckps, ckpe
3611 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3612 nims, nime, njms, njme, nkms, nkme, &
3613 nips, nipe, njps, njpe, nkps, nkpe
3614 INTEGER :: ids, ide, jds, jde, kds, kde, &
3615 ims, ime, jms, jme, kms, kme, &
3616 ips, ipe, jps, jpe, kps, kpe
3618 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3623 !#ifdef DEREF_KLUDGE
3624 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3625 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3626 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3627 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3629 #include "deref_kludge.h"
3632 !#include <scalar_derefs.inc>
3633 CALL get_ijk_from_grid ( grid , &
3634 cids, cide, cjds, cjde, ckds, ckde, &
3635 cims, cime, cjms, cjme, ckms, ckme, &
3636 cips, cipe, cjps, cjpe, ckps, ckpe )
3637 CALL get_ijk_from_grid ( ngrid , &
3638 nids, nide, njds, njde, nkds, nkde, &
3639 nims, nime, njms, njme, nkms, nkme, &
3640 nips, nipe, njps, njpe, nkps, nkpe )
3642 nlev = ckde - ckds + 1
3644 #include "nest_interpdown_unpack.inc"
3646 CALL get_ijk_from_grid ( grid , &
3647 ids, ide, jds, jde, kds, kde, &
3648 ims, ime, jms, jme, kms, kme, &
3649 ips, ipe, jps, jpe, kps, kpe )
3651 #include "HALO_INTERP_DOWN.inc"
3653 #include "nest_interpdown_interp.inc"
3656 !#include <scalar_derefs.inc>
3659 END SUBROUTINE interp_domain_nmm_part2
3661 !------------------------------------------------------------------
3663 SUBROUTINE force_domain_nmm_part1 ( grid, intermediate_grid, config_flags &
3665 #include "dummy_new_args.inc"
3668 USE module_state_description
3669 USE module_domain, ONLY : domain, get_ijk_from_grid
3670 USE module_configure, ONLY : grid_config_rec_type
3671 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3672 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3675 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3676 TYPE(domain), POINTER :: intermediate_grid
3677 #include <dummy_new_decl.inc>
3679 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3680 TYPE (grid_config_rec_type) :: config_flags
3682 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3683 cims, cime, cjms, cjme, ckms, ckme, &
3684 cips, cipe, cjps, cjpe, ckps, ckpe
3685 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3686 nims, nime, njms, njme, nkms, nkme, &
3687 nips, nipe, njps, njpe, nkps, nkpe
3689 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3692 !#include <scalar_derefs.inc>
3694 CALL get_ijk_from_grid ( grid , &
3695 cids, cide, cjds, cjde, ckds, ckde, &
3696 cims, cime, cjms, cjme, ckms, ckme, &
3697 cips, cipe, cjps, cjpe, ckps, ckpe )
3699 CALL get_ijk_from_grid ( intermediate_grid , &
3700 nids, nide, njds, njde, nkds, nkde, &
3701 nims, nime, njms, njme, nkms, nkme, &
3702 nips, nipe, njps, njpe, nkps, nkpe )
3704 nlev = ckde - ckds + 1
3706 #include "nest_forcedown_pack.inc"
3708 ! WRITE(0,*)'I have completed PACKING of BCs data successfully'
3711 !#include <scalar_derefs.inc>
3713 END SUBROUTINE force_domain_nmm_part1
3715 !==============================================================================================
3717 SUBROUTINE force_domain_nmm_part2 ( grid, ngrid, config_flags &
3719 #include "dummy_new_args.inc"
3722 USE module_state_description
3723 USE module_domain, ONLY : domain, get_ijk_from_grid
3724 USE module_configure, ONLY : grid_config_rec_type
3725 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3726 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3727 USE module_comm_dm, ONLY : HALO_NMM_FORCE_DOWN1_sub
3730 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3731 TYPE(domain), POINTER :: ngrid
3732 #include <dummy_new_decl.inc>
3734 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3735 TYPE (grid_config_rec_type) :: config_flags
3737 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3738 cims, cime, cjms, cjme, ckms, ckme, &
3739 cips, cipe, cjps, cjpe, ckps, ckpe
3740 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3741 nims, nime, njms, njme, nkms, nkme, &
3742 nips, nipe, njps, njpe, nkps, nkpe
3743 INTEGER :: ids, ide, jds, jde, kds, kde, &
3744 ims, ime, jms, jme, kms, kme, &
3745 ips, ipe, jps, jpe, kps, kpe
3747 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3748 REAL dummy_xs, dummy_xe, dummy_ys, dummy_ye
3752 !#ifdef DEREF_KLUDGE
3753 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3754 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3755 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3756 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3758 #include "deref_kludge.h"
3761 !#include <scalar_derefs.inc>
3763 CALL get_ijk_from_grid ( grid , &
3764 cids, cide, cjds, cjde, ckds, ckde, &
3765 cims, cime, cjms, cjme, ckms, ckme, &
3766 cips, cipe, cjps, cjpe, ckps, ckpe )
3767 CALL get_ijk_from_grid ( ngrid , &
3768 nids, nide, njds, njde, nkds, nkde, &
3769 nims, nime, njms, njme, nkms, nkme, &
3770 nips, nipe, njps, njpe, nkps, nkpe )
3772 nlev = ckde - ckds + 1
3774 #include "nest_interpdown_unpack.inc"
3776 CALL get_ijk_from_grid ( grid , &
3777 ids, ide, jds, jde, kds, kde, &
3778 ims, ime, jms, jme, kms, kme, &
3779 ips, ipe, jps, jpe, kps, kpe )
3781 #include "HALO_NMM_FORCE_DOWN1.inc"
3783 ! code here to interpolate the data into the nested domain
3784 #include "nest_forcedown_interp.inc"
3787 !#include <scalar_derefs.inc>
3790 END SUBROUTINE force_domain_nmm_part2
3792 !================================================================================
3794 ! This routine exists only to call a halo on a domain (the nest).
3795 ! It gets called from feedback_domain_nmm_part1, below. This is needed
3796 ! because the halo code expects the fields being exchanged to have
3797 ! been dereferenced from the grid data structure, but in feedback_domain_nmm_part1
3798 ! the grid data structure points to the coarse domain, not the nest.
3799 ! And we want the halo exchange on the nest, so that the code in
3800 ! em_nest_feedbackup_interp.inc will work correctly on multiple processors. JM 20040308
3803 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3805 #include "dummy_new_args.inc"
3808 USE module_state_description
3809 USE module_domain, ONLY : domain, get_ijk_from_grid
3810 USE module_configure, ONLY : grid_config_rec_type
3811 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3812 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3813 USE module_comm_dm, ONLY : HALO_NMM_WEIGHTS_sub
3816 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3817 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; carries the vertical
3818 ! dimensions of soil temperature, moisture, etc.,
3819 ! and of the soil categories
3820 #include <dummy_new_decl.inc>
3822 INTEGER :: ids, ide, jds, jde, kds, kde, &
3823 ims, ime, jms, jme, kms, kme, &
3824 ips, ipe, jps, jpe, kps, kpe
3826 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3828 INTEGER :: idum1, idum2
3831 !#ifdef DEREF_KLUDGE
3832 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3833 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3834 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3835 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3837 #include "deref_kludge.h"
3840 !#include <scalar_derefs.inc>
3842 CALL get_ijk_from_grid ( grid , &
3843 ids, ide, jds, jde, kds, kde, &
3844 ims, ime, jms, jme, kms, kme, &
3845 ips, ipe, jps, jpe, kps, kpe )
3848 #include "HALO_NMM_WEIGHTS.inc"
3852 !#include <scalar_derefs.inc>
3854 END SUBROUTINE feedback_nest_prep_nmm
3856 !------------------------------------------------------------------
3858 SUBROUTINE feedback_domain_nmm_part1 ( grid, ngrid, config_flags &
3860 #include "dummy_new_args.inc"
3863 USE module_state_description
3864 USE module_domain, ONLY : domain, get_ijk_from_grid
3865 USE module_configure, ONLY : grid_config_rec_type, model_config_rec, model_to_grid_config_rec
3866 USE module_dm, ONLY : ntasks, ntasks_x, ntasks_y, itrace, local_communicator, mytask, &
3867 ipe_save, jpe_save, ips_save, jps_save, get_dm_max_halo_width
3870 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3871 TYPE(domain), POINTER :: ngrid
3872 #include <dummy_new_decl.inc>
3874 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3875 TYPE(domain), POINTER :: xgrid
3876 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3878 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3879 cims, cime, cjms, cjme, ckms, ckme, &
3880 cips, cipe, cjps, cjpe, ckps, ckpe
3881 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3882 nims, nime, njms, njme, nkms, nkme, &
3883 nips, nipe, njps, njpe, nkps, nkpe
3885 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
3887 INTEGER local_comm, myproc, nproc, idum1, idum2
3889 !#ifdef DEREF_KLUDGE
3890 !! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3891 ! INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3892 ! INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3893 ! INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3897 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3899 #include "dummy_new_args.inc"
3902 USE module_state_description
3903 USE module_domain, ONLY : domain
3904 USE module_configure, ONLY : grid_config_rec_type
3906 TYPE (grid_config_rec_type) :: config_flags
3907 TYPE(domain), TARGET :: grid
3908 #include <dummy_new_decl.inc>
3909 END SUBROUTINE feedback_nest_prep_nmm
3913 !#include <scalar_derefs.inc>
3915 CALL wrf_get_dm_communicator ( local_comm )
3916 CALL wrf_get_myproc( myproc )
3917 CALL wrf_get_nproc( nproc )
3922 CALL get_ijk_from_grid ( grid , &
3923 cids, cide, cjds, cjde, ckds, ckde, &
3924 cims, cime, cjms, cjme, ckms, ckme, &
3925 cips, cipe, cjps, cjpe, ckps, ckpe )
3927 CALL get_ijk_from_grid ( ngrid , &
3928 nids, nide, njds, njde, nkds, nkde, &
3929 nims, nime, njms, njme, nkms, nkme, &
3930 nips, nipe, njps, njpe, nkps, nkpe )
3932 nlev = ckde - ckds + 1
3934 ips_save = ngrid%i_parent_start ! +1 not used in ipe_save & jpe_save
3935 jps_save = ngrid%j_parent_start ! because of one extra namelist point
3936 ipe_save = ngrid%i_parent_start + (nide-nids) / ngrid%parent_grid_ratio
3937 jpe_save = ngrid%j_parent_start + (njde-njds) / ngrid%parent_grid_ratio
3939 ! feedback_nest_prep_nmm invokes a halo exchange on the ngrid. It is done this way
3940 ! in a separate routine because the HALOs need the data to be dereferenced from the
3941 ! grid data structure and, in this routine, the dereferenced fields are related to
3942 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
3943 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep_nmm, then restore grid
3944 ! to point to intermediate domain.
3946 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3947 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3950 #include "deref_kludge.h"
3951 CALL feedback_nest_prep_nmm ( grid, config_flags &
3953 #include "actual_new_args.inc"
3957 ! put things back so grid is intermediate grid
3960 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3962 ! "interp" (basically copy) ngrid onto intermediate grid
3964 #include "nest_feedbackup_interp.inc"
3967 !#include <scalar_derefs.inc>
3969 END SUBROUTINE feedback_domain_nmm_part1
3971 !------------------------------------------------------------------
3973 SUBROUTINE feedback_domain_nmm_part2 ( grid, intermediate_grid, ngrid , config_flags &
3975 #include "dummy_new_args.inc"
3978 USE module_state_description
3979 USE module_domain, ONLY : domain, domain_clock_get, get_ijk_from_grid
3980 USE module_configure, ONLY : grid_config_rec_type
3981 USE module_dm, ONLY : get_dm_max_halo_width, ips_save, ipe_save, &
3982 jps_save, jpe_save, ntasks, mytask, ntasks_x, ntasks_y, &
3983 local_communicator, itrace
3984 USE module_comm_nesting_dm, ONLY : halo_interp_up_sub
3989 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3990 TYPE(domain), POINTER :: intermediate_grid
3991 TYPE(domain), POINTER :: ngrid
3993 #include <dummy_new_decl.inc>
3995 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3996 TYPE (grid_config_rec_type) :: config_flags
3998 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3999 cims, cime, cjms, cjme, ckms, ckme, &
4000 cips, cipe, cjps, cjpe, ckps, ckpe
4001 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
4002 nims, nime, njms, njme, nkms, nkme, &
4003 nips, nipe, njps, njpe, nkps, nkpe
4004 INTEGER :: ids, ide, jds, jde, kds, kde, &
4005 ims, ime, jms, jme, kms, kme, &
4006 ips, ipe, jps, jpe, kps, kpe
4008 INTEGER idim1,idim2,idim3,idim4,idim5,idim6,idim7
4010 INTEGER icoord, jcoord, idim_cd, jdim_cd
4011 INTEGER local_comm, myproc, nproc
4012 INTEGER iparstrt, jparstrt, sw
4013 INTEGER thisdomain_max_halo_width
4015 character*256 :: timestr
4019 LOGICAL, EXTERNAL :: cd_feedback_mask
4020 LOGICAL, EXTERNAL :: cd_feedback_mask_v
4023 !#include <scalar_derefs.inc>
4025 ! On entry to this routine,
4026 ! "grid" refers to the parent domain
4027 ! "intermediate_grid" refers to the local copy of the parent domain that overlies this patch of the nest
4028 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
4029 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
4031 ! The way the c and n dimensions are set below looks backwards, but from the point
4032 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
4033 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
4034 ! represents the intermediate domain. This backwards naming should be fixed in the gen_comms.c
4035 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
4036 ! sign that says "DIP" than to fix the dip, at this point it was easier just to write this comment. JM
4039 nest_influence = 0.5
4040 #define NEST_INFLUENCE(A,B) A = nest_influence*(B) + (1.0-nest_influence)*(A)
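! For illustration: NEST_INFLUENCE blends the fed-back nest value B into the parent value A
! with weight nest_influence. With nest_influence = 0.5 and, say, A = 280.0 and B = 284.0,
! the macro expands to
!   A = 0.5*(284.0) + (1.0-0.5)*(280.0)
! giving A = 282.0, the simple average of the parent and nest values.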
4043 CALL domain_clock_get( grid, current_timestr=timestr )
4045 CALL get_ijk_from_grid ( intermediate_grid , &
4046 cids, cide, cjds, cjde, ckds, ckde, &
4047 cims, cime, cjms, cjme, ckms, ckme, &
4048 cips, cipe, cjps, cjpe, ckps, ckpe )
4049 CALL get_ijk_from_grid ( grid , &
4050 nids, nide, njds, njde, nkds, nkde, &
4051 nims, nime, njms, njme, nkms, nkme, &
4052 nips, nipe, njps, njpe, nkps, nkpe )
4054 nide = nide - 1 !dusan
4055 njde = njde - 1 !dusan
4057 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
4058 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
4059 CALL nl_get_shw ( intermediate_grid%id, sw )
4060 icoord = iparstrt - sw
4061 jcoord = jparstrt - sw
4062 idim_cd = cide - cids + 1
4063 jdim_cd = cjde - cjds + 1
4065 nlev = ckde - ckds + 1
4067 CALL get_dm_max_halo_width ( ngrid%id , thisdomain_max_halo_width )
4068 #include "nest_feedbackup_pack.inc"
4070 CALL wrf_get_dm_communicator ( local_comm )
4071 CALL wrf_get_myproc( myproc )
4072 CALL wrf_get_nproc( nproc )
4074 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
4076 #include "nest_feedbackup_unpack.inc"
4079 ! smooth coarse grid
4081 CALL get_ijk_from_grid ( ngrid, &
4082 nids, nide, njds, njde, nkds, nkde, &
4083 nims, nime, njms, njme, nkms, nkme, &
4084 nips, nipe, njps, njpe, nkps, nkpe )
4085 CALL get_ijk_from_grid ( grid , &
4086 ids, ide, jds, jde, kds, kde, &
4087 ims, ime, jms, jme, kms, kme, &
4088 ips, ipe, jps, jpe, kps, kpe )
4090 #include "HALO_INTERP_UP.inc"
4092 CALL get_ijk_from_grid ( grid , &
4093 cids, cide, cjds, cjde, ckds, ckde, &
4094 cims, cime, cjms, cjme, ckms, ckme, &
4095 cips, cipe, cjps, cjpe, ckps, ckpe )
4097 #include "nest_feedbackup_smooth.inc"
4100 !#include <scalar_derefs.inc>
4102 END SUBROUTINE feedback_domain_nmm_part2
4104 !=================================================================================
4105 ! End of gopal's doing
4106 !=================================================================================
4109 !------------------------------------------------------------------
4111 SUBROUTINE wrf_gatherv_real (Field, field_ofst, &
4112 my_count , & ! sendcount
4113 globbuf, glob_ofst , & ! recvbuf
4114 counts , & ! recvcounts
4117 communicator , & ! communicator
4119 USE module_dm, ONLY : getrealmpitype
4121 INTEGER field_ofst, glob_ofst
4122 INTEGER my_count, communicator, root, ierr
4123 INTEGER , DIMENSION(*) :: counts, displs
4124 REAL, DIMENSION(*) :: Field, globbuf
4128 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
4129 my_count , & ! sendcount
4130 getrealmpitype() , & ! sendtype
4131 globbuf( glob_ofst ) , & ! recvbuf
4132 counts , & ! recvcounts
4134 getrealmpitype() , & ! recvtype
4136 communicator , & ! communicator
4140 END SUBROUTINE wrf_gatherv_real
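! A minimal usage sketch, assuming the full wrf_gatherv_real argument list is
! (Field, field_ofst, my_count, globbuf, glob_ofst, counts, displs, root, communicator, ierr),
! as the internal mpi_gatherv call suggests, and that nproc and local_comm have already been
! obtained from wrf_get_nproc and wrf_get_dm_communicator; my_count here is a hypothetical size.
!
!   INTEGER :: p, my_count, root, ierr
!   INTEGER, ALLOCATABLE :: counts(:), displs(:)
!   REAL, ALLOCATABLE :: field(:), globbuf(:)
!   root = 0
!   my_count = 100                                 ! this task's contribution (hypothetical)
!   ALLOCATE( field(my_count), counts(nproc), displs(nproc) )
!   CALL mpi_allgather( my_count, 1, MPI_INTEGER, counts, 1, MPI_INTEGER, local_comm, ierr )
!   displs(1) = 0                                  ! build 0-based displacements from the counts
!   DO p = 2, nproc
!     displs(p) = displs(p-1) + counts(p-1)
!   END DO
!   ALLOCATE( globbuf( SUM(counts) ) )             ! assembled result, significant on root
!   CALL wrf_gatherv_real( field, 1, my_count, globbuf, 1, counts, displs, root, local_comm, ierr )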
4142 SUBROUTINE wrf_gatherv_double (Field, field_ofst, &
4143 my_count , & ! sendcount
4144 globbuf, glob_ofst , & ! recvbuf
4145 counts , & ! recvcounts
4148 communicator , & ! communicator
4152 INTEGER field_ofst, glob_ofst
4153 INTEGER my_count, communicator, root, ierr
4154 INTEGER , DIMENSION(*) :: counts, displs
4155 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
4156 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
4157 ! for having this separate routine is so we pass the correct MPI type to mpi_gatherv;
4158 ! if we were not indexing the globbuf and Field arrays it would not even matter.
4159 REAL, DIMENSION(*) :: Field, globbuf
4163 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
4164 my_count , & ! sendcount
4165 MPI_DOUBLE_PRECISION , & ! sendtype
4166 globbuf( glob_ofst ) , & ! recvbuf
4167 counts , & ! recvcounts
4169 MPI_DOUBLE_PRECISION , & ! recvtype
4171 communicator , & ! communicator
4175 END SUBROUTINE wrf_gatherv_double
4177 SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
4178 my_count , & ! sendcount
4179 globbuf, glob_ofst , & ! recvbuf
4180 counts , & ! recvcounts
4183 communicator , & ! communicator
4186 INTEGER field_ofst, glob_ofst
4187 INTEGER my_count, communicator, root, ierr
4188 INTEGER , DIMENSION(*) :: counts, displs
4189 INTEGER, DIMENSION(*) :: Field, globbuf
4193 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
4194 my_count , & ! sendcount
4195 MPI_INTEGER , & ! sendtype
4196 globbuf( glob_ofst ) , & ! recvbuf
4197 counts , & ! recvcounts
4199 MPI_INTEGER , & ! recvtype
4201 communicator , & ! communicator
4205 END SUBROUTINE wrf_gatherv_integer
4208 SUBROUTINE wrf_scatterv_real ( &
4209 globbuf, glob_ofst , & ! sendbuf (scattered from root)
4210 counts , & ! sendcounts
4211 Field, field_ofst, & ! recvbuf
4212 my_count , & ! recvcount
4215 communicator , & ! communicator
4217 USE module_dm, ONLY : getrealmpitype
4219 INTEGER field_ofst, glob_ofst
4220 INTEGER my_count, communicator, root, ierr
4221 INTEGER , DIMENSION(*) :: counts, displs
4222 REAL, DIMENSION(*) :: Field, globbuf
4226 CALL mpi_scatterv( &
4227 globbuf( glob_ofst ) , & ! sendbuf
4228 counts , & ! sendcounts
4230 getrealmpitype() , & ! sendtype
4231 Field( field_ofst ), & ! recvbuf
4232 my_count , & ! recvcount
4233 getrealmpitype() , & ! recvtype
4235 communicator , & ! communicator
4239 END SUBROUTINE wrf_scatterv_real
4241 SUBROUTINE wrf_scatterv_double ( &
4242 globbuf, glob_ofst , & ! sendbuf (scattered from root)
4243 counts , & ! sendcounts
4244 Field, field_ofst, & ! recvbuf
4245 my_count , & ! recvcount
4248 communicator , & ! communicator
4251 INTEGER field_ofst, glob_ofst
4252 INTEGER my_count, communicator, root, ierr
4253 INTEGER , DIMENSION(*) :: counts, displs
4254 REAL, DIMENSION(*) :: Field, globbuf
4257 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
4258 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
4259 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
4260 ! if we were not indexing the globbuf and Field arrays it would not even matter.
4262 CALL mpi_scatterv( &
4263 globbuf( glob_ofst ) , & ! sendbuf
4264 counts , & ! sendcounts
4266 MPI_DOUBLE_PRECISION , & ! sendtype
4267 Field( field_ofst ), & ! recvbuf
4268 my_count , & ! recvcount
4269 MPI_DOUBLE_PRECISION , & ! recvtype
4271 communicator , & ! communicator
4275 END SUBROUTINE wrf_scatterv_double
4277 SUBROUTINE wrf_scatterv_integer ( &
4278 globbuf, glob_ofst , & ! sendbuf (scattered from root)
4279 counts , & ! sendcounts
4280 Field, field_ofst, & ! recvbuf
4281 my_count , & ! recvcount
4284 communicator , & ! communicator
4287 INTEGER field_ofst, glob_ofst
4288 INTEGER my_count, communicator, root, ierr
4289 INTEGER , DIMENSION(*) :: counts, displs
4290 INTEGER, DIMENSION(*) :: Field, globbuf
4294 CALL mpi_scatterv( &
4295 globbuf( glob_ofst ) , & ! sendbuf
4296 counts , & ! sendcounts
4298 MPI_INTEGER , & ! sendtype
4299 Field( field_ofst ), & ! recvbuf
4300 my_count , & ! recvcount
4301 MPI_INTEGER , & ! recvtype
4303 communicator , & ! communicator
4307 END SUBROUTINE wrf_scatterv_integer
4308 ! end new stuff 20070124
4310 SUBROUTINE wrf_dm_gatherv ( v, elemsize , km_s, km_e, wordsz )
4312 INTEGER elemsize, km_s, km_e, wordsz
4314 IF ( wordsz .EQ. DWORDSIZE ) THEN
4315 CALL wrf_dm_gatherv_double(v, elemsize , km_s, km_e)
4316 ELSE
4317 CALL wrf_dm_gatherv_single(v, elemsize , km_s, km_e)
4318 ENDIF
4319 END SUBROUTINE wrf_dm_gatherv
4321 SUBROUTINE wrf_dm_gatherv_double ( v, elemsize , km_s, km_e )
4323 INTEGER elemsize, km_s, km_e
4326 # ifndef USE_MPI_IN_PLACE
4327 REAL*8 v_local((km_e-km_s+1)*elemsize)
4329 INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
4330 INTEGER send_type, myproc, nproc, local_comm, ierr, i
4332 send_type = MPI_DOUBLE_PRECISION
4333 CALL wrf_get_dm_communicator ( local_comm )
4334 CALL wrf_get_nproc( nproc )
4335 CALL wrf_get_myproc( myproc )
4336 ALLOCATE( recvcounts(nproc), displs(nproc) )
4337 i = (km_e-km_s+1)*elemsize
4338 CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr) ;
4340 CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr) ;
4341 # ifdef USE_MPI_IN_PLACE
4342 CALL mpi_allgatherv( MPI_IN_PLACE, &
4344 DO i = 1,elemsize*(km_e-km_s+1)
4345 v_local(i) = v(i+km_s-1)
4347 CALL mpi_allgatherv( v_local, &
4349 (km_e-km_s+1)*elemsize, &
4357 DEALLOCATE(recvcounts)
4361 END SUBROUTINE wrf_dm_gatherv_double
4363 SUBROUTINE wrf_dm_gatherv_single ( v, elemsize , km_s, km_e )
4365 INTEGER elemsize, km_s, km_e
4368 # ifndef USE_MPI_IN_PLACE
4369 REAL*4 v_local((km_e-km_s+1)*elemsize)
4371 INTEGER, DIMENSION(:), ALLOCATABLE :: recvcounts, displs
4372 INTEGER send_type, myproc, nproc, local_comm, ierr, i
4374 send_type = MPI_REAL
4375 CALL wrf_get_dm_communicator ( local_comm )
4376 CALL wrf_get_nproc( nproc )
4377 CALL wrf_get_myproc( myproc )
4378 ALLOCATE( recvcounts(nproc), displs(nproc) )
4379 i = (km_e-km_s+1)*elemsize
4380 CALL mpi_allgather( i,1,MPI_INTEGER,recvcounts,1,MPI_INTEGER,local_comm,ierr) ;
4382 CALL mpi_allgather( i,1,MPI_INTEGER,displs,1,MPI_INTEGER,local_comm,ierr) ;
4383 # ifdef USE_MPI_IN_PLACE
4384 CALL mpi_allgatherv( MPI_IN_PLACE, &
4386 DO i = 1,elemsize*(km_e-km_s+1)
4387 v_local(i) = v(i+km_s-1)
4389 CALL mpi_allgatherv( v_local, &
4391 (km_e-km_s+1)*elemsize, &
4399 DEALLOCATE(recvcounts)
4403 END SUBROUTINE wrf_dm_gatherv_single
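! Note: wrf_dm_gatherv_double and wrf_dm_gatherv_single follow the same pattern. Each task first
! allgathers the per-task element counts and displacements so every rank knows the layout, then a
! single mpi_allgatherv fills each task's copy of v with every task's slab of levels km_s..km_e.
! When USE_MPI_IN_PLACE is defined, MPI_IN_PLACE avoids the intermediate copy into v_local;
! otherwise the local slab is copied into v_local before the collective.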
4405 SUBROUTINE wrf_dm_decomp1d( nt, km_s, km_e )
4407 INTEGER, INTENT(IN) :: nt
4408 INTEGER, INTENT(OUT) :: km_s, km_e
4410 INTEGER nn, nnp, na, nb
4411 INTEGER myproc, nproc
4413 CALL wrf_get_myproc(myproc)
4414 CALL wrf_get_nproc(nproc)
4415 nn = nt / nproc ! minimum number of levels handled by each task
4416 nnp = nn
4417 if ( myproc .lt. mod( nt, nproc ) ) nnp = nnp + 1 ! distribute remainder
4419 na = min( myproc, mod(nt,nproc) ) ! Number of blocks with remainder that precede this one
4420 nb = max( 0, myproc - na ) ! number of blocks without a remainder that precede this one
4421 km_s = na * ( nn+1) + nb * nn ! starting iteration for this task
4422 km_e = km_s + nnp - 1 ! ending iteration for this task
4423 END SUBROUTINE wrf_dm_decomp1d
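! For example, decomposing nt = 10 levels over nproc = 4 tasks gives nn = 2 with a remainder
! of 2, so tasks 0 and 1 handle 3 levels each and tasks 2 and 3 handle 2 each; the resulting
! 0-based ranges are
!   task 0: km_s = 0, km_e = 2
!   task 1: km_s = 3, km_e = 5
!   task 2: km_s = 6, km_e = 7
!   task 3: km_s = 8, km_e = 9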
4426 SUBROUTINE wrf_dm_define_comms ( grid )
4427 USE module_domain, ONLY : domain
4429 TYPE(domain) , INTENT (INOUT) :: grid
4431 END SUBROUTINE wrf_dm_define_comms
4433 SUBROUTINE tfp_message( fname, lno )
4438 WRITE(mess,*)'tfp_message: ',trim(fname),lno
4439 CALL wrf_message(mess)
4440 # ifdef ALLOW_OVERDECOMP
4441 CALL task_for_point_message ! defined in RSL_LITE/task_for_point.c
4442 # else
4443 CALL wrf_error_fatal(mess)
4444 # endif
4446 END SUBROUTINE tfp_message
4448 SUBROUTINE set_dm_debug
4449 USE module_dm, ONLY : dm_debug_flag
4451 dm_debug_flag = .TRUE.
4452 END SUBROUTINE set_dm_debug
4453 SUBROUTINE reset_dm_debug
4454 USE module_dm, ONLY : dm_debug_flag
4456 dm_debug_flag = .FALSE.
4457 END SUBROUTINE reset_dm_debug
4458 SUBROUTINE get_dm_debug ( arg )
4459 USE module_dm, ONLY : dm_debug_flag
4461 LOGICAL arg
4462 arg = dm_debug_flag
4463 END SUBROUTINE get_dm_debug