!TBH: $$$ move this to ../frame?
! This module defines top-level wrf_init(), wrf_run(), and wrf_finalize()
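!
! Illustrative sketch (not part of this excerpt): a minimal driver program
! would call these three entry points in order, much as WRF's own main
! program does.  The driver shown here is an assumed example, not code from
! this module.
!
!    PROGRAM wrf_driver
!       USE module_wrf_top, ONLY : wrf_init, wrf_run, wrf_finalize
!       CALL wrf_init        ! read namelist, allocate/configure head_grid
!       CALL wrf_run         ! integrate head_grid from start to stop time
!       CALL wrf_finalize    ! shut down I/O, communications, time manager
!    END PROGRAM wrf_driver
!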
USE module_driver_constants
USE module_check_a_mundo
USE module_dm, ONLY : wrf_dm_initialize
TYPE (domain) , POINTER :: keep_grid, grid_ptr, null_domain
TYPE (domain) , POINTER :: parent_grid, new_nest
LOGICAL :: a_nest_was_opened
TYPE (grid_config_rec_type), SAVE :: config_flags
INTEGER :: kid, nestid
INTEGER :: number_at_same_level
INTEGER :: time_step_begin_restart
INTEGER :: max_dom , domain_id , fid , oid , idum1 , idum2 , ierr
INTEGER :: debug_level
LOGICAL :: input_from_file
INTEGER, PARAMETER :: configbuflen = 4* CONFIG_BUF_LEN
INTEGER :: configbuf( configbuflen )
LOGICAL , EXTERNAL :: wrf_dm_on_monitor
CHARACTER (LEN=80) :: rstname
CHARACTER (LEN=80) :: message
SUBROUTINE Setup_Timekeeping( grid )
TYPE(domain), POINTER :: grid
END SUBROUTINE Setup_Timekeeping
SUBROUTINE wrf_dfi_write_initialized_state( )
END SUBROUTINE wrf_dfi_write_initialized_state
SUBROUTINE wrf_dfi_startfwd_init( )
END SUBROUTINE wrf_dfi_startfwd_init
SUBROUTINE wrf_dfi_startbck_init( )
END SUBROUTINE wrf_dfi_startbck_init
SUBROUTINE wrf_dfi_bck_init( )
END SUBROUTINE wrf_dfi_bck_init
SUBROUTINE wrf_dfi_fwd_init( )
END SUBROUTINE wrf_dfi_fwd_init
SUBROUTINE wrf_dfi_fst_init( )
END SUBROUTINE wrf_dfi_fst_init
SUBROUTINE wrf_dfi_array_reset ( )
END SUBROUTINE wrf_dfi_array_reset
SUBROUTINE med_nest_initial ( parent , grid , config_flags )
TYPE (domain), POINTER :: grid , parent
TYPE (grid_config_rec_type) config_flags
END SUBROUTINE med_nest_initial
SUBROUTINE wrf_init( no_init1 )
! WRF initialization routine.
LOGICAL, OPTIONAL, INTENT(IN) :: no_init1
INTEGER i, myproc, nproc, hostid, loccomm, ierr, buddcounter, mydevice
INTEGER, ALLOCATABLE :: hostids(:), budds(:)
CHARACTER*512 hostname
integer :: it, nt, in, devnum
#if defined(DM_PARALLEL) && !defined(STUBMPI) && ( defined(RUN_ON_GPU) || defined(_ACCEL))
#include "version_decl"
! Program_name, a global variable defined in frame/module_domain.F, is
! set, then a routine <a href=init_modules.html>init_modules</a> is
! called. This calls all the init programs that are provided by the
! modules that are linked into WRF. These include initialization of
! external I/O packages. Also, some key initializations for
! distributed-memory parallelism occur here if DM_PARALLEL is specified
! in the compile: setting up I/O quilt processes to act as I/O servers
! and dividing up MPI communicators among them, as well as initializing
! external communication packages such as RSL or RSL_LITE.
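!
! A minimal sketch of the two-phase initialization described above, assuming
! the standard init_modules(phase) entry point (the actual calls are elided
! from this excerpt):
!
!    CALL init_modules(1)        ! phase 1 returns after MPI_INIT() (if called)
!    CALL WRFU_Initialize( defaultCalKind=WRFU_CAL_GREGORIAN )
!    CALL init_modules(2)        ! phase 2 resumes after MPI_INIT() (if called)
!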
program_name = "WRF " // TRIM(release_version) // " MODEL"
! Initialize WRF modules:
! Phase 1 returns after MPI_INIT() (if it is called)
IF ( .NOT. PRESENT( no_init1 ) ) THEN
! Initialize utilities (time manager, etc.)
#ifdef NO_LEAP_CALENDAR
CALL WRFU_Initialize( defaultCalKind=WRFU_CAL_NOLEAP )
CALL WRFU_Initialize( defaultCalKind=WRFU_CAL_GREGORIAN )
! Phase 2 resumes after MPI_INIT() (if it is called)
! The wrf namelist.input file is read and stored in the USE associated
! structure model_config_rec, defined in frame/module_configure.F, by the
! call to <a href=initial_config.html>initial_config</a>. On distributed
! memory parallel runs this is done only on one processor, and then
! broadcast as a buffer. For distributed-memory, the broadcast of the
! configuration information is accomplished by first putting the
! configuration information into a buffer (<a
! href=get_config_as_buffer.html>get_config_as_buffer</a>), broadcasting
! the buffer, then setting the configuration information (<a
! href=set_config_as_buffer.html>set_config_as_buffer</a>).
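!
! A sketch of that pattern, assuming the DM_PARALLEL code path (the monitor
! task's call to initial_config and the closing ENDIF are elided from this
! excerpt):
!
!    IF ( wrf_dm_on_monitor() ) THEN
!       CALL initial_config                              ! read namelist.input
!    ENDIF
!    CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
!    CALL wrf_dm_bcast_bytes( configbuf, nbytes )
!    CALL set_config_as_buffer( configbuf, configbuflen )
!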
IF ( wrf_dm_on_monitor() ) THEN
CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
CALL wrf_dm_bcast_bytes( configbuf, nbytes )
CALL set_config_as_buffer( configbuf, configbuflen )
CALL wrf_dm_initialize
CALL set_derived_rconfigs
CALL check_nml_consistency
CALL set_physics_rconfigs
# if defined(DM_PARALLEL) && !defined(STUBMPI)
CALL wrf_get_myproc( myproc )
CALL wrf_get_nproc( nproc )
CALL wrf_get_hostid ( hostid )
CALL wrf_get_dm_communicator ( loccomm )
ALLOCATE( hostids(nproc) )
ALLOCATE( budds(nproc) )
CALL mpi_allgather( hostid, 1, MPI_INTEGER, hostids, 1, MPI_INTEGER, loccomm, ierr )
if ( ierr .NE. 0 ) print *,'error in mpi_allgather ',ierr
! mark the tasks that are on the same node as this one
IF ( hostid .EQ. hostids(i) ) THEN
budds(i) = buddcounter
buddcounter = buddcounter + 1
mydevice = budds(myproc+1)
DEALLOCATE( hostids )
in = acc_get_num_devices(acc_device_nvidia)
if (in .le. 0) print *, 'error: no GPUs present: ',in
!$OMP PARALLEL SHARED(mydevice,in) PRIVATE(it,nt,devnum)
it = omp_get_thread_num()
nt = omp_get_num_threads()
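! Round-robin device assignment: offset the node-local rank index (mydevice)
! by the OpenMP thread index and wrap around the 'in' available GPUs; e.g.,
! with 4 GPUs, node-local ranks 0 and 1, and 2 threads per rank, the
! (rank, thread) pairs map to devices 0, 1, 2, and 3.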
devnum = mod(mod(mydevice*nt,in) + it, in)
print *, "Process, Thread, Device: ",mydevice, it, devnum
call acc_set_device_num(devnum, acc_device_nvidia)
devnum = mod(mod(mydevice*nt,in) + it, in)
print *, "Process, Thread, Device: ",mydevice, it, devnum
call acc_set_device_num(devnum, acc_device_nvidia)
CALL wrf_get_myproc( myproc )
CALL wrf_get_nproc( nproc )
CALL wrf_get_hostid ( hostid )
CALL wrf_get_dm_communicator ( loccomm )
ALLOCATE( hostids(nproc) )
ALLOCATE( budds(nproc) )
CALL mpi_allgather( hostid, 1, MPI_INTEGER, hostids, 1, MPI_INTEGER, loccomm, ierr )
if ( ierr .NE. 0 ) write(0,*)__FILE__,__LINE__,'error in mpi_allgather ',ierr
! mark the tasks that are on the same node as this one
IF ( hostid .EQ. hostids(i) ) THEN
budds(i) = buddcounter
buddcounter = buddcounter + 1
mydevice = budds(myproc+1)
DEALLOCATE( hostids )
CALL wsm5_gpu_init( myproc, nproc, mydevice )
! Among the configuration variables read from the namelist is
! debug_level. This is retrieved using nl_get_debug_level (Registry
! generated and defined in frame/module_configure.F). The value is then
! used to set the debug-print information level for use by <a
! href=wrf_debug.html>wrf_debug</a> throughout the code. Debug_level
! of zero (the default) causes no information to be printed when the
! model runs. The higher the number (up to 1000), the more information is
! printed.
CALL nl_get_debug_level ( 1, debug_level )
CALL set_wrf_debug_level ( debug_level )
! allocate and configure the mother domain
NULLIFY( null_domain )
! RSL is required for WRF nesting options.
! The non-MPI build that allows nesting is only supported on machines
! with the -DSTUBMPI option. Check to see if the WRF model is being asked
! for a multi-domain run (max_dom > 1, from the namelist). If so,
! then we check to make sure that we are under the parallel
! run option or we are on an acceptable machine.
CALL nl_get_max_dom( 1, max_dom )
IF ( max_dom > 1 ) THEN
#if ( ! defined(DM_PARALLEL) && ! defined(STUBMPI) )
CALL wrf_error_fatal( &
     'nesting requires either an MPI build or use of the -DSTUBMPI option' )
! The top-most domain in the simulation is then allocated and configured
! by calling <a href=alloc_and_configure_domain.html>alloc_and_configure_domain</a>.
! Here, in the case of this root domain, the routine is passed the
! globally accessible pointer to TYPE(domain), head_grid, defined in
! frame/module_domain.F. The parent is null and the child index is given
! as negative, signifying none. Afterwards, because the call to
! alloc_and_configure_domain may modify the model's configuration data
! stored in model_config_rec, the configuration information is again
! repacked into a buffer, broadcast, and unpacked on each task (for
! DM_PARALLEL compiles). The call to <a
! href=setup_timekeeping.html>setup_timekeeping</a> for head_grid relies
! on this configuration information, and it must occur after the second
! broadcast of the configuration information.
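!
! The full head-grid allocation call is elided below; a sketch, with the
! grid and kid arguments assumed from the standard WRF driver, is:
!
!    CALL alloc_and_configure_domain ( domain_id = 1 ,           &
!                                      grid      = head_grid ,   &
!                                      parent    = null_domain , &
!                                      kid       = -1 )
!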
CALL wrf_message ( program_name )
CALL wrf_debug ( 100 , 'wrf: calling alloc_and_configure_domain ' )
CALL alloc_and_configure_domain ( domain_id = 1 , &
                                  parent = null_domain , &
CALL wrf_debug ( 100 , 'wrf: calling model_to_grid_config_rec ' )
CALL model_to_grid_config_rec ( head_grid%id , model_config_rec , config_flags )
CALL wrf_debug ( 100 , 'wrf: calling set_scalar_indices_from_config ' )
CALL set_scalar_indices_from_config ( head_grid%id , idum1, idum2 )
CALL wrf_debug ( 100 , 'wrf: calling init_wrfio' )
CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
CALL wrf_dm_bcast_bytes( configbuf, nbytes )
CALL set_config_as_buffer( configbuf, configbuflen )
! In case we are doing digital filter initialization, set dfi_stage = DFI_SETUP
! to indicate in Setup_Timekeeping that we want forecast start and
! end times at this point
IF ( head_grid%dfi_opt .NE. DFI_NODFI ) head_grid%dfi_stage = DFI_SETUP
CALL Setup_Timekeeping (head_grid)
! The head grid is initialized with read-in data through the call to <a
! href=med_initialdata_input.html>med_initialdata_input</a>, which is
! passed the pointer head_grid and a locally declared configuration data
! structure, config_flags, that is set by a call to <a
! href=model_to_grid_config_rec.html>model_to_grid_config_rec</a>. It is
! also necessary that the indices into the 4d tracer arrays such as
! moisture be set with a call to <a
! href=set_scalar_indices_from_config.html>set_scalar_indices_from_config</a>
! prior to the call to initialize the domain. Both of these calls are
! told which domain they are setting up for by passing in the integer id
! of the head domain as <tt>head_grid%id</tt>, which is 1 for the
! top-most domain.

! In the case that write_restart_at_0h is set to true in the namelist,
! the model simply generates a restart file using the just read-in data
! and then shuts down. This is used for ensemble breeding, and is not
! typically enabled.
CALL med_initialdata_input( head_grid , config_flags )
IF ( config_flags%write_restart_at_0h ) THEN
CALL med_restart_out ( head_grid, config_flags )
#ifndef AUTODOC_BUILD
! prevent this from showing up before the call to integrate in the autogenerated call tree
CALL wrf_debug ( 0 , ' 0 h restart only wrf: SUCCESS COMPLETE WRF' )
! TBH: $$$ Unscramble this later...
! TBH: $$$ Need to add state to avoid calling wrf_finalize() twice when ESMF
! TBH: $$$ library is used. Maybe just set clock stop_time=start_time and
! TBH: $$$ do not call wrf_finalize here...
! set default values for subtimes
head_grid%start_subtime = domain_get_start_time ( head_grid )
head_grid%stop_subtime = domain_get_stop_time ( head_grid )
! For EM (but not DA), if this is a DFI run, we can allocate some space. We are
! not allowing anything tricky for nested DFI. If there are any nested domains,
! they all need to start at the same time. Otherwise, why even do the DFI? If
! the domains do not all start at the same time, then there will be inconsistencies,
! which is what DFI is supposed to address.
IF ( head_grid%dfi_opt .NE. DFI_NODFI ) THEN
CALL alloc_doms_for_dfi ( head_grid )
END SUBROUTINE wrf_init
SUBROUTINE wrf_run( )
! Once the top-level domain has been allocated, configured, and
! initialized, the model time integration is ready to proceed. The start
! and stop times for the domain are set to the start and stop time of the
! model run, and then <a href=integrate.html>integrate</a> is called to
! advance the domain forward through that specified time interval. On
! return, the simulation is completed.

! The forecast integration for the coarsest grid is now started. The
! integration is from the first step (1) to the last step of the simulation.
CALL wrf_debug ( 100 , 'wrf: calling integrate' )
CALL integrate ( head_grid )
CALL wrf_debug ( 100 , 'wrf: back from integrate' )
END SUBROUTINE wrf_run
SUBROUTINE wrf_finalize( no_shutdown )
! WRF finalize routine.

! A Mediation Layer-provided
! subroutine, <a href=med_shutdown_io.html>med_shutdown_io</a> is called
! to allow the model to do any I/O specific cleanup and shutdown, and
! then the WRF Driver Layer routine <a
! href=wrf_shutdown.html>wrf_shutdown</a> (quilt servers would be
! directed to shut down here) is called to properly end the run,
! including shutting down the communications (for example, most comm
! layers would call MPI_FINALIZE at this point if they're using MPI).
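!
! A sketch of that shutdown sequence, assuming the standard entry points
! (the WRFU_Finalize and wrf_shutdown calls themselves are elided from
! this excerpt):
!
!    CALL med_shutdown_io ( head_grid , config_flags )
!    IF ( .NOT. PRESENT( no_shutdown ) ) THEN
!       CALL WRFU_Finalize      ! finalize the time manager
!       CALL wrf_shutdown       ! calls MPI_FINALIZE for DM-parallel runs
!    ENDIF
!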
LOGICAL, OPTIONAL, INTENT(IN) :: no_shutdown
CALL med_shutdown_io ( head_grid , config_flags )
CALL wrf_debug ( 100 , 'wrf: back from med_shutdown_io' )
CALL wrf_debug ( 0 , 'wrf: SUCCESS COMPLETE WRF' )
! Call wrf_shutdown() (which calls MPI_FINALIZE()
! for DM parallel runs).
IF ( .NOT. PRESENT( no_shutdown ) ) THEN
! Finalize time manager
END SUBROUTINE wrf_finalize
! Runs a digital filter initialization procedure.
! Initialization procedure
IF ( config_flags%dfi_opt .NE. DFI_NODFI ) THEN
SELECT CASE ( config_flags%dfi_opt )
wrf_err_message = 'Initializing with DFL'
CALL wrf_message(TRIM(wrf_err_message))
wrf_err_message = ' Filtering forward in time'
CALL wrf_message(TRIM(wrf_err_message))
CALL wrf_dfi_fwd_init()
CALL wrf_dfi_array_reset()
CALL wrf_dfi_fst_init()
IF ( config_flags%dfi_write_filtered_input ) THEN
CALL wrf_dfi_write_initialized_state()
wrf_err_message = 'Initializing with DDFI'
CALL wrf_message(TRIM(wrf_err_message))
wrf_err_message = ' Integrating backward in time'
CALL wrf_message(TRIM(wrf_err_message))
CALL wrf_dfi_bck_init()
wrf_err_message = ' Filtering forward in time'
CALL wrf_message(TRIM(wrf_err_message))
CALL wrf_dfi_fwd_init()
CALL wrf_dfi_array_reset()
CALL wrf_dfi_fst_init()
IF ( config_flags%dfi_write_filtered_input ) THEN
CALL wrf_dfi_write_initialized_state()
wrf_err_message = 'Initializing with TDFI'
CALL wrf_message(TRIM(wrf_err_message))
wrf_err_message = ' Integrating backward in time'
CALL wrf_message(TRIM(wrf_err_message))
CALL wrf_dfi_bck_init()
CALL wrf_dfi_array_reset()
wrf_err_message = ' Filtering forward in time'
CALL wrf_message(TRIM(wrf_err_message))
CALL wrf_dfi_fwd_init()
CALL wrf_dfi_array_reset()
CALL wrf_dfi_fst_init()
IF ( config_flags%dfi_write_filtered_input ) THEN
CALL wrf_dfi_write_initialized_state()
wrf_err_message = 'Unrecognized DFI_OPT in namelist'
CALL wrf_error_fatal(TRIM(wrf_err_message))
END SUBROUTINE wrf_dfi
SUBROUTINE set_derived_rconfigs
! Some derived rconfig entries need to be set based on the value of other,
! non-derived entries before package-dependent memory allocation takes place.
! This might be employed when, for example, we want to allocate arrays in
! a package that depends on the setting of two or more namelist variables.
! In this subroutine, we do just that.
IF ( model_config_rec % dfi_opt .EQ. DFI_NODFI ) THEN
DO i = 1, model_config_rec % max_dom
model_config_rec % mp_physics_dfi(i) = -1
DO i = 1, model_config_rec % max_dom
model_config_rec % mp_physics_dfi(i) = model_config_rec % mp_physics(i)
IF ( model_config_rec % dyn_opt .EQ. 2 ) THEN
DO i = 1, model_config_rec % max_dom
model_config_rec % mp_physics_4dvar(i) = -1
DO i = 1, model_config_rec % max_dom
model_config_rec % mp_physics_4dvar(i) = model_config_rec % mp_physics(i)
END SUBROUTINE set_derived_rconfigs
RECURSIVE SUBROUTINE alloc_doms_for_dfi ( grid )
TYPE (domain) , POINTER :: grid
TYPE (domain) , POINTER :: new_nest_loc
TYPE (grid_config_rec_type) :: parent_config_flags
INTEGER :: nestid_loc , kid_loc
! Are there any subdomains from this level? The output is the nestid (the domain
! ID of the nest) and kid (an index to which of the parent's children this new nested
! domain represents).
DO WHILE ( nests_to_open( grid , nestid_loc , kid_loc ) )
! If we found another child domain, we continue on: allocate, set up time keeping,
! and initialize.
CALL alloc_and_configure_domain ( domain_id = nestid_loc , &
                                  grid = new_nest_loc , &
print *,'for parent domain id #',grid%id,', found child domain #',nestid_loc
! Since this is a DFI run, set the DFI switches to be the same for all domains.
new_nest_loc%dfi_opt = head_grid%dfi_opt
new_nest_loc%dfi_stage = DFI_SETUP
! Set up time keeping for the fine grid space that was just allocated.
CALL Setup_Timekeeping (new_nest_loc)
! With space allocated, and timers set, the fine grid can be initialized with data.
CALL model_to_grid_config_rec ( grid%id , model_config_rec , parent_config_flags )
CALL med_nest_initial ( grid , new_nest_loc , config_flags )
! Here's the recursive part. For each of these child domains, we call this same routine.
! This will find all of new_nest_loc's first-generation progeny.
CALL alloc_doms_for_dfi ( new_nest_loc )
END SUBROUTINE alloc_doms_for_dfi
END MODULE module_wrf_top