Remove deadcode: loading saved-state with informant-induced target
[hiphop-php.git] / hphp / hack / src / server / serverMain.ml
blob 127a6b450d22fad6d6bb4212d7a18bb9407342a9
1 (*
2 * Copyright (c) 2015, Facebook, Inc.
3 * All rights reserved.
5 * This source code is licensed under the MIT license found in the
6 * LICENSE file in the "hack" directory of this source tree.
8 *)
10 open Hh_prelude
11 open ServerEnv
12 open Reordered_argument_collections
13 open String_utils
14 open Option.Monad_infix
16 (*****************************************************************************)
17 (* Main initialization *)
18 (*****************************************************************************)
20 let () = Printexc.record_backtrace true
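(* Test-only hook: when set via force_break_recheck_loop_for_test below, the
 * recheck loop in recheck_until_no_changes_left stops batching further
 * changes and returns after the current iteration. *)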
22 let force_break_recheck_loop_for_test_ref = ref false
24 let force_break_recheck_loop_for_test x =
25 force_break_recheck_loop_for_test_ref := x
27 module MainInit : sig
28 val go :
29 genv ->
30 ServerArgs.options ->
31 (unit -> env) ->
32 (* init function to run while we have init lock *)
33 env
34 end = struct
35 (* This code is only executed when the option --check is NOT present *)
36 let go genv options init_fun =
37 let root = ServerArgs.root options in
38 let t = Unix.gettimeofday () in
39 let pid = Unix.getpid () in
40 begin
41 match ProcFS.first_cgroup_for_pid pid with
42 | Ok cgroup ->
43 Hh_logger.log "Server Pid: %d" pid;
44 Hh_logger.log "Server cGroup: %s" cgroup
45 | _ -> ()
46 end;
47 Hh_logger.log "Initializing Server (This might take some time)";
49 (* note: we only run periodic tasks on the root, not extras *)
50 let env = init_fun () in
51 Hh_logger.log "Server is partially ready";
52 ServerIdle.init genv root;
53 let t' = Unix.gettimeofday () in
54 Hh_logger.log "Took %f seconds." (t' -. t);
55 HackEventLogger.server_is_partially_ready ();
57 env
58 end
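(* Program groups the process-level pieces: signal handling (preinit), the
 * check-mode finale that prints errors and exits (run_once_and_exit), and
 * filtering/relativizing of file-watcher updates (process_updates). *)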
60 module Program = struct
61 let preinit () =
62 (* Warning: Global references initialized in this function should
63 be 'restored' in the workers, because the workers are not 'forked'
64 anymore. See `ServerWorker.{save/restore}_state`. *)
65 Sys_utils.set_signal
66 Sys.sigusr1
67 (Sys.Signal_handle Typing.debug_print_last_pos);
68 Sys_utils.set_signal
69 Sys.sigusr2
70 (Sys.Signal_handle
71 (fun _ ->
72 Hh_logger.log "Got sigusr2 signal. Going to shut down.";
73 Exit.exit
74 ~msg:
75 "Hh_server received a stop signal. This can happen from a large rebase/update"
76 Exit_status.Server_shutting_down_due_to_sigusr2))
78 let run_once_and_exit
79 genv
80 env
81 (save_state_result : SaveStateServiceTypes.save_state_result option) =
82 let recheck_stats =
83 Option.map
84 ~f:ServerEnv.recheck_loop_stats_to_user_telemetry
85 env.ServerEnv.last_recheck_loop_stats_for_actual_work
87 ServerError.print_error_list
88 stdout
89 ~stale_msg:None
90 ~output_json:(ServerArgs.json_mode genv.options)
91 ~error_list:
92 (List.map (Errors.get_error_list env.errorl) ~f:Errors.to_absolute)
93 ~save_state_result
94 ~recheck_stats;
96 WorkerController.force_quit_all ();
98 (* Warnings shouldn't break CI, so don't change the exit status except for Errors *)
99 let has_errors =
100 List.exists
101 ~f:(fun e ->
102 match Errors.get_severity e with
103 | Errors.Error -> true
104 | _ -> false)
105 (Errors.get_error_list env.errorl)
107 let is_saving_state_and_ignoring_errors =
108 ServerArgs.gen_saved_ignore_type_errors genv.options
109 && Option.is_some (ServerArgs.save_filename genv.options)
111 exit
112 ( if has_errors && not is_saving_state_and_ignoring_errors then
113 1
114 else
115 0 )
117 (* filter and relativize updated file paths *)
118 let process_updates genv updates =
119 let root = Path.to_string @@ ServerArgs.root genv.options in
120 (* Because of symlinks, we can have updates from files that aren't in
121 * the .hhconfig directory *)
122 let updates = SSet.filter updates ~f:(fun p -> string_starts_with p root) in
123 let updates = Relative_path.(relativize_set Root updates) in
124 let to_recheck =
125 Relative_path.Set.filter updates ~f:(fun update ->
126 FindUtils.file_filter (Relative_path.to_absolute update))
128 let config_in_updates =
129 Relative_path.Set.mem updates ServerConfig.filename
131 ( if config_in_updates then
132 let (new_config, _) =
133 ServerConfig.(load ~silent:false filename genv.options)
135 if not (ServerConfig.is_compatible genv.config new_config) then (
136 Hh_logger.log
137 "%s changed in an incompatible way; please restart %s.\n"
138 (Relative_path.suffix ServerConfig.filename)
139 GlobalConfig.program_name;
141 (* TODO: Notify the server monitor directly about this. *)
142 Exit.exit Exit_status.Hhconfig_changed
143 ) );
144 to_recheck
147 let finalize_init init_env typecheck_telemetry init_telemetry =
148 ServerProgress.send_warning None;
149 (* rest is just logging/telemetry *)
150 let t' = Unix.gettimeofday () in
151 let heap_size = SharedMem.heap_size () in
152 let hash_telemetry = ServerUtils.log_and_get_sharedmem_load_telemetry () in
153 let telemetry =
154 Telemetry.create ()
155 |> Telemetry.duration ~start_time:init_env.init_start_t
156 |> Telemetry.object_ ~key:"init" ~value:init_telemetry
157 |> Telemetry.object_ ~key:"typecheck" ~value:typecheck_telemetry
158 |> Telemetry.object_ ~key:"hash" ~value:hash_telemetry
159 |> Telemetry.int_ ~key:"heap_size" ~value:heap_size
161 HackEventLogger.server_is_ready telemetry;
162 Hh_logger.log
163 "SERVER_IS_READY. Heap size: %d. Took %f seconds to init. Telemetry:\n%s"
164 heap_size
165 (t' -. init_env.init_start_t)
166 (Telemetry.to_string telemetry);
169 let shutdown_persistent_client client env =
170 ClientProvider.shutdown_client client;
171 let env =
173 env with
174 pending_command_needs_writes = None;
175 persistent_client_pending_command_needs_full_check = None;
178 ServerFileSync.clear_sync_data env
180 (*****************************************************************************)
181 (* The main loop *)
182 (*****************************************************************************)
184 [@@@warning "-52"]
186 (* we have no alternative but to depend on Sys_error strings *)
188 let handle_connection_exception
189 ~(env : ServerEnv.env) ~(client : ClientProvider.client) (e : Exception.t) :
190 ServerEnv.env =
191 match Exception.to_exn e with
192 | ClientProvider.Client_went_away
193 | ServerCommandTypes.Read_command_timeout ->
194 ClientProvider.shutdown_client client;
196 (* Connection dropped off. It's unfortunate that we don't actually know
197 * which connection went bad (it could be any write to any connection to
198 * child processes/daemons); we just assume at this top level that,
199 * since it's not caught elsewhere, it's the connection to the client.
201 * TODO: Make sure the pipe exception is really about this client. *)
202 | Unix.Unix_error (Unix.EPIPE, _, _)
203 | Sys_error "Broken pipe"
204 | Sys_error "Connection reset by peer" ->
205 Hh_logger.log "Client channel went bad. Shutting down client connection";
206 ClientProvider.shutdown_client client;
208 | exn ->
209 let e = Exception.wrap exn in
210 HackEventLogger.handle_connection_exception "inner" e;
211 Hh_logger.log
212 "HANDLE_CONNECTION_EXCEPTION(inner) %s"
213 (Exception.to_string e);
214 ClientProvider.shutdown_client client;
217 [@@@warning "+52"]
219 (* CARE! scope of suppression should be only handle_connection_exception *)
221 (* f represents a non-persistent command coming from a client. If executing f
222 * throws, we need to dispose of this client (possibly recovering an updated
223 * environment from Nonfatal_rpc_exception). "return" is a constructor
224 * wrapping the return value to make it match the return type of f *)
225 let handle_connection_try return client env f =
226 try f () with
227 | ServerCommand.Nonfatal_rpc_exception (e, env) ->
228 return (handle_connection_exception ~env ~client e)
229 | exn ->
230 let e = Exception.wrap exn in
231 return (handle_connection_exception ~env ~client e)
233 let handle_connection_ genv env client =
234 ClientProvider.track
235 client
236 ~key:Connection_tracker.Server_start_handle_connection;
237 handle_connection_try (fun x -> ServerUtils.Done x) client env @@ fun () ->
238 match ClientProvider.read_connection_type client with
239 | ServerCommandTypes.Persistent ->
240 let f env =
241 let env =
242 match env.persistent_client with
243 | Some old_client ->
244 ClientProvider.send_push_message_to_client
245 old_client
246 ServerCommandTypes.NEW_CLIENT_CONNECTED;
247 shutdown_persistent_client old_client env
248 | None -> env
250 ClientProvider.track client ~key:Connection_tracker.Server_start_handle;
251 ClientProvider.send_response_to_client client ServerCommandTypes.Connected;
252 let env =
254 env with
255 persistent_client = Some (ClientProvider.make_persistent client);
258 (* If the client connected in the middle of recheck, let them know it's
259 * happening. *)
260 if is_full_check_started env.full_check_status then
261 ServerBusyStatus.send
263 (ServerCommandTypes.Doing_global_typecheck
264 (ServerCheckUtils.global_typecheck_kind genv env));
268 Option.is_some env.persistent_client
269 (* Cleaning up after existing client (in shutdown_persistent_client)
270 * will attempt to write to shared memory *)
271 then
272 ServerUtils.Needs_writes (env, f, true, "Cleaning up persistent client")
273 else
274 ServerUtils.Done (f env)
275 | ServerCommandTypes.Non_persistent -> ServerCommand.handle genv env client
277 let handle_persistent_connection_exception
278 ~(client : ClientProvider.client) ~(is_fatal : bool) (e : Exception.t) :
279 unit =
280 let open Marshal_tools in
281 let remote_e =
283 message = Exception.get_ctor_string e;
284 stack = Exception.get_backtrace_string e |> Exception.clean_stack;
287 let push =
288 if is_fatal then
289 ServerCommandTypes.FATAL_EXCEPTION remote_e
290 else
291 ServerCommandTypes.NONFATAL_EXCEPTION remote_e
293 begin
294 try ClientProvider.send_push_message_to_client client push with _ -> ()
295 end;
296 HackEventLogger.handle_persistent_connection_exception "inner" ~is_fatal e;
297 Hh_logger.error
298 "HANDLE_PERSISTENT_CONNECTION_EXCEPTION(inner) %s"
299 (Exception.to_string e);
302 (* Same as handle_connection_try, but for persistent clients *)
303 [@@@warning "-52"]
305 (* we have no alternative but to depend on Sys_error strings *)
307 let handle_persistent_connection_try return client env f =
308 try f () with
309 (* TODO: Make sure the pipe exception is really about this client. *)
310 | Unix.Unix_error (Unix.EPIPE, _, _)
311 | Sys_error "Connection reset by peer"
312 | Sys_error "Broken pipe"
313 | ServerCommandTypes.Read_command_timeout
314 | ServerClientProvider.Client_went_away ->
315 return
317 (shutdown_persistent_client client)
318 ~needs_writes:(Some "Client_went_away")
319 | ServerCommand.Nonfatal_rpc_exception (e, env) ->
320 handle_persistent_connection_exception ~client ~is_fatal:false e;
321 return env (fun env -> env) ~needs_writes:None
322 | exn ->
323 let e = Exception.wrap exn in
324 handle_persistent_connection_exception ~client ~is_fatal:true e;
325 let needs_writes = Some (Exception.to_string e) in
326 return env (shutdown_persistent_client client) ~needs_writes
328 [@@@warning "+52"]
330 (* CARE! scope of suppression should be only handle_persistent_connection_try *)
332 let handle_persistent_connection_ genv env client =
333 let return env f ~needs_writes =
334 match needs_writes with
335 | Some reason -> ServerUtils.Needs_writes (env, f, true, reason)
336 | None -> ServerUtils.Done (f env)
338 handle_persistent_connection_try return client env @@ fun () ->
339 let env = { env with ide_idle = false } in
340 ServerCommand.handle genv env client
342 let handle_connection genv env client client_kind =
343 ServerIdle.stamp_connection ();
345 (* This "return" is guaranteed to be run as part of the main loop, when workers
346 * are not busy, so we can ignore whether it needs writes or not - it's always
347 * safe for it to write. *)
348 let return env f ~needs_writes:_ = f env in
349 match client_kind with
350 | `Persistent ->
351 handle_persistent_connection_ genv env client
352 |> ServerUtils.wrap (handle_persistent_connection_try return client)
353 | `Non_persistent ->
354 handle_connection_ genv env client
355 |> ServerUtils.wrap (handle_connection_try (fun x -> x) client)
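(* query_notifier asks the file-change notifier for updates. `Sync queries the
 * notifier synchronously (treating Watchman.Timeout as Notifier_unavailable),
 * `Async queries asynchronously and records last_notifier_check_time, and
 * `Skip reports no changes. Any further readable async updates are then
 * drained, and raw paths are filtered through Program.process_updates.
 * Returns (env, updates, updates_stale, telemetry). *)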
357 let query_notifier genv env query_kind start_time =
358 let open ServerNotifierTypes in
359 let telemetry =
360 Telemetry.create () |> Telemetry.duration ~key:"start" ~start_time
362 let (env, raw_updates) =
363 match query_kind with
364 | `Sync ->
365 ( env,
366 begin
367 try Notifier_synchronous_changes (genv.notifier ())
368 with Watchman.Timeout -> Notifier_unavailable
369 end )
370 | `Async ->
371 ( { env with last_notifier_check_time = start_time },
372 genv.notifier_async () )
373 | `Skip -> (env, Notifier_async_changes SSet.empty)
375 let telemetry = Telemetry.duration telemetry ~key:"notified" ~start_time in
376 let unpack_updates = function
377 | Notifier_unavailable -> (true, SSet.empty)
378 | Notifier_state_enter _ -> (true, SSet.empty)
379 | Notifier_state_leave _ -> (true, SSet.empty)
380 | Notifier_async_changes updates -> (true, updates)
381 | Notifier_synchronous_changes updates -> (false, updates)
383 let (updates_stale, raw_updates) = unpack_updates raw_updates in
384 let rec pump_async_updates acc =
385 match genv.notifier_async_reader () with
386 | Some reader when Buffered_line_reader.is_readable reader ->
387 let (_, raw_updates) = unpack_updates (genv.notifier_async ()) in
388 pump_async_updates (SSet.union acc raw_updates)
389 | _ -> acc
391 let raw_updates = pump_async_updates raw_updates in
392 let telemetry = Telemetry.duration telemetry ~key:"pumped" ~start_time in
393 let updates = Program.process_updates genv raw_updates in
394 let telemetry =
395 telemetry
396 |> Telemetry.duration ~key:"processed" ~start_time
397 |> Telemetry.int_ ~key:"raw_updates" ~value:(SSet.cardinal raw_updates)
398 |> Telemetry.int_ ~key:"updates" ~value:(Relative_path.Set.cardinal updates)
400 if not @@ Relative_path.Set.is_empty updates then
401 HackEventLogger.notifier_returned start_time (SSet.cardinal raw_updates);
402 (env, updates, updates_stale, telemetry)
404 (* This function loops until it has processed all outstanding changes.
406 * One reason for doing this is so that, if a client makes a request,
407 * we can process all outstanding changes prior to handling that request.
408 * That way the client will get an up-to-date answer.
410 * Another reason is to get meaningful logging in case of watchman events.
411 * When a rebase occurs, Watchman/dfind takes a while to give us the full list
412 * of updates, and it often comes in batches. To get an accurate measurement
413 * of rebase time, we use the heuristic that any changes that come in
414 * right after one rechecking round finishes are part of the same
415 * rebase, and we don't log the recheck_end event until the update list
416 * is no longer getting populated.
418 * The above doesn't apply in the presence of interruptions / cancellations -
419 * it's possible for a client to request that the current recheck be stopped. *)
421 let rec recheck_until_no_changes_left acc genv env select_outcome =
422 let start_time = Unix.gettimeofday () in
423 (* this is telemetry for the current batch, i.e. iteration: *)
424 let telemetry =
425 Telemetry.create () |> Telemetry.float_ ~key:"start_time" ~value:start_time
428 (* When a new client connects, we use the synchronous notifier.
429 * This is to get synchronous file system changes when invoking
430 * hh_client in the terminal.
432 * NB: This also uses synchronous notification on establishing a persistent
433 * connection. This is harmless, but could maybe be filtered away. *)
434 let query_kind =
435 match select_outcome with
436 | ClientProvider.Select_new _ -> `Sync
437 | ClientProvider.Select_nothing ->
438 if Float.(start_time - env.last_notifier_check_time > 0.5) then
439 `Async
440 else
441 `Skip
442 (* Do not process any disk changes when there are pending persistent
443 * client requests - some of them might be edits, and we don't want to
444 * do analysis on mid-edit state of the world *)
445 | ClientProvider.Select_persistent -> `Skip
447 let (env, updates, updates_stale, query_telemetry) =
448 query_notifier genv env query_kind start_time
450 let telemetry =
451 telemetry
452 |> Telemetry.object_ ~key:"query" ~value:query_telemetry
453 |> Telemetry.duration ~key:"query_done" ~start_time
455 let acc = { acc with updates_stale } in
456 let is_idle =
457 (match select_outcome with
458 | ClientProvider.Select_persistent -> false
459 | _ -> true)
460 && (* "average person types [...] between 190 and 200 characters per minute"
461 * 60/200 = 0.3 *)
462 Float.(start_time - env.last_command_time > 0.3)
464 (* saving any file is our trigger to start full recheck *)
465 let env =
466 if Relative_path.Set.is_empty updates then
468 else
469 let disk_needs_parsing =
470 Relative_path.Set.union updates env.disk_needs_parsing
472 match env.full_recheck_on_file_changes with
473 | Paused _ ->
474 let () = Hh_logger.log "Skipping full check due to `hh --pause`" in
475 { env with disk_needs_parsing; full_check_status = Full_check_needed }
476 | _ ->
477 { env with disk_needs_parsing; full_check_status = Full_check_started }
479 let telemetry = Telemetry.duration telemetry ~key:"got_updates" ~start_time in
480 let env =
481 match env.nonpersistent_client_pending_command_needs_full_check with
482 (* We need to auto-restart the recheck to make progress towards handling
483 * this command... *)
484 | Some (_command, reason, client)
485 when is_full_check_needed env.full_check_status
486 (*... but we don't want to get into a battle with IDE edits stopping
487 * rechecks and us restarting them. We're going to heavily favor edits and
488 * restart only after a longer period since last edit. Note that we'll still
489 * start full recheck immediately after any file save. *)
490 && Float.(start_time - env.last_command_time > 5.0) ->
491 let still_there =
493 ClientProvider.ping client;
494 true
495 with ClientProvider.Client_went_away -> false
497 if still_there then (
498 Hh_logger.log "Restarting full check due to %s" reason;
499 { env with full_check_status = Full_check_started }
500 ) else (
501 ClientProvider.shutdown_client client;
503 env with
504 nonpersistent_client_pending_command_needs_full_check = None;
507 | _ -> env
509 (* Same as above, but for persistent clients *)
510 let env =
511 match env.persistent_client_pending_command_needs_full_check with
512 | Some (_command, reason) when is_full_check_needed env.full_check_status ->
513 Hh_logger.log "Restarting full check due to %s" reason;
514 { env with full_check_status = Full_check_started }
515 | _ -> env
517 let telemetry =
518 Telemetry.duration telemetry ~key:"sorted_out_client" ~start_time
520 (* We have some new, or previously un-processed updates *)
521 let full_check =
522 is_full_check_started env.full_check_status
523 (* Prioritize building search index over full rechecks. *)
524 && ( Queue.is_empty SearchServiceRunner.SearchServiceRunner.queue
525 (* Unless there is something actively waiting for this *)
526 || Option.is_some
527 env.nonpersistent_client_pending_command_needs_full_check )
529 let lazy_check =
530 (not @@ Relative_path.Set.is_empty env.ide_needs_parsing) && is_idle
532 let telemetry =
533 telemetry
534 |> Telemetry.bool_ ~key:"full_check" ~value:full_check
535 |> Telemetry.bool_ ~key:"lazy_check" ~value:lazy_check
536 |> Telemetry.duration ~key:"figured_check_kind" ~start_time
538 if (not full_check) && not lazy_check then
539 let telemetry =
540 Telemetry.string_ telemetry ~key:"check_kind" ~value:"None"
542 let acc =
543 { acc with per_batch_telemetry = telemetry :: acc.per_batch_telemetry }
545 (acc, env)
546 else
547 let check_kind =
548 if lazy_check then
549 ServerTypeCheck.Lazy_check
550 else
551 ServerTypeCheck.Full_check
553 let check_kind_str = ServerTypeCheck.check_kind_to_string check_kind in
554 let env = { env with can_interrupt = not lazy_check } in
555 let needed_full_init = env.init_env.why_needed_full_init in
556 let old_errorl = Errors.get_error_list env.errorl in
558 (* HERE'S WHERE WE DO THE HEAVY WORK! **)
559 let telemetry =
560 telemetry
561 |> Telemetry.string_ ~key:"check_kind" ~value:check_kind_str
562 |> Telemetry.duration ~key:"type_check_start" ~start_time
564 let (env, res, type_check_telemetry) =
565 CgroupProfiler.profile_memory ~event:(`Recheck check_kind_str)
566 @@ ServerTypeCheck.type_check genv env check_kind start_time
568 let telemetry =
569 telemetry
570 |> Telemetry.object_ ~key:"type_check" ~value:type_check_telemetry
571 |> Telemetry.duration ~key:"type_check_end" ~start_time
574 (* END OF HEAVY WORK *)
576 (* Final telemetry and cleanup... *)
577 let env = { env with can_interrupt = true } in
578 begin
579 match (needed_full_init, env.init_env.why_needed_full_init) with
580 | (Some needed_full_init, None) ->
581 finalize_init env.init_env telemetry needed_full_init
582 | _ -> ()
583 end;
584 ServerStamp.touch_stamp_errors old_errorl (Errors.get_error_list env.errorl);
585 let telemetry =
586 Telemetry.duration telemetry ~key:"finalized_and_touched" ~start_time
588 let acc =
590 rechecked_count =
591 acc.rechecked_count + res.ServerTypeCheck.reparse_count;
592 per_batch_telemetry = telemetry :: acc.per_batch_telemetry;
593 total_rechecked_count =
594 acc.total_rechecked_count + res.ServerTypeCheck.total_rechecked_count;
595 updates_stale = acc.updates_stale;
596 recheck_id = acc.recheck_id;
597 duration = acc.duration +. (Unix.gettimeofday () -. start_time);
598 any_full_checks = acc.any_full_checks || not lazy_check;
601 (* Avoid batching IDE rechecks with disk rechecks - there might be
602 * other IDE edits to process first and we want to give the main loop
603 * a chance to process them first.
604 * Similarly, if a recheck was interrupted because of the arrival of a command
605 * that needs writes, break the recheck loop to give that command a chance
606 * to be handled in the main loop.
607 * Finally, tests have the ability to opt out of batching completely. *)
608 if
609 lazy_check
610 || Option.is_some env.pending_command_needs_writes
611 || !force_break_recheck_loop_for_test_ref
612 then
613 (acc, env)
614 else
615 recheck_until_no_changes_left acc genv env select_outcome
617 let new_serve_iteration_id () = Random_id.short_string ()
619 (* This is safe to run only in the main loop, when workers are not doing
620 * anything. *)
621 let main_loop_command_handler client_kind client result =
622 match result with
623 | ServerUtils.Done env -> env
624 | ServerUtils.Needs_full_recheck (env, f, reason) ->
625 begin
626 match client_kind with
627 | `Non_persistent ->
628 (* We should not accept any new clients until this is cleared *)
629 assert (
630 Option.is_none
631 env.nonpersistent_client_pending_command_needs_full_check );
633 env with
634 nonpersistent_client_pending_command_needs_full_check =
635 Some (f, reason, client);
637 | `Persistent ->
638 (* Persistent client will not send any further commands until previous one
639 * is handled. *)
640 assert (
641 Option.is_none env.persistent_client_pending_command_needs_full_check
644 env with
645 persistent_client_pending_command_needs_full_check = Some (f, reason);
648 | ServerUtils.Needs_writes (env, f, _, _) -> f env
650 let has_pending_disk_changes genv =
651 match genv.notifier_async_reader () with
652 | Some reader when Buffered_line_reader.is_readable reader -> true
653 | _ -> false
655 let generate_and_update_recheck_id env =
656 let recheck_id = new_serve_iteration_id () in
657 let env =
659 env with
660 ServerEnv.init_env =
661 { env.ServerEnv.init_env with ServerEnv.recheck_id = Some recheck_id };
664 (env, recheck_id)
666 let serve_one_iteration genv env client_provider =
667 let (env, recheck_id) = generate_and_update_recheck_id env in
668 ServerMonitorUtils.exit_if_parent_dead ();
669 let acceptable_new_client_kind =
670 let has_default_client_pending =
671 Option.is_some env.nonpersistent_client_pending_command_needs_full_check
673 let can_accept_clients = not @@ ServerRevisionTracker.is_hg_updating () in
674 match (can_accept_clients, has_default_client_pending) with
675 (* If we are already blocked on some client, do not accept more of them.
676 * Other clients (that connect through priority pipe, or persistent clients)
677 * can still be handled - unless we are in hg.update state, where we want to
678 * stop accepting any new clients, with the exception of forced ones. *)
679 | (true, true) -> Some `Priority
680 | (true, false) -> Some `Any
681 | (false, true) -> None
682 | (false, false) -> Some `Force_dormant_start_only
684 let selected_client =
685 match acceptable_new_client_kind with
686 | None -> ClientProvider.Select_nothing
687 | Some client_kind ->
688 ClientProvider.sleep_and_check
689 client_provider
690 env.persistent_client
691 ~ide_idle:env.ide_idle
692 ~idle_gc_slice:genv.local_config.ServerLocalConfig.idle_gc_slice
693 client_kind
696 (* We'll now update any waiting clients with our status.
697 * (Or more precisely, we'll tell the monitor, so any waiting clients
698 * will know when they poll the monitor.)
700 * By updating status now at the start of the serve_one_iteration,
701 * it means there's no obligation on the "doing work" part of the previous
702 * iteration to clean up its own status-reporting once done.
703 * Caveat: that's not quite true, since ClientProvider.sleep_and_check will
704 * wait up to 1s if there are no pending requests. So theoretically we
705 * won't update our status for up to 1s after the previous work is done.
706 * That doesn't really matter, since (1) if there are no pending requests
707 * then no client will even ask for status, and (2) it's worth it to
708 * keep the code clean and simple.
710 * Note: the message here might soon be replaced. If we discover disk changes
711 * that prompt a typecheck, then typechecking sends its own status updates.
712 * And if the selected_client was a request, then once we discover the nature
713 * of that request then ServerCommand.handle will send its own status updates too.
715 ServerProgress.send_progress
716 ~include_in_logs:false
717 "%s"
718 (match selected_client with
719 | ClientProvider.Select_nothing ->
720 if env.ide_idle then
721 "ready"
722 else
723 "HackIDE:active"
724 | _ -> "working");
725 let env =
726 match selected_client with
727 | ClientProvider.Select_nothing ->
728 let last_stats = env.last_recheck_loop_stats in
729 (* Ugly hack: We want GC_SHAREDMEM_RAN to record the last rechecked
730 * count so that we can figure out if the largest reclamations
731 * correspond to massive rebases. However, the logging call is done in
732 * the SharedMem module, which doesn't know anything about Server stuff.
733 * So we wrap the call here. *)
734 HackEventLogger.with_rechecked_stats
735 (List.length last_stats.per_batch_telemetry)
736 last_stats.rechecked_count
737 last_stats.total_rechecked_count
738 (fun () -> SharedMem.collect `aggressive);
739 let t = Unix.gettimeofday () in
740 if Float.(t -. env.last_idle_job_time > 0.5) then
741 let env = ServerIdle.go env in
742 { env with last_idle_job_time = t }
743 else
745 | _ -> env
747 let stage =
748 if Option.is_some env.init_env.why_needed_full_init then
749 `Init
750 else
751 `Recheck
753 HackEventLogger.with_id ~stage recheck_id @@ fun () ->
754 (* We'll first do "recheck_until_no_changes_left" to handle all outstanding changes, so that
755 * after that we'll be able to give an up-to-date answer to the client.
756 * Except: this might be stopped early in some cases, e.g. IDE checks. *)
757 let t_start_recheck = Unix.gettimeofday () in
758 let (stats, env) =
759 recheck_until_no_changes_left
760 (empty_recheck_loop_stats ~recheck_id)
761 genv
763 selected_client
765 let t_done_recheck = Unix.gettimeofday () in
766 let did_work = stats.total_rechecked_count > 0 in
767 let env =
769 env with
770 last_recheck_loop_stats = stats;
771 last_recheck_loop_stats_for_actual_work =
772 ( if did_work then
773 Some stats
774 else
775 env.last_recheck_loop_stats_for_actual_work );
778 (* push diagnostic changes to client, if necessary *)
779 let (env, diag_reason) =
780 match env.diag_subscribe with
781 | None -> (env, "no diag subscriptions")
782 | Some sub ->
783 let client = Utils.unsafe_opt env.persistent_client in
784 (* Should we hold off sending diagnostics to the client? *)
785 if ClientProvider.client_has_message client then
786 (env, "client has message")
787 else if not @@ Relative_path.Set.is_empty env.ide_needs_parsing then
788 (env, "ide_needs_parsing: processed edits but didn't recheck them yet")
789 else if has_pending_disk_changes genv then
790 (env, "has_pending_disk_changes")
791 else
792 let (sub, errors, is_truncated) =
793 Diagnostic_subscription.pop_errors sub ~global_errors:env.errorl
795 let env = { env with diag_subscribe = Some sub } in
796 let res = ServerCommandTypes.DIAGNOSTIC { errors; is_truncated } in
797 if SMap.is_empty errors then
798 (env, "is_empty errors")
799 else begin
801 ClientProvider.send_push_message_to_client client res;
802 (env, "sent push message")
803 with ClientProvider.Client_went_away ->
804 (* Leaving cleanup of this condition to handled_connection function *)
805 (env, "Client_went_away")
808 let t_sent_diagnostics = Unix.gettimeofday () in
810 if did_work then begin
811 let telemetry =
812 ServerEnv.recheck_loop_stats_to_user_telemetry stats
813 |> Telemetry.string_ ~key:"diag_reason" ~value:diag_reason
815 HackEventLogger.recheck_end
816 stats.duration
817 (List.length stats.per_batch_telemetry - 1)
818 stats.rechecked_count
819 stats.total_rechecked_count
820 (Option.some_if stats.any_full_checks telemetry);
821 Hh_logger.log
822 "RECHECK_END (recheck_id %s):\n%s"
823 recheck_id
824 (Telemetry.to_string telemetry);
825 (* we're only interested in full check data *)
826 CgroupProfiler.print_summary_memory_table ~event:(`Recheck "Full_check")
827 end;
829 let env =
830 match selected_client with
831 | ClientProvider.Select_persistent -> env
832 | ClientProvider.Select_nothing -> env
833 | ClientProvider.Select_new client ->
834 begin
836 (* client here is the new client (not the existing persistent client)
837 * whose request we're going to handle. *)
838 ClientProvider.track
839 client
840 ~key:Connection_tracker.Server_start_recheck
841 ~time:t_start_recheck;
842 ClientProvider.track
843 client
844 ~key:Connection_tracker.Server_done_recheck
845 ~time:t_done_recheck;
846 ClientProvider.track
847 client
848 ~key:Connection_tracker.Server_sent_diagnostics
849 ~time:t_sent_diagnostics;
850 let env =
851 handle_connection genv env client `Non_persistent
852 |> main_loop_command_handler `Non_persistent client
854 HackEventLogger.handled_connection t_start_recheck;
856 with exn ->
857 let e = Exception.wrap exn in
858 HackEventLogger.handle_connection_exception "outer" e;
859 Hh_logger.log
860 "HANDLE_CONNECTION_EXCEPTION(outer) [ignoring request] %s"
861 (Exception.to_string e);
865 let has_persistent_connection_request =
866 (* has_persistent_connection_request means that at the beginning of this
867 * iteration of the main loop there was a request to read and handle.
868 * We'll now try to do it, but it's possible that we have run a recheck
869 * in between those two events, and if this recheck was non-blocking, we
870 * might have already handled this command there. Proceeding to
871 * handle_connection would then block reading a request that is not there
872 * anymore, so we need to check and update has_persistent_connection_request
873 * again. *)
874 Option.value_map
875 env.persistent_client
876 ~f:ClientProvider.has_persistent_connection_request
877 ~default:false
879 let env =
880 if has_persistent_connection_request then (
881 let client = Utils.unsafe_opt env.persistent_client in
882 (* client here is the existing persistent client *)
883 (* whose request we're going to handle. *)
884 HackEventLogger.got_persistent_client_channels t_start_recheck;
886 let env =
887 handle_connection genv env client `Persistent
888 |> main_loop_command_handler `Persistent client
890 HackEventLogger.handled_persistent_connection t_start_recheck;
892 with exn ->
893 let e = Exception.wrap exn in
894 HackEventLogger.handle_persistent_connection_exception
895 "outer"
897 ~is_fatal:true;
898 Hh_logger.log
899 "HANDLE_PERSISTENT_CONNECTION_EXCEPTION(outer) [ignoring request] %s"
900 (Exception.to_string e);
902 ) else
905 let env =
906 match env.pending_command_needs_writes with
907 | Some f -> { (f env) with pending_command_needs_writes = None }
908 | None -> env
910 let env =
911 match env.persistent_client_pending_command_needs_full_check with
912 | Some (f, _reason) when is_full_check_done env.full_check_status ->
913 { (f env) with persistent_client_pending_command_needs_full_check = None }
914 | _ -> env
916 let env =
917 match env.nonpersistent_client_pending_command_needs_full_check with
918 | Some (f, _reason, _client) when is_full_check_done env.full_check_status
921 (f env) with
922 nonpersistent_client_pending_command_needs_full_check = None;
924 | _ -> env
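(* Interrupt handler, registered in setup_interrupts below for the notifier's
 * file descriptor: when Watchman reports changes during a typecheck, record
 * them in disk_needs_parsing and return MultiThreadedCall.Cancel so the
 * in-progress check is stopped; otherwise return Continue. *)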
928 let watchman_interrupt_handler genv env =
929 let start_time = Unix.gettimeofday () in
930 let (env, updates, updates_stale, _telemetry) =
931 query_notifier genv env `Async start_time
933 (* Async updates can always be stale, so we don't care *)
934 ignore updates_stale;
935 let size = Relative_path.Set.cardinal updates in
936 if size > 0 then (
937 Hh_logger.log "Interrupted by Watchman message: %d files changed" size;
939 env with
940 disk_needs_parsing =
941 Relative_path.Set.union env.disk_needs_parsing updates;
943 MultiThreadedCall.Cancel )
944 ) else
945 (env, MultiThreadedCall.Continue)
947 let priority_client_interrupt_handler genv client_provider env =
948 let t = Unix.gettimeofday () in
949 (* For non-persistent clients that don't synchronize file contents, users
950 * expect that a query they do immediately after saving a file will reflect
951 * that file's contents. Async notifications are not always fast enough to
952 * guarantee it, so we need an additional sync query before accepting such
953 * a client. *)
954 let (env, updates, _updates_stale, _telemetry) =
955 query_notifier genv env `Sync t
957 let size = Relative_path.Set.cardinal updates in
958 if size > 0 then (
959 Hh_logger.log "Interrupted by Watchman sync query: %d files changed" size;
961 env with
962 disk_needs_parsing =
963 Relative_path.Set.union env.disk_needs_parsing updates;
965 MultiThreadedCall.Cancel )
966 ) else
967 let idle_gc_slice = genv.local_config.ServerLocalConfig.idle_gc_slice in
968 let select_outcome =
969 if ServerRevisionTracker.is_hg_updating () then
970 ClientProvider.Select_nothing
971 else
972 ClientProvider.sleep_and_check
973 client_provider
974 env.persistent_client
975 ~ide_idle:env.ide_idle
976 ~idle_gc_slice
977 `Priority
979 let env =
980 match select_outcome with
981 | ClientProvider.Select_persistent ->
982 failwith "should only be looking at new priority clients"
983 | ClientProvider.Select_nothing ->
984 (* This is possible because client might have gone away during
985 * sleep_and_check. *)
987 | ClientProvider.Select_new client ->
988 (match handle_connection genv env client `Non_persistent with
989 | ServerUtils.Needs_full_recheck (_, _, reason) ->
990 failwith
991 ( "unexpected command needing full recheck in priority channel: "
992 ^ reason )
993 | ServerUtils.Needs_writes (_, _, _, reason) ->
994 failwith
995 ("unexpected command needing writes in priority channel: " ^ reason)
996 | ServerUtils.Done env -> env)
999 (* Global rechecks in response to file changes can be paused.
1000 Here, we check if the user requested global rechecks to be paused during
1001 the current recheck (the one that we're in the middle of). The above call
1002 to `handle_connection` could have resulted in this state change if
1003 the RPC was `PAUSE true`.
1005 If the state did change to `Paused` during the current recheck,
1006 we should cancel the current recheck.
1008 Note that `PAUSE false`, which resumes global rechecks in response to
1009 file changes, requires a full recheck by policy - see ServerCommand's
1010 `rpc_command_needs_full_check`. Commands that require a full recheck
1011 do not use `priority pipe`, so they don't end up handled here.
1012 Such commands don't interrupt MultiWorker calls, by design.
1014 The effect of `PAUSE true` during a recheck is that the recheck will be
1015 canceled, while the result of `PAUSE false` is that the client will wait
1016 for the recheck to be finished. *)
1017 let decision =
1018 match (env.full_recheck_on_file_changes, env.init_env.recheck_id) with
1019 | ( Paused { paused_recheck_id = Some paused_recheck_id; _ },
1020 Some recheck_id )
1021 when String.equal paused_recheck_id recheck_id ->
1022 MultiThreadedCall.Cancel
1023 | _ -> MultiThreadedCall.Continue
1025 (env, decision)
1027 let persistent_client_interrupt_handler genv env =
1028 match env.persistent_client with
1029 (* Several handlers can become ready simultaneously and one of them can remove
1030 * the persistent client before we get to it. *)
1031 | None -> (env, MultiThreadedCall.Continue)
1032 | Some client ->
1033 (match handle_connection genv env client `Persistent with
1034 | ServerUtils.Needs_full_recheck (env, f, reason) ->
1035 (* This should not be possible, because persistent client will not send
1036 * the next command before receiving results from the previous one. *)
1037 assert (
1038 Option.is_none env.persistent_client_pending_command_needs_full_check );
1040 env with
1041 persistent_client_pending_command_needs_full_check = Some (f, reason);
1043 MultiThreadedCall.Continue )
1044 | ServerUtils.Needs_writes (env, f, should_restart_recheck, _) ->
1045 let full_check_status =
1046 match env.full_check_status with
1047 | Full_check_started when not should_restart_recheck ->
1048 Full_check_needed
1049 | x -> x
1051 (* this should not be possible, because persistent client will not send
1052 * the next command before receiving results from the previous one *)
1053 assert (Option.is_none env.pending_command_needs_writes);
1054 ( { env with pending_command_needs_writes = Some f; full_check_status },
1055 MultiThreadedCall.Cancel )
1056 | ServerUtils.Done env -> (env, MultiThreadedCall.Continue))
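(* setup_interrupts installs env.interrupt_handlers: given the current
 * genv/env, it maps file descriptors (watchman reader, priority client pipe,
 * persistent client) to the interrupt handlers above, gated by the
 * interrupt_on_watchman / interrupt_on_client local-config flags and by
 * env.can_interrupt. *)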
1058 let setup_interrupts env client_provider =
1060 env with
1061 interrupt_handlers =
1062 (fun genv env ->
1063 let { ServerLocalConfig.interrupt_on_watchman; interrupt_on_client; _ }
1065 genv.local_config
1067 let interrupt_on_watchman =
1068 interrupt_on_watchman && env.can_interrupt
1070 let interrupt_on_client = interrupt_on_client && env.can_interrupt in
1071 let handlers =
1072 match genv.notifier_async_reader () with
1073 | Some reader when interrupt_on_watchman ->
1075 ( Buffered_line_reader.get_fd reader,
1076 watchman_interrupt_handler genv );
1078 | _ -> []
1080 let handlers =
1081 match ClientProvider.priority_fd client_provider with
1082 | Some fd when interrupt_on_client ->
1083 (fd, priority_client_interrupt_handler genv client_provider)
1084 :: handlers
1085 | _ -> handlers
1087 let handlers =
1088 match env.persistent_client >>= ClientProvider.get_client_fd with
1089 | Some fd when interrupt_on_client ->
1090 (fd, persistent_client_interrupt_handler genv) :: handlers
1091 | _ -> handlers
1093 handlers);
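(* serve is the main server loop: it optionally enables the IDE parser cache,
 * bans dependency-table reads by default, builds the client provider from the
 * monitor-supplied fds, finalizes init telemetry when no full init is needed,
 * installs the interrupt handlers, and then runs serve_one_iteration forever. *)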
1096 let serve genv env in_fds =
1097 if genv.local_config.ServerLocalConfig.ide_parser_cache then
1098 Ide_parser_cache.enable ();
1099 (* During the server's lifetime the dependency table can be out of date. Because of
1100 * that, we ban access to it by default, forcing code that tries to read it to
1101 * take this into account, either by explicitly enabling reads (and being fine
1102 * with stale results), or by declaring (in ServerCommand) that it requires a full
1103 * check to be completed before being executed. *)
1104 let (_ : bool) =
1105 Typing_deps.allow_dependency_table_reads env.deps_mode false
1107 let () = Errors.set_allow_errors_in_default_path false in
1108 MultiThreadedCall.on_exception (fun (e, stack) ->
1109 ServerUtils.exit_on_exception e ~stack);
1110 let client_provider = ClientProvider.provider_from_file_descriptors in_fds in
1112 (* This is needed when typecheck_after_init option is disabled.
1113 * We're just filling it with placeholder telemetry values since
1114 * we don't much care about this scenario. *)
1115 let init_telemetry =
1116 Telemetry.create ()
1117 |> Telemetry.string_
1118 ~key:"mode"
1119 ~value:"serve_due_to_disabled_typecheck_after_init"
1121 let typecheck_telemetry = Telemetry.create () in
1122 if Option.is_none env.init_env.why_needed_full_init then
1123 finalize_init env.init_env typecheck_telemetry init_telemetry;
1125 let env = setup_interrupts env client_provider in
1126 let env = ref env in
1127 while true do
1128 let new_env = serve_one_iteration genv !env client_provider in
1129 env := new_env
1130 done
1132 (* Rules for whether+how to load saved-state...
1133 * 1. If hh.conf lacks "use_mini_state = true", then don't load it.
1134 * 2. If hh_server --no-load, then don't load it.
1135 * 3. If hh_server --save-mini or -s, then save but don't load it.
1136 * 4. If "hh_server --with-mini-state", then load the one specified there!
1137 * 5. If hh.conf lacks "load_state_natively_v4", then don't load it
1138 6. Otherwise, load it normally!
1139 *)
1140 let resolve_init_approach genv : ServerInit.init_approach * string =
1141 let nonce = genv.local_config.ServerLocalConfig.remote_nonce in
1142 match
1143 ( genv.local_config.ServerLocalConfig.remote_worker_key,
1144 genv.local_config.ServerLocalConfig.remote_check_id )
1145 with
1146 | (Some worker_key, Some check_id) ->
1147 let remote_init = ServerInit.{ worker_key; nonce; check_id } in
1148 (ServerInit.Remote_init remote_init, "Server_args_remote_worker")
1149 | (Some worker_key, None) ->
1150 let check_id = Random_id.short_string () in
1151 let remote_init = ServerInit.{ worker_key; nonce; check_id } in
1152 (ServerInit.Remote_init remote_init, "Server_args_remote_worker")
1153 | (None, Some check_id) ->
1154 failwith
1155 (Printf.sprintf
1156 "Remote check ID is specified (%s), but the remote worker ID is not"
1157 check_id)
1158 | (None, None) ->
1160 Option.is_some (ServerArgs.save_naming_filename genv.options)
1161 && Option.is_none (ServerArgs.save_filename genv.options)
1162 then
1163 (ServerInit.Parse_only_init, "Server_args_saving_naming")
1164 else if not genv.local_config.ServerLocalConfig.use_saved_state then
1165 (ServerInit.Full_init, "Local_config_saved_state_disabled")
1166 else if ServerArgs.no_load genv.options then
1167 (ServerInit.Full_init, "Server_args_no_load")
1168 else if Option.is_some (ServerArgs.save_filename genv.options) then
1169 (ServerInit.Full_init, "Server_args_saving_state")
1170 else if Option.is_some (ServerArgs.write_symbol_info genv.options) then
1171 (ServerInit.Write_symbol_info, "Server_args_writing_symbol_info")
1172 else (
1173 match
1174 ( genv.local_config.ServerLocalConfig.load_state_natively,
1175 ServerArgs.with_saved_state genv.options )
1176 with
1177 | (_, Some (ServerArgs.Saved_state_target_info target)) ->
1178 ( ServerInit.Saved_state_init (ServerInit.Precomputed target),
1179 "Precomputed" )
1180 | (false, None) ->
1181 (ServerInit.Full_init, "No_native_loading_or_precomputed")
1182 | (true, None) ->
1183 (* Use native loading only if the config specifies a load script,
1184 * and the local config prefers native. *)
1185 ( ServerInit.Saved_state_init ServerInit.Load_state_natively,
1186 "Load_state_natively" )
1189 let program_init genv env =
1190 Hh_logger.log "Init id: %s" env.init_env.init_id;
1191 let env =
1193 env with
1194 init_env =
1195 { env.init_env with ci_info = Some (Ci_util.begin_get_info ()) };
1198 let (init_approach, approach_name) = resolve_init_approach genv in
1199 Hh_logger.log "Initing with approach: %s" approach_name;
1200 let (env, init_type, init_error, init_error_stack, state_distance) =
1201 let (env, init_result) = ServerInit.init ~init_approach genv env in
1202 match init_approach with
1203 | ServerInit.Remote_init _ -> (env, "remote", None, None, None)
1204 | ServerInit.Write_symbol_info
1205 | ServerInit.Full_init ->
1206 (env, "fresh", None, None, None)
1207 | ServerInit.Parse_only_init -> (env, "parse-only", None, None, None)
1208 | ServerInit.Saved_state_init _ ->
1209 begin
1210 match init_result with
1211 | ServerInit.Load_state_succeeded distance ->
1212 let init_type =
1213 match
1214 Naming_table.get_forward_naming_fallback_path env.naming_table
1215 with
1216 | None -> "state_load_blob"
1217 | Some _ -> "state_load_sqlite"
1219 (env, init_type, None, None, distance)
1220 | ServerInit.Load_state_failed (err, stack) ->
1221 (env, "state_load_failed", Some err, Some stack, None)
1222 | ServerInit.Load_state_declined reason ->
1223 (env, "state_load_declined", Some reason, None, None)
1226 let env =
1228 env with
1229 init_env =
1231 env.init_env with
1232 state_distance;
1233 approach_name;
1234 init_error;
1235 init_type;
1239 Hh_logger.log "Waiting for daemon(s) to be ready...";
1240 ServerProgress.send_progress "wrapping up init...";
1241 genv.wait_until_ready ();
1242 ServerStamp.touch_stamp ();
1243 let load_script_timeout =
1244 genv.local_config.ServerLocalConfig.load_state_script_timeout
1246 EventLogger.set_init_type init_type;
1247 let telemetry = ServerUtils.log_and_get_sharedmem_load_telemetry () in
1248 HackEventLogger.init_lazy_end
1249 telemetry
1250 ~load_script_timeout
1251 ~state_distance
1252 ~approach_name
1253 ~init_error
1254 ~init_error_stack
1255 ~init_type;
1258 let num_workers options local_config =
1259 (* The number of workers is set both in hh.conf and as an optional server argument.
1260 If the two numbers given as an argument and in hh.conf differ, we always take the minimum
1261 of the two.
1262 *)
1263 let max_procs_opt =
1264 Option.merge
1265 ~f:(fun a b ->
1266 if Int.equal a b then
1268 else (
1269 Hh_logger.log
1270 ( "Warning: both an argument --max-procs and a local config "
1271 ^^ "for max workers are given. Choosing minimum of the two." );
1272 min a b
1274 (ServerArgs.max_procs options)
1275 local_config.ServerLocalConfig.max_workers
1277 let nbr_procs = Sys_utils.nbr_procs in
1278 match max_procs_opt with
1279 | None -> nbr_procs
1280 | Some max_procs ->
1281 if max_procs <= nbr_procs then
1282 max_procs
1283 else (
1284 Hh_logger.log
1285 "Warning: max workers is higher than the number of processors. Ignoring.";
1286 nbr_procs
1289 (* The hardware we are running on is Intel Skylake and Haswell family
1290 processors with 80, 56, or 48 cores. It turns out that only half of these
1291 are actual CPUs; the rest are hyperthreads. Using worker processes for
1292 hyperthreads is slower than using just the number of actual computation
1293 cores. *)
1294 let modify_worker_count hack_worker_count =
1295 let n_procs = Sys_utils.nbr_procs in
1296 let workers =
1297 if hack_worker_count < n_procs then
1298 (* Already limited, use what we have *)
1299 hack_worker_count
1300 else
1301 (* Use half. *)
1302 max 1 (n_procs / 2)
1304 workers
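(* Illustrative arithmetic only (core counts assumed, not taken from this
 * file): with n_procs = 80 logical cores, a requested worker count of 1000 is
 * capped to 80 / 2 = 40, while an already-limited request of 16 stays 16. *)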
1306 let setup_server ~informant_managed ~monitor_pid options config local_config =
1307 let num_workers = num_workers options local_config |> modify_worker_count in
1308 let handle =
1309 SharedMem.init ~num_workers (ServerConfig.sharedmem_config config)
1311 let init_id = Random_id.short_string () in
1312 let pid = Unix.getpid () in
1314 (* There are three files which are used for IPC.
1315 1. server_finale_file - we unlink it now upon startup,
1316 and upon clean exit we'll write finale data to it.
1317 2. server_receipt_to_monitor_file - we unlink it now upon startup,
1318 and upon clean exit we'll unlink it.
1319 3. server_progress_file - we write "starting up" to it now upon startup,
1320 and upon clean exit we'll write "shutting down" to it.
1321 In both cases, clean exit and abrupt exit, there will be leftover files.
1322 We'll rely upon tmpclean to eventually clean them up. *)
1323 let server_finale_file = ServerFiles.server_finale_file pid in
1324 let server_progress_file = ServerFiles.server_progress_file pid in
1325 let server_receipt_to_monitor_file =
1326 ServerFiles.server_receipt_to_monitor_file pid
1328 (try Unix.unlink server_finale_file with _ -> ());
1329 (try Unix.unlink server_receipt_to_monitor_file with _ -> ());
1330 ServerCommandTypesUtils.write_progress_file
1331 ~server_progress_file
1332 ~server_progress:
1334 ServerCommandTypes.server_warning = None;
1335 server_progress = "starting up";
1336 server_timestamp = Unix.gettimeofday ();
1338 Exit.add_hook_upon_clean_exit (fun finale_data ->
1339 begin
1340 try Unix.unlink server_receipt_to_monitor_file with _ -> ()
1341 end;
1342 begin
1344 Sys_utils.with_umask 0o000 (fun () ->
1345 let oc = Stdlib.open_out_bin server_finale_file in
1346 Marshal.to_channel oc finale_data [];
1347 Stdlib.close_out oc)
1348 with _ -> ()
1349 end;
1350 begin
1352 ServerCommandTypesUtils.write_progress_file
1353 ~server_progress_file
1354 ~server_progress:
1356 ServerCommandTypes.server_warning = None;
1357 server_progress = "shutting down";
1358 server_timestamp = Unix.gettimeofday ();
1360 with _ -> ()
1361 end;
1362 ());
1364 Hh_logger.log "Version: %s" Hh_version.version;
1365 Hh_logger.log "Hostname: %s" (Unix.gethostname ());
1366 let root = ServerArgs.root options in
1367 ServerDynamicView.toggle := ServerArgs.dynamic_view options;
1369 let deps_mode =
1370 match ServerArgs.save_64bit options with
1371 | Some new_edges_dir ->
1372 Typing_deps_mode.SaveCustomMode { graph = None; new_edges_dir }
1373 | None -> Typing_deps_mode.SQLiteMode
1376 (* The OCaml default is 500, but we care about minimizing the memory
1377 * overhead *)
1378 let gc_control = Caml.Gc.get () in
1379 Caml.Gc.set { gc_control with Caml.Gc.max_overhead = 200 };
1380 let { ServerLocalConfig.cpu_priority; io_priority; enable_on_nfs; _ } =
1381 local_config
1383 let hhconfig_version =
1384 config |> ServerConfig.version |> Config_file.version_to_string_opt
1386 List.iter (ServerConfig.ignored_paths config) ~f:FilesToIgnore.ignore_path;
1387 let logging_init init_id ~is_worker =
1388 let profile_owner = local_config.ServerLocalConfig.profile_owner in
1389 let profile_desc = local_config.ServerLocalConfig.profile_desc in
1390 Hh_logger.Level.set_min_level local_config.ServerLocalConfig.min_log_level;
1391 Hh_logger.Level.set_categories local_config.ServerLocalConfig.log_categories;
1393 if Sys_utils.is_test_mode () then
1394 EventLogger.init_fake ()
1395 else if is_worker then
1396 HackEventLogger.init_worker
1397 ~root
1398 ~hhconfig_version
1399 ~init_id
1400 ~custom_columns:(ServerArgs.custom_telemetry_data options)
1401 ~rollout_flags:(ServerLocalConfig.to_rollout_flags local_config)
1402 ~time:(Unix.gettimeofday ())
1403 ~profile_owner
1404 ~profile_desc
1405 else
1406 HackEventLogger.init
1407 ~root
1408 ~hhconfig_version
1409 ~init_id
1410 ~custom_columns:(ServerArgs.custom_telemetry_data options)
1411 ~informant_managed
1412 ~rollout_flags:(ServerLocalConfig.to_rollout_flags local_config)
1413 ~time:(Unix.gettimeofday ())
1414 ~max_workers:num_workers
1415 ~profile_owner
1416 ~profile_desc
1418 logging_init init_id ~is_worker:false;
1419 HackEventLogger.init_start
1420 ~experiments_config_meta:
1421 local_config.ServerLocalConfig.experiments_config_meta;
1422 let root_s = Path.to_string root in
1423 let check_mode = ServerArgs.check_mode options in
1424 if (not check_mode) && Sys_utils.is_nfs root_s && not enable_on_nfs then (
1425 Hh_logger.log "Refusing to run on %s: root is on NFS!" root_s;
1426 HackEventLogger.nfs_root ();
1427 Exit.exit Exit_status.Nfs_root
1431 ServerConfig.warn_on_non_opt_build config && not Build_id.is_build_optimized
1432 then begin
1433 let msg =
1434 Printf.sprintf
1435 "hh_server binary was built in \"%s\" mode, "
1436 Build_id.build_mode
1437 ^ "is running with Rust version of parser enabled, "
1438 ^ "and this repository's .hhconfig specifies warn_on_non_opt_build option. "
1439 ^ "Parsing with non-opt build will take significantly longer"
1441 if ServerArgs.allow_non_opt_build options then
1442 Hh_logger.log
1443 "Warning: %s. Initializing anyway due to --allow-non-opt-build option."
1445 else
1446 let msg =
1447 Printf.sprintf
1448 "Error: %s. Recompile the server in opt or dbgo mode, or pass --allow-non-opt-build to continue anyway."
1451 Hh_logger.log "%s" msg;
1452 Exit.exit ~msg Exit_status.Server_non_opt_build_mode
1453 end;
1455 Program.preinit ();
1456 Sys_utils.set_priorities ~cpu_priority ~io_priority;
1458 (* This is to transform SIGPIPE into an exception. A SIGPIPE can happen when
1459 * someone Ctrl-C's the client.
1460 *)
1461 Sys_utils.set_signal Sys.sigpipe Sys.Signal_ignore;
1462 PidLog.init (ServerFiles.pids_file root);
1463 Option.iter monitor_pid ~f:(fun monitor_pid ->
1464 PidLog.log ~reason:"monitor" monitor_pid);
1465 PidLog.log ~reason:"main" (Unix.getpid ());
1467 (* Make a sub-init_id because we use it to name temporary files for piping to
1468 scuba logging processes. *)
1469 let worker_logging_init () =
1470 logging_init (init_id ^ "." ^ Random_id.short_string ()) ~is_worker:true
1472 let workers =
1473 let gc_control = ServerConfig.gc_control config in
1474 ServerWorker.make
1475 ~longlived_workers:local_config.ServerLocalConfig.longlived_workers
1476 ~nbr_procs:num_workers
1477 gc_control
1478 handle
1479 ~logging_init:worker_logging_init
1481 let genv = ServerEnvBuild.make_genv options config local_config workers in
1482 (genv, ServerEnvBuild.make_env genv.config ~init_id ~deps_mode)
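(* run_once is the check-mode path: set up the server, run program_init to do
 * the typecheck, optionally save state and/or the naming table, then either
 * print concatenated sources (when ServerArgs.concatenate_prefix is set) or
 * print the error list and exit via Program.run_once_and_exit. *)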
1484 let run_once options config local_config =
1485 let (genv, env) =
1486 setup_server
1487 options
1488 config
1489 local_config
1490 ~informant_managed:false
1491 ~monitor_pid:None
1493 if not (ServerArgs.check_mode genv.options) then (
1494 Hh_logger.log "ServerMain run_once only supported in check mode.";
1495 Exit.exit Exit_status.Input_error
1498 (* The type-checking happens here *)
1499 let env = program_init genv env in
1500 (* All of saving state happens here *)
1501 let (env, save_state_results) =
1502 match
1503 ( ServerArgs.save_filename genv.options,
1504 ServerArgs.save_with_spec genv.options )
1505 with
1506 | (None, None) -> (env, None)
1507 | (Some filename, None) -> (env, ServerInit.save_state genv env filename)
1508 | (None, Some (spec : ServerArgs.save_state_spec_info)) ->
1509 (env, ServerInit.save_state genv env spec.ServerArgs.filename)
1510 | (Some _, Some _) ->
1511 failwith "Saved state file name is specified in two different ways!"
1513 let _naming_table_rows_changed =
1514 match ServerArgs.save_naming_filename genv.options with
1515 | None -> None
1516 | Some filename ->
1517 Disk.mkdir_p (Filename.dirname filename);
1518 let save_result = Naming_table.save env.naming_table filename in
1519 Hh_logger.log
1520 "Inserted symbols into the naming table:\n%s"
1521 (Naming_sqlite.show_save_result save_result);
1522 if List.length save_result.Naming_sqlite.errors > 0 then begin
1523 Sys_utils.rm_dir_tree filename;
1524 failwith "Naming table state had errors - deleting output file!"
1525 end else
1526 Some save_result
1528 (* Finish up by generating the output and the exit code *)
1529 match ServerArgs.concatenate_prefix genv.options with
1530 | Some prefix ->
1531 let prefix =
1532 Relative_path.from_root ~suffix:prefix |> Relative_path.to_absolute
1534 let text = ServerConcatenateAll.go genv env [prefix] in
1535 print_endline text;
1536 Exit.exit Exit_status.No_error
1537 | _ ->
1538 Hh_logger.log "Running in check mode";
1539 Program.run_once_and_exit genv env save_state_results
1541 (*
1542 * The server monitor will pass client connections to this process
1543 * via ic.
1544 *)
1545 let daemon_main_exn ~informant_managed options monitor_pid in_fds =
1546 Printexc.record_backtrace true;
1547 let (config, local_config) =
1548 ServerConfig.(load ~silent:false filename options)
1550 let (genv, env) =
1551 setup_server
1552 options
1553 config
1554 local_config
1555 ~informant_managed
1556 ~monitor_pid:(Some monitor_pid)
1558 if ServerArgs.check_mode genv.options then (
1559 Hh_logger.log "Invalid program args - can't run daemon in check mode.";
1560 Exit.exit Exit_status.Input_error
1562 HackEventLogger.with_id ~stage:`Init env.init_env.init_id @@ fun () ->
1563 let env = MainInit.go genv options (fun () -> program_init genv env) in
1564 CgroupProfiler.print_summary_memory_table ~event:`Init;
1565 serve genv env in_fds
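(* daemon_main is the entry point registered below as "ServerMain.daemon_main":
 * it marks the extra fds close-on-exec, restores global state received from
 * the monitor, re-extracts the hhi files, and then runs daemon_main_exn under
 * ServerUtils.with_exit_on_exception. *)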
1567 let daemon_main
1568 ( informant_managed,
1569 state,
1570 options,
1571 monitor_pid,
1572 priority_in_fd,
1573 force_dormant_start_only_in_fd )
1574 (default_ic, _default_oc) =
1575 (* Avoid leaking this fd further *)
1576 let () = Unix.set_close_on_exec priority_in_fd in
1577 let () = Unix.set_close_on_exec force_dormant_start_only_in_fd in
1578 let default_in_fd = Daemon.descr_of_in_channel default_ic in
1580 (* Restore the root directory and other global states from monitor *)
1581 ServerGlobalState.restore state ~worker_id:0;
1583 (* Restore hhi files every time the server restarts
1584 in case the tmp folder changes *)
1585 ignore (Hhi.get_hhi_root ());
1587 ServerUtils.with_exit_on_exception @@ fun () ->
1588 daemon_main_exn
1589 ~informant_managed
1590 options
1591 monitor_pid
1592 (default_in_fd, priority_in_fd, force_dormant_start_only_in_fd)
1594 let entry = Daemon.register_entry_point "ServerMain.daemon_main" daemon_main