[chromium-blink-merge.git] / base / message_pump_glib.cc
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_pump_glib.h"

#include <fcntl.h>
#include <math.h>

#include <glib.h>

#include "base/eintr_wrapper.h"
#include "base/logging.h"
#include "base/threading/platform_thread.h"

namespace {

// We send a byte across a pipe to wake up the event loop.
const char kWorkScheduled = '\0';

// Return a timeout suitable for the glib loop, -1 to block forever,
// 0 to return right away, or a timeout in milliseconds from now.
int GetTimeIntervalMilliseconds(const base::TimeTicks& from) {
  if (from.is_null())
    return -1;

  // Be careful here. TimeDelta has a precision of microseconds, but we want a
  // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
  // 6? It should be 6 to avoid executing delayed work too early.
  int delay = static_cast<int>(
      ceil((from - base::TimeTicks::Now()).InMillisecondsF()));

  // If this value is negative, then we need to run delayed work soon.
  return delay < 0 ? 0 : delay;
}

// A brief refresher on GLib:
// GLib sources have four callbacks: Prepare, Check, Dispatch and Finalize.
// On each iteration of the GLib pump, it calls each source's Prepare function.
// This function should return TRUE if it wants GLib to call its Dispatch, and
// FALSE otherwise. It can also set a timeout in this case for the next time
// Prepare should be called again (it may be called sooner).
// After the Prepare calls, GLib does a poll to check for events from the
// system. File descriptors can be attached to the sources. The poll may block
// if none of the Prepare calls returned TRUE. It will block indefinitely, or
// for at most the minimum timeout returned by a source in Prepare.
// After the poll, GLib calls Check for each source that returned FALSE
// from Prepare. The return value of Check has the same meaning as for Prepare,
// making Check a second chance to tell GLib we are ready for Dispatch.
// Finally, GLib calls Dispatch for each source that is ready. If Dispatch
// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
// (i.e., you can call Run from them), but Prepare and Check cannot.
// Finalize is called when the source is destroyed.
// NOTE: It is common for subsystems to want to process pending events while
// doing intensive work, for example the flash plugin. They usually use the
// following pattern (recommended by the GTK docs):
// while (gtk_events_pending()) {
//   gtk_main_iteration();
// }
//
// gtk_events_pending just calls g_main_context_pending, which does the
// following:
// - Call prepare on all the sources.
// - Do the poll with a timeout of 0 (not blocking).
// - Call check on all the sources.
// - *Does not* call dispatch on the sources.
// - Return true if any of prepare() or check() returned true.
//
// gtk_main_iteration just calls g_main_context_iteration, which does the whole
// thing, respecting the timeout for the poll (and may block, although it is
// expected not to if gtk_events_pending returned true), and calls dispatch.
//
// Thus it is important to only return true from prepare or check if we
// actually have events or work to do. We also need to make sure we keep
// internal state consistent so that if prepare/check return true when called
// from gtk_events_pending, they will still return true when called right
// after, from gtk_main_iteration.
//
// For the GLib pump we try to follow the Windows UI pump model:
// - Whenever we receive a wakeup event or the timer for delayed work expires,
//   we run DoWork and/or DoDelayedWork. That part will also run in the other
//   event pumps.
// - We also run DoWork, DoDelayedWork, and possibly DoIdleWork in the main
//   loop, around event handling.
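//
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a minimal custom
// GSource with no MessagePump logic, to make the Prepare/Check/Dispatch
// contract described above concrete. The names IdleSourcePrepare,
// kIdleSourceFuncs and RunIdleSourceExample are hypothetical; only the GLib
// calls themselves are real API. Kept inside "#if 0" so it is clearly not
// compiled as part of this file.
#if 0
// Ask GLib to poll for at most 100 ms; returning FALSE means "not ready yet,
// let the poll run".
gboolean IdleSourcePrepare(GSource* source, gint* timeout_ms) {
  *timeout_ms = 100;
  return FALSE;
}

// After the poll, claim we are ready so Dispatch gets called.
gboolean IdleSourceCheck(GSource* source) {
  return TRUE;
}

// Do the work; returning TRUE keeps the source registered.
gboolean IdleSourceDispatch(GSource* source, GSourceFunc, gpointer) {
  g_print("dispatched\n");
  return TRUE;
}

GSourceFuncs kIdleSourceFuncs = {
  IdleSourcePrepare,
  IdleSourceCheck,
  IdleSourceDispatch,
  NULL
};

void RunIdleSourceExample() {
  GMainLoop* loop = g_main_loop_new(NULL, FALSE);
  GSource* source = g_source_new(&kIdleSourceFuncs, sizeof(GSource));
  g_source_attach(source, NULL);  // NULL attaches to the default context.
  g_main_loop_run(loop);          // Dispatch fires roughly every 100 ms.
}
#endif
// ---------------------------------------------------------------------------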

struct WorkSource : public GSource {
  base::MessagePumpGlib* pump;
};

gboolean WorkSourcePrepare(GSource* source,
                           gint* timeout_ms) {
  *timeout_ms = static_cast<WorkSource*>(source)->pump->HandlePrepare();
  // We always return FALSE, so that our timeout is honored. If we were
  // to return TRUE, the timeout would be considered to be 0 and the poll
  // would never block. Once the poll is finished, Check will be called.
  return FALSE;
}

gboolean WorkSourceCheck(GSource* source) {
  // Only return TRUE if Dispatch should be called.
  return static_cast<WorkSource*>(source)->pump->HandleCheck();
}

gboolean WorkSourceDispatch(GSource* source,
                            GSourceFunc unused_func,
                            gpointer unused_data) {
  static_cast<WorkSource*>(source)->pump->HandleDispatch();
  // Always return TRUE so our source stays registered.
  return TRUE;
}

// I wish these could be const, but g_source_new wants non-const.
GSourceFuncs WorkSourceFuncs = {
  WorkSourcePrepare,
  WorkSourceCheck,
  WorkSourceDispatch,
  NULL
};

}  // namespace

namespace base {

struct MessagePumpGlib::RunState {
  Delegate* delegate;
  MessagePumpDispatcher* dispatcher;

  // Used to flag that the current Run() invocation should return ASAP.
  bool should_quit;

  // Used to count how many Run() invocations are on the stack.
  int run_depth;

  // This keeps the state of whether the pump got signaled that there was new
  // work to be done. Since we eat the message on the wake up pipe as soon as
  // we get it, we keep that state here to stay consistent.
  bool has_work;
};

MessagePumpGlib::MessagePumpGlib()
    : state_(NULL),
      context_(g_main_context_default()),
      wakeup_gpollfd_(new GPollFD) {
  // Create our wakeup pipe, which is used to flag when work was scheduled.
  int fds[2];
  int ret = pipe(fds);
  DCHECK_EQ(ret, 0);
  (void)ret;  // Prevent warning in release mode.

  wakeup_pipe_read_ = fds[0];
  wakeup_pipe_write_ = fds[1];
  wakeup_gpollfd_->fd = wakeup_pipe_read_;
  wakeup_gpollfd_->events = G_IO_IN;

  work_source_ = g_source_new(&WorkSourceFuncs, sizeof(WorkSource));
  static_cast<WorkSource*>(work_source_)->pump = this;
  g_source_add_poll(work_source_, wakeup_gpollfd_.get());
  // Use a low priority so that we let other events in the queue go first.
  g_source_set_priority(work_source_, G_PRIORITY_DEFAULT_IDLE);
  // This is needed to allow Run calls inside Dispatch.
  g_source_set_can_recurse(work_source_, TRUE);
  g_source_attach(work_source_, context_);
}

MessagePumpGlib::~MessagePumpGlib() {
  g_source_destroy(work_source_);
  g_source_unref(work_source_);
  close(wakeup_pipe_read_);
  close(wakeup_pipe_write_);
}

void MessagePumpGlib::RunWithDispatcher(Delegate* delegate,
                                        MessagePumpDispatcher* dispatcher) {
#ifndef NDEBUG
  // Make sure we only run this on one thread. X/GTK only has one message pump
  // so we can only have one UI loop per process.
  static base::PlatformThreadId thread_id = base::PlatformThread::CurrentId();
  DCHECK(thread_id == base::PlatformThread::CurrentId()) <<
      "Running MessagePumpGlib on two different threads; "
      "this is unsupported by GLib!";
#endif

  RunState state;
  state.delegate = delegate;
  state.dispatcher = dispatcher;
  state.should_quit = false;
  state.run_depth = state_ ? state_->run_depth + 1 : 1;
  state.has_work = false;

  RunState* previous_state = state_;
  state_ = &state;

  // We really only do a single task for each iteration of the loop. If we
  // have done something, assume there is likely something more to do. This
  // will mean that we don't block on the message pump until there was nothing
  // more to do. We also set this to true to make sure not to block on the
  // first iteration of the loop, so RunAllPending() works correctly.
  bool more_work_is_plausible = true;

  // We run our own loop instead of using g_main_loop_quit in one of the
  // callbacks. This is so we only quit our own loops, and we don't quit
  // nested loops run by others. TODO(deanm): Is this what we want?
  for (;;) {
    // Don't block if we think we have more work to do.
    bool block = !more_work_is_plausible;

    more_work_is_plausible = g_main_context_iteration(context_, block);
    if (state_->should_quit)
      break;

    more_work_is_plausible |= state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;
  }

  state_ = previous_state;
}

// Return the timeout we want passed to poll.
int MessagePumpGlib::HandlePrepare() {
  // We know we have work, but we haven't called HandleDispatch yet. Don't let
  // the pump block so that we can do some processing.
  if (state_ &&  // state_ may be null during tests.
      state_->has_work)
    return 0;

  // We don't think we have work to do, but make sure not to block
  // longer than the next time we need to run delayed work.
  return GetTimeIntervalMilliseconds(delayed_work_time_);
}

bool MessagePumpGlib::HandleCheck() {
  if (!state_)  // state_ may be null during tests.
    return false;

  // We should only ever have a single message on the wakeup pipe, since we
  // are only signaled when the queue went from empty to non-empty. The glib
  // poll will tell us whether there was data, so this read shouldn't block.
  if (wakeup_gpollfd_->revents & G_IO_IN) {
    char msg;
    if (HANDLE_EINTR(read(wakeup_pipe_read_, &msg, 1)) != 1 || msg != '!') {
      NOTREACHED() << "Error reading from the wakeup pipe.";
    }
    // Since we ate the message, we need to record that we have more work,
    // because HandleCheck() may be called without HandleDispatch being called
    // afterwards.
    state_->has_work = true;
  }

  if (state_->has_work)
    return true;

  if (GetTimeIntervalMilliseconds(delayed_work_time_) == 0) {
    // The timer has expired. That condition will stay true until we process
    // that delayed work, so we don't need to record this differently.
    return true;
  }

  return false;
}

void MessagePumpGlib::HandleDispatch() {
  state_->has_work = false;
  if (state_->delegate->DoWork()) {
    // NOTE: on Windows at this point we would call ScheduleWork (see
    // MessagePumpForUI::HandleWorkMessage in message_pump_win.cc). But here,
    // instead of posting a message on the wakeup pipe, we can avoid the
    // syscalls and just signal that we have more work.
    state_->has_work = true;
  }

  if (state_->should_quit)
    return;

  state_->delegate->DoDelayedWork(&delayed_work_time_);
}

void MessagePumpGlib::AddObserver(MessagePumpObserver* observer) {
  observers_.AddObserver(observer);
}

void MessagePumpGlib::RemoveObserver(MessagePumpObserver* observer) {
  observers_.RemoveObserver(observer);
}

void MessagePumpGlib::Run(Delegate* delegate) {
  RunWithDispatcher(delegate, NULL);
}

void MessagePumpGlib::Quit() {
  if (state_) {
    state_->should_quit = true;
  } else {
    NOTREACHED() << "Quit called outside Run!";
  }
}

void MessagePumpGlib::ScheduleWork() {
  // This can be called on any thread, so we don't want to touch any state
  // variables as we would then need locks all over. This ensures that if
  // we are sleeping in a poll, we will wake up.
  char msg = '!';
  if (HANDLE_EINTR(write(wakeup_pipe_write_, &msg, 1)) != 1) {
    NOTREACHED() << "Could not write to the UI message loop wakeup pipe!";
  }
}

void MessagePumpGlib::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  // We need to wake up the loop in case the poll timeout needs to be
  // adjusted. This will cause us to try to do work, but that's ok.
  delayed_work_time_ = delayed_work_time;
  ScheduleWork();
}

MessagePumpDispatcher* MessagePumpGlib::GetDispatcher() {
  return state_ ? state_->dispatcher : NULL;
}

}  // namespace base
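
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file). In Chromium the
// pump is normally owned and driven by MessageLoop; a hypothetical
// stand-alone delegate could look roughly like this. QuitOnceDelegate and
// RunPumpOnce are made-up names; the Delegate interface is the one declared
// on base::MessagePump, and this assumes the reference-counted MessagePump of
// this era. Kept inside "#if 0" so it is clearly not compiled as part of this
// file.
#if 0
class QuitOnceDelegate : public base::MessagePump::Delegate {
 public:
  explicit QuitOnceDelegate(base::MessagePumpGlib* pump) : pump_(pump) {}

  // Do one unit of work, then ask the pump to return from Run().
  virtual bool DoWork() {
    pump_->Quit();
    return false;  // No more immediate work.
  }

  virtual bool DoDelayedWork(base::TimeTicks* next_delayed_work_time) {
    return false;  // This sketch schedules no delayed work.
  }

  virtual bool DoIdleWork() {
    return false;  // Nothing to do when idle.
  }

 private:
  base::MessagePumpGlib* pump_;
};

void RunPumpOnce() {
  scoped_refptr<base::MessagePumpGlib> pump(new base::MessagePumpGlib);
  QuitOnceDelegate delegate(pump.get());
  pump->ScheduleWork();  // Make sure the first iteration has work to do.
  pump->Run(&delegate);  // Returns once DoWork() has called Quit().
}
#endif
// ---------------------------------------------------------------------------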