// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_pump_win.h"

#include <math.h>

#include "base/debug/trace_event.h"
#include "base/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/process_util.h"
#include "base/win/wrapped_window_proc.h"

namespace {

enum MessageLoopProblems {
  MESSAGE_POST_ERROR,
  COMPLETION_POST_ERROR,
  SET_TIMER_ERROR,
  MESSAGE_LOOP_PROBLEM_MAX,
};

}  // namespace

namespace base {

static const wchar_t kWndClass[] = L"Chrome_MessagePumpWindow";

// Message sent to get an additional time slice for pumping (processing)
// another task (a series of such messages creates a continuous task pump).
static const int kMsgHaveWork = WM_USER + 1;
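
// The round trip for kMsgHaveWork, as implemented below:
//   ScheduleWork()      -> PostMessage(message_hwnd_, kMsgHaveWork, this, 0)
//   WndProcThunk()      -> HandleWorkMessage()
//   HandleWorkMessage() -> ProcessPumpReplacementMessage() + delegate DoWork(),
//                          then ScheduleWork() again if more work remains.
// At most one kMsgHaveWork is in flight at a time; the have_work_ flag guards
// against posting duplicates.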

//-----------------------------------------------------------------------------
// MessagePumpWin public:

void MessagePumpWin::AddObserver(MessagePumpObserver* observer) {
  observers_.AddObserver(observer);
}

void MessagePumpWin::RemoveObserver(MessagePumpObserver* observer) {
  observers_.RemoveObserver(observer);
}

void MessagePumpWin::WillProcessMessage(const MSG& msg) {
  FOR_EACH_OBSERVER(MessagePumpObserver, observers_, WillProcessEvent(msg));
}

void MessagePumpWin::DidProcessMessage(const MSG& msg) {
  FOR_EACH_OBSERVER(MessagePumpObserver, observers_, DidProcessEvent(msg));
}

void MessagePumpWin::RunWithDispatcher(
    Delegate* delegate, MessagePumpDispatcher* dispatcher) {
  RunState s;
  s.delegate = delegate;
  s.dispatcher = dispatcher;
  s.should_quit = false;
  s.run_depth = state_ ? state_->run_depth + 1 : 1;

  RunState* previous_state = state_;
  state_ = &s;

  DoRunLoop();

  state_ = previous_state;
}

void MessagePumpWin::Quit() {
  DCHECK(state_);
  state_->should_quit = true;
}

//-----------------------------------------------------------------------------
// MessagePumpWin protected:

int MessagePumpWin::GetCurrentDelay() const {
  if (delayed_work_time_.is_null())
    return -1;

  // Be careful here.  TimeDelta has a precision of microseconds, but we want a
  // value in milliseconds.  If there are 5.5ms left, should the delay be 5 or
  // 6?  It should be 6 to avoid executing delayed work too early.
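  // For example, 5.5 ms remaining rounds up to a 6 ms delay, so the work runs
  // slightly late rather than early; a deadline that has already passed gives
  // a negative value, which is clamped to 0 below so the work runs right away.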
  double timeout =
      ceil((delayed_work_time_ - TimeTicks::Now()).InMillisecondsF());

  // If this value is negative, then we need to run delayed work soon.
  int delay = static_cast<int>(timeout);
  if (delay < 0)
    delay = 0;

  return delay;
}

//-----------------------------------------------------------------------------
// MessagePumpForUI public:

MessagePumpForUI::MessagePumpForUI() : instance_(NULL) {
  InitMessageWnd();
}

MessagePumpForUI::~MessagePumpForUI() {
  DestroyWindow(message_hwnd_);
  UnregisterClass(kWndClass, instance_);
}

void MessagePumpForUI::ScheduleWork() {
  if (InterlockedExchange(&have_work_, 1))
    return;  // Someone else continued the pumping.

  // Make sure the MessagePump does some work for us.
  BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
                         reinterpret_cast<WPARAM>(this), 0);
  if (ret)
    return;  // There was room in the Window Message queue.

  // We have failed to insert a have-work message, so there is a chance that we
  // will starve tasks/timers while sitting in a nested message loop.  Nested
  // loops only look at Windows Message queues, and don't look at *our* task
  // queues, etc., so we might not get a time slice while nested.  :-(
  // We could abort here, but the fear is that this failure mode is plausibly
  // common (the queue is full, at about 2000 messages), so we'll do a
  // near-graceful recovery.  Nested loops are pretty transient (we think), so
  // this will probably be recoverable.
  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't really insert.
  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
                            MESSAGE_LOOP_PROBLEM_MAX);
}
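
// Typical caller (sketch, not part of this file): posting a task from any
// thread through the MessageLoop interface declared in base/message_loop.h
// eventually funnels into ScheduleWork() on this pump, e.g.
//
//   loop->PostTask(FROM_HERE, base::Bind(&DoSomething));
//
// so the body above must be safe to call concurrently with the run loop;
// InterlockedExchange() makes the have_work_ handoff atomic.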

void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  //
  // We would *like* to provide high resolution timers.  Windows timers using
  // SetTimer() have a 10ms granularity.  We have to use WM_TIMER as a wakeup
  // mechanism because the application can enter modal windows loops where it
  // is not running our MessageLoop; the only way to have our timers fire in
  // these cases is to post messages there.
  //
  // To provide sub-10ms timers, we process timers directly from our run loop.
  // For the common case, timers will be processed there as the run loop does
  // its normal work.  However, we *also* set the system timer so that WM_TIMER
  // events fire.  This mops up the case of timers not being able to work in
  // modal message loops.  It is possible for the SetTimer to pop and have no
  // pending timers, because they could have already been processed by the
  // run loop itself.
  //
  // We use a single SetTimer corresponding to the timer that will expire
  // soonest.  As new timers are created and destroyed, we update SetTimer.
  // Getting a spurious SetTimer event firing is benign, as we'll just be
  // processing an empty timer queue.
  //
  delayed_work_time_ = delayed_work_time;

  int delay_msec = GetCurrentDelay();
  DCHECK_GE(delay_msec, 0);
  if (delay_msec < USER_TIMER_MINIMUM)
    delay_msec = USER_TIMER_MINIMUM;
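  // (USER_TIMER_MINIMUM is 10 ms in the Windows SDK, so e.g. a 3 ms request
  // still arms a 10 ms WM_TIMER; the sub-10ms precision comes from DoRunLoop()
  // calling DoDelayedWork() directly, as described above.)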

  // Create a WM_TIMER event that will wake us up to check for any pending
  // timers (in case we are running within a nested, external sub-pump).
  BOOL ret = SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
                      delay_msec, NULL);
  if (ret)
    return;
  // If we can't set timers, we are in big trouble... but cross our fingers
  // for now.
  // TODO(jar): If we don't see this error, use a CHECK() here instead.
  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", SET_TIMER_ERROR,
                            MESSAGE_LOOP_PROBLEM_MAX);
}

void MessagePumpForUI::PumpOutPendingPaintMessages() {
  // If we are being called outside of the context of Run, then don't try to do
  // any work.
  if (!state_)
    return;

  // Create a mini-message-pump to force immediate processing of only Windows
  // WM_PAINT messages.  Don't provide an infinite loop, but do enough peeking
  // to get the job done.  Actual common max is 4 peeks, but we'll be a little
  // safe here.
  const int kMaxPeekCount = 20;
  int peek_count;
  for (peek_count = 0; peek_count < kMaxPeekCount; ++peek_count) {
    MSG msg;
    if (!PeekMessage(&msg, NULL, 0, 0, PM_REMOVE | PM_QS_PAINT))
      break;
    ProcessMessageHelper(msg);
    if (state_->should_quit)  // Handle WM_QUIT.
      break;
  }
  // Histogram what was really being used, to help to adjust kMaxPeekCount.
  DHISTOGRAM_COUNTS("Loop.PumpOutPendingPaintMessages Peeks", peek_count);
}

//-----------------------------------------------------------------------------
// MessagePumpForUI private:

// static
LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
    HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
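  // In both cases below, wparam carries the MessagePumpForUI pointer:
  // ScheduleWork() posts kMsgHaveWork with |this| as the WPARAM, and
  // ScheduleDelayedWork() passes |this| as the SetTimer() id, which Windows
  // echoes back as the WPARAM of WM_TIMER.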
  switch (message) {
    case kMsgHaveWork:
      reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
      break;
    case WM_TIMER:
      reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
      break;
  }
  return DefWindowProc(hwnd, message, wparam, lparam);
}

void MessagePumpForUI::DoRunLoop() {
  // If this was just a simple PeekMessage() loop (servicing all possible work
  // queues), then Windows would try to achieve the following order (according
  // to MSDN documentation about PeekMessage with no filter):
  //    * Sent messages
  //    * Posted messages
  //    * Sent messages (again)
  //    * WM_PAINT messages
  //    * WM_TIMER messages
  //
  // Summary: none of the above classes is starved, and sent messages have
  // twice the chance of being processed (i.e., reduced service time).

  for (;;) {
    // If we do any work, we may create more messages etc., and more work may
    // possibly be waiting in another task group.  When we (for example)
    // ProcessNextWindowsMessage(), there is a good chance there are still more
    // messages waiting.  On the other hand, when any of these methods return
    // having done no work, then it is pretty unlikely that calling them again
    // quickly will find any work to do.  Finally, if they all say they had no
    // work, then it is a good time to consider sleeping (waiting) for more
    // work.

    bool more_work_is_plausible = ProcessNextWindowsMessage();
    if (state_->should_quit)
      break;

    more_work_is_plausible |= state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    // If we did not process any delayed work, then we can assume that our
    // existing WM_TIMER, if any, will fire when delayed work should run.  We
    // don't want to disturb that timer if it is already in flight.  However,
    // if we did do all remaining delayed work, then let's kill the WM_TIMER.
    if (more_work_is_plausible && delayed_work_time_.is_null())
      KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    WaitForWork();  // Wait (sleep) until we have work to do again.
  }
}

void MessagePumpForUI::InitMessageWnd() {
  WNDCLASSEX wc = {0};
  wc.cbSize = sizeof(wc);
  wc.lpfnWndProc = base::win::WrappedWindowProc<WndProcThunk>;
  wc.hInstance = base::GetModuleFromAddress(wc.lpfnWndProc);
  wc.lpszClassName = kWndClass;
  instance_ = wc.hInstance;
  RegisterClassEx(&wc);
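
  // HWND_MESSAGE makes this a message-only window: it is never shown and never
  // receives broadcast messages; it exists solely so kMsgHaveWork and WM_TIMER
  // have somewhere to be delivered.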
  message_hwnd_ =
      CreateWindow(kWndClass, 0, 0, 0, 0, 0, 0, HWND_MESSAGE, 0, instance_, 0);
  DCHECK(message_hwnd_);
}

void MessagePumpForUI::WaitForWork() {
  // Wait until a message is available, up to the time needed by the timer
  // manager to fire the next set of timers.
  int delay = GetCurrentDelay();
  if (delay < 0)  // Negative value means no timers waiting.
    delay = INFINITE;

  DWORD result;
  result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT,
                                       MWMO_INPUTAVAILABLE);
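  // MWMO_INPUTAVAILABLE makes the wait return when input is already sitting in
  // the queue, even if a previous PeekMessage() call saw it and left it there;
  // without it we could sleep through pending input.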

  if (WAIT_OBJECT_0 == result) {
    // A WM_* message is available.
    // If a parent-child relationship exists between windows across threads,
    // then their thread inputs are implicitly attached.  This causes
    // MsgWaitForMultipleObjectsEx to return, indicating that messages are
    // ready for processing (specifically mouse messages intended for the child
    // window, which can happen when the child window has capture).  The
    // subsequent PeekMessage call fails to return any messages, thus causing
    // us to enter a tight loop at times.  The WaitMessage call below is a
    // workaround to give the child window some time to process its input
    // messages.
    MSG msg = {0};
    DWORD queue_status = GetQueueStatus(QS_MOUSE);
    if (HIWORD(queue_status) & QS_MOUSE &&
        !PeekMessage(&msg, NULL, WM_MOUSEFIRST, WM_MOUSELAST, PM_NOREMOVE)) {
      WaitMessage();
    }
    return;
  }

  DCHECK_NE(WAIT_FAILED, result) << GetLastError();
}

void MessagePumpForUI::HandleWorkMessage() {
  // If we are being called outside of the context of Run, then don't try to do
  // any work.  This could correspond to a MessageBox call or something of that
  // sort.
  if (!state_) {
    // Since we handled a kMsgHaveWork message, we must still update this flag.
    InterlockedExchange(&have_work_, 0);
    return;
  }

  // Let whatever would have run had we not been putting messages in the queue
  // run now.  This is an attempt to make our dummy message not starve other
  // messages that may be in the Windows message queue.
  ProcessPumpReplacementMessage();

  // Now give the delegate a chance to do some work.  It will let us know if it
  // needs to do more work.
  if (state_->delegate->DoWork())
    ScheduleWork();
}

void MessagePumpForUI::HandleTimerMessage() {
  KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));

  // If we are being called outside of the context of Run, then don't do
  // anything.  This could correspond to a MessageBox call or something of
  // that sort.
  if (!state_)
    return;

  state_->delegate->DoDelayedWork(&delayed_work_time_);
  if (!delayed_work_time_.is_null()) {
    // A bit gratuitous to set delayed_work_time_ again, but oh well.
    ScheduleDelayedWork(delayed_work_time_);
  }
}

bool MessagePumpForUI::ProcessNextWindowsMessage() {
  // If there are sent messages in the queue then PeekMessage internally
  // dispatches the message and returns false.  We return true in this
  // case to ensure that the message loop peeks again instead of calling
  // MsgWaitForMultipleObjectsEx again.
  bool sent_messages_in_queue = false;
  DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
  if (HIWORD(queue_status) & QS_SENDMESSAGE)
    sent_messages_in_queue = true;

  MSG msg;
  if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
    return ProcessMessageHelper(msg);

  return sent_messages_in_queue;
}

bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
  TRACE_EVENT1("base", "MessagePumpForUI::ProcessMessageHelper",
               "message", msg.message);
  if (WM_QUIT == msg.message) {
    // Repost the QUIT message so that it will be retrieved by the primary
    // GetMessage() loop.
    state_->should_quit = true;
    PostQuitMessage(static_cast<int>(msg.wParam));
    return false;
  }

  // While running our main message pump, we discard kMsgHaveWork messages.
  if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
    return ProcessPumpReplacementMessage();

  if (CallMsgFilter(const_cast<MSG*>(&msg), kMessageFilterCode))
    return true;

  WillProcessMessage(msg);

  if (state_->dispatcher) {
    if (!state_->dispatcher->Dispatch(msg))
      state_->should_quit = true;
  } else {
    TranslateMessage(&msg);
    DispatchMessage(&msg);
  }

  DidProcessMessage(msg);
  return true;
}

bool MessagePumpForUI::ProcessPumpReplacementMessage() {
  // When we encounter a kMsgHaveWork message, this method is called to peek
  // and process a replacement message, such as a WM_PAINT or WM_TIMER.  The
  // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
  // a continuous stream of such messages is posted.  This method carefully
  // peeks a message while there is no chance for a kMsgHaveWork to be pending,
  // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
  // possibly be posted), and finally dispatches that peeked replacement.  Note
  // that the re-post of kMsgHaveWork may be asynchronous to this thread!!

  bool have_message = false;
  MSG msg;
  // We should not process all window messages if we are in the context of an
  // OS modal loop, i.e. in the context of a windows API call like MessageBox.
  // This is to ensure that these messages are peeked out by the OS modal loop.
  if (MessageLoop::current()->os_modal_loop()) {
    // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
    have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
                   PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
  } else {
    have_message = (0 != PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
  }

  DCHECK(!have_message || kMsgHaveWork != msg.message ||
         msg.hwnd != message_hwnd_);

  // Since we discarded a kMsgHaveWork message, we must update the flag.
  int old_have_work = InterlockedExchange(&have_work_, 0);
  DCHECK(old_have_work);

  // We don't need a special time slice if we didn't find a message to process.
  if (!have_message)
    return false;

  // Guarantee we'll get another time slice in the case where we go into native
  // windows code.  This ScheduleWork() may hurt performance a tiny bit when
  // tasks appear very infrequently, but when the event queue is busy, the
  // kMsgHaveWork events get (percentage wise) rarer and rarer.
  ScheduleWork();
  return ProcessMessageHelper(msg);
}

//-----------------------------------------------------------------------------
// MessagePumpForIO public:

MessagePumpForIO::MessagePumpForIO() {
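  // Passing INVALID_HANDLE_VALUE with no existing port creates a fresh
  // completion port; the final argument limits it to one concurrently running
  // thread, which matches the single-threaded pump model.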
  port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1));
  DCHECK(port_.IsValid());
}

void MessagePumpForIO::ScheduleWork() {
  if (InterlockedExchange(&have_work_, 1))
    return;  // Someone else continued the pumping.

  // Make sure the MessagePump does some work for us.
  BOOL ret = PostQueuedCompletionStatus(port_, 0,
                                        reinterpret_cast<ULONG_PTR>(this),
                                        reinterpret_cast<OVERLAPPED*>(this));
  if (ret)
    return;  // Post worked perfectly.

  // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
  InterlockedExchange(&have_work_, 0);  // Clarify that we didn't succeed.
  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
                            MESSAGE_LOOP_PROBLEM_MAX);
}

void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  // We know that we can't be blocked right now since this method can only be
  // called on the same thread as Run, so we only need to update our record of
  // how long to sleep when we do sleep.
  delayed_work_time_ = delayed_work_time;
}

void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
                                         IOHandler* handler) {
  ULONG_PTR key = reinterpret_cast<ULONG_PTR>(handler);
  HANDLE port = CreateIoCompletionPort(file_handle, port_, key, 1);
  DPCHECK(port);
}
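
// Usage sketch (hypothetical caller, not part of this file; the IOHandler and
// IOContext types are declared in base/message_pump_win.h):
//
//   class FileReader : public base::MessagePumpForIO::IOHandler {
//    public:
//     virtual void OnIOCompleted(base::MessagePumpForIO::IOContext* context,
//                                DWORD bytes_transfered, DWORD error) {
//       // Runs on the pump's thread once the overlapped operation started
//       // with &context->overlapped completes (or fails).
//     }
//   };
//
// The handler is registered once per HANDLE before the first overlapped call:
//
//   pump->RegisterIOHandler(file_handle, &reader);
//   context->handler = &reader;  // Checked by WaitForIOCompletion() below.
//   ::ReadFile(file_handle, buffer, size, NULL, &context->overlapped);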

//-----------------------------------------------------------------------------
// MessagePumpForIO private:

void MessagePumpForIO::DoRunLoop() {
  for (;;) {
    // If we do any work, we may create more messages etc., and more work may
    // possibly be waiting in another task group.  When we (for example)
    // WaitForIOCompletion(), there is a good chance there are still more
    // messages waiting.  On the other hand, when any of these methods return
    // having done no work, then it is pretty unlikely that calling them
    // again quickly will find any work to do.  Finally, if they all say they
    // had no work, then it is a good time to consider sleeping (waiting) for
    // more work.

    bool more_work_is_plausible = state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |= WaitForIOCompletion(0, NULL);
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    WaitForWork();  // Wait (sleep) until we have work to do again.
  }
}

// Wait until IO completes, up to the time needed by the timer manager to fire
// the next set of timers.
void MessagePumpForIO::WaitForWork() {
  // We do not support nested IO message loops.  This is to avoid messy
  // recursion problems.
  DCHECK_EQ(1, state_->run_depth) << "Cannot nest an IO message loop!";

  int timeout = GetCurrentDelay();
  if (timeout < 0)  // Negative value means no timers waiting.
    timeout = INFINITE;

  WaitForIOCompletion(timeout, NULL);
}

bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  IOItem item;
  if (completed_io_.empty() || !MatchCompletedIOItem(filter, &item)) {
    // We have to ask the system for another IO completion.
    if (!GetIOItem(timeout, &item))
      return false;

    if (ProcessInternalIOItem(item))
      return true;
  }

  if (item.context->handler) {
    if (filter && item.handler != filter) {
      // Save this item for later.
      completed_io_.push_back(item);
    } else {
      DCHECK_EQ(item.context->handler, item.handler);
      WillProcessIOEvent();
      item.handler->OnIOCompleted(item.context, item.bytes_transfered,
                                  item.error);
      DidProcessIOEvent();
    }
  } else {
    // The handler must be gone by now, so just clean up the mess.
    delete item.context;
  }
  return true;
}

// Asks the OS for another IO completion result.
bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
  memset(item, 0, sizeof(*item));
  ULONG_PTR key = NULL;
  OVERLAPPED* overlapped = NULL;
  if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
                                 &overlapped, timeout)) {
    if (!overlapped)
      return false;  // Nothing in the queue.
    item->error = GetLastError();
    item->bytes_transfered = 0;
  }

  item->handler = reinterpret_cast<IOHandler*>(key);
  item->context = reinterpret_cast<IOContext*>(overlapped);
  return true;
}
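
// ScheduleWork() posts a completion packet with |this| as both the completion
// key and the OVERLAPPED pointer; that sentinel is what this test recognizes.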
bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
  if (this == reinterpret_cast<MessagePumpForIO*>(item.context) &&
      this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
    // This is our internal completion.
    DCHECK(!item.bytes_transfered);
    InterlockedExchange(&have_work_, 0);
    return true;
  }
  return false;
}

// Returns a completion item that was previously received.
bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
  DCHECK(!completed_io_.empty());
  for (std::list<IOItem>::iterator it = completed_io_.begin();
       it != completed_io_.end(); ++it) {
    if (!filter || it->handler == filter) {
      *item = *it;
      completed_io_.erase(it);
      return true;
    }
  }
  return false;
}

void MessagePumpForIO::AddIOObserver(IOObserver* obs) {
  io_observers_.AddObserver(obs);
}

void MessagePumpForIO::RemoveIOObserver(IOObserver* obs) {
  io_observers_.RemoveObserver(obs);
}

void MessagePumpForIO::WillProcessIOEvent() {
  FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent());
}

void MessagePumpForIO::DidProcessIOEvent() {
  FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent());
}

}  // namespace base