chromium-blink-merge.git / content/common/gpu/gpu_command_buffer_stub.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/devtools_gpu_instrumentation.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
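// Holds one pending WaitForTokenInRange / WaitForGetOffsetInRange request:
// the requested [start, end] range plus the delayed IPC reply, which
// CheckCompleteWaits() sends once the command buffer enters that range (or
// hits an error).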
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

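// Note: kHandleMoreWorkPeriodMs is the delay used when rescheduling after a
// message was handled with a live context (see OnMessageReceived), while the
// shorter kHandleMoreWorkPeriodBusyMs is used when PollWork reschedules
// itself.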
class DevToolsChannelData : public base::debug::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::debug::ConvertableToTraceFormat> CreateForChannel(
      GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(value_.get(), &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::debug::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  // TODO(yurys): remove devtools_gpu_instrumentation call once DevTools
  // Timeline migrates to tracing crbug.com/361045.
  devtools_gpu_instrumentation::ScopedGpuTask task(channel());
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetLastState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

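// A brief map of the initialization sequence below: wrap the shared-state
// memory, create the CommandBufferService, GLES2Decoder, and GpuScheduler,
// pick an onscreen or offscreen surface, create (and possibly virtualize)
// the GL context, initialize the decoder, then wire up the IPC callbacks
// before replying to the client.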
void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

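// Usage sketch for GetRequestedAttribute below (the attribute values are
// illustrative EGL-style enums, not taken from this file): with
// requested_attribs_ = {0x3025 /* depth size */, 24,
//                       0x3026 /* stencil size */, 8},
// GetRequestedAttribute(0x3026) returns 8; an attribute that is not present
// returns -1.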
int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

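// Note on the flush-count check in OnAsyncFlush below: flush_count is an
// unsigned counter that the client increments with each flush and that may
// eventually wrap around. Computing (flush_count - last_flush_count_) in
// unsigned arithmetic and accepting differences below 0x8000000U tolerates
// wraparound: e.g. with last_flush_count_ == 0xFFFFFFFF, a new flush_count
// of 0 yields a difference of 1 and is accepted as in-order, while a stale
// flush_count of last_flush_count_ - 1 yields 0xFFFFFFFF and is rejected.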
void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

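// OnWaitSyncPoint below implements the service side of a sync-point wait:
// if the sync point is not yet retired, the stub unschedules itself and
// registers a callback; OnSyncPointRetired re-schedules it once the sync
// point retires. sync_point_wait_count_ counts nested waits so that the
// async trace event brackets only the outermost wait.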
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

void GpuCommandBufferStub::SwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info));
}

}  // namespace content