// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

namespace content {

GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}

// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host =
      new GpuChannelHost(factory, gpu_info, gpu_memory_buffer_manager);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

GpuChannelHost::GpuChannelHost(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
    : factory_(factory),
      gpu_info_(gpu_info),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
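  // Advance each ID counter past its initial value so the first ID handed
  // out is non-zero; 0 is assumed here to be reserved as an invalid/default
  // ID.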
  next_transfer_buffer_id_.GetNext();
  next_image_id_.GetNext();
  next_route_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_ = IPC::SyncChannel::Create(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      shutdown_event);
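
  // The SyncMessageFilter is what makes it possible to call Send() from
  // threads other than the channel's listener thread.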
  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}

bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve message order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown even when we
  // are actually calling from the main thread (the message is discarded then).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  // without a main listener?
  if (factory_->IsMainThread()) {
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  } else if (base::MessageLoop::current()) {
    bool result = sync_filter_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
    return result;
  }

  // No message loop on the calling thread: drop the message.
  return false;
}
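
// Note: view command buffers are created through the GpuChannelHostFactory
// (which routes through the browser process) rather than by a message on
// this channel, since the surface identified by surface_id is managed there.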
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      DCHECK(MessageLoopProxy::current().get());

      scoped_refptr<base::MessageLoopProxy> io_loop =
          factory_->GetIOLoopProxy();
      io_loop->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                     channel_filter_.get()));
    }

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
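
// Unlike view command buffers, offscreen command buffers are created by a
// synchronous message sent directly to the GPU process over this channel.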
CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
                                                           init_params,
                                                           route_id,
                                                           &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
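
// Video accelerators are created through the CommandBufferProxyImpl that
// owns the target route; context_lock_ guards the proxy map lookup.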
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}

scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}

void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}
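
// Route bookkeeping lives on the IO thread, where the channel's MessageFilter
// runs, so additions and removals are posted there. The caller's current
// MessageLoopProxy is captured so incoming messages can be dispatched back to
// the thread that registered the route.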
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                               channel_filter_.get(), route_id));
}

base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->GetPeerPID(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
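  // On POSIX the file descriptor is duplicated with dup(); passing true to
  // base::FileDescriptor transfers ownership, so the duplicate is closed
  // automatically once consumed.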
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}

int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
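  // *requires_sync_point tells the caller whether it must insert a sync point
  // before the GPU process can safely use the shared buffer. Handles passed
  // through unchanged (IOSurface, SurfaceTexture, Ozone native buffers) need
  // one; a duplicated shared-memory handle does not.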
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_BUFFER:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32 GpuChannelHost::ReserveImageId() {
  return next_image_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}
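
// MessageFilter lives on the IO thread. It owns the route-to-listener map and
// forwards each incoming message to the MessageLoopProxy that was registered
// alongside the route.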
GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.loop->PostTask(
      FROM_HERE,
      base::Bind(
          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
          info.listener,
          message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content