gpu: Associate all GpuMemoryBuffers with unique IDs.
content/common/gpu/client/gpu_channel_host.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

namespace content {
GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host =
      new GpuChannelHost(factory, gpu_info, gpu_memory_buffer_manager);
  host->Connect(channel_handle, shutdown_event);
  return host;
}
GpuChannelHost::GpuChannelHost(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
    : factory_(factory),
      gpu_info_(gpu_info),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
  next_transfer_buffer_id_.GetNext();
  next_image_id_.GetNext();
  next_route_id_.GetNext();
}
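// Note (illustrative, not part of the original file): each counter above is
// advanced once at construction so that the value 0 is never handed out and
// can act as an invalid/sentinel ID. Assuming base::AtomicSequenceNumber
// starts counting at 0:
//
//   base::AtomicSequenceNumber seq;
//   seq.GetNext();                 // burns 0
//   int32 first = seq.GetNext();   // first ID actually issued is 1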
void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_ = IPC::SyncChannel::Create(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      shutdown_event);

  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}
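// Dispatch-order note (an assumption based on IPC::ChannelProxy behavior, not
// part of the original file): message filters run on the IO thread in the
// order they were added, so sync replies are claimed by sync_filter_ before
// the catch-all channel_filter_ sees the remaining messages. Roughly:
//
//   for (IPC::MessageFilter* filter : filters)  // sync_filter_ first
//     if (filter->OnMessageReceived(msg))
//       break;                                  // message consumed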
bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown while we are
  // actually calling from the main thread (in that case, discard the message).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  // without a main listener?
  if (factory_->IsMainThread()) {
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  } else if (base::MessageLoop::current()) {
    bool result = sync_filter_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
    return result;
  }

  return false;
}
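// Illustrative usage (a sketch, not part of the original file;
// GpuChannelMsg_Nop is a hypothetical message type):
//
//   // Valid from the main thread or any thread with a message loop. Send()
//   // takes ownership of the message whether or not it succeeds.
//   if (!host->Send(new GpuChannelMsg_Nop())) {
//     // The channel is likely lost; do not retry on this host, establish a
//     // new channel instead.
//   }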
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      DCHECK(MessageLoopProxy::current().get());

      scoped_refptr<base::MessageLoopProxy> io_loop =
          factory_->GetIOLoopProxy();
      io_loop->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                     channel_filter_.get()));
    }

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
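// Caller-side sketch (illustrative, not part of the original file; variable
// names are assumptions):
//
//   CommandBufferProxyImpl* cb = host->CreateViewCommandBuffer(
//       surface_id, NULL /* share_group */, attribs, active_url,
//       gfx::PreferIntegratedGpu);
//   if (!cb) {
//     // Creation failed. If the channel was marked lost above, the caller is
//     // expected to tear down and create a fresh GpuChannelHost before
//     // retrying.
//   }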
CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
                                                           init_params,
                                                           route_id,
                                                           &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
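// Share-group sketch (illustrative, not part of the original file): two
// offscreen command buffers that share resources. Passing the first proxy as
// share_group makes init_params.share_group_id refer to its route.
//
//   CommandBufferProxyImpl* first = host->CreateOffscreenCommandBuffer(
//       gfx::Size(1, 1), NULL, attribs, active_url,
//       gfx::PreferIntegratedGpu);
//   CommandBufferProxyImpl* second = host->CreateOffscreenCommandBuffer(
//       gfx::Size(1, 1), first, attribs, active_url,
//       gfx::PreferIntegratedGpu);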
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}
scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}
void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                               channel_filter_.get(), route_id));
}
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->GetPeerPID(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}
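// Illustrative usage (a sketch, not part of the original file): duplicating a
// shared memory segment's handle for the GPU process.
//
//   base::SharedMemory shm;
//   if (shm.CreateAnonymous(buffer_size)) {
//     base::SharedMemoryHandle gpu_handle =
//         host->ShareToGpuProcess(shm.handle());
//     // gpu_handle is SharedMemory::NULLHandle() if the channel is lost or
//     // duplication failed; otherwise it is safe to send over the channel.
//   }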
int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_BUFFER:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}
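// Illustrative usage (a sketch, not part of the original file; |buffer| is an
// assumed gfx::GpuMemoryBuffer*):
//
//   bool requires_sync_point = false;
//   gfx::GpuMemoryBufferHandle handle =
//       host->ShareGpuMemoryBufferToGpuProcess(buffer->GetHandle(),
//                                              &requires_sync_point);
//   // Shared memory handles are duplicated; native types (IOSurface,
//   // SurfaceTexture, Ozone) pass through unchanged, but the producer must
//   // insert a sync point for the consumer to wait on before using the
//   // buffer.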
int32 GpuChannelHost::ReserveImageId() {
  return next_image_id_.GetNext();
}
int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}
GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}
GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}
void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}
void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}
bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.loop->PostTask(
      FROM_HERE,
      base::Bind(
          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
          info.listener,
          message));
  return true;
}
void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}
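// Illustrative consequence (a sketch, not part of the original file): lost_
// is set under the lock before any listener is notified, so a listener that
// reacts to OnChannelError() can already observe the loss:
//
//   if (host->IsLost()) {
//     // Do not reuse this GpuChannelHost; request a new channel from the
//     // factory instead.
//   }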
bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content