// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/one_copy_tile_task_worker_pool.h"

#include <algorithm>
#include <limits>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/base/math_util.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {
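
// Summary (added for clarity): RasterBufferImpl rasters into a staging
// resource owned by the pool (reusing one with a matching content id when
// persistent GpuMemoryBuffers are available) and schedules a chunked copy
// into the output resource; any still-pending copy is issued at the latest
// when the buffer is destroyed.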
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   ResourceFormat resource_format,
                   const Resource* output_resource,
                   uint64_t previous_content_id)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        output_resource_(output_resource),
        raster_content_id_(0),
        sequence_(0) {
    if (worker_pool->have_persistent_gpu_memory_buffers() &&
        previous_content_id) {
      raster_resource_ =
          resource_pool->TryAcquireResourceWithContentId(previous_content_id);
      raster_content_id_ = previous_content_id;
    }
    if (raster_resource_) {
      DCHECK_EQ(resource_format, raster_resource_->format());
      DCHECK_EQ(output_resource->size().ToString(),
                raster_resource_->size().ToString());
    } else {
      raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
                                                        resource_format);
    }

    lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
        resource_provider_, raster_resource_->id()));
  }

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return resources to pool so they can be used by another RasterBuffer
    // instance.
    resource_pool_->ReleaseResource(raster_resource_.Pass(),
                                    raster_content_id_);
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& raster_full_rect,
                const gfx::Rect& raster_dirty_rect,
                uint64_t new_content_id,
                float scale) override {
    // If there's a raster_content_id_, we are reusing a resource with that
    // content id.
    bool reusing_raster_resource = raster_content_id_ != 0;
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
        output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
        scale);
    // Store the content id of the resource to return to the pool.
    raster_content_id_ = new_content_id;
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* output_resource_;
  uint64_t raster_content_id_;
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 32;

// Delay between checking for copy operations to complete.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;
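
// Taken together (explanatory note): at most kMaxCopyOperations copies may be
// in flight, issued copies are flushed in batches of kCopyFlushPeriod, and
// completion checks are spaced at least
// kCheckForCompletedCopyOperationsTickRateMs apart. Only after
// kFailedAttemptsBeforeWaitIfNeeded attempts is the scheduled check asked to
// block until copies complete rather than just poll.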

}  // namespace

OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
    const Resource* src,
    const Resource* dst,
    const gfx::Rect& rect)
    : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool,
    size_t max_bytes_per_copy_operation,
    bool have_persistent_gpu_memory_buffers) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      resource_pool, max_bytes_per_copy_operation,
      have_persistent_gpu_memory_buffers));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool,
    size_t max_bytes_per_copy_operation,
    bool have_persistent_gpu_memory_buffers)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      max_bytes_per_copy_operation_(max_bytes_per_copy_operation),
      have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      lock_(),
      copy_operation_count_cv_(&lock_),
      issued_copy_operation_count_(0),
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  DCHECK_EQ(pending_copy_operations_.size(), 0u);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    copy_operation_count_cv_.Signal();
  }
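
  // Explanatory note (describing TaskGraphRunner behavior, not visible in
  // this file): scheduling an empty graph cancels tasks that have not yet
  // started; WaitForTasksToFinishRunning() then drains tasks already running.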
  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

#if DCHECK_IS_ON()
  {
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
  }
#endif

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  resource_pool_->CheckBusyResources(false);

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() {
  return resource_provider_->best_texture_format();
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
  // the dirty rect.
  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_,
                           resource_provider_->best_texture_format(), resource,
                           previous_content_id));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
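
// Explanatory note: rasters the dirty region into the staging resource, then
// queues the staging-to-output copy as a series of chunks of at most
// |max_bytes_per_copy_operation_| bytes each, so a single large tile cannot
// monopolize the copy pipeline.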
CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    bool reusing_raster_resource,
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
        raster_resource_write_lock,
    const Resource* raster_resource,
    const Resource* output_resource,
    const RasterSource* raster_source,
    const gfx::Rect& raster_full_rect,
    const gfx::Rect& raster_dirty_rect,
    float scale) {
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      raster_resource_write_lock->GetGpuMemoryBuffer();
  if (gpu_memory_buffer) {
    void* data = NULL;
    bool rv = gpu_memory_buffer->Map(&data);
    DCHECK(rv);
    int stride;
    gpu_memory_buffer->GetStride(&stride);

    gfx::Rect playback_rect = raster_full_rect;
    if (reusing_raster_resource) {
      playback_rect.Intersect(raster_dirty_rect);
    }
    DCHECK(!playback_rect.IsEmpty())
        << "Why are we rastering a tile that's not dirty?";
    TileTaskWorkerPool::PlaybackToMemory(
        data, raster_resource->format(), raster_resource->size(), stride,
        raster_source, raster_full_rect, playback_rect, scale);
    gpu_memory_buffer->Unmap();
  }

  base::AutoLock lock(lock_);

  CopySequenceNumber sequence = 0;
  size_t bytes_per_row = (BitsPerPixel(raster_resource->format()) *
                          raster_resource->size().width()) /
                         8;
  size_t chunk_size_in_rows = std::max(
      static_cast<size_t>(1), max_bytes_per_copy_operation_ / bytes_per_row);
  // Align chunk size to 4. Required to support compressed texture formats.
  chunk_size_in_rows =
      MathUtil::RoundUp(chunk_size_in_rows, static_cast<size_t>(4));
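
  // Worked example (illustrative values, not from this file): a 512-pixel-
  // wide RGBA_8888 resource has bytes_per_row = (32 * 512) / 8 = 2048. With a
  // 1 MiB |max_bytes_per_copy_operation_|, chunk_size_in_rows =
  // max(1, 1048576 / 2048) = 512, already a multiple of 4, so up to 512 rows
  // are copied per operation.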
  size_t y = 0;
  size_t height = raster_resource->size().height();
  while (y < height) {
    int failed_attempts = 0;
    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
           kMaxCopyOperations) {
      // Ignore limit when shutdown is set.
      if (shutdown_)
        break;

      ++failed_attempts;

      // Schedule a check that will also wait for operations to complete
      // after too many failed attempts.
      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

      // Schedule a check for completed copy operations if too many operations
      // are currently in-flight.
      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

      {
        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

        // Wait for in-flight copy operations to drop below limit.
        copy_operation_count_cv_.Wait();
      }
    }

    // There may be more work available, so wake up another worker thread.
    copy_operation_count_cv_.Signal();

    // Copy at most |chunk_size_in_rows|.
    size_t rows_to_copy = std::min(chunk_size_in_rows, height - y);

    // |raster_resource_write_lock| is passed to the first copy operation as it
    // needs to be released before we can issue a copy.
    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
        raster_resource_write_lock.Pass(), raster_resource, output_resource,
        gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
    y += rows_to_copy;

    // Acquire a sequence number for this copy operation.
    sequence = next_copy_operation_sequence_++;

    // Post task that will advance last flushed copy operation to |sequence|
    // if we have reached the flush period.
    if ((sequence % kCopyFlushPeriod) == 0) {
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                     weak_ptr_factory_.GetWeakPtr(), sequence));
    }
  }

  return sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
    CopySequenceNumber sequence) {
  if (last_issued_copy_operation_ >= sequence)
    return;

  IssueCopyOperations(sequence - last_issued_copy_operation_);
  last_issued_copy_operation_ = sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Increment |issued_copy_operation_count_| to reflect the transition of
    // copy operations from "pending" to "issued" state.
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->src_write_lock.reset();

    // Copy contents of source resource to destination resource.
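    // Note: |lock_| has already been released above, so generating the GL
    // copy commands here does not block raster worker threads that are
    // queueing further operations.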
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id(),
                                     copy_operation->rect);
  }
}

void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}

void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}

}  // namespace cc