// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/tile_manager.h"

#include <algorithm>
#include <limits>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "cc/platform_color.h"
#include "cc/raster_worker_pool.h"
#include "cc/resource_pool.h"
#include "cc/tile.h"

namespace cc {

namespace {

// Determine bin based on three categories of tiles: things we need now,
// things we need soon, and eventually.
TileManagerBin BinFromTilePriority(const TilePriority& prio) {

  // The amount of time for which we want to have prepainting coverage.
  const double prepainting_window_time_seconds = 1.0;
  const double backfling_guard_distance_pixels = 314.0;

  // Explicitly limit how far ahead we will prepaint for low and non-low res.
  const double max_lores_paint_distance_pixels = 8192.0;
  const double max_hires_paint_distance_pixels = 4096.0;
  if (prio.resolution == cc::LOW_RESOLUTION) {
    if (prio.distance_to_visible_in_pixels > max_lores_paint_distance_pixels)
      return cc::NEVER_BIN;
  } else {
    if (prio.distance_to_visible_in_pixels > max_hires_paint_distance_pixels)
      return cc::NEVER_BIN;
  }

  if (prio.time_to_needed_in_seconds() == std::numeric_limits<float>::max())
    return NEVER_BIN;

  if (prio.resolution == NON_IDEAL_RESOLUTION)
    return EVENTUALLY_BIN;

  if (prio.time_to_needed_in_seconds() == 0 ||
      prio.distance_to_visible_in_pixels < backfling_guard_distance_pixels)
    return NOW_BIN;

  if (prio.time_to_needed_in_seconds() < prepainting_window_time_seconds)
    return SOON_BIN;

  return EVENTUALLY_BIN;
}

}  // namespace
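
// ManagedTileState is the per-tile bookkeeping TileManager uses to make
// memory and raster-scheduling decisions. A freshly constructed state owns
// no resource and still needs its pixel refs gathered.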
ManagedTileState::ManagedTileState()
    : can_use_gpu_memory(false),
      can_be_freed(true),
      resource_is_being_initialized(false),
      contents_swizzled(false),
      need_to_gather_pixel_refs(true),
      gpu_memmgr_stats_bin(NEVER_BIN) {
}

ManagedTileState::~ManagedTileState() {
  DCHECK(!resource);
  DCHECK(!resource_is_being_initialized);
}

TileManager::TileManager(
    TileManagerClient* client,
    ResourceProvider* resource_provider,
    size_t num_raster_threads)
    : client_(client),
      resource_pool_(ResourcePool::Create(resource_provider)),
      raster_worker_pool_(RasterWorkerPool::Create(num_raster_threads)),
      manage_tiles_pending_(false),
      manage_tiles_call_count_(0) {
}

TileManager::~TileManager() {
  // Reset global state and manage. This should cause
  // our memory usage to drop to zero.
  global_state_ = GlobalStateThatImpactsTilePriority();
  AssignGpuMemoryToTiles();
  // This should finish all pending tasks and release any uninitialized
  // resources.
  raster_worker_pool_.reset();
  ManageTiles();
  DCHECK(tiles_.size() == 0);
}

void TileManager::SetGlobalState(
    const GlobalStateThatImpactsTilePriority& global_state) {
  global_state_ = global_state;
  resource_pool_->SetMaxMemoryUsageBytes(global_state_.memory_limit_in_bytes);
  ScheduleManageTiles();
}

void TileManager::RegisterTile(Tile* tile) {
  tiles_.push_back(tile);
  ScheduleManageTiles();
}
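
// Removes the tile from every internal list (image decoding, raster queue
// and the main tile vector) and releases any resource it still holds.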
void TileManager::UnregisterTile(Tile* tile) {
  for (TileList::iterator it = tiles_with_image_decoding_tasks_.begin();
       it != tiles_with_image_decoding_tasks_.end(); it++) {
    if (*it == tile) {
      tiles_with_image_decoding_tasks_.erase(it);
      break;
    }
  }
  for (TileVector::iterator it = tiles_that_need_to_be_rasterized_.begin();
       it != tiles_that_need_to_be_rasterized_.end(); it++) {
    if (*it == tile) {
      tiles_that_need_to_be_rasterized_.erase(it);
      break;
    }
  }
  for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); it++) {
    if (*it == tile) {
      FreeResourcesForTile(tile);
      tiles_.erase(it);
      return;
    }
  }
  DCHECK(false) << "Could not find tile version.";
}

void TileManager::WillModifyTilePriority(
    Tile* tile, WhichTree tree, const TilePriority& new_priority) {
  // TODO(nduca): Do something smarter if reprioritization turns out to be
  // costly.
  ScheduleManageTiles();
}

void TileManager::ScheduleManageTiles() {
  if (manage_tiles_pending_)
    return;
  client_->ScheduleManageTiles();
  manage_tiles_pending_ = true;
}
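
// Orders tiles for gpu memory assignment and rasterization: first by bin,
// then by resolution, then by how soon the tile is needed.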
class BinComparator {
 public:
  bool operator() (const Tile* a, const Tile* b) const {
    const ManagedTileState& ams = a->managed_state();
    const ManagedTileState& bms = b->managed_state();
    if (ams.bin != bms.bin)
      return ams.bin < bms.bin;

    if (ams.resolution != bms.resolution)
      return ams.resolution < bms.resolution;

    return ams.time_to_needed_in_seconds < bms.time_to_needed_in_seconds;
  }
};
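
// Recomputes each tile's bin from its current priority, applies the global
// memory limit policy, sorts the tiles by priority, redistributes gpu memory
// and kicks off any raster work that became possible.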
void TileManager::ManageTiles() {
  TRACE_EVENT0("cc", "TileManager::ManageTiles");
  manage_tiles_pending_ = false;
  ++manage_tiles_call_count_;

  const bool smoothness_takes_priority =
      global_state_.smoothness_takes_priority;

  // For each tree, bin into different categories of tiles.
  for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = *it;
    ManagedTileState& mts = tile->managed_state();

    TilePriority prio;
    if (smoothness_takes_priority)
      prio = tile->priority(ACTIVE_TREE);
    else
      prio = tile->combined_priority();

    mts.resolution = prio.resolution;
    mts.time_to_needed_in_seconds = prio.time_to_needed_in_seconds();
    mts.bin = BinFromTilePriority(prio);
    mts.gpu_memmgr_stats_bin = BinFromTilePriority(tile->combined_priority());
  }

  // Memory limit policy works by mapping some bin states to the NEVER bin.
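  // ALLOW_NOTHING demotes every bin, ALLOW_ABSOLUTE_MINIMUM keeps only
  // NOW_BIN, ALLOW_PREPAINT_ONLY keeps NOW_BIN and SOON_BIN, and the most
  // permissive policy (the final else case) leaves every bin unchanged.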
  TileManagerBin bin_map[NUM_BINS];
  if (global_state_.memory_limit_policy == ALLOW_NOTHING) {
    bin_map[NOW_BIN] = NEVER_BIN;
    bin_map[SOON_BIN] = NEVER_BIN;
    bin_map[EVENTUALLY_BIN] = NEVER_BIN;
    bin_map[NEVER_BIN] = NEVER_BIN;
  } else if (global_state_.memory_limit_policy == ALLOW_ABSOLUTE_MINIMUM) {
    bin_map[NOW_BIN] = NOW_BIN;
    bin_map[SOON_BIN] = NEVER_BIN;
    bin_map[EVENTUALLY_BIN] = NEVER_BIN;
    bin_map[NEVER_BIN] = NEVER_BIN;
  } else if (global_state_.memory_limit_policy == ALLOW_PREPAINT_ONLY) {
    bin_map[NOW_BIN] = NOW_BIN;
    bin_map[SOON_BIN] = SOON_BIN;
    bin_map[EVENTUALLY_BIN] = NEVER_BIN;
    bin_map[NEVER_BIN] = NEVER_BIN;
  } else {
    bin_map[NOW_BIN] = NOW_BIN;
    bin_map[SOON_BIN] = SOON_BIN;
    bin_map[EVENTUALLY_BIN] = EVENTUALLY_BIN;
    bin_map[NEVER_BIN] = NEVER_BIN;
  }
  for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = *it;
    ManagedTileState& mts = tile->managed_state();
    mts.bin = bin_map[mts.bin];
  }

  // Sort by bin.
  std::sort(tiles_.begin(), tiles_.end(), BinComparator());

  // Assign gpu memory and determine what tiles need to be rasterized.
  AssignGpuMemoryToTiles();

  // Finally, kick the rasterizer.
  DispatchMoreTasks();
}
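
// Processes tiles whose pixel uploads were started with beginSetPixels().
// Uploads complete in the order they were issued, so this stops at the first
// tile whose upload has not finished yet.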
void TileManager::CheckForCompletedSetPixels() {
  while (!tiles_with_pending_set_pixels_.empty()) {
    Tile* tile = tiles_with_pending_set_pixels_.front();
    DCHECK(tile->managed_state().resource);

    // Set pixel tasks complete in the order they are posted.
    if (!resource_pool_->resource_provider()->didSetPixelsComplete(
            tile->managed_state().resource->id())) {
      break;
    }

    // It's now safe to release the pixel buffer.
    resource_pool_->resource_provider()->releasePixelBuffer(
        tile->managed_state().resource->id());

    DidFinishTileInitialization(tile);
    tiles_with_pending_set_pixels_.pop();
  }
}

void TileManager::GetRenderingStats(RenderingStats* stats) {
  raster_worker_pool_->GetRenderingStats(stats);
  stats->totalDeferredImageCacheHitCount =
      rendering_stats_.totalDeferredImageCacheHitCount;
  stats->totalImageGatheringCount = rendering_stats_.totalImageGatheringCount;
  stats->totalImageGatheringTimeInSeconds =
      rendering_stats_.totalImageGatheringTimeInSeconds;
}
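
// Reports three totals based on each tile's memory-manager stats bin: bytes
// required (NOW_BIN), bytes that are nice to have (anything but NEVER_BIN),
// and bytes belonging to tiles currently allowed to use gpu memory.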
void TileManager::GetMemoryStats(
    size_t* memoryRequiredBytes,
    size_t* memoryNiceToHaveBytes,
    size_t* memoryUsedBytes) {
  *memoryRequiredBytes = 0;
  *memoryNiceToHaveBytes = 0;
  *memoryUsedBytes = 0;
  for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = *it;
    ManagedTileState& mts = tile->managed_state();
    size_t tile_bytes = tile->bytes_consumed_if_allocated();
    if (mts.gpu_memmgr_stats_bin == NOW_BIN)
      *memoryRequiredBytes += tile_bytes;
    if (mts.gpu_memmgr_stats_bin != NEVER_BIN)
      *memoryNiceToHaveBytes += tile_bytes;
    if (mts.can_use_gpu_memory)
      *memoryUsedBytes += tile_bytes;
  }
}
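
// Walks tiles_ in priority order (the vector was sorted by ManageTiles()) and
// greedily grants gpu memory until the budget is exhausted. Tiles that are
// denied memory are freed; tiles that are granted memory but have no resource
// yet are queued for rasterization.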
void TileManager::AssignGpuMemoryToTiles() {
  TRACE_EVENT0("cc", "TileManager::AssignGpuMemoryToTiles");
  // Some memory cannot be released. Figure out which.
  size_t unreleasable_bytes = 0;
  for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = *it;
    if (!tile->managed_state().can_be_freed)
      unreleasable_bytes += tile->bytes_consumed_if_allocated();
  }

  // Now give memory out to the tiles until we're out, and build
  // the needs-to-be-rasterized queue.
  tiles_that_need_to_be_rasterized_.erase(
      tiles_that_need_to_be_rasterized_.begin(),
      tiles_that_need_to_be_rasterized_.end());

  // Reset the image decoding list so that we do not mess up tile
  // priorities. Tiles will be added to the image decoding list again
  // when DispatchMoreTasks() is called.
  tiles_with_image_decoding_tasks_.clear();

  size_t bytes_left = global_state_.memory_limit_in_bytes - unreleasable_bytes;
  for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
    Tile* tile = *it;
    size_t tile_bytes = tile->bytes_consumed_if_allocated();
    ManagedTileState& managed_tile_state = tile->managed_state();
    if (!managed_tile_state.can_be_freed)
      continue;
    if (managed_tile_state.bin == NEVER_BIN) {
      managed_tile_state.can_use_gpu_memory = false;
      FreeResourcesForTile(tile);
      continue;
    }
    if (tile_bytes > bytes_left) {
      managed_tile_state.can_use_gpu_memory = false;
      FreeResourcesForTile(tile);
      continue;
    }
    bytes_left -= tile_bytes;
    managed_tile_state.can_use_gpu_memory = true;
    if (!managed_tile_state.resource &&
        !managed_tile_state.resource_is_being_initialized)
      tiles_that_need_to_be_rasterized_.push_back(tile);
  }

  // Reverse the tiles_that_need_to_be_rasterized_ vector so that pop_back()
  // returns the highest priority tile.
  std::reverse(
      tiles_that_need_to_be_rasterized_.begin(),
      tiles_that_need_to_be_rasterized_.end());
}

void TileManager::FreeResourcesForTile(Tile* tile) {
  ManagedTileState& managed_tile_state = tile->managed_state();
  DCHECK(managed_tile_state.can_be_freed);
  if (managed_tile_state.resource)
    resource_pool_->ReleaseResource(managed_tile_state.resource.Pass());
}
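
// Dispatches work in two passes: first drain tiles that were parked while
// waiting for image decodes, then walk the raster queue, parking any tile
// that still has pending decodes. Returns early once the raster worker pool
// reports it is busy.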
void TileManager::DispatchMoreTasks() {
  // Because tiles in the image decoding list have higher priorities, we
  // need to process those tiles first before we start to handle the tiles
  // in the need_to_be_rasterized queue.
  std::list<Tile*>::iterator it = tiles_with_image_decoding_tasks_.begin();
  while (it != tiles_with_image_decoding_tasks_.end()) {
    DispatchImageDecodeTasksForTile(*it);
    ManagedTileState& managed_state = (*it)->managed_state();
    if (managed_state.pending_pixel_refs.empty()) {
      if (raster_worker_pool_->IsBusy())
        return;
      DispatchOneRasterTask(*it);
      tiles_with_image_decoding_tasks_.erase(it++);
    } else {
      ++it;
    }
  }

  // Process all tiles in the need_to_be_rasterized queue. If a tile has
  // image decoding tasks, put it at the back of the image decoding list.
  while (!tiles_that_need_to_be_rasterized_.empty()) {
    Tile* tile = tiles_that_need_to_be_rasterized_.back();
    DispatchImageDecodeTasksForTile(tile);
    ManagedTileState& managed_state = tile->managed_state();
    if (!managed_state.pending_pixel_refs.empty()) {
      tiles_with_image_decoding_tasks_.push_back(tile);
    } else {
      if (raster_worker_pool_->IsBusy())
        return;
      DispatchOneRasterTask(tile);
    }
    tiles_that_need_to_be_rasterized_.pop_back();
  }
}
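
// Collects the lazy pixel refs (deferred images) that intersect the tile's
// content rect. The gather happens at most once per tile; the result is
// cached in pending_pixel_refs.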
void TileManager::GatherPixelRefsForTile(Tile* tile) {
  TRACE_EVENT0("cc", "TileManager::GatherPixelRefsForTile");
  ManagedTileState& managed_state = tile->managed_state();
  if (managed_state.need_to_gather_pixel_refs) {
    base::TimeTicks gather_begin_time = base::TimeTicks::Now();
    const_cast<PicturePileImpl*>(tile->picture_pile())->GatherPixelRefs(
        tile->content_rect_, managed_state.pending_pixel_refs);
    rendering_stats_.totalImageGatheringCount++;
    rendering_stats_.totalImageGatheringTimeInSeconds +=
        (base::TimeTicks::Now() - gather_begin_time).InSecondsF();
    managed_state.need_to_gather_pixel_refs = false;
  }
}
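
// For each of the tile's pending pixel refs: skip refs that already have a
// decode task in flight, drop refs whose data is already decoded (a cache
// hit), and post a decode task for the rest, backing off when the worker
// pool is busy.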
void TileManager::DispatchImageDecodeTasksForTile(Tile* tile) {
  GatherPixelRefsForTile(tile);
  std::list<skia::LazyPixelRef*>& pending_pixel_refs =
      tile->managed_state().pending_pixel_refs;
  std::list<skia::LazyPixelRef*>::iterator it = pending_pixel_refs.begin();
  while (it != pending_pixel_refs.end()) {
    if (pending_decode_tasks_.end() != pending_decode_tasks_.find(
            (*it)->getGenerationID())) {
      ++it;
      continue;
    }
    // TODO(qinmin): pass the correct image size to PrepareToDecode().
    if ((*it)->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) {
      rendering_stats_.totalDeferredImageCacheHitCount++;
      pending_pixel_refs.erase(it++);
    } else {
      if (raster_worker_pool_->IsBusy())
        return;
      DispatchOneImageDecodeTask(tile, *it);
      ++it;
    }
  }
}

void TileManager::DispatchOneImageDecodeTask(
    scoped_refptr<Tile> tile, skia::LazyPixelRef* pixel_ref) {
  TRACE_EVENT0("cc", "TileManager::DispatchOneImageDecodeTask");
  uint32_t pixel_ref_id = pixel_ref->getGenerationID();
  DCHECK(pending_decode_tasks_.end() ==
         pending_decode_tasks_.find(pixel_ref_id));
  pending_decode_tasks_[pixel_ref_id] = pixel_ref;

  raster_worker_pool_->PostImageDecodeTaskAndReply(
      pixel_ref,
      base::Bind(&TileManager::OnImageDecodeTaskCompleted,
                 base::Unretained(this),
                 tile,
                 pixel_ref_id));
}
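
// Reply callback for PostImageDecodeTaskAndReply. Removes the decoded pixel
// ref from every tile still waiting on it, then calls DispatchMoreTasks()
// since a tile may now be ready to rasterize.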
void TileManager::OnImageDecodeTaskCompleted(
    scoped_refptr<Tile> tile, uint32_t pixel_ref_id) {
  TRACE_EVENT0("cc", "TileManager::OnImageDecodeTaskCompleted");
  pending_decode_tasks_.erase(pixel_ref_id);

  for (TileList::iterator it = tiles_with_image_decoding_tasks_.begin();
       it != tiles_with_image_decoding_tasks_.end(); ++it) {
    std::list<skia::LazyPixelRef*>& pixel_refs =
        (*it)->managed_state().pending_pixel_refs;
    for (std::list<skia::LazyPixelRef*>::iterator pixel_it =
             pixel_refs.begin(); pixel_it != pixel_refs.end(); ++pixel_it) {
      if (pixel_ref_id == (*pixel_it)->getGenerationID()) {
        pixel_refs.erase(pixel_it);
        break;
      }
    }
  }

  DispatchMoreTasks();
}
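
// Acquires a resource for the tile, maps its pixel buffer and posts a raster
// task to the worker pool. While the task is in flight the tile is pinned
// (can_be_freed is false) and marked as being initialized.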
void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) {
  TRACE_EVENT0("cc", "TileManager::DispatchOneRasterTask");
  ManagedTileState& managed_tile_state = tile->managed_state();
  DCHECK(managed_tile_state.can_use_gpu_memory);
  scoped_ptr<ResourcePool::Resource> resource =
      resource_pool_->AcquireResource(tile->tile_size_.size(), tile->format_);
  resource_pool_->resource_provider()->acquirePixelBuffer(resource->id());

  managed_tile_state.resource_is_being_initialized = true;
  managed_tile_state.can_be_freed = false;

  ResourceProvider::ResourceId resource_id = resource->id();

  raster_worker_pool_->PostRasterTaskAndReply(
      tile->picture_pile(),
      resource_pool_->resource_provider()->mapPixelBuffer(resource_id),
      tile->content_rect_,
      tile->contents_scale(),
      base::Bind(&TileManager::OnRasterTaskCompleted,
                 base::Unretained(this),
                 tile,
                 base::Passed(&resource),
                 manage_tiles_call_count_));
}

void TileManager::OnRasterTaskCompleted(
    scoped_refptr<Tile> tile,
    scoped_ptr<ResourcePool::Resource> resource,
    int manage_tiles_call_count_when_dispatched) {
  TRACE_EVENT0("cc", "TileManager::OnRasterTaskCompleted");

  // Release raster resources.
  resource_pool_->resource_provider()->unmapPixelBuffer(resource->id());

  ManagedTileState& managed_tile_state = tile->managed_state();
  managed_tile_state.can_be_freed = true;

  // The tile can be freed after the completion of the raster task. Call
  // AssignGpuMemoryToTiles() to re-assign gpu memory to the highest priority
  // tiles if ManageTiles() has been called since the task was dispatched. The
  // result of this could be that this tile is no longer allowed to use gpu
  // memory, in which case we need to abort initialization and free all
  // associated resources before calling DispatchMoreTasks().
  if (manage_tiles_call_count_when_dispatched != manage_tiles_call_count_)
    AssignGpuMemoryToTiles();

  // Finish resource initialization if |can_use_gpu_memory| is true.
  if (managed_tile_state.can_use_gpu_memory) {
    // The component order may be bgra if we're uploading bgra pixels to an
    // rgba texture. Mark the contents as swizzled if the image component
    // order is different from the texture format.
    managed_tile_state.contents_swizzled =
        !PlatformColor::sameComponentOrder(tile->format_);

    // Tile resources can't be freed until the upload has completed.
    managed_tile_state.can_be_freed = false;

    resource_pool_->resource_provider()->beginSetPixels(resource->id());
    resource_pool_->resource_provider()->shallowFlushIfSupported();
    managed_tile_state.resource = resource.Pass();
    tiles_with_pending_set_pixels_.push(tile);
  } else {
    resource_pool_->resource_provider()->releasePixelBuffer(resource->id());
    resource_pool_->ReleaseResource(resource.Pass());
    managed_tile_state.resource_is_being_initialized = false;
  }

  DispatchMoreTasks();
}
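
// Called once the pixel upload for a tile has completed: the tile's resource
// is now fully initialized and the tile may be freed again if memory pressure
// requires it.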
void TileManager::DidFinishTileInitialization(Tile* tile) {
  ManagedTileState& managed_tile_state = tile->managed_state();
  DCHECK(managed_tile_state.resource);
  managed_tile_state.resource_is_being_initialized = false;
  managed_tile_state.can_be_freed = true;
}

}  // namespace cc