/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

extern int drm_vma_debug;

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core. Which means that it's easier to extend for
 * some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a fairly
 * steep cliff anyway it's not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */

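/*
 * To make the overview concrete, here is a minimal usage sketch. It is
 * illustrative only and assumes a driver-side spinlock named "lock"; all
 * functions used are the ones defined in this file:
 *
 *      struct drm_mm mm;
 *      struct drm_mm_node node = {};
 *      int ret;
 *
 *      drm_mm_init(&mm, 0, 8 << 20);   // manage the range [0, 8 MiB)
 *
 *      spin_lock(&lock);               // drm_mm is not thread-safe
 *      ret = drm_mm_insert_node_in_range_generic(&mm, &node,
 *                                                4096, 0, 0,   // size, align, color
 *                                                0, 8 << 20,   // allowed range
 *                                                DRM_MM_SEARCH_DEFAULT,
 *                                                DRM_MM_CREATE_DEFAULT);
 *      spin_unlock(&lock);
 *      if (ret)                // -ENOSPC: no suitable hole, evict and retry
 *              return ret;
 *
 *      spin_lock(&lock);
 *      drm_mm_remove_node(&node);      // node can be reused right away
 *      spin_unlock(&lock);
 *
 *      drm_mm_takedown(&mm);   // only valid once all nodes are removed
 */
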
static struct drm_mm_node *
drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
				    u64 size, u64 alignment,
				    unsigned long color,
				    u64 start, u64 end,
				    enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, M_DRM, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf, M_DRM);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

#ifndef __DragonFly__
#include <linux/interval_tree_generic.h>

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
#else
static struct drm_mm_node *
drm_mm_interval_tree_iter_first(struct rb_root *rb, u64 start, u64 last)
{
	struct drm_mm *mm = container_of(rb, typeof(*mm), interval_tree);
	struct drm_mm_node *node;

	drm_mm_for_each_node(node, mm) {
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}

	return NULL;
}
#endif

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 u64 range_start, u64 range_end,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, range_start);
	adj_end = min(adj_end, range_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	DRM_MM_BUG_ON(node->start < range_start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > range_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!drm_mm_hole_follows(hole))
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

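/*
 * A sketch of the firmware-takeover case described above (fb_base and
 * fb_size are hypothetical values read back from the hardware):
 *
 *      struct drm_mm_node fb_node = {};        // must start out zeroed
 *
 *      fb_node.start = fb_base;
 *      fb_node.size = fb_size;
 *      if (drm_mm_reserve_node(&mm, &fb_node) == -ENOSPC)
 *              ;       // range already taken: fall back to a normal allocation
 */
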
/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node,
			     size, alignment, color,
			     start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

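/*
 * For instance, a top-down, 64 KiB-aligned allocation restricted to the
 * first 256 MiB of the managed range could look like this (sketch only,
 * sizes picked for illustration):
 *
 *      ret = drm_mm_insert_node_in_range_generic(&mm, &node,
 *                                                size, 64 << 10, 0,
 *                                                0, 256 << 20,
 *                                                DRM_MM_SEARCH_BELOW,
 *                                                DRM_MM_CREATE_TOP);
 */
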
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (drm_vma_debug & 2) {
		/* DragonFly-specific debug tracing hook */
	}

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node =
		list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (drm_mm_hole_follows(node)) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));
	}

	if (!drm_mm_hole_follows(prev_node)) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *
drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
				    u64 size, u64 alignment,
				    unsigned long color,
				    u64 start, u64 end,
				    enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

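/*
 * Sketch: handing an allocation over to a node embedded in a longer-lived
 * object (struct my_bo and its embedded "node" member are hypothetical):
 *
 *      drm_mm_replace_node(&tmp_node, &bo->node);
 *      // tmp_node no longer owns anything; bo->node now covers the
 *      // same [start, start + size) range, at the same address.
 */
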
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not most
 * efficient to simply select all objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in struct
 * &drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before a
 * scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the whole flow follows below.
 */

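/*
 * A sketch of the flow described above. "lru" is an assumed driver-side
 * eviction list and struct my_obj a hypothetical object type embedding a
 * drm_mm_node "node" plus list_heads "lru_link" and "evict_link":
 *
 *      struct drm_mm_scan scan;
 *      struct my_obj *obj, *tmp;
 *      LIST_HEAD(evict);
 *      bool found = false;
 *
 *      drm_mm_scan_init_with_range(&scan, &mm, size, 0, 0,
 *                                  0, U64_MAX, 0);
 *      list_for_each_entry(obj, &lru, lru_link) {
 *              // list_add() prepends, so walking "evict" head-first later
 *              // visits the blocks in exactly the reverse order of addition
 *              list_add(&obj->evict_link, &evict);
 *              if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *                      found = true;
 *                      break;
 *              }
 *      }
 *
 *      // every added block must be removed again, newest first
 *      list_for_each_entry_safe(obj, tmp, &evict, evict_link) {
 *              if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *                      list_del(&obj->evict_link);     // not needed for the hole
 *      }
 *
 *      // if found, unbind everything left on "evict" (and any node returned
 *      // by drm_mm_scan_color_evict()), then retry the real allocation
 */
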
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 unsigned int flags)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->flags = flags;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->flags == DRM_MM_CREATE_TOP)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->flags != DRM_MM_CREATE_TOP)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range_generic() or one of the
 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking:
	 * the fake head_node spans everything outside [start, start + size),
	 * so the entire managed range is simply the hole that follows it.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(struct drm_mm *mm, struct drm_printer *p)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);