/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff anyway it is not a real concern. Removing a node
 * again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
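
/*
 * Example: a minimal usage sketch of the embedding pattern described above.
 * This is illustrative driver code, not part of this file; "my_vram_mm",
 * "my_obj" and the sizes are made up, only the drm_mm_* calls are real.
 *
 *	static struct drm_mm my_vram_mm;	// must be zeroed before init
 *
 *	struct my_obj {
 *		struct drm_mm_node node;	// embedded: no separate alloc
 *	};
 *
 *	static int my_alloc_example(struct my_obj *obj)
 *	{
 *		int ret;
 *
 *		// Manage a 256 MiB range starting at offset 0.
 *		drm_mm_init(&my_vram_mm, 0, 256ULL << 20);
 *
 *		// Search for a hole and insert: 4 KiB, 4 KiB aligned, color 0.
 *		// obj (and thus obj->node) must be cleared to 0 beforehand.
 *		ret = drm_mm_insert_node_generic(&my_vram_mm, &obj->node,
 *						 4096, 4096, 0,
 *						 DRM_MM_SEARCH_DEFAULT,
 *						 DRM_MM_CREATE_DEFAULT);
 *		if (ret)
 *			return ret;	// -ENOSPC: no suitable hole
 *
 *		// obj->node.start now holds the assigned offset.
 *		drm_mm_remove_node(&obj->node);
 *		drm_mm_takedown(&my_vram_mm);
 *		return 0;
 *	}
 */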
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
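
/*
 * Example: taking over a firmware-initialized scanout buffer, as described in
 * the overview. A sketch only; "my_vram_mm", "fb_base" and "fb_size" are
 * hypothetical values a driver would read out of the hardware or firmware.
 *
 *	struct drm_mm_node *fb_node;
 *	int ret;
 *
 *	fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
 *	if (!fb_node)
 *		return -ENOMEM;
 *
 *	fb_node->start = fb_base;	// range the firmware already scans out
 *	fb_node->size = fb_size;
 *	fb_node->color = 0;
 *	ret = drm_mm_reserve_node(&my_vram_mm, fb_node);
 *	if (ret)
 *		;	// -ENOSPC: something else already sits in that range
 */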
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color,
			     DRM_MM_CREATE_DEFAULT);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
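
/*
 * Example: the same insertion as in the overview sketch, but restricted to
 * the first 16 MiB of the managed range, e.g. for an object that must stay
 * inside a small aperture. A sketch; the 16 MiB limit is a made-up
 * constraint and "my_vram_mm"/"obj" are the hypothetical names from above.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&my_vram_mm, &obj->node,
 *						  4096, 4096, 0,
 *						  0, 16ULL << 20,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */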
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		/* Round start up to the alignment, e.g. start 0x1003 with
		 * alignment 0x1000 gives rem 3 and a new start of 0x2000. */
		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       u64 size,
					       unsigned alignment,
					       unsigned long color,
					       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
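
/*
 * Example sketch: migrating an allocation from a temporary node into a node
 * embedded in a newly created object, without searching again. Names other
 * than the drm_mm_* calls ("my_obj", "tmp", "new_obj") are hypothetical.
 *
 *	struct drm_mm_node tmp;		// zeroed, then inserted earlier
 *	struct my_obj *new_obj;		// freshly allocated, node cleared
 *
 *	drm_mm_replace_node(&tmp, &new_obj->node);
 *	// new_obj->node now owns the range (same start/size/color);
 *	// tmp is no longer in the allocator and may be reused or freed.
 */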
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly. A sketch of the whole
 * protocol follows below.
 */
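
/*
 * Example: the scan protocol described above, as a sketch. Everything
 * prefixed "my_" plus the scan_link bookkeeping is hypothetical driver code;
 * only the drm_mm_* calls and the reverse-order restore requirement come from
 * this file.
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *		struct list_head lru;		// on the driver's LRU
 *		struct list_head scan_link;	// scratch link for the scan
 *	};
 *
 *	static int my_make_room(struct drm_mm *mm, struct list_head *lru_list,
 *				u64 size, unsigned alignment)
 *	{
 *		struct my_obj *obj, *next;
 *		struct list_head scan_list, evict_list;
 *		bool found = false;
 *
 *		INIT_LIST_HEAD(&scan_list);
 *		INIT_LIST_HEAD(&evict_list);
 *
 *		drm_mm_init_scan(mm, size, alignment, 0);
 *		list_for_each_entry(obj, lru_list, lru) {
 *			// list_add() puts the newest entry first, so walking
 *			// scan_list front to back is reverse addition order.
 *			list_add(&obj->scan_link, &scan_list);
 *			if (drm_mm_scan_add_block(&obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		// Restore the allocator: every block added must be removed
 *		// again, newest first. A true return value marks a node
 *		// overlapping the hole that was found.
 *		list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *			list_del(&obj->scan_link);
 *			if (drm_mm_scan_remove_block(&obj->node) && found)
 *				list_add(&obj->scan_link, &evict_list);
 *		}
 *
 *		if (!found)
 *			return -ENOSPC;
 *
 *		// Only now, with the scan list drained, is eviction safe.
 *		list_for_each_entry_safe(obj, next, &evict_list, scan_link)
 *			my_evict(obj);	// ends in drm_mm_remove_node()
 *		return 0;
 *	}
 */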
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	/* Temporarily unlink the node so the hole it occupied can be probed;
	 * the stale prev/next pointers are kept around for the restore in
	 * drm_mm_scan_remove_block(). */
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking:
	 * head_node sits at the end of the range with a size that wraps back
	 * around, so the hole following it covers the whole managed range
	 * [start, start + size). */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}
/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}
/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif