/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
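
/*
 * Typical call sequence, as a minimal sketch (sizes are arbitrary and
 * error handling is trimmed; real callers keep the manager in a
 * driver-private structure):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 0, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */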

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
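
/*
 * Return the size of the free block at the tail of the managed range,
 * or 0 if the tail block is in use.
 */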
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}
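
/*
 * Shrink the managed range by taking @size bytes off its free tail
 * block. Fails with -ENOMEM if the tail block is in use or not
 * strictly larger than @size.
 */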
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/*
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
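
/*
 * A minimal sketch of the atomic path (dev_priv and free_node are
 * hypothetical): a caller that must allocate under its own spinlock
 * tops up the node cache first, then passes atomic == 1 so that
 * drm_mm_kmalloc() falls back to the cache instead of sleeping:
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *	spin_lock(&dev_priv->mm_lock);
 *	node = drm_mm_get_block_generic(free_node, size, alignment, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */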

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}
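
/*
 * Grow the managed range by @size bytes: extend the tail block if it
 * is free, otherwise append a new free node after the current tail.
 */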
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
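
/*
 * Carve @size bytes off the start of the free node @parent and return
 * them as a new, allocated node; @parent keeps the remainder.
 */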
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);
	INIT_LIST_HEAD(&child->fl_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}
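
/*
 * Allocate @size bytes from the free node @node. If @alignment forces
 * a gap before the aligned start, that gap is split off first and
 * handed back to the free list via drm_mm_put_block().
 */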
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
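
/*
 * Like drm_mm_get_block_generic(), but constrains the allocation to
 * the [start, end] range: any space before @start or before the
 * aligned offset is split off and returned to the free list.
 */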
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are
 * free. Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
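
/*
 * Walk the free stack looking for a node that can hold @size bytes at
 * the given alignment. With @best_match set the whole stack is scanned
 * for the smallest fitting node; otherwise the first fit is returned.
 */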
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
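
/*
 * Like drm_mm_search_free(), but only considers free nodes overlapping
 * the [start, end] range, accounting for the space wasted before
 * @start and before the aligned offset.
 */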
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			register unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
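
/*
 * Return nonzero if the manager is back in its initial state, i.e. the
 * block list holds exactly one node: the single free block.
 */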
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
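
/*
 * Initialize a manager covering [start, start + size) with a single
 * free node spanning the whole range.
 */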
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
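
/*
 * Tear down a manager. Refuses (with a DRM_ERROR) if anything besides
 * the single initial free block is still allocated; otherwise frees
 * that block and the cache of unused nodes.
 */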
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif