/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
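/*
 * A minimal usage sketch, for orientation only. The 1 MiB range and the
 * 4096-byte aligned request are made-up example values: a caller
 * initializes a manager over an address range, searches the free stack
 * for a suitable hole, carves a block out of it, and eventually puts the
 * block back so it can merge with neighbouring free space.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	hole = drm_mm_search_free(&mm, 4096, 4096, 0);
 *	if (hole != NULL) {
 *		node = drm_mm_get_block_generic(hole, 4096, 4096, 0);
 *		...
 *		drm_mm_put_block(node);
 *	}
 *	drm_mm_takedown(&mm);
 */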
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

/* Target number of spare nodes kept on the unused_nodes list. */
#define MM_UNUSED_TARGET 4
/* Return the size of the free block at the tail of the managed range,
 * or 0 if the tail block is in use.
 */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}
/* Shrink the managed range by carving "size" bytes off a free tail block. */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}
/*
 * Allocate a node, falling back to the pre-allocated unused_nodes pool
 * when kmalloc() fails, so that atomic callers can still make progress.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/*
 * Top up the unused_nodes pool to MM_UNUSED_TARGET entries so that later
 * atomic allocations cannot fail for lack of a node. Returns 0 on success
 * or -ENOMEM if the pool could not be filled to a usable level.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
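/*
 * A sketch of one plausible calling pattern (the lock and the caller are
 * hypothetical, not part of this file): top up the node pool while
 * sleeping is still allowed, then allocate with atomic != 0 from a
 * context that must not sleep, letting drm_mm_kmalloc() fall back to the
 * pool if the GFP_ATOMIC allocation fails.
 *
 *	if (drm_mm_pre_get(&mm))
 *		return -ENOMEM;
 *	spin_lock(&caller_lock);
 *	node = drm_mm_get_block_generic(hole, size, 0, 1);
 *	spin_unlock(&caller_lock);
 */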
/* Append a new free node covering [start, start + size) to the range. */
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}
/* Grow the managed range by "size" bytes at the tail. */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm,
					       entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
/*
 * Split a node in two, returning a new used node of "size" bytes at the
 * start of the parent, which shrinks accordingly.
 */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);
	INIT_LIST_HEAD(&child->fl_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	/* Split off the misaligned head so the returned block is aligned. */
	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		/* Exact fit: take the whole free node off the free stack. */
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/*
 * Put back a block. Merge it with the previous and/or next block if they
 * are free; otherwise add it to the free stack.
 */
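/*
 * Worked example with made-up offsets: given a free block [0x0000, 0x1000),
 * a used block [0x1000, 0x2000) being put back, and a free block
 * [0x2000, 0x4000), the previous block first absorbs the freed range and
 * then the following free block, leaving one free block [0x0000, 0x4000);
 * the two surplus nodes are recycled via unused_nodes or freed.
 */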
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			/* Fold the freed range into the previous block. */
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours free: absorb the next
				 * block as well and recycle its node. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				/* Grow the next block downwards. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		/* The range now lives in a neighbour; recycle cur. */
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Best fit: remember the smallest hole that still
			 * satisfies the request. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	/* Clean means exactly one node (the initial free block) remains. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
#if defined(CONFIG_DEBUG_FS)
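/*
 * The dump emits one line per node, "start-end: size: state", plus a
 * summary. Sample output with made-up numbers for a 1 MiB range holding
 * a single 4096-byte allocation:
 *
 *	0x00000000-0x00001000: 0x00001000: used
 *	0x00001000-0x00100000: 0x000ff000: free
 *	total: 1048576, used 4096 free 1044480
 */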
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	int total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %d, used %d free %d\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif