/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a
 * base class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there is substantial room
 * for performance gains if a smarter free list is implemented. Currently it
 * is just an unordered stack of free regions; an RB-tree would be a clear
 * improvement, at least under heavy fragmentation. Aligned allocations could
 * also be handled more efficiently.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

/* Number of spare nodes to keep in the unused pool for atomic callers. */
#define MM_UNUSED_TARGET 4

/* Return the size of the free node at the tail of the managed range, or
 * 0 if the tail node is allocated. */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return 0;

        return entry->size;
}

/* Shrink the managed range by taking 'size' bytes off its tail. Fails
 * unless the tail node is free and strictly larger than 'size'. */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return -ENOMEM;

        if (entry->size <= size)
                return -ENOMEM;

        entry->size -= size;
        return 0;
}

/* Allocate a node structure, falling back to the preallocated unused
 * pool when kmalloc() fails (e.g. for GFP_ATOMIC callers). */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kmalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kmalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child = list_entry(mm->unused_nodes.next,
                                           struct drm_mm_node, fl_entry);
                        list_del(&child->fl_entry);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/* Refill the unused pool up to MM_UNUSED_TARGET nodes so that a later
 * atomic allocation has spare nodes to draw from. The lock is dropped
 * around the sleeping kmalloc(). */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kmalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->fl_entry, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
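
/*
 * A minimal usage sketch, not part of the original file: a hypothetical
 * caller that must carve out a block while holding a spinlock calls
 * drm_mm_pre_get() from sleeping context first, then allocates with
 * atomic == 1 so drm_mm never needs GFP_KERNEL. The lock, size and
 * alignment below are illustrative assumptions.
 */
static __maybe_unused struct drm_mm_node *
example_atomic_alloc(struct drm_mm *mm, spinlock_t *example_lock)
{
        struct drm_mm_node *free_node, *block = NULL;

        if (drm_mm_pre_get(mm))         /* may sleep; call unlocked */
                return NULL;

        spin_lock(example_lock);
        free_node = drm_mm_search_free(mm, 4096, 0, 0);
        if (free_node)
                block = drm_mm_get_block_generic(free_node, 4096, 0, 1);
        spin_unlock(example_lock);

        return block;
}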

static int drm_mm_create_tail_node(struct drm_mm *mm,
                                   unsigned long start,
                                   unsigned long size, int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(mm, atomic);
        if (unlikely(child == NULL))
                return -ENOMEM;

        child->free = 1;
        child->size = size;
        child->start = start;
        child->mm = mm;

        list_add_tail(&child->ml_entry, &mm->ml_entry);
        list_add_tail(&child->fl_entry, &mm->fl_entry);

        return 0;
}

/* Grow the managed range by 'size' bytes, extending a trailing free node
 * in place or appending a new one after an allocated tail. */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free) {
                return drm_mm_create_tail_node(mm, entry->start + entry->size,
                                               size, atomic);
        }
        entry->size += size;
        return 0;
}
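
/*
 * A minimal sketch, not part of the original file: how a hypothetical
 * driver might resize the managed range when its aperture grows or
 * shrinks by 'delta' bytes. The function name and the non-atomic (0)
 * context are illustrative assumptions.
 */
static __maybe_unused int example_resize_range(struct drm_mm *mm, long delta)
{
        if (delta > 0)
                return drm_mm_add_space_to_tail(mm, delta, 0);
        if (delta < 0)
                /* Succeeds only while drm_mm_tail_space(mm) > -delta. */
                return drm_mm_remove_space_from_tail(mm, -delta);
        return 0;
}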

/* Carve 'size' bytes off the start of the free node 'parent' and return
 * them as a new, allocated node. */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
                                                 unsigned long size,
                                                 int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(parent->mm, atomic);
        if (unlikely(child == NULL))
                return NULL;

        INIT_LIST_HEAD(&child->fl_entry);

        child->free = 0;
        child->size = size;
        child->start = parent->start;
        child->mm = parent->mm;

        list_add_tail(&child->ml_entry, &parent->ml_entry);

        parent->size -= size;
        parent->start += size;
        return child;
}

/* Allocate 'size' bytes from the free node 'node', splitting off any
 * alignment padding first and handing it straight back to the free list. */
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;

        if (alignment)
                tmp = node->start % alignment;

        if (tmp) {
                align_splitoff =
                    drm_mm_split_at_start(node, alignment - tmp, atomic);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                /* Exact fit: take the whole node off the free stack. */
                list_del_init(&node->fl_entry);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, atomic);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
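
/*
 * A minimal sketch, not part of the original file, of the usual two-step
 * allocation: search for a suitable free node, then carve the block out
 * of it. The 8KB size and 4KB alignment are illustrative assumptions.
 */
static __maybe_unused void example_alloc_and_free(struct drm_mm *mm)
{
        struct drm_mm_node *free_node, *block;

        /* best_match == 0: first fit that still satisfies the alignment. */
        free_node = drm_mm_search_free(mm, 2 * 4096, 4096, 0);
        if (!free_node)
                return;

        block = drm_mm_get_block_generic(free_node, 2 * 4096, 4096, 0);
        if (!block)
                return;

        /* ... use [block->start, block->start + block->size) ... */

        drm_mm_put_block(block);        /* merges with free neighbours */
}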

/*
 * Put a block. Merge with the previous and/or next block if they are
 * free. Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
        struct drm_mm *mm = cur->mm;
        struct list_head *cur_head = &cur->ml_entry;
        struct list_head *root_head = &mm->ml_entry;
        struct drm_mm_node *prev_node = NULL;
        struct drm_mm_node *next_node;

        int merged = 0;

        if (cur_head->prev != root_head) {
                prev_node =
                    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
                if (prev_node->free) {
                        prev_node->size += cur->size;
                        merged = 1;
                }
        }
        if (cur_head->next != root_head) {
                next_node =
                    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
                if (next_node->free) {
                        if (merged) {
                                /* Both neighbours free: fold the next node
                                 * into prev and recycle its memory. */
                                prev_node->size += next_node->size;
                                list_del(&next_node->ml_entry);
                                list_del(&next_node->fl_entry);
                                if (mm->num_unused < MM_UNUSED_TARGET) {
                                        list_add(&next_node->fl_entry,
                                                 &mm->unused_nodes);
                                        ++mm->num_unused;
                                } else
                                        kfree(next_node);
                        } else {
                                next_node->size += cur->size;
                                next_node->start = cur->start;
                                merged = 1;
                        }
                }
        }
        if (!merged) {
                cur->free = 1;
                list_add(&cur->fl_entry, &mm->fl_entry);
        } else {
                /* 'cur' was absorbed by a neighbour; recycle its node. */
                list_del(&cur->ml_entry);
                if (mm->num_unused < MM_UNUSED_TARGET) {
                        list_add(&cur->fl_entry, &mm->unused_nodes);
                        ++mm->num_unused;
                } else
                        kfree(cur);
        }
}
EXPORT_SYMBOL(drm_mm_put_block);

/* Walk the free stack and return a node that can hold 'size' bytes at
 * 'alignment'. With best_match set, return the smallest such node
 * instead of the first. */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct list_head *list;
        const struct list_head *free_stack = &mm->fl_entry;
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;
        unsigned wasted;

        best = NULL;
        best_size = ~0UL;

        list_for_each(list, free_stack) {
                entry = list_entry(list, struct drm_mm_node, fl_entry);
                wasted = 0;

                if (entry->size < size)
                        continue;

                if (alignment) {
                        register unsigned tmp = entry->start % alignment;
                        if (tmp)
                                wasted += alignment - tmp;
                }

                if (entry->size >= size + wasted) {
                        if (!best_match)
                                return entry;
                        if (entry->size < best_size) {
                                best = entry;
                                best_size = entry->size;
                        }
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

/* True when nothing is allocated, i.e. the manager holds exactly one
 * (free) node. */
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->ml_entry;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->ml_entry);
        INIT_LIST_HEAD(&mm->fl_entry);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        spin_lock_init(&mm->unused_lock);

        return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
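
/*
 * A minimal lifecycle sketch, not part of the original file: initialize a
 * manager over a hypothetical 16MB range at offset 0 and tear it down
 * again once it is clean.
 */
static __maybe_unused int example_lifecycle(void)
{
        struct drm_mm mm;
        int ret;

        ret = drm_mm_init(&mm, 0, 16 * 1024 * 1024);
        if (ret)
                return ret;

        /* ... allocate and free blocks, e.g. as in
         * example_alloc_and_free() above ... */

        /* Takedown is refused while any block is still allocated. */
        if (drm_mm_clean(&mm))
                drm_mm_takedown(&mm);

        return 0;
}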

/* Free the single remaining node and drain the unused pool. Refuses to
 * tear down a manager that still has outstanding allocations. */
void drm_mm_takedown(struct drm_mm *mm)
{
        struct list_head *bnode = mm->fl_entry.next;
        struct drm_mm_node *entry;
        struct drm_mm_node *next;

        entry = list_entry(bnode, struct drm_mm_node, fl_entry);

        if (entry->ml_entry.next != &mm->ml_entry ||
            entry->fl_entry.next != &mm->fl_entry) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        list_del(&entry->fl_entry);
        list_del(&entry->ml_entry);
        kfree(entry);

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
                list_del(&entry->fl_entry);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);