Linux-2.6.12-rc2
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / char / drm / radeon_mem.c
blob 134f894e6e4bc00f3ed245a521118a717b6dc494
/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*-
 *
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */
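/* Each heap is a circular, doubly linked list of struct mem_block.  The list
 * head allocated by init_heap() acts as a sentinel: it carries the special
 * filp value (DRMFILE) -1 so it is never treated as free and never merged
 * away.  Real blocks use filp == NULL for "free" and the owning client's
 * filp for "allocated".  split_block() below carves an aligned sub-range out
 * of a free block, leaving up to two new free fragments around it.
 */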
static struct mem_block *split_block(struct mem_block *p, int start, int size,
                                     DRMFILE filp)
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

out:
        /* Our block is in the middle */
        p->filp = filp;
        return p;
}
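/* First-fit search: round each candidate start up to a (1 << align2)-byte
 * boundary and take the first free block that can still hold 'size' bytes
 * from that aligned start; split_block() then trims it to the exact range.
 */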
static struct mem_block *alloc_block(struct mem_block *heap, int size,
                                     int align2, DRMFILE filp)
{
        struct mem_block *p;
        int mask = (1 << align2) - 1;

        list_for_each(p, heap) {
                int start = (p->start + mask) & ~mask;
                if (p->filp == 0 && start + size <= p->start + p->size)
                        return split_block(p, start, size, filp);
        }

        return NULL;
}
static struct mem_block *find_block(struct mem_block *heap, int start)
{
        struct mem_block *p;

        list_for_each(p, heap)
                if (p->start == start)
                        return p;

        return NULL;
}
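/* Mark a block free and merge it with any free neighbours.  Note that the
 * merge with the previous block frees 'p' itself, so callers must not touch
 * the pointer afterwards.
 */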
static void free_block(struct mem_block *p)
{
        p->filp = NULL;

        /* Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->filp == 0) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                drm_free(q, sizeof(*q), DRM_MEM_BUFS);
        }

        if (p->prev->filp == 0) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                drm_free(p, sizeof(*q), DRM_MEM_BUFS);
        }
}
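/* init_heap() below allocates the sentinel list head plus a single free
 * block spanning the whole region; elsewhere in this file a NULL *heap is
 * what identifies an uninitialized heap.
 */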
/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
        struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

        if (!blocks)
                return DRM_ERR(ENOMEM);

        *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
        if (!*heap) {
                drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
                return DRM_ERR(ENOMEM);
        }

        blocks->start = start;
        blocks->size = size;
        blocks->filp = NULL;
        blocks->next = blocks->prev = *heap;

        memset(*heap, 0, sizeof(**heap));
        (*heap)->filp = (DRMFILE) -1;
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
}
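/* radeon_mem_release() runs two passes over the heap: the first only clears
 * ownership of the releasing filp's blocks, the second merges runs of
 * adjacent free blocks back together.
 */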
/* Free all blocks associated with the releasing file.
 */
void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        list_for_each(p, heap) {
                if (p->filp == filp)
                        p->filp = NULL;
        }

        /* Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        list_for_each(p, heap) {
                while (p->filp == 0 && p->next->filp == 0) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
                }
        }
}
/* Shutdown.
 */
void radeon_mem_takedown(struct mem_block **heap)
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next; p != *heap; ) {
                struct mem_block *q = p;
                p = p->next;
                drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
        }

        drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
        *heap = NULL;
}
/* IOCTL HANDLERS */
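/* Map the 'region' argument of the memory ioctls onto the matching heap
 * pointer in the driver's private data (GART or framebuffer).
 */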
static struct mem_block **get_heap(drm_radeon_private_t *dev_priv,
                                   int region)
{
        switch (region) {
        case RADEON_MEM_REGION_GART:
                return &dev_priv->gart_heap;
        case RADEON_MEM_REGION_FB:
                return &dev_priv->fb_heap;
        default:
                return NULL;
        }
}
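/* Allocation ioctl: copy the request in, force at least 4k (1 << 12)
 * alignment, carve a block out of the requested heap and copy the resulting
 * offset back through the user-supplied region_offset pointer.
 */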
int radeon_mem_alloc(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_alloc_t alloc;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *)data,
                                 sizeof(alloc));

        heap = get_heap(dev_priv, alloc.region);
        if (!heap || !*heap)
                return DRM_ERR(EFAULT);

        /* Make things easier on ourselves: all allocations at least
         * 4k aligned.
         */
        if (alloc.alignment < 12)
                alloc.alignment = 12;

        block = alloc_block(*heap, alloc.size, alloc.alignment,
                            filp);

        if (!block)
                return DRM_ERR(ENOMEM);

        if (DRM_COPY_TO_USER(alloc.region_offset, &block->start,
                             sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}
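/* Free ioctl: look the block up by its offset, check that the caller owns
 * it, then release it back to the heap (coalescing with free neighbours).
 */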
int radeon_mem_free(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_free_t memfree;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *)data,
                                 sizeof(memfree));

        heap = get_heap(dev_priv, memfree.region);
        if (!heap || !*heap)
                return DRM_ERR(EFAULT);

        block = find_block(*heap, memfree.region_offset);
        if (!block)
                return DRM_ERR(EFAULT);

        if (block->filp != filp)
                return DRM_ERR(EPERM);

        free_block(block);
        return 0;
}
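/* Heap-init ioctl: set up the GART or framebuffer heap once; a region whose
 * heap pointer is already non-NULL is rejected.
 */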
int radeon_mem_init_heap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_init_heap_t initheap;
        struct mem_block **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(initheap, (drm_radeon_mem_init_heap_t __user *)data,
                                 sizeof(initheap));

        heap = get_heap(dev_priv, initheap.region);
        if (!heap)
                return DRM_ERR(EFAULT);

        if (*heap) {
                DRM_ERROR("heap already initialized?");
                return DRM_ERR(EFAULT);
        }

        return init_heap(heap, initheap.start, initheap.size);
}