/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*-
 *
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include "radeon.h"
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */
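
/* Blocks live on a circular doubly-linked list anchored at a sentinel
 * 'heap' node.  The sentinel's filp is (DRMFILE)-1 so it is never treated
 * as free; real blocks use filp == NULL when free and the owning client's
 * filp when allocated.
 */

/* split_block() carves the range [start, start+size) out of free block 'p':
 * leftover space before or after the range is split off into new free
 * blocks, and the block covering the request is marked as owned by 'filp'
 * and returned.  If DRM_MALLOC fails, the leftover space simply stays
 * attached to the returned block.
 */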
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp )
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = DRM_MALLOC(sizeof(*newblock));
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = DRM_MALLOC(sizeof(*newblock));
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

 out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}
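
/* First-fit search over the block list: align each candidate's start up to
 * a (1 << align2)-byte boundary and take the first free block that can
 * still hold 'size' bytes after alignment.
 */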
static struct mem_block *alloc_block( struct mem_block *heap, int size,
				      int align2, DRMFILE filp )
{
	struct mem_block *p;
	int mask = (1 << align2)-1;

	for (p = heap->next ; p != heap ; p = p->next) {
		int start = (p->start + mask) & ~mask;
		if (p->filp == 0 && start + size <= p->start + p->size)
			return split_block( p, start, size, filp );
	}

	return NULL;
}
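
/* Linear lookup of the block whose start offset matches 'start' exactly. */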
static struct mem_block *find_block( struct mem_block *heap, int start )
{
	struct mem_block *p;

	for (p = heap->next ; p != heap ; p = p->next)
		if (p->start == start)
			return p;

	return NULL;
}
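
/* Mark a block free and coalesce it with free neighbours.  The sentinel's
 * non-NULL filp keeps the merge from running past the list anchor.
 */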
static void free_block( struct mem_block *p )
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == 0) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		DRM_FREE(q, sizeof(*q));
	}

	if (p->prev->filp == 0) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		DRM_FREE(p, sizeof(*q));
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = DRM_MALLOC(sizeof(*blocks));

	if (!blocks)
		return DRM_ERR(ENOMEM);

	*heap = DRM_MALLOC(sizeof(**heap));
	if (!*heap) {
		DRM_FREE( blocks, sizeof(*blocks) );
		return DRM_ERR(ENOMEM);
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	memset( *heap, 0, sizeof(**heap) );
	(*heap)->filp = (DRMFILE) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/* Free all blocks associated with the releasing file.
 */
void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next ; p != heap ; p = p->next) {
		if (p->filp == filp)
			p->filp = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next ; p != heap ; p = p->next) {
		while (p->filp == 0 && p->next->filp == 0) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			DRM_FREE(q, sizeof(*q));
		}
	}
}

/* Shutdown.
 */
void radeon_mem_takedown( struct mem_block **heap )
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next ; p != *heap ; ) {
		struct mem_block *q = p;
		p = p->next;
		DRM_FREE(q, sizeof(*q));
	}

	DRM_FREE( *heap, sizeof(**heap) );
	*heap = NULL;
}

/* IOCTL HANDLERS */
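
/* Map a region id supplied by userspace onto the corresponding heap pointer
 * in the driver-private structure (GART or framebuffer), or NULL if the id
 * is not recognized.
 */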
static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
				    int region )
{
	switch( region ) {
	case RADEON_MEM_REGION_GART:
		return &dev_priv->gart_heap;
	case RADEON_MEM_REGION_FB:
		return &dev_priv->fb_heap;
	default:
		return NULL;
	}
}
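
/* Allocate a block from the requested heap on behalf of the calling client
 * and copy the resulting region offset back to the user's alloc request.
 */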
int radeon_mem_alloc( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_alloc_t alloc;
	struct mem_block *block, **heap;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t __user *)data,
				  sizeof(alloc) );

	heap = get_heap( dev_priv, alloc.region );
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc.alignment < 12)
		alloc.alignment = 12;

	block = alloc_block( *heap, alloc.size, alloc.alignment,
			     filp );

	if (!block)
		return DRM_ERR(ENOMEM);

	if ( DRM_COPY_TO_USER( alloc.region_offset, &block->start,
			       sizeof(int) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}
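
/* Free a previously allocated block.  The offset must name an existing
 * block and the caller must be the client that allocated it.
 */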
int radeon_mem_free( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_free_t memfree;
	struct mem_block *block, **heap;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t __user *)data,
				  sizeof(memfree) );

	heap = get_heap( dev_priv, memfree.region );
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	block = find_block( *heap, memfree.region_offset );
	if (!block)
		return DRM_ERR(EFAULT);

	if (block->filp != filp)
		return DRM_ERR(EPERM);

	free_block( block );
	return 0;
}
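
/* Set up a heap for the given region, typically done once by the userspace
 * driver during setup.  A region may only be initialized once; a second
 * attempt is rejected.
 */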
int radeon_mem_init_heap( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_init_heap_t initheap;
	struct mem_block **heap;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
				  sizeof(initheap) );

	heap = get_heap( dev_priv, initheap.region );
	if (!heap)
		return DRM_ERR(EFAULT);

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return DRM_ERR(EFAULT);
	}

	return init_heap( heap, initheap.start, initheap.size );
}