/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
 */
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 **************************************************************************/

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

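/* For reference: the block descriptor used throughout this file is declared
 * in i915_drv.h.  It is assumed to look roughly like the sketch below -- a
 * node in a circular doubly-linked list describing one span of the managed
 * range:
 *
 *	struct mem_block {
 *		struct mem_block *next;
 *		struct mem_block *prev;
 *		int start;	-- byte offset of the span
 *		int size;	-- length of the span in bytes
 *		DRMFILE filp;	-- NULL: free, (DRMFILE)-1: heap sentinel,
 *				   otherwise the owning file
 *	};
 */
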
/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients.  Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i) */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i) */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}

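/* A note on mark_block(): sarea_priv->texList is assumed to have
 * I915_NR_TEX_REGIONS + 1 entries, with entry 'nr' acting as the sentinel
 * head of the circular LRU -- hence regions are re-linked immediately after
 * list[nr].  The start/end computation simply maps the block's byte range
 * onto LRU regions of 2^tex_lru_log_granularity bytes; e.g. with a
 * granularity shift of 14 (16k regions), a block at 0x6000 of size 0x3000
 * touches regions 1 and 2.
 */
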
/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */

static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}

static struct mem_block *alloc_block(struct mem_block *heap, int size,
				     int align2, DRMFILE filp)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;
		if (p->filp == NULL && start + size <= p->start + p->size)
			return split_block(p, start, size, filp);
	}

	return NULL;
}

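/* Worked example (illustrative values): with a single free block covering
 * [0x1000, 0x11000) and a request of size 0x2000, align2 = 12:
 *   - mask = 0xfff, so 'start' rounds up to 0x1000 (already 4k aligned);
 *   - start == p->start, so split_block() makes no front split;
 *   - size < p->size, so a new free tail block [0x3000, 0x11000) is carved
 *     off and the now-owned block [0x1000, 0x3000) is returned.
 */
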
static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->start == start)
			return p;

	return NULL;
}

static void free_block(struct mem_block *p)
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);

	if (!blocks)
		return -ENOMEM;

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->filp = (DRMFILE) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

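/* On success the list set up by init_heap() is assumed to look like:
 *
 *	(*heap) sentinel: filp == (DRMFILE)-1, start == size == 0
 *	    <->  one free block: filp == NULL, covering [start, start + size)
 *
 * The sentinel's non-NULL filp is what keeps free_block() and
 * i915_mem_release() from merging real blocks into it, as the comments
 * above note.
 */
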
/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}

/* Shutdown.
 */
void i915_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
	*heap = NULL;
}

static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
{
	switch (region) {
	case I915_MEM_REGION_AGP:
		return &dev_priv->agp_heap;
	default:
		return NULL;
	}
}

/* IOCTL HANDLERS */

int i915_mem_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t alloc;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
				 sizeof(alloc));

	heap = get_heap(dev_priv, alloc.region);
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc.alignment < 12)
		alloc.alignment = 12;

	block = alloc_block(*heap, alloc.size, alloc.alignment, filp);

	if (!block)
		return DRM_ERR(ENOMEM);

	mark_block(dev, block, 1);

	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

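/* Userspace flow (a sketch, not spelled out in this file): a DRI client is
 * expected to call the matching ioctls declared in i915_drm.h --
 * DRM_IOCTL_I915_INIT_HEAP once to publish the AGP range, then
 * DRM_IOCTL_I915_ALLOC / DRM_IOCTL_I915_FREE per region, with
 * alloc.region_offset pointing at an int that receives block->start.
 */
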
int i915_mem_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t memfree;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
				 sizeof(memfree));

	heap = get_heap(dev_priv, memfree.region);
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	block = find_block(*heap, memfree.region_offset);
	if (!block)
		return DRM_ERR(EFAULT);

	if (block->filp != filp)
		return DRM_ERR(EPERM);

	mark_block(dev, block, 0);
	free_block(block);
	return 0;
}

int i915_mem_init_heap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t initheap;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(initheap,
				 (drm_i915_mem_init_heap_t __user *) data,
				 sizeof(initheap));

	heap = get_heap(dev_priv, initheap.region);
	if (!heap)
		return DRM_ERR(EFAULT);

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return DRM_ERR(EFAULT);
	}

	return init_heap(heap, initheap.start, initheap.size);
}