/* bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 * Created: Tue Feb  2 08:37:54 1999 by faith@precisioninsight.com
 * Revised: Fri Aug 20 22:48:10 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/bufs.c,v 1.8 1999/08/30 13:05:00 faith Exp $
 * $XFree86$
 *
 */
#define __NO_VERSION__
#include "drmP.h"
#include "linux/un.h"

/* Compute the order (log2, rounded up) of a buffer size.  Can be made
   faster. */
int drm_order(unsigned long size)
{
        int           order;
        unsigned long tmp;

        for (order = 0, tmp = size; tmp >>= 1; ++order);
        if (size & ~(1 << order)) ++order;
        return order;
}

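/* drm_addmap is the ioctl handler that adds a mapping: it copies a
 * drm_map_t in from userland, validates the page-aligned offset/size,
 * sets up the backing (ioremap for register/frame-buffer ranges, freshly
 * allocated pages for shared memory), and appends the map to
 * dev->maplist under dev->struct_sem. */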
int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_map_t    *map;

        if (!(filp->f_mode & 3)) return -EACCES; /* Require read/write */

        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if (!map) return -ENOMEM;
        if (copy_from_user(map, (drm_map_t *)arg, sizeof(*map))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EFAULT;
        }

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                  map->offset, map->size, map->type);
        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = 0;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
                                /* Reject ranges that wrap or fall inside
                                   system RAM. */
                if (map->offset + map->size < map->offset
                    || map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#ifdef CONFIG_MTRR
                if (map->type == _DRM_FRAME_BUFFER
                    || (map->flags & _DRM_WRITE_COMBINING)) {
                        map->mtrr = mtrr_add(map->offset, map->size,
                                             MTRR_TYPE_WRCOMB, 1);
                }
#endif
                map->handle = drm_ioremap(map->offset, map->size);
                break;

        case _DRM_SHM:
                DRM_DEBUG("%ld %d\n", map->size, drm_order(map->size));
                map->handle = (void *)drm_alloc_pages(drm_order(map->size)
                                                      - PAGE_SHIFT,
                                                      DRM_MEM_SAREA);
                if (!map->handle) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        default:
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }

        down(&dev->struct_sem);
        if (dev->maplist) {
                ++dev->map_count;
                dev->maplist = drm_realloc(dev->maplist,
                                           (dev->map_count-1)
                                           * sizeof(*dev->maplist),
                                           dev->map_count
                                           * sizeof(*dev->maplist),
                                           DRM_MEM_MAPS);
        } else {
                dev->map_count = 1;
                dev->maplist   = drm_alloc(dev->map_count
                                           * sizeof(*dev->maplist),
                                           DRM_MEM_MAPS);
        }
        dev->maplist[dev->map_count-1] = map;
        up(&dev->struct_sem);

        copy_to_user_ret((drm_map_t *)arg, map, sizeof(*map), -EFAULT);
                                /* Return the offset as the user-visible
                                   handle for non-SHM maps. */
        if (map->type != _DRM_SHM) {
                copy_to_user_ret(&((drm_map_t *)arg)->handle,
                                 &map->offset,
                                 sizeof(map->offset),
                                 -EFAULT);
        }
        return 0;
}

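/* drm_addbufs allocates DMA buffers of a single power-of-two size.
 * Segments of 2^page_order pages are carved into `alignment'-spaced
 * buffers; one call fills dma->bufs[order], and only one call is allowed
 * per order. */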
int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t   request;
        int              count;
        int              order;
        int              size;
        int              total;
        int              page_order;
        drm_buf_entry_t  *entry;
        unsigned long    page;
        drm_buf_t        *buf;
        int              alignment;
        unsigned long    offset;
        int              i;
        int              byte_count;
        int              page_count;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        count = request.count;
        order = drm_order(request.size);
        size  = 1 << order;

        DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
                  request.count, request.size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        if (dev->queue_count) return -EBUSY; /* Not while in use */

        alignment  = (request.flags & DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist),
                         DRM_MEM_BUFS);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        dma->pagelist = drm_realloc(dma->pagelist,
                                    dma->page_count * sizeof(*dma->pagelist),
                                    (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES);
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size   = size;
        entry->page_order = page_order;
        byte_count        = 0;
        page_count        = 0;
        while (entry->buf_count < count) {
                if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
                entry->seglist[entry->seg_count++] = page;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  page + PAGE_SIZE * i);
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->pid     = 0;
#if DRM_DMA_HISTOGRAM
                        buf->time_queued     = 0;
                        buf->time_dispatched = 0;
                        buf->time_completed  = 0;
                        buf->time_freed      = 0;
#endif
                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;
        dma->seg_count  += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++) {
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
        }

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        copy_to_user_ret((drm_buf_desc_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        atomic_dec(&dev->buf_alloc);
        return 0;
}

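/* drm_infobufs reports the count, size, and freelist watermarks of each
 * buffer order that has buffers.  The caller's list is only filled in if
 * it is large enough; either way, request.count returns the number of
 * populated size orders. */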
int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_info_t   request;
        int              i;
        int              count;

        if (!dma) return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        copy_from_user_ret(&request,
                           (drm_buf_info_t *)arg,
                           sizeof(request),
                           -EFAULT);

        for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                if (dma->bufs[i].buf_count) ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                        if (dma->bufs[i].buf_count) {
                                copy_to_user_ret(&request.list[count].count,
                                                 &dma->bufs[i].buf_count,
                                                 sizeof(dma->bufs[0]
                                                        .buf_count),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].size,
                                                 &dma->bufs[i].buf_size,
                                                 sizeof(dma->bufs[0].buf_size),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].low_mark,
                                                 &dma->bufs[i]
                                                 .freelist.low_mark,
                                                 sizeof(dma->bufs[0]
                                                        .freelist.low_mark),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count]
                                                 .high_mark,
                                                 &dma->bufs[i]
                                                 .freelist.high_mark,
                                                 sizeof(dma->bufs[0]
                                                        .freelist.high_mark),
                                                 -EFAULT);
                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        copy_to_user_ret((drm_buf_info_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        return 0;
}

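/* drm_markbufs sets the freelist low and high watermarks for the buffer
 * entry whose size matches the request. */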
int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t   request;
        int              order;
        drm_buf_entry_t  *entry;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

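/* drm_freebufs returns a userland-supplied list of buffer indices to the
 * free pool; a buffer may only be freed by the process that owns it. */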
int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_free_t   request;
        int              i;
        int              idx;
        drm_buf_t        *buf;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_free_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                copy_from_user_ret(&idx,
                                   &request.list[i],
                                   sizeof(idx),
                                   -EFAULT);
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        DRM_ERROR("Process %d freeing buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}

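/* drm_mapbufs maps the whole DMA buffer pool into the caller's address
 * space with a single do_mmap of the device, then copies each buffer's
 * index, size, and resulting user address into the caller's list. */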
int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t       *priv    = filp->private_data;
        drm_device_t     *dev     = priv->dev;
        drm_device_dma_t *dma     = dev->dma;
        int              retcode  = 0;
        const int        zero     = 0;
        unsigned long    virtual;
        unsigned long    address;
        drm_buf_map_t    request;
        int              i;

        if (!dma) return -EINVAL;

        DRM_DEBUG("\n");

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        copy_from_user_ret(&request,
                           (drm_buf_map_t *)arg,
                           sizeof(request),
                           -EFAULT);

        if (request.count >= dma->buf_count) {
                virtual = do_mmap(filp, 0, dma->byte_count,
                                  PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero,
                                         sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;
                        if (copy_to_user(&request.list[i].address,
                                         &address,
                                         sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        copy_to_user_ret((drm_buf_map_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        return retcode;
}