/* bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include <linux/config.h>
#include "drmP.h"
#include "linux/un.h"

/* Compute order.  Can be made faster. */
int drm_order(unsigned long size)
{
        int           order;
        unsigned long tmp;

        for (order = 0, tmp = size; tmp >>= 1; ++order);

        if (size & ~(1 << order)) ++order;

        return order;
}

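/*
 * Worked example (illustrative): the loop above computes floor(log2(size))
 * and the final test rounds up when size is not a power of two, so for any
 * non-zero size the result is ceil(log2(size)):
 *
 *      drm_order(4096)  == 12   (exact power of two)
 *      drm_order(4097)  == 13   (rounded up)
 *      drm_order(65536) == 16
 *
 * Callers rely on 1 << drm_order(size) >= size; for example, drm_addmap()
 * below passes drm_order(map->size) - PAGE_SHIFT to drm_alloc_pages() as a
 * page order for the shared-memory map type.
 */
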
int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_map_t    *map;

        if (!(filp->f_mode & 3)) return -EACCES; /* Require read/write */

        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if (!map) return -ENOMEM;
        if (copy_from_user(map, (drm_map_t *)arg, sizeof(*map))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EFAULT;
        }

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                  map->offset, map->size, map->type);
        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = 0;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#ifndef __sparc__
                if (map->offset + map->size < map->offset
                    || map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#endif
#ifdef CONFIG_MTRR
                if (map->type == _DRM_FRAME_BUFFER
                    || (map->flags & _DRM_WRITE_COMBINING)) {
                        map->mtrr = mtrr_add(map->offset, map->size,
                                             MTRR_TYPE_WRCOMB, 1);
                }
#endif
                map->handle = drm_ioremap(map->offset, map->size);
                break;

        case _DRM_SHM:
                map->handle = (void *)drm_alloc_pages(drm_order(map->size)
                                                      - PAGE_SHIFT,
                                                      DRM_MEM_SAREA);
                DRM_DEBUG("%ld %d %p\n", map->size, drm_order(map->size),
                          map->handle);
                if (!map->handle) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
        case _DRM_AGP:
                map->offset = map->offset + dev->agp->base;
                break;
#endif
        default:
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }

        down(&dev->struct_sem);
        if (dev->maplist) {
                ++dev->map_count;
                dev->maplist = drm_realloc(dev->maplist,
                                           (dev->map_count-1)
                                           * sizeof(*dev->maplist),
                                           dev->map_count
                                           * sizeof(*dev->maplist),
                                           DRM_MEM_MAPS);
        } else {
                dev->map_count = 1;
                dev->maplist = drm_alloc(dev->map_count*sizeof(*dev->maplist),
                                         DRM_MEM_MAPS);
        }
        dev->maplist[dev->map_count-1] = map;
        up(&dev->struct_sem);

        if (copy_to_user((drm_map_t *)arg, map, sizeof(*map)))
                return -EFAULT;
        if (map->type != _DRM_SHM) {
                if (copy_to_user(&((drm_map_t *)arg)->handle,
                                 &map->offset,
                                 sizeof(map->offset)))
                        return -EFAULT;
        }
        return 0;
}

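/*
 * Minimal userspace sketch of driving this ioctl (assumes the
 * DRM_IOCTL_ADD_MAP request macro and the drm_map_t layout exported by
 * drm.h; the local names below are hypothetical):
 *
 *      drm_map_t map;
 *
 *      memset(&map, 0, sizeof(map));
 *      map.offset = fb_physical_base;        page-aligned physical address
 *      map.size   = fb_size;                 page-aligned length
 *      map.type   = _DRM_FRAME_BUFFER;
 *      map.flags  = _DRM_WRITE_COMBINING;
 *      if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) < 0)
 *              perror("DRM_IOCTL_ADD_MAP");
 *
 * On success the kernel copies the map back with the handle field filled in,
 * and that offset/handle is what the client later passes to mmap().
 */
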
int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t   request;
        int              count;
        int              order;
        int              size;
        int              total;
        int              page_order;
        drm_buf_entry_t  *entry;
        unsigned long    page;
        drm_buf_t        *buf;
        int              alignment;
        unsigned long    offset;
        int              i;
        int              byte_count;
        int              page_count;

        if (!dma) return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        count = request.count;
        order = drm_order(request.size);
        size  = 1 << order;

        DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
                  request.count, request.size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        if (dev->queue_count) return -EBUSY; /* Not while in use */

        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;
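        /*
         * Sizing example (illustrative): with 4 KB pages (PAGE_SHIFT == 12)
         * and request.size == 1024, order is 10, so size == 1024,
         * page_order == 0 and total == PAGE_SIZE.  Without _DRM_PAGE_ALIGN
         * the loop below packs four 1 KB buffers into each allocated page;
         * with the flag set, alignment == PAGE_SIZE and each page holds one
         * buffer.  For request.size >= PAGE_SIZE, total == size and every
         * segment from drm_alloc_pages() backs exactly one buffer.
         */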
        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist),
                         DRM_MEM_BUFS);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        dma->pagelist = drm_realloc(dma->pagelist,
                                    dma->page_count * sizeof(*dma->pagelist),
                                    (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES);
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size   = size;
        entry->page_order = page_order;
        byte_count        = 0;
        page_count        = 0;
        while (entry->buf_count < count) {
                if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
                entry->seglist[entry->seg_count++] = page;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  page + PAGE_SIZE * i);
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->pid     = 0;
#if DRM_DMA_HISTOGRAM
                        buf->time_queued     = 0;
                        buf->time_dispatched = 0;
                        buf->time_completed  = 0;
                        buf->time_freed      = 0;
#endif
                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;
        dma->seg_count  += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++) {
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
        }

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        if (copy_to_user((drm_buf_desc_t *)arg,
                         &request,
                         sizeof(request)))
                return -EFAULT;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_info_t   request;
        int              i;
        int              count;

        if (!dma) return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request,
                           (drm_buf_info_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                if (dma->bufs[i].buf_count) ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                        if (dma->bufs[i].buf_count) {
                                if (copy_to_user(&request.list[count].count,
                                                 &dma->bufs[i].buf_count,
                                                 sizeof(dma->bufs[0]
                                                        .buf_count)) ||
                                    copy_to_user(&request.list[count].size,
                                                 &dma->bufs[i].buf_size,
                                                 sizeof(dma->bufs[0].buf_size)) ||
                                    copy_to_user(&request.list[count].low_mark,
                                                 &dma->bufs[i]
                                                 .freelist.low_mark,
                                                 sizeof(dma->bufs[0]
                                                        .freelist.low_mark)) ||
                                    copy_to_user(&request.list[count]
                                                 .high_mark,
                                                 &dma->bufs[i]
                                                 .freelist.high_mark,
                                                 sizeof(dma->bufs[0]
                                                        .freelist.high_mark)))
                                        return -EFAULT;

                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        if (copy_to_user((drm_buf_info_t *)arg,
                         &request,
                         sizeof(request)))
                return -EFAULT;

        return 0;
}

int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t   request;
        int              order;
        drm_buf_entry_t  *entry;

        if (!dma) return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_free_t   request;
        int              i;
        int              idx;
        drm_buf_t        *buf;

        if (!dma) return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_free_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                if (copy_from_user(&idx,
                                   &request.list[i],
                                   sizeof(idx)))
                        return -EFAULT;
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        DRM_ERROR("Process %d freeing buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}

int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t       *priv    = filp->private_data;
        drm_device_t     *dev     = priv->dev;
        drm_device_dma_t *dma     = dev->dma;
        int              retcode  = 0;
        const int        zero     = 0;
        unsigned long    virtual;
        unsigned long    address;
        drm_buf_map_t    request;
        int              i;

        if (!dma) return -EINVAL;

        DRM_DEBUG("\n");

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request,
                           (drm_buf_map_t *)arg,
                           sizeof(request)))
                return -EFAULT;

        if (request.count >= dma->buf_count) {
                down(&current->mm->mmap_sem);
                virtual = do_mmap(filp, 0, dma->byte_count,
                                  PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                up(&current->mm->mmap_sem);
                if (virtual > -1024UL) {
                        /* Real error: do_mmap() failed and returned a
                           negative errno encoded in the address. */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero,
                                         sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;
                        if (copy_to_user(&request.list[i].address,
                                         &address,
                                         sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        if (copy_to_user((drm_buf_map_t *)arg,
                         &request,
                         sizeof(request)))
                return -EFAULT;

        return retcode;
}

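/*
 * Typical userspace sequence for these ioctls: allocate a pool with
 * drm_addbufs(), then map it all at once with drm_mapbufs().  A minimal
 * sketch, assuming the DRM_IOCTL_ADD_BUFS / DRM_IOCTL_MAP_BUFS request
 * macros and the drm_buf_pub_t list element type from drm.h:
 *
 *      drm_buf_desc_t desc;
 *      drm_buf_map_t  map;
 *      drm_buf_pub_t  list[32];
 *
 *      desc.count = 32;
 *      desc.size  = 65536;
 *      desc.flags = _DRM_PAGE_ALIGN;
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);    calls drm_addbufs()
 *
 *      map.count = desc.count;                  must cover every buffer
 *      map.list  = list;
 *      ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);     calls drm_mapbufs()
 *
 * On return map.virtual is the base of a single mapping of the whole DMA
 * pool, and list[i].address / list[i].total give each buffer's start and
 * size within it.
 */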