drm: cleanup buffer/map code
drivers/char/drm/drm_bufs.c
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */
/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
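/*
 * Example (editor's sketch, hypothetical BAR number): these wrappers exist so
 * that drivers can query PCI resources without touching dev->pdev directly,
 * e.g. when sizing a register aperture before handing it to drm_addmap()
 * below:
 *
 *	unsigned long mmio_base = drm_get_resource_start(dev, 1);
 *	unsigned long mmio_len  = drm_get_resource_len(dev, 1);
 *
 *	DRM_DEBUG("MMIO at 0x%08lx, %lu bytes\n", mmio_base, mmio_len);
 */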
static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
					      drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry->map;
		}
	}

	return NULL;
}
#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions.
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
89 int drm_addmap(drm_device_t * dev, unsigned int offset,
90 unsigned int size, drm_map_type_t type,
91 drm_map_flags_t flags, drm_local_map_t ** map_ptr)
93 drm_map_t *map;
94 drm_map_list_t *list;
95 drm_dma_handle_t *dmah;
96 drm_local_map_t *found_map;
98 map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
99 if ( !map )
100 return -ENOMEM;
102 map->offset = offset;
103 map->size = size;
104 map->flags = flags;
105 map->type = type;
107 /* Only allow shared memory to be removable since we only keep enough
108 * book keeping information about shared memory to allow for removal
109 * when processes fork.
111 if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
112 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
113 return -EINVAL;
115 DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
116 map->offset, map->size, map->type );
117 if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
118 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
119 return -EINVAL;
121 map->mtrr = -1;
122 map->handle = NULL;
124 switch ( map->type ) {
125 case _DRM_REGISTERS:
126 case _DRM_FRAME_BUFFER:
127 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
128 if ( map->offset + map->size < map->offset ||
129 map->offset < virt_to_phys(high_memory) ) {
130 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
131 return -EINVAL;
133 #endif
134 #ifdef __alpha__
135 map->offset += dev->hose->mem_space->start;
136 #endif
137 /* Some drivers preinitialize some maps, without the X Server
138 * needing to be aware of it. Therefore, we just return success
139 * when the server tries to create a duplicate map.
141 found_map = drm_find_matching_map(dev, map);
142 if (found_map != NULL) {
143 if (found_map->size != map->size) {
144 DRM_DEBUG("Matching maps of type %d with "
145 "mismatched sizes, (%ld vs %ld)\n",
146 map->type, map->size, found_map->size);
147 found_map->size = map->size;
150 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
151 *map_ptr = found_map;
152 return 0;
155 if (drm_core_has_MTRR(dev)) {
156 if ( map->type == _DRM_FRAME_BUFFER ||
157 (map->flags & _DRM_WRITE_COMBINING) ) {
158 map->mtrr = mtrr_add( map->offset, map->size,
159 MTRR_TYPE_WRCOMB, 1 );
162 if (map->type == _DRM_REGISTERS)
163 map->handle = drm_ioremap( map->offset, map->size,
164 dev );
165 break;
167 case _DRM_SHM:
168 map->handle = vmalloc_32(map->size);
169 DRM_DEBUG( "%lu %d %p\n",
170 map->size, drm_order( map->size ), map->handle );
171 if ( !map->handle ) {
172 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
173 return -ENOMEM;
175 map->offset = (unsigned long)map->handle;
176 if ( map->flags & _DRM_CONTAINS_LOCK ) {
177 /* Prevent a 2nd X Server from creating a 2nd lock */
178 if (dev->lock.hw_lock != NULL) {
179 vfree( map->handle );
180 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
181 return -EBUSY;
183 dev->sigdata.lock =
184 dev->lock.hw_lock = map->handle; /* Pointer to lock */
186 break;
187 case _DRM_AGP:
188 if (drm_core_has_AGP(dev)) {
189 #ifdef __alpha__
190 map->offset += dev->hose->mem_space->start;
191 #endif
192 map->offset += dev->agp->base;
193 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
195 break;
196 case _DRM_SCATTER_GATHER:
197 if (!dev->sg) {
198 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
199 return -EINVAL;
201 map->offset += dev->sg->handle;
202 break;
203 case _DRM_CONSISTENT:
204 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
205 * As we're limiting the address to 2^32-1 (or less),
206 * casting it down to 32 bits is no problem, but we
207 * need to point to a 64bit variable first. */
208 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
209 if (!dmah) {
210 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
211 return -ENOMEM;
213 map->handle = dmah->vaddr;
214 map->offset = (unsigned long)dmah->busaddr;
215 kfree(dmah);
216 break;
217 default:
218 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
219 return -EINVAL;
222 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
223 if(!list) {
224 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
225 return -EINVAL;
227 memset(list, 0, sizeof(*list));
228 list->map = map;
230 down(&dev->struct_sem);
231 list_add(&list->head, &dev->maplist->head);
232 #ifdef CONFIG_COMPAT
233 /* Assign a 32-bit handle for _DRM_SHM mappings */
234 /* We do it here so that dev->struct_sem protects the increment */
235 if (map->type == _DRM_SHM)
236 map->offset = map32_handle += PAGE_SIZE;
237 #endif
238 up(&dev->struct_sem);
240 *map_ptr = map;
	return 0;
}
EXPORT_SYMBOL(drm_addmap);
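/*
 * Example (editor's sketch; the BAR numbers and flags are hypothetical): a
 * driver's load hook would typically create its permanent maps with
 * drm_addmap() and keep only the returned pointers, letting the DRM core
 * track the entries on dev->maplist:
 *
 *	drm_local_map_t *mmio, *fb;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 1),
 *			 drm_get_resource_len(dev, 1),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &fb);
 *	if (ret)
 *		return ret;
 */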
245 int drm_addmap_ioctl(struct inode *inode, struct file *filp,
246 unsigned int cmd, unsigned long arg)
248 drm_file_t *priv = filp->private_data;
249 drm_device_t *dev = priv->head->dev;
250 drm_map_t map;
251 drm_map_t *map_ptr;
252 drm_map_t __user *argp = (void __user *)arg;
253 int err;
255 if (!(filp->f_mode & 3))
256 return -EACCES; /* Require read/write */
258 if (copy_from_user(& map, argp, sizeof(map))) {
259 return -EFAULT;
262 err = drm_addmap( dev, map.offset, map.size, map.type, map.flags,
263 &map_ptr );
265 if (err) {
266 return err;
269 if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
270 return -EFAULT;
271 if (map_ptr->type != _DRM_SHM) {
272 if (copy_to_user(&argp->handle, &map_ptr->offset,
273 sizeof(map_ptr->offset)))
274 return -EFAULT;
276 return 0;
/**
 * Remove a map private from the list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRR's) if it is not.
 *
 * \sa drm_addmap
 */
296 int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
298 struct list_head *list;
299 drm_map_list_t *r_list = NULL;
300 drm_dma_handle_t dmah;
302 /* Find the list entry for the map and remove it */
303 list_for_each(list, &dev->maplist->head) {
304 r_list = list_entry(list, drm_map_list_t, head);
306 if (r_list->map == map) {
307 list_del(list);
308 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
309 break;
313 /* List has wrapped around to the head pointer, or it's empty and we
314 * didn't find anything.
316 if (list == (&dev->maplist->head)) {
317 return -EINVAL;
320 switch (map->type) {
321 case _DRM_REGISTERS:
322 drm_ioremapfree(map->handle, map->size, dev);
323 /* FALLTHROUGH */
324 case _DRM_FRAME_BUFFER:
325 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
326 int retcode;
327 retcode = mtrr_del(map->mtrr, map->offset,
328 map->size);
329 DRM_DEBUG ("mtrr_del=%d\n", retcode);
331 break;
332 case _DRM_SHM:
333 vfree(map->handle);
334 break;
335 case _DRM_AGP:
336 case _DRM_SCATTER_GATHER:
337 break;
338 case _DRM_CONSISTENT:
339 dmah.vaddr = map->handle;
340 dmah.busaddr = map->offset;
341 dmah.size = map->size;
342 __drm_pci_free(dev, &dmah);
343 break;
345 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
347 return 0;
349 EXPORT_SYMBOL(drm_rmmap_locked);
351 int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
353 int ret;
355 down(&dev->struct_sem);
356 ret = drm_rmmap_locked(dev, map);
357 up(&dev->struct_sem);
	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
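/*
 * Example (editor's sketch): drivers rarely need this; as the comment below
 * notes, all maps are torn down automatically on the last close of the
 * device.  A driver that does manage a private, removable _DRM_SHM map
 * (priv_map is hypothetical) would drop it with:
 *
 *	if (priv_map) {
 *		drm_rmmap(dev, priv_map);
 *		priv_map = NULL;
 *	}
 */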
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This
 * seems unlikely.
 */
372 int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
373 unsigned int cmd, unsigned long arg)
375 drm_file_t *priv = filp->private_data;
376 drm_device_t *dev = priv->head->dev;
377 drm_map_t request;
378 drm_local_map_t *map = NULL;
379 struct list_head *list;
380 int ret;
382 if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
383 return -EFAULT;
386 down(&dev->struct_sem);
387 list_for_each(list, &dev->maplist->head) {
388 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
390 if (r_list->map &&
391 r_list->map->handle == request.handle &&
392 r_list->map->flags & _DRM_REMOVABLE) {
393 map = r_list->map;
394 break;
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
401 if (list == (&dev->maplist->head)) {
402 up(&dev->struct_sem);
403 return -EINVAL;
	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
409 /* Register and framebuffer maps are permanent */
410 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
411 up(&dev->struct_sem);
412 return 0;
415 ret = drm_rmmap_locked(dev, map);
417 up(&dev->struct_sem);
419 return ret;
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
430 static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
432 int i;
434 if (entry->seg_count) {
435 for (i = 0; i < entry->seg_count; i++) {
436 if (entry->seglist[i]) {
437 drm_free_pages(entry->seglist[i],
438 entry->page_order,
439 DRM_MEM_DMA);
442 drm_free(entry->seglist,
443 entry->seg_count *
444 sizeof(*entry->seglist),
445 DRM_MEM_SEGS);
447 entry->seg_count = 0;
450 if (entry->buf_count) {
451 for (i = 0; i < entry->buf_count; i++) {
452 if (entry->buflist[i].dev_private) {
453 drm_free(entry->buflist[i].dev_private,
454 entry->buflist[i].dev_priv_size,
455 DRM_MEM_BUFS);
458 drm_free(entry->buflist,
459 entry->buf_count *
460 sizeof(*entry->buflist),
461 DRM_MEM_BUFS);
463 entry->buf_count = 0;
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
479 int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
481 drm_device_dma_t *dma = dev->dma;
482 drm_buf_entry_t *entry;
483 drm_buf_t *buf;
484 unsigned long offset;
485 unsigned long agp_offset;
486 int count;
487 int order;
488 int size;
489 int alignment;
490 int page_order;
491 int total;
492 int byte_count;
493 int i;
494 drm_buf_t **temp_buflist;
496 if ( !dma ) return -EINVAL;
498 count = request->count;
499 order = drm_order(request->size);
500 size = 1 << order;
502 alignment = (request->flags & _DRM_PAGE_ALIGN)
503 ? PAGE_ALIGN(size) : size;
504 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
505 total = PAGE_SIZE << page_order;
507 byte_count = 0;
508 agp_offset = dev->agp->base + request->agp_start;
510 DRM_DEBUG( "count: %d\n", count );
511 DRM_DEBUG( "order: %d\n", order );
512 DRM_DEBUG( "size: %d\n", size );
513 DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
514 DRM_DEBUG( "alignment: %d\n", alignment );
515 DRM_DEBUG( "page_order: %d\n", page_order );
516 DRM_DEBUG( "total: %d\n", total );
518 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
519 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
521 spin_lock( &dev->count_lock );
522 if ( dev->buf_use ) {
523 spin_unlock( &dev->count_lock );
524 return -EBUSY;
526 atomic_inc( &dev->buf_alloc );
527 spin_unlock( &dev->count_lock );
529 down( &dev->struct_sem );
530 entry = &dma->bufs[order];
531 if ( entry->buf_count ) {
532 up( &dev->struct_sem );
533 atomic_dec( &dev->buf_alloc );
534 return -ENOMEM; /* May only call once for each order */
537 if (count < 0 || count > 4096) {
538 up( &dev->struct_sem );
539 atomic_dec( &dev->buf_alloc );
540 return -EINVAL;
543 entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
544 DRM_MEM_BUFS );
545 if ( !entry->buflist ) {
546 up( &dev->struct_sem );
547 atomic_dec( &dev->buf_alloc );
548 return -ENOMEM;
550 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
552 entry->buf_size = size;
553 entry->page_order = page_order;
555 offset = 0;
557 while ( entry->buf_count < count ) {
558 buf = &entry->buflist[entry->buf_count];
559 buf->idx = dma->buf_count + entry->buf_count;
560 buf->total = alignment;
561 buf->order = order;
562 buf->used = 0;
564 buf->offset = (dma->byte_count + offset);
565 buf->bus_address = agp_offset + offset;
566 buf->address = (void *)(agp_offset + offset);
567 buf->next = NULL;
568 buf->waiting = 0;
569 buf->pending = 0;
570 init_waitqueue_head( &buf->dma_wait );
571 buf->filp = NULL;
573 buf->dev_priv_size = dev->driver->dev_priv_size;
574 buf->dev_private = drm_alloc( buf->dev_priv_size,
575 DRM_MEM_BUFS );
576 if(!buf->dev_private) {
577 /* Set count correctly so we free the proper amount. */
578 entry->buf_count = count;
579 drm_cleanup_buf_error(dev,entry);
580 up( &dev->struct_sem );
581 atomic_dec( &dev->buf_alloc );
582 return -ENOMEM;
584 memset( buf->dev_private, 0, buf->dev_priv_size );
586 DRM_DEBUG( "buffer %d @ %p\n",
587 entry->buf_count, buf->address );
589 offset += alignment;
590 entry->buf_count++;
591 byte_count += PAGE_SIZE << page_order;
594 DRM_DEBUG( "byte_count: %d\n", byte_count );
596 temp_buflist = drm_realloc( dma->buflist,
597 dma->buf_count * sizeof(*dma->buflist),
598 (dma->buf_count + entry->buf_count)
599 * sizeof(*dma->buflist),
600 DRM_MEM_BUFS );
601 if(!temp_buflist) {
602 /* Free the entry because it isn't valid */
603 drm_cleanup_buf_error(dev,entry);
604 up( &dev->struct_sem );
605 atomic_dec( &dev->buf_alloc );
606 return -ENOMEM;
608 dma->buflist = temp_buflist;
610 for ( i = 0 ; i < entry->buf_count ; i++ ) {
611 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
614 dma->buf_count += entry->buf_count;
615 dma->byte_count += byte_count;
617 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
618 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
620 up( &dev->struct_sem );
622 request->count = entry->buf_count;
623 request->size = size;
625 dma->flags = _DRM_DMA_USE_AGP;
627 atomic_dec( &dev->buf_alloc );
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
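/*
 * Example (editor's sketch, hypothetical values): a driver that manages its
 * own DMA pool can call drm_addbufs_agp() directly instead of waiting for
 * the ADDBUFS ioctl.  request.size is rounded up to a power of two via
 * drm_order(), and request.count / request.size are written back with what
 * was actually allocated.  agp_buffer_offset is an assumed, driver-chosen
 * offset into the AGP aperture:
 *
 *	drm_buf_desc_t request;
 *
 *	memset(&request, 0, sizeof(request));
 *	request.count = 32;
 *	request.size = 64 * 1024;
 *	request.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	request.agp_start = agp_buffer_offset;
 *	if (drm_addbufs_agp(dev, &request))
 *		DRM_ERROR("AGP buffer allocation failed\n");
 */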
633 int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
635 drm_device_dma_t *dma = dev->dma;
636 int count;
637 int order;
638 int size;
639 int total;
640 int page_order;
641 drm_buf_entry_t *entry;
642 unsigned long page;
643 drm_buf_t *buf;
644 int alignment;
645 unsigned long offset;
646 int i;
647 int byte_count;
648 int page_count;
649 unsigned long *temp_pagelist;
650 drm_buf_t **temp_buflist;
652 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
653 if ( !dma ) return -EINVAL;
655 count = request->count;
656 order = drm_order(request->size);
657 size = 1 << order;
659 DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
660 request->count, request->size, size,
661 order, dev->queue_count );
663 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
664 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
666 alignment = (request->flags & _DRM_PAGE_ALIGN)
667 ? PAGE_ALIGN(size) : size;
668 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
669 total = PAGE_SIZE << page_order;
671 spin_lock( &dev->count_lock );
672 if ( dev->buf_use ) {
673 spin_unlock( &dev->count_lock );
674 return -EBUSY;
676 atomic_inc( &dev->buf_alloc );
677 spin_unlock( &dev->count_lock );
679 down( &dev->struct_sem );
680 entry = &dma->bufs[order];
681 if ( entry->buf_count ) {
682 up( &dev->struct_sem );
683 atomic_dec( &dev->buf_alloc );
684 return -ENOMEM; /* May only call once for each order */
687 if (count < 0 || count > 4096) {
688 up( &dev->struct_sem );
689 atomic_dec( &dev->buf_alloc );
690 return -EINVAL;
693 entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
694 DRM_MEM_BUFS );
695 if ( !entry->buflist ) {
696 up( &dev->struct_sem );
697 atomic_dec( &dev->buf_alloc );
698 return -ENOMEM;
700 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
702 entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
703 DRM_MEM_SEGS );
704 if ( !entry->seglist ) {
705 drm_free( entry->buflist,
706 count * sizeof(*entry->buflist),
707 DRM_MEM_BUFS );
708 up( &dev->struct_sem );
709 atomic_dec( &dev->buf_alloc );
710 return -ENOMEM;
712 memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
714 /* Keep the original pagelist until we know all the allocations
715 * have succeeded
717 temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
718 * sizeof(*dma->pagelist),
719 DRM_MEM_PAGES );
720 if (!temp_pagelist) {
721 drm_free( entry->buflist,
722 count * sizeof(*entry->buflist),
723 DRM_MEM_BUFS );
724 drm_free( entry->seglist,
725 count * sizeof(*entry->seglist),
726 DRM_MEM_SEGS );
727 up( &dev->struct_sem );
728 atomic_dec( &dev->buf_alloc );
729 return -ENOMEM;
731 memcpy(temp_pagelist,
732 dma->pagelist,
733 dma->page_count * sizeof(*dma->pagelist));
734 DRM_DEBUG( "pagelist: %d entries\n",
735 dma->page_count + (count << page_order) );
737 entry->buf_size = size;
738 entry->page_order = page_order;
739 byte_count = 0;
740 page_count = 0;
742 while ( entry->buf_count < count ) {
743 page = drm_alloc_pages( page_order, DRM_MEM_DMA );
744 if ( !page ) {
745 /* Set count correctly so we free the proper amount. */
746 entry->buf_count = count;
747 entry->seg_count = count;
748 drm_cleanup_buf_error(dev, entry);
749 drm_free( temp_pagelist,
750 (dma->page_count + (count << page_order))
751 * sizeof(*dma->pagelist),
752 DRM_MEM_PAGES );
753 up( &dev->struct_sem );
754 atomic_dec( &dev->buf_alloc );
755 return -ENOMEM;
757 entry->seglist[entry->seg_count++] = page;
758 for ( i = 0 ; i < (1 << page_order) ; i++ ) {
759 DRM_DEBUG( "page %d @ 0x%08lx\n",
760 dma->page_count + page_count,
761 page + PAGE_SIZE * i );
762 temp_pagelist[dma->page_count + page_count++]
763 = page + PAGE_SIZE * i;
765 for ( offset = 0 ;
766 offset + size <= total && entry->buf_count < count ;
767 offset += alignment, ++entry->buf_count ) {
768 buf = &entry->buflist[entry->buf_count];
769 buf->idx = dma->buf_count + entry->buf_count;
770 buf->total = alignment;
771 buf->order = order;
772 buf->used = 0;
773 buf->offset = (dma->byte_count + byte_count + offset);
774 buf->address = (void *)(page + offset);
775 buf->next = NULL;
776 buf->waiting = 0;
777 buf->pending = 0;
778 init_waitqueue_head( &buf->dma_wait );
779 buf->filp = NULL;
781 buf->dev_priv_size = dev->driver->dev_priv_size;
782 buf->dev_private = drm_alloc( buf->dev_priv_size,
783 DRM_MEM_BUFS );
784 if(!buf->dev_private) {
785 /* Set count correctly so we free the proper amount. */
786 entry->buf_count = count;
787 entry->seg_count = count;
788 drm_cleanup_buf_error(dev,entry);
789 drm_free( temp_pagelist,
790 (dma->page_count + (count << page_order))
791 * sizeof(*dma->pagelist),
792 DRM_MEM_PAGES );
793 up( &dev->struct_sem );
794 atomic_dec( &dev->buf_alloc );
795 return -ENOMEM;
797 memset( buf->dev_private, 0, buf->dev_priv_size );
799 DRM_DEBUG( "buffer %d @ %p\n",
800 entry->buf_count, buf->address );
802 byte_count += PAGE_SIZE << page_order;
805 temp_buflist = drm_realloc( dma->buflist,
806 dma->buf_count * sizeof(*dma->buflist),
807 (dma->buf_count + entry->buf_count)
808 * sizeof(*dma->buflist),
809 DRM_MEM_BUFS );
810 if (!temp_buflist) {
811 /* Free the entry because it isn't valid */
812 drm_cleanup_buf_error(dev,entry);
813 drm_free( temp_pagelist,
814 (dma->page_count + (count << page_order))
815 * sizeof(*dma->pagelist),
816 DRM_MEM_PAGES );
817 up( &dev->struct_sem );
818 atomic_dec( &dev->buf_alloc );
819 return -ENOMEM;
821 dma->buflist = temp_buflist;
823 for ( i = 0 ; i < entry->buf_count ; i++ ) {
824 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
830 if (dma->page_count) {
831 drm_free(dma->pagelist,
832 dma->page_count * sizeof(*dma->pagelist),
833 DRM_MEM_PAGES);
835 dma->pagelist = temp_pagelist;
837 dma->buf_count += entry->buf_count;
838 dma->seg_count += entry->seg_count;
839 dma->page_count += entry->seg_count << page_order;
840 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
842 up( &dev->struct_sem );
844 request->count = entry->buf_count;
845 request->size = size;
847 atomic_dec( &dev->buf_alloc );
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
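/*
 * Worked example (editor's note, assuming 4 KiB pages): for a request of
 * count = 16, size = 20 KiB, drm_order(20480) yields 15, so each buffer is
 * rounded up to size = 1 << 15 = 32 KiB.  page_order = 15 - PAGE_SHIFT = 3,
 * so every segment from drm_alloc_pages() spans 8 physically contiguous
 * pages (32 KiB) and, with _DRM_PAGE_ALIGN, holds exactly one buffer; the
 * request therefore consumes 16 segments and adds 128 pagelist entries.
 */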
853 static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
855 drm_device_dma_t *dma = dev->dma;
856 drm_buf_entry_t *entry;
857 drm_buf_t *buf;
858 unsigned long offset;
859 unsigned long agp_offset;
860 int count;
861 int order;
862 int size;
863 int alignment;
864 int page_order;
865 int total;
866 int byte_count;
867 int i;
868 drm_buf_t **temp_buflist;
870 if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;
872 if ( !dma ) return -EINVAL;
874 count = request->count;
875 order = drm_order(request->size);
876 size = 1 << order;
878 alignment = (request->flags & _DRM_PAGE_ALIGN)
879 ? PAGE_ALIGN(size) : size;
880 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
881 total = PAGE_SIZE << page_order;
883 byte_count = 0;
884 agp_offset = request->agp_start;
886 DRM_DEBUG( "count: %d\n", count );
887 DRM_DEBUG( "order: %d\n", order );
888 DRM_DEBUG( "size: %d\n", size );
889 DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
890 DRM_DEBUG( "alignment: %d\n", alignment );
891 DRM_DEBUG( "page_order: %d\n", page_order );
892 DRM_DEBUG( "total: %d\n", total );
894 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
895 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
897 spin_lock( &dev->count_lock );
898 if ( dev->buf_use ) {
899 spin_unlock( &dev->count_lock );
900 return -EBUSY;
902 atomic_inc( &dev->buf_alloc );
903 spin_unlock( &dev->count_lock );
905 down( &dev->struct_sem );
906 entry = &dma->bufs[order];
907 if ( entry->buf_count ) {
908 up( &dev->struct_sem );
909 atomic_dec( &dev->buf_alloc );
910 return -ENOMEM; /* May only call once for each order */
913 if (count < 0 || count > 4096) {
914 up( &dev->struct_sem );
915 atomic_dec( &dev->buf_alloc );
916 return -EINVAL;
919 entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
920 DRM_MEM_BUFS );
921 if ( !entry->buflist ) {
922 up( &dev->struct_sem );
923 atomic_dec( &dev->buf_alloc );
924 return -ENOMEM;
926 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
928 entry->buf_size = size;
929 entry->page_order = page_order;
931 offset = 0;
933 while ( entry->buf_count < count ) {
934 buf = &entry->buflist[entry->buf_count];
935 buf->idx = dma->buf_count + entry->buf_count;
936 buf->total = alignment;
937 buf->order = order;
938 buf->used = 0;
940 buf->offset = (dma->byte_count + offset);
941 buf->bus_address = agp_offset + offset;
942 buf->address = (void *)(agp_offset + offset + dev->sg->handle);
943 buf->next = NULL;
944 buf->waiting = 0;
945 buf->pending = 0;
946 init_waitqueue_head( &buf->dma_wait );
947 buf->filp = NULL;
949 buf->dev_priv_size = dev->driver->dev_priv_size;
950 buf->dev_private = drm_alloc( buf->dev_priv_size,
951 DRM_MEM_BUFS );
952 if(!buf->dev_private) {
953 /* Set count correctly so we free the proper amount. */
954 entry->buf_count = count;
955 drm_cleanup_buf_error(dev,entry);
956 up( &dev->struct_sem );
957 atomic_dec( &dev->buf_alloc );
958 return -ENOMEM;
961 memset( buf->dev_private, 0, buf->dev_priv_size );
963 DRM_DEBUG( "buffer %d @ %p\n",
964 entry->buf_count, buf->address );
966 offset += alignment;
967 entry->buf_count++;
968 byte_count += PAGE_SIZE << page_order;
971 DRM_DEBUG( "byte_count: %d\n", byte_count );
973 temp_buflist = drm_realloc( dma->buflist,
974 dma->buf_count * sizeof(*dma->buflist),
975 (dma->buf_count + entry->buf_count)
976 * sizeof(*dma->buflist),
977 DRM_MEM_BUFS );
978 if(!temp_buflist) {
979 /* Free the entry because it isn't valid */
980 drm_cleanup_buf_error(dev,entry);
981 up( &dev->struct_sem );
982 atomic_dec( &dev->buf_alloc );
983 return -ENOMEM;
985 dma->buflist = temp_buflist;
987 for ( i = 0 ; i < entry->buf_count ; i++ ) {
988 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
991 dma->buf_count += entry->buf_count;
992 dma->byte_count += byte_count;
994 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
995 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
997 up( &dev->struct_sem );
999 request->count = entry->buf_count;
1000 request->size = size;
1002 dma->flags = _DRM_DMA_USE_SG;
1004 atomic_dec( &dev->buf_alloc );
1005 return 0;
1008 int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
1010 drm_device_dma_t *dma = dev->dma;
1011 drm_buf_entry_t *entry;
1012 drm_buf_t *buf;
1013 unsigned long offset;
1014 unsigned long agp_offset;
1015 int count;
1016 int order;
1017 int size;
1018 int alignment;
1019 int page_order;
1020 int total;
1021 int byte_count;
1022 int i;
1023 drm_buf_t **temp_buflist;
1025 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1026 return -EINVAL;
1028 if (!dma)
1029 return -EINVAL;
1031 count = request->count;
1032 order = drm_order(request->size);
1033 size = 1 << order;
1035 alignment = (request->flags & _DRM_PAGE_ALIGN)
1036 ? PAGE_ALIGN(size) : size;
1037 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1038 total = PAGE_SIZE << page_order;
1040 byte_count = 0;
1041 agp_offset = request->agp_start;
1043 DRM_DEBUG("count: %d\n", count);
1044 DRM_DEBUG("order: %d\n", order);
1045 DRM_DEBUG("size: %d\n", size);
1046 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1047 DRM_DEBUG("alignment: %d\n", alignment);
1048 DRM_DEBUG("page_order: %d\n", page_order);
1049 DRM_DEBUG("total: %d\n", total);
1051 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1052 return -EINVAL;
1053 if (dev->queue_count)
1054 return -EBUSY; /* Not while in use */
1056 spin_lock(&dev->count_lock);
1057 if (dev->buf_use) {
1058 spin_unlock(&dev->count_lock);
1059 return -EBUSY;
1061 atomic_inc(&dev->buf_alloc);
1062 spin_unlock(&dev->count_lock);
1064 down(&dev->struct_sem);
1065 entry = &dma->bufs[order];
1066 if (entry->buf_count) {
1067 up(&dev->struct_sem);
1068 atomic_dec(&dev->buf_alloc);
1069 return -ENOMEM; /* May only call once for each order */
1072 if (count < 0 || count > 4096) {
1073 up(&dev->struct_sem);
1074 atomic_dec(&dev->buf_alloc);
1075 return -EINVAL;
1078 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1079 DRM_MEM_BUFS);
1080 if (!entry->buflist) {
1081 up(&dev->struct_sem);
1082 atomic_dec(&dev->buf_alloc);
1083 return -ENOMEM;
1085 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1087 entry->buf_size = size;
1088 entry->page_order = page_order;
1090 offset = 0;
1092 while (entry->buf_count < count) {
1093 buf = &entry->buflist[entry->buf_count];
1094 buf->idx = dma->buf_count + entry->buf_count;
1095 buf->total = alignment;
1096 buf->order = order;
1097 buf->used = 0;
1099 buf->offset = (dma->byte_count + offset);
1100 buf->bus_address = agp_offset + offset;
1101 buf->address = (void *)(agp_offset + offset);
1102 buf->next = NULL;
1103 buf->waiting = 0;
1104 buf->pending = 0;
1105 init_waitqueue_head(&buf->dma_wait);
1106 buf->filp = NULL;
1108 buf->dev_priv_size = dev->driver->dev_priv_size;
1109 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1110 if (!buf->dev_private) {
1111 /* Set count correctly so we free the proper amount. */
1112 entry->buf_count = count;
1113 drm_cleanup_buf_error(dev, entry);
1114 up(&dev->struct_sem);
1115 atomic_dec(&dev->buf_alloc);
1116 return -ENOMEM;
1118 memset(buf->dev_private, 0, buf->dev_priv_size);
1120 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1122 offset += alignment;
1123 entry->buf_count++;
1124 byte_count += PAGE_SIZE << page_order;
1127 DRM_DEBUG("byte_count: %d\n", byte_count);
1129 temp_buflist = drm_realloc(dma->buflist,
1130 dma->buf_count * sizeof(*dma->buflist),
1131 (dma->buf_count + entry->buf_count)
1132 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1133 if (!temp_buflist) {
1134 /* Free the entry because it isn't valid */
1135 drm_cleanup_buf_error(dev, entry);
1136 up(&dev->struct_sem);
1137 atomic_dec(&dev->buf_alloc);
1138 return -ENOMEM;
1140 dma->buflist = temp_buflist;
1142 for (i = 0; i < entry->buf_count; i++) {
1143 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1146 dma->buf_count += entry->buf_count;
1147 dma->byte_count += byte_count;
1149 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1150 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1152 up(&dev->struct_sem);
1154 request->count = entry->buf_count;
1155 request->size = size;
1157 dma->flags = _DRM_DMA_USE_FB;
1159 atomic_dec(&dev->buf_alloc);
1160 return 0;
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to either addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
1177 int drm_addbufs( struct inode *inode, struct file *filp,
1178 unsigned int cmd, unsigned long arg )
1180 drm_buf_desc_t request;
1181 drm_file_t *priv = filp->private_data;
1182 drm_device_t *dev = priv->head->dev;
1183 int ret;
1185 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1186 return -EINVAL;
1188 if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
1189 sizeof(request) ) )
1190 return -EFAULT;
1192 #if __OS_HAS_AGP
1193 if ( request.flags & _DRM_AGP_BUFFER )
1194 ret=drm_addbufs_agp(dev, &request);
1195 else
1196 #endif
1197 if ( request.flags & _DRM_SG_BUFFER )
1198 ret=drm_addbufs_sg(dev, &request);
1199 else if ( request.flags & _DRM_FB_BUFFER)
1200 ret=drm_addbufs_fb(dev, &request);
1201 else
1202 ret=drm_addbufs_pci(dev, &request);
	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request,
				 sizeof(request))) {
			ret = -EFAULT;
		}
	}

	return ret;
}
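/*
 * Example (editor's sketch of the userspace side, assuming libdrm's
 * drmAddBufs() wrapper from xf86drm.h): the client, typically the X server,
 * requests a pool of DMA buffers roughly like this:
 *
 *	int count = drmAddBufs(fd, 64, 64 * 1024, 0, 0);
 *	if (count < 0)
 *		fprintf(stderr, "drmAddBufs failed: %d\n", count);
 *
 * A flags value of 0 selects the plain PCI path (drm_addbufs_pci() above);
 * DRM_AGP_BUFFER or DRM_SG_BUFFER route the request to the AGP and
 * scatter-gather variants instead.
 */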
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
1231 int drm_infobufs( struct inode *inode, struct file *filp,
1232 unsigned int cmd, unsigned long arg )
1234 drm_file_t *priv = filp->private_data;
1235 drm_device_t *dev = priv->head->dev;
1236 drm_device_dma_t *dma = dev->dma;
1237 drm_buf_info_t request;
1238 drm_buf_info_t __user *argp = (void __user *)arg;
1239 int i;
1240 int count;
1242 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1243 return -EINVAL;
1245 if ( !dma ) return -EINVAL;
1247 spin_lock( &dev->count_lock );
1248 if ( atomic_read( &dev->buf_alloc ) ) {
1249 spin_unlock( &dev->count_lock );
1250 return -EBUSY;
1252 ++dev->buf_use; /* Can't allocate more after this call */
1253 spin_unlock( &dev->count_lock );
1255 if ( copy_from_user( &request, argp, sizeof(request) ) )
1256 return -EFAULT;
1258 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1259 if ( dma->bufs[i].buf_count ) ++count;
1262 DRM_DEBUG( "count = %d\n", count );
1264 if ( request.count >= count ) {
1265 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1266 if ( dma->bufs[i].buf_count ) {
1267 drm_buf_desc_t __user *to = &request.list[count];
1268 drm_buf_entry_t *from = &dma->bufs[i];
1269 drm_freelist_t *list = &dma->bufs[i].freelist;
1270 if ( copy_to_user( &to->count,
1271 &from->buf_count,
1272 sizeof(from->buf_count) ) ||
1273 copy_to_user( &to->size,
1274 &from->buf_size,
1275 sizeof(from->buf_size) ) ||
1276 copy_to_user( &to->low_mark,
1277 &list->low_mark,
1278 sizeof(list->low_mark) ) ||
1279 copy_to_user( &to->high_mark,
1280 &list->high_mark,
1281 sizeof(list->high_mark) ) )
1282 return -EFAULT;
1284 DRM_DEBUG( "%d %d %d %d %d\n",
1286 dma->bufs[i].buf_count,
1287 dma->bufs[i].buf_size,
1288 dma->bufs[i].freelist.low_mark,
1289 dma->bufs[i].freelist.high_mark );
1290 ++count;
1294 request.count = count;
1296 if ( copy_to_user( argp, &request, sizeof(request) ) )
1297 return -EFAULT;
1299 return 0;
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water marks.
 *
 * \note This ioctl is deprecated and rarely used.
 */
1316 int drm_markbufs( struct inode *inode, struct file *filp,
1317 unsigned int cmd, unsigned long arg )
1319 drm_file_t *priv = filp->private_data;
1320 drm_device_t *dev = priv->head->dev;
1321 drm_device_dma_t *dma = dev->dma;
1322 drm_buf_desc_t request;
1323 int order;
1324 drm_buf_entry_t *entry;
1326 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1327 return -EINVAL;
1329 if ( !dma ) return -EINVAL;
1331 if ( copy_from_user( &request,
1332 (drm_buf_desc_t __user *)arg,
1333 sizeof(request) ) )
1334 return -EFAULT;
1336 DRM_DEBUG( "%d, %d, %d\n",
1337 request.size, request.low_mark, request.high_mark );
1338 order = drm_order( request.size );
1339 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
1340 entry = &dma->bufs[order];
1342 if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
1343 return -EINVAL;
1344 if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
1345 return -EINVAL;
1347 entry->freelist.low_mark = request.low_mark;
1348 entry->freelist.high_mark = request.high_mark;
1350 return 0;
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
1365 int drm_freebufs( struct inode *inode, struct file *filp,
1366 unsigned int cmd, unsigned long arg )
1368 drm_file_t *priv = filp->private_data;
1369 drm_device_t *dev = priv->head->dev;
1370 drm_device_dma_t *dma = dev->dma;
1371 drm_buf_free_t request;
1372 int i;
1373 int idx;
1374 drm_buf_t *buf;
1376 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1377 return -EINVAL;
1379 if ( !dma ) return -EINVAL;
1381 if ( copy_from_user( &request,
1382 (drm_buf_free_t __user *)arg,
1383 sizeof(request) ) )
1384 return -EFAULT;
1386 DRM_DEBUG( "%d\n", request.count );
1387 for ( i = 0 ; i < request.count ; i++ ) {
1388 if ( copy_from_user( &idx,
1389 &request.list[i],
1390 sizeof(idx) ) )
1391 return -EFAULT;
1392 if ( idx < 0 || idx >= dma->buf_count ) {
1393 DRM_ERROR( "Index %d (of %d max)\n",
1394 idx, dma->buf_count - 1 );
1395 return -EINVAL;
1397 buf = dma->buflist[idx];
1398 if ( buf->filp != filp ) {
1399 DRM_ERROR( "Process %d freeing buffer not owned\n",
1400 current->pid );
1401 return -EINVAL;
1403 drm_free_buffer( dev, buf );
	return 0;
}
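/*
 * Example (editor's sketch of the userspace side, assuming libdrm's
 * drmFreeBufs()): after drmDMA() reserved buffers that turn out not to be
 * needed, a client returns them by index.  buf0/buf1 are hypothetical
 * drmBuf pointers obtained from a prior mapping:
 *
 *	int idx[2] = { buf0->idx, buf1->idx };
 *
 *	if (drmFreeBufs(fd, 2, idx))
 *		fprintf(stderr, "drmFreeBufs failed\n");
 *
 * Only the process that reserved a buffer (buf->filp above) may free it.
 */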
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
1422 int drm_mapbufs( struct inode *inode, struct file *filp,
1423 unsigned int cmd, unsigned long arg )
1425 drm_file_t *priv = filp->private_data;
1426 drm_device_t *dev = priv->head->dev;
1427 drm_device_dma_t *dma = dev->dma;
1428 drm_buf_map_t __user *argp = (void __user *)arg;
1429 int retcode = 0;
1430 const int zero = 0;
1431 unsigned long virtual;
1432 unsigned long address;
1433 drm_buf_map_t request;
1434 int i;
1436 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1437 return -EINVAL;
1439 if ( !dma ) return -EINVAL;
1441 spin_lock( &dev->count_lock );
1442 if ( atomic_read( &dev->buf_alloc ) ) {
1443 spin_unlock( &dev->count_lock );
1444 return -EBUSY;
1446 dev->buf_use++; /* Can't allocate more after this call */
1447 spin_unlock( &dev->count_lock );
1449 if ( copy_from_user( &request, argp, sizeof(request) ) )
1450 return -EFAULT;
1452 if ( request.count >= dma->buf_count ) {
1453 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1454 || (drm_core_check_feature(dev, DRIVER_SG)
1455 && (dma->flags & _DRM_DMA_USE_SG))
1456 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1457 && (dma->flags & _DRM_DMA_USE_FB))) {
1458 drm_map_t *map = dev->agp_buffer_map;
1460 if ( !map ) {
1461 retcode = -EINVAL;
1462 goto done;
1465 #if LINUX_VERSION_CODE <= 0x020402
1466 down( &current->mm->mmap_sem );
1467 #else
1468 down_write( &current->mm->mmap_sem );
1469 #endif
1470 virtual = do_mmap( filp, 0, map->size,
1471 PROT_READ | PROT_WRITE,
1472 MAP_SHARED,
1473 (unsigned long)map->offset );
1474 #if LINUX_VERSION_CODE <= 0x020402
1475 up( &current->mm->mmap_sem );
1476 #else
1477 up_write( &current->mm->mmap_sem );
1478 #endif
1479 } else {
1480 #if LINUX_VERSION_CODE <= 0x020402
1481 down( &current->mm->mmap_sem );
1482 #else
1483 down_write( &current->mm->mmap_sem );
1484 #endif
1485 virtual = do_mmap( filp, 0, dma->byte_count,
1486 PROT_READ | PROT_WRITE,
1487 MAP_SHARED, 0 );
1488 #if LINUX_VERSION_CODE <= 0x020402
1489 up( &current->mm->mmap_sem );
1490 #else
1491 up_write( &current->mm->mmap_sem );
1492 #endif
1494 if ( virtual > -1024UL ) {
1495 /* Real error */
1496 retcode = (signed long)virtual;
1497 goto done;
1499 request.virtual = (void __user *)virtual;
1501 for ( i = 0 ; i < dma->buf_count ; i++ ) {
1502 if ( copy_to_user( &request.list[i].idx,
1503 &dma->buflist[i]->idx,
1504 sizeof(request.list[0].idx) ) ) {
1505 retcode = -EFAULT;
1506 goto done;
1508 if ( copy_to_user( &request.list[i].total,
1509 &dma->buflist[i]->total,
1510 sizeof(request.list[0].total) ) ) {
1511 retcode = -EFAULT;
1512 goto done;
1514 if ( copy_to_user( &request.list[i].used,
1515 &zero,
1516 sizeof(zero) ) ) {
1517 retcode = -EFAULT;
1518 goto done;
1520 address = virtual + dma->buflist[i]->offset; /* *** */
1521 if ( copy_to_user( &request.list[i].address,
1522 &address,
1523 sizeof(address) ) ) {
1524 retcode = -EFAULT;
1525 goto done;
1529 done:
1530 request.count = dma->buf_count;
1531 DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
1533 if ( copy_to_user( argp, &request, sizeof(request) ) )
1534 return -EFAULT;
	return retcode;
}
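/*
 * Example (editor's sketch of the userspace side, assuming libdrm's
 * drmMapBufs() and drmUnmapBufs()): the ioctl above backs the client call
 * that maps the whole buffer pool in one go:
 *
 *	drmBufMapPtr bufs = drmMapBufs(fd);
 *	int i;
 *
 *	if (!bufs)
 *		return;
 *	for (i = 0; i < bufs->count; i++)
 *		memset(bufs->list[i].address, 0, bufs->list[i].total);
 *	drmUnmapBufs(bufs);
 */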
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
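/*
 * Example (editor's note): drm_order(4096) == 12, drm_order(4097) == 13, and
 * drm_order(1) == 0; the result is the order used to index the dma->bufs[]
 * array in the addbufs paths above.  A typical sanity check before requesting
 * buffers, mirroring the code in this file:
 *
 *	order = drm_order(request->size);
 *	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 *		return -EINVAL;
 */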