/**
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
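
/*
 * Illustrative sketch (not part of this file): a PCI DRM driver would
 * typically combine these two helpers with drm_addmap(), defined further
 * below, to publish its register BAR.  The "foo" driver name and the BAR
 * index are hypothetical.
 *
 *	static int foo_map_mmio(drm_device_t *dev)
 *	{
 *		drm_local_map_t *mmio;
 *
 *		return drm_addmap(dev,
 *				  drm_get_resource_start(dev, 1),
 *				  drm_get_resource_len(dev, 1),
 *				  _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *	}
 */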
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}
/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

	while (1) {
		drm_map_list_t *_entry;

		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
int drm_addmap_core(drm_device_t * dev, unsigned int offset,
		    unsigned int size, drm_map_type_t type,
		    drm_map_flags_t flags, drm_map_list_t **maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if ( map->type == _DRM_FRAME_BUFFER ||
			     (map->flags & _DRM_WRITE_COMBINING) ) {
				map->mtrr = mtrr_add( map->offset, map->size,
						      MTRR_TYPE_WRCOMB, 1 );
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap( map->offset, map->size,
						   dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, drm_order( map->size ), map->handle );
		if ( !map->handle ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree( map->handle );
				drm_free( map, sizeof(*map), DRM_MEM_MAPS );
				return -EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_sem protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	up(&dev->struct_sem);

	*maplist = list;
	return 0;
}

int drm_addmap(drm_device_t *dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;
	if (put_user(maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}
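
/*
 * Illustrative user-space sketch (not part of this file), assuming a DRM file
 * descriptor "fd" opened read/write and a hypothetical register BAR described
 * by mmio_base/mmio_len: the server fills in a drm_map_t and issues
 * DRM_IOCTL_ADD_MAP; on return, map.handle carries the user_token that is
 * later passed to mmap() as the offset.
 *
 *	drm_map_t map;
 *	memset(&map, 0, sizeof(map));
 *	map.offset = mmio_base;
 *	map.size   = mmio_len;
 *	map.type   = _DRM_REGISTERS;
 *	map.flags  = _DRM_READ_ONLY;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		regs = mmap(NULL, map.size, PROT_READ, MAP_SHARED, fd,
 *			    (off_t)(unsigned long)map.handle);
 */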
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches the map on drm_device::maplist, removes it from the list, sees if
 * it's being used, and frees any associated resources (such as MTRRs) if it
 * isn't.
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset,
					   map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
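
/*
 * Illustrative sketch (not part of this file): a driver that created a
 * private map with drm_addmap() can drop it again, for example at unload
 * time, with drm_rmmap().  The "foo" names are hypothetical.
 *
 *	static void foo_unmap_mmio(drm_device_t *dev, drm_local_map_t *mmio)
 *	{
 *		if (mmio)
 *			drm_rmmap(dev, mmio);
 *	}
 */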
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order,
					       DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist),
			 DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist),
			 DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
#if __OS_HAS_AGP
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev,entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
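
/*
 * Illustrative sketch (not part of this file): a driver that manages its own
 * AGP aperture can request DMA buffers directly through drm_addbufs_agp()
 * instead of going through the ioctl.  The counts, sizes and aperture offset
 * below are hypothetical.
 *
 *	drm_buf_desc_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.count     = 64;
 *	req.size      = 65536;
 *	req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = 0;
 *	ret = drm_addbufs_agp(dev, &req);
 *
 * On success req.count and req.size report what was actually allocated.
 */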
int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request->count, request->size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
				    DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
				   DRM_MEM_PAGES );
	if (!temp_pagelist) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		drm_free( entry->seglist,
			  count * sizeof(*entry->seglist),
			  DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = drm_alloc_pages( page_order, DRM_MEM_DMA );
		if ( !page ) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free( temp_pagelist,
				  (dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist),
				  DRM_MEM_PAGES );
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc( buf->dev_priv_size,
						      DRM_MEM_BUFS );
			if(!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev,entry);
				drm_free( temp_pagelist,
					  (dma->page_count + (count << page_order))
					  * sizeof(*dma->pagelist),
					  DRM_MEM_PAGES );
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
				return -ENOMEM;
			}
			memset( buf->dev_private, 0, buf->dev_priv_size );

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		drm_free( temp_pagelist,
			  (dma->page_count + (count << page_order))
			  * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev,entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if(!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev,entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, the call is dispatched to drm_addbufs_agp(),
 * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
 * scatter-gather, framebuffer or consistent PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
		ret = drm_addbufs_sg(dev, &request);
	else if ( request.flags & _DRM_FB_BUFFER )
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request,
				 sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
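
/*
 * Illustrative user-space sketch (not part of this file): a client asks for
 * DMA buffers by filling a drm_buf_desc_t and issuing DRM_IOCTL_ADD_BUFS; the
 * kernel writes back how many buffers of which size were really created.
 * The counts, sizes and aperture offset are hypothetical.
 *
 *	drm_buf_desc_t desc;
 *	memset(&desc, 0, sizeof(desc));
 *	desc.count     = 32;
 *	desc.size      = 16384;
 *	desc.flags     = _DRM_AGP_BUFFER;
 *	desc.agp_start = buffer_offset_in_aperture;
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		printf("got %d buffers of %d bytes\n", desc.count, desc.size);
 */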
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}
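
/*
 * Illustrative user-space sketch (not part of this file): the usual two-call
 * pattern for DRM_IOCTL_INFO_BUFS, first to learn how many buffer-size pools
 * exist and then to fetch their descriptions.
 *
 *	drm_buf_info_t info;
 *	memset(&info, 0, sizeof(info));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *	info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */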
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = drm_order( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
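
/*
 * Illustrative user-space sketch (not part of this file): setting freelist
 * watermarks for the 16KB pool via DRM_IOCTL_MARK_BUFS.  The marks themselves
 * are hypothetical and, as noted above, this ioctl is essentially unused.
 *
 *	drm_buf_desc_t mark;
 *	memset(&mark, 0, sizeof(mark));
 *	mark.size      = 16384;
 *	mark.low_mark  = 4;
 *	mark.high_mark = 24;
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &mark);
 */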
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		drm_free_buffer( dev, buf );
	}

	return 0;
}
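
/*
 * Illustrative user-space sketch (not part of this file): returning two
 * previously reserved buffers via DRM_IOCTL_FREE_BUFS.  The indices 5 and 6
 * are hypothetical.
 *
 *	int idx[2] = { 5, 6 };
 *	drm_buf_free_t req;
 *	req.count = 2;
 *	req.list  = idx;
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */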
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

			down_write( &current->mm->mmap_sem );
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, token );
			up_write( &current->mm->mmap_sem );
		} else {
			down_write( &current->mm->mmap_sem );
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
			up_write( &current->mm->mmap_sem );
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero, sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}
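
/*
 * Illustrative user-space sketch (not part of this file): mapping every DMA
 * buffer into the client with DRM_IOCTL_MAP_BUFS.  The list must have room
 * for as many entries as the device has buffers; "known_buf_count" is a
 * hypothetical value learned from DRM_IOCTL_INFO_BUFS or a prior call.
 *
 *	drm_buf_map_t bufs;
 *	bufs.count   = known_buf_count;
 *	bufs.virtual = NULL;
 *	bufs.list    = malloc(known_buf_count * sizeof(drm_buf_pub_t));
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs) == 0)
 *		first = bufs.list[0].address;
 */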
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
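
/*
 * Illustrative values (not part of the original file): drm_order(4096) == 12,
 * drm_order(4097) == 13, and drm_order(PAGE_SIZE) == PAGE_SHIFT.  This is why
 * the addbufs functions above compute
 *
 *	size       = 1 << drm_order(request->size);
 *	page_order = drm_order(size) - PAGE_SHIFT;	(clamped at zero)
 *
 * so that each allocation is rounded up to a whole power-of-two number of
 * pages.
 */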