/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_len);

static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
					     drm_local_map_t * map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
					drm_device_t * dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

	while (1) {
		drm_map_list_t *_entry;

		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;	/* token is unused: hand it out */

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif
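
/*
 * Illustrative note (not part of the original file): user_token values must
 * fit the 32-bit handle field of the ioctl ABI.  On 64-bit kernels (_LP64)
 * a map's kernel address may not fit, so HandleID() hands out synthetic
 * page-sized tokens from [START_RANGE, END_RANGE) instead.  Roughly:
 *
 *	HandleID(0xa000, dev)             -> 0xa000 (fits, used as-is)
 *	HandleID(0xffff810012340000, dev) -> 0x10000000, 0x10001000, ...
 *	                                     (first token not already on
 *	                                      dev->maplist is returned)
 */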

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + map->size < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_sem protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	up(&dev->struct_sem);

	*maplist = list;
	return 0;
}

int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);
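
/*
 * Illustrative sketch (not part of the original file): a driver could
 * publish its register BAR through the map list roughly like this during
 * initialization; "regs" is hypothetical.
 *
 *	drm_local_map_t *regs;
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long len = drm_get_resource_len(dev, 0);
 *
 *	if (drm_addmap(dev, base, len, _DRM_REGISTERS,
 *		       _DRM_READ_ONLY, &regs))
 *		DRM_ERROR("could not add register map\n");
 */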

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API
	 * was fixed too late to change it */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRR's) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}

EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}

EXPORT_SYMBOL(drm_rmmap);
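
/*
 * Illustrative note (not part of the original file): drm_rmmap() is the
 * self-locking wrapper.  Code that already holds dev->struct_sem must call
 * drm_rmmap_locked() directly, e.g.:
 *
 *	down(&dev->struct_sem);
 *	... pick a map off dev->maplist ...
 *	ret = drm_rmmap_locked(dev, map);
 *	up(&dev->struct_sem);
 */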

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order, DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
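
/*
 * Illustrative sketch (not part of the original file): a caller would fill
 * a drm_buf_desc_t roughly like this before calling drm_addbufs_agp(); the
 * numbers and "aperture_offset" are hypothetical.
 *
 *	drm_buf_desc_t req;
 *
 *	req.count = 32;                  // ask for 32 buffers
 *	req.size = 0x10000;              // 64 KiB each => order 16
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = aperture_offset; // offset into the AGP aperture
 *	ret = drm_addbufs_agp(dev, &req);
 *	// on success, req.count/req.size report what was actually allocated
 */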

int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		page = drm_alloc_pages(page_order, DRM_MEM_DMA);
		if (!page) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = page + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				up(&dev->struct_sem);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

EXPORT_SYMBOL(drm_addbufs_pci);
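
/*
 * Worked example of the sizing arithmetic above (illustrative, assuming
 * 4 KiB pages, i.e. PAGE_SHIFT == 12):
 *
 *	request->size = 16384 => order = drm_order(16384) = 14,
 *	size = 1 << 14 = 16384, page_order = 14 - 12 = 2,
 *	total = PAGE_SIZE << 2 = 16384 bytes per segment, and with
 *	_DRM_PAGE_ALIGN set, alignment = PAGE_ALIGN(16384) = 16384.
 */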

static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
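
/*
 * Illustrative sketch (not part of the original file) of the userspace side
 * of this ioctl; "fd" is a hypothetical open DRM file descriptor:
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;
 *	req.size = 0x4000;
 *	req.flags = _DRM_PAGE_ALIGN;	// no AGP/SG/FB flag => PCI buffers
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
 *		// req.count/req.size now reflect the actual allocation
 */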

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
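
/*
 * Illustrative sketch (not part of the original file) of the userspace side
 * of this ioctl; "fd" and COUNT are hypothetical:
 *
 *	drm_buf_map_t bm;
 *	drm_buf_pub_t list[COUNT];
 *
 *	bm.count = COUNT;	// must be >= the kernel's dma->buf_count
 *	bm.list = list;
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0)
 *		// list[i].address now points into the freshly mapped region
 */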

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}

EXPORT_SYMBOL(drm_order);
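
/*
 * Worked examples (illustrative, not part of the original file):
 *
 *	drm_order(4096) == 12	// 4096 == 1 << 12, exact power of two
 *	drm_order(4097) == 13	// rounds up to the next power of two, 8192
 *	drm_order(1)    == 0
 */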