/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}
static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;
#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
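
/*
 * Worked example (illustrative, derived from the code above): with 4 KB
 * pages (PAGE_SHIFT == 12), a map at offset 0xe0000000 yields a user_token
 * whose upper 32 bits are clear, so hash->key becomes 0xe0000000 >> 12 =
 * 0xe0000 and the token is kept as-is.  An _DRM_SHM map, whose user_token
 * is a 64-bit kernel virtual address, fails the 32-bit test and falls
 * through to drm_ht_just_insert_please(), which picks a free key so that
 * the final token (hash.key << PAGE_SHIFT) still fits in 32 bits for use
 * as an mmap() offset.
 */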
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's
 * where applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = ioremap(map->offset, map->size);
		break;
	case _DRM_SHM:
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist->head);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
	    map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}
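
/*
 * Userspace view (an illustrative sketch, not part of this file; assumes
 * the userspace drm.h header and a device node at /dev/dri/card0):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_map map = {
 *		.size  = 4096,			// must be page aligned
 *		.type  = _DRM_SHM,
 *		.flags = _DRM_CONTAINS_LOCK,
 *	};
 *	int fd = open("/dev/dri/card0", O_RDWR);	// read/write required
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0) {
 *		// map.handle now carries the 32-bit user_token assigned by
 *		// drm_map_handle(), usable as an mmap() offset.  Non-AGP
 *		// map types require CAP_SYS_ADMIN, as checked above.
 *	}
 */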
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRR's) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This
 * seems unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
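
/*
 * Sizing example (illustrative): a request with size 65536 gives
 * order = drm_order(65536) = 16 and size = 1 << 16.  With 4 KB pages,
 * page_order = 16 - PAGE_SHIFT = 4, so each loop iteration accounts for
 * PAGE_SIZE << 4 = 64 KB, and _DRM_PAGE_ALIGN leaves the alignment at
 * 65536 since that is already a whole number of pages.
 */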
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_dma_handle_t *dmah;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret)
		return ret;

	if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
		return -EFAULT;
	}

	return 0;
}
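
/*
 * Userspace view (an illustrative sketch, not part of this file; assumes
 * the userspace drm.h header and an already-open DRM file descriptor fd):
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER,	// omit for PCI buffers
 *		.agp_start = 0,			// offset within the aperture
 *	};
 *	// On success the kernel writes back the number of buffers actually
 *	// created and the per-buffer size rounded up to 1 << drm_order(size).
 *	int err = ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 */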
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}
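
/*
 * Note: a client typically calls this ioctl twice.  The first call passes
 * request.count == 0, so only the number of populated size orders comes
 * back in request.count; the second call supplies a drm_buf_desc list at
 * least that long, which the loop above fills with one entry per
 * non-empty dma->bufs[order].
 */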
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water marks.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies
 * information about each buffer into user space.  For PCI buffers, it calls
 * do_mmap() with offset equal to 0, which drm_mmap() interprets as PCI
 * buffers and calls drm_mmap_dma().
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
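
/*
 * Userspace view (an illustrative sketch, not part of this file; assumes
 * the userspace drm.h header, an open DRM file descriptor fd, and that the
 * count passed in covers every buffer created earlier with
 * DRM_IOCTL_ADD_BUFS, since the code above only maps when
 * request.count >= dma->buf_count):
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_buf_pub list[32];
 *	struct drm_buf_map bufs = { .count = 32, .list = list };
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs) == 0) {
 *		// bufs.virtual is the base of the single mmap()ed region;
 *		// list[i].address points at buffer i within it.
 *	}
 *
 * libdrm wraps this pattern as drmMapBufs().
 */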
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
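
/*
 * Examples: drm_order(4096) == 12, drm_order(4097) == 13 (rounded up to
 * the next power of two), and drm_order(1) == 0.  Note that drm_order(0)
 * also returns 0, so callers are expected to pass a non-zero size.
 */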