/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
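
/*
 * Usage sketch (illustrative only, not part of the original file): a PCI
 * DRM driver would typically pair these helpers with drm_addmap() below
 * to expose a register BAR; the BAR index and flags here are assumptions
 * for the example:
 *
 *	drm_local_map_t *regs;
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long size = drm_get_resource_len(dev, 0);
 *	int ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 0, &regs);
 */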
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
					     drm_local_map_t *map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
			return entry;
		}
	}

	return NULL;
}
static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
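
/*
 * Worked example (assuming PAGE_SHIFT == 12): a user_token of 0xe0000000
 * fits in the low 32 bits, so the handle is stored directly with hash key
 * 0xe0000000 >> 12 == 0xe0000. A token with bits set above bit 31 (only
 * possible when BITS_PER_LONG == 64) forces the hashed path, where
 * drm_ht_just_insert_please() picks an unused key based at
 * DRM_MAP_HASH_OFFSET >> PAGE_SHIFT, so the resulting handle still fits
 * in a 32-bit mmap() offset.
 */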
/**
 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = ioremap(map->offset, map->size);
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		drm_agp_mem_t *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Note: dev->agp->base may actually be 0 when the DRM
		 * is not in control of AGP space. But if user space is
		 * it should already have added the AGP base itself.
		 */
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (dev->agp->memory && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist->head);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
	    map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map)))
		return -EFAULT;

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API
	 * is already set, so it's too late to change */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}
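
/*
 * The token written back through argp->handle is the list->user_token
 * computed in drm_addmap_core(); userspace later passes this same value
 * as the offset argument of mmap(2) on the DRM device node to actually
 * map the region.
 */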
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * and frees any associated resources (such as MTRR's) if it's not in use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request)))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
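
/*
 * Callers that fail partway through buffer setup first set
 * entry->buf_count / entry->seg_count to the full requested count (see
 * the "Set count correctly so we free the proper amount" comments
 * below), so the loops above walk the whole array rather than only the
 * slots that were successfully initialized.
 */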
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_agp_mem_t *agp_entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (dev->agp->memory && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
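
/*
 * Illustrative userspace call sequence (an assumption for this sketch,
 * not taken from this file): the X server requests AGP buffers with the
 * ADD_BUFS ioctl, which lands in drm_addbufs() further below:
 *
 *	drm_buf_desc_t req = {0};
 *	req.count = 32;
 *	req.size = 65536;
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = 0;
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * On return, req.count and req.size hold what was actually allocated.
 */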
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_dma_handle_t *dmah;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));
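
	/*
	 * Example of the pagelist sizing above, assuming page_order == 2
	 * and count == 16: each segment spans 1 << 2 == 4 pages, so room
	 * is reserved for dma->page_count existing entries plus
	 * 16 << 2 == 64 new ones before any allocation is attempted; the
	 * memcpy() preserves the old entries so a failure below can simply
	 * discard temp_pagelist.
	 */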
	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry's low and high water marks.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
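
/*
 * Illustrative userspace call (an assumption, not from this file),
 * setting the water marks on the freelist for 64 KiB buffers:
 *
 *	drm_buf_desc_t d = {0};
 *	d.size = 65536;			(selects the order-16 bucket)
 *	d.low_mark = 8;
 *	d.high_mark = 24;
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &d);
 *
 * Both marks must lie within [0, entry->buf_count], as checked above.
 */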
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
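
/*
 * Userspace counterpart (assumed for illustration): libdrm's
 * drmFreeBufs() packs the reserved buffer indices into a drm_buf_free_t
 * and issues DRM_IOCTL_FREE_BUFS, which lands here.
 */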
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}
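
/*
 * Userspace counterpart (assumed for illustration): libdrm's
 * drmMapBufs() fills a drm_buf_map_t whose list array has room for at
 * least dma->buf_count entries, invokes DRM_IOCTL_MAP_BUFS, and then
 * reads back request.virtual plus the per-buffer idx/total/address
 * fields written by the loop above.
 */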
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
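
/*
 * Examples: drm_order(4096) == 12, drm_order(4097) == 13, and
 * drm_order(65536) == 16; i.e. the result is ceil(log2(size)), which is
 * why the drm_addbufs_*() functions above compute size as 1 << order and
 * may round a request up to the next power of two.
 */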