/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>

#include <drm/drmP.h>

#include "drm_legacy.h"

#include <vm/vm_map.h>
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
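/*
 * Illustrative sketch (not part of the upstream source): with a 4 KiB
 * PAGE_SIZE (PAGE_SHIFT = 12) and SHMLBA = 16 KiB, the code above computes
 * bits = ilog2(16384 >> 12) + 1 = ilog2(4) + 1 = 3, so the three low
 * page-offset bits of the kernel virtual address survive into the hash:
 *
 *	user_token = 0xffffc90000035000UL;	// hypothetical vmalloc address
 *	add |= (user_token >> PAGE_SHIFT) & 7;	// keeps pgoff bits 0..2
 *
 * A later mmap() of the returned token therefore lands at a user address
 * whose SHMLBA-relevant bits match the kernel mapping, which avoids cache
 * aliasing on virtually-indexed-cache machines.
 */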
/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), M_DRM, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t) LINUX_PAGE_MASK)) || (map->size & (~LINUX_PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
#if IS_ENABLED(CONFIG_AGP)
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
#else
	case _DRM_AGP:
		kfree(map);
		return -EINVAL;	/* AGP hardware is no longer supported */
#endif
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
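/*
 * Driver-side usage sketch (hypothetical, for illustration only): a legacy
 * driver would typically register its register BAR at load time roughly
 * like this; "pdev" and the BAR index are assumptions, not taken from this
 * file:
 *
 *	struct drm_local_map *regs;
 *	int ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				    pci_resource_len(pdev, 0),
 *				    _DRM_REGISTERS, 0, &regs);
 *	if (ret)
 *		return ret;
 *	// regs->handle now holds the ioremap()ed CPU pointer
 */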
/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
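/*
 * Userspace view (sketch): libdrm's drmAddMap() wraps this ioctl, and the
 * returned handle is the 32-bit token to pass as the mmap() offset, not a
 * CPU pointer. Roughly:
 *
 *	drm_handle_t token;
 *	drmAddMap(fd, offset, size, DRM_SHM, DRM_CONTAINS_LOCK, &token);
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, token);
 *
 * The exact libdrm call sequence above is an assumption for illustration.
 */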
/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its information
 * into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = r_list->map->mtrr;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/*
 * Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it's being used, and frees any associated resources (such as
 * MTRRs) if it's not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
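/*
 * Locking sketch (illustrative): the _locked variant assumes the caller
 * already holds dev->struct_mutex, which is exactly what the unlocked
 * wrapper below does:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	drm_legacy_rmmap_locked(dev, map);
 *	mutex_unlock(&dev->struct_mutex);
 */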
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);
void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (dev->buf_use) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	lockmgr(&dev->buf_lock, LK_RELEASE);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
				GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
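/*
 * Worked example of the AGP bounds check above (numbers are hypothetical):
 * with agp_entry->bound = 0xf0000000, agp_entry->pages = 4096 and a 4 KiB
 * PAGE_SIZE, the owned zone is [0xf0000000, 0xf1000000). A request with
 * agp_offset = 0xf0000000, total = 64 KiB and count = 256 needs 16 MiB,
 * which fits exactly, so "valid" is set; one byte more and the request
 * would be rejected with "zone invalid".
 */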
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (dev->buf_use) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	lockmgr(&dev->buf_lock, LK_RELEASE);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
				GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
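/*
 * Sizing arithmetic sketch (illustrative): a request for 16 KiB buffers on
 * a 4 KiB-page machine gives order = order_base_2(16384) = 14,
 * size = 1 << 14 = 16384, page_order = 14 - 12 = 2, so each
 * drm_pci_alloc() segment is PAGE_SIZE << 2 = 16 KiB, i.e. exactly one
 * buffer per segment. A sub-page request, say 2 KiB (order 11,
 * page_order 0), instead packs two 2 KiB buffers into each 4 KiB segment
 * via the inner "offset += alignment" loop above.
 */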
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (dev->buf_use) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	lockmgr(&dev->buf_lock, LK_RELEASE);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
				GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
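/*
 * Dispatch summary (sketch): the request flags chosen by userspace select
 * the backend above, e.g. via libdrm's drmAddBufs():
 *
 *	drmAddBufs(fd, 32, 0x10000, DRM_AGP_BUFFER, agp_start);	// AGP path
 *	drmAddBufs(fd, 32, 0x10000, DRM_SG_BUFFER, sg_start);	// SG path
 *	drmAddBufs(fd, 32, 0x10000, 0, 0);			// PCI path
 *
 * _DRM_FB_BUFFER requests are rejected here. The drmAddBufs() calls are an
 * assumption for illustration, not taken from this file.
 */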
/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (atomic_read(&dev->buf_alloc)) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	lockmgr(&dev->buf_lock, LK_RELEASE);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];
			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}
static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

	/* copy_to_user() returns the number of uncopied bytes, not an errno,
	 * so map any failure to -EFAULT for the caller's "< 0" check. */
	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
		return -EFAULT;
	return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;
	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
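/*
 * Note on copy_one_buf() above: offsetof(struct drm_buf_desc, flags)
 * deliberately truncates the copy to the four leading fields, so the
 * kernel never writes the user's "flags" member. A sketch of the layout
 * this relies on:
 *
 *	struct drm_buf_desc {
 *		int count, size, low_mark, high_mark;	// copied
 *		enum drm_buf_flag flags;		// left untouched
 *		...
 *	};
 */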
/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}
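/*
 * Water-mark usage sketch (illustrative): after a driver has added, say,
 * 256 buffers of size 0x4000, userspace could hint the DMA scheduler with
 * something like
 *
 *	struct drm_buf_desc d = { .size = 0x4000,
 *				  .low_mark = 16, .high_mark = 240 };
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &d);
 *
 * Both marks must stay within 0..buf_count for that order. The concrete
 * values are assumptions; as noted above, the ioctl is essentially unused.
 */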
/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  DRM_CURRENTPID);
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}
/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
#ifndef __DragonFly__
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (atomic_read(&dev->buf_alloc)) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	lockmgr(&dev->buf_lock, LK_RELEASE);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
#else
	return -EINVAL;
#endif
}
static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
#ifndef __DragonFly__
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
#endif
	return 0;
}
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;
	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
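/*
 * Usage sketch (hypothetical): legacy drivers fetch the SAREA this way
 * during initialization, e.g.
 *
 *	struct drm_local_map *sarea = drm_legacy_getsarea(dev);
 *	if (!sarea)
 *		return -EINVAL;
 *	dev_priv->sarea_priv = (void *)((char *)sarea->handle + offset);
 *
 * "dev_priv" and "offset" are placeholders, not taken from this file.
 */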