/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);

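/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a driver would typically use these two helpers to locate one of its PCI
 * BARs before handing the range to drm_addmap().  The BAR index (0) is an
 * assumption for the example only.
 *
 *	resource_size_t base = drm_get_resource_start(dev, 0);
 *	resource_size_t len  = drm_get_resource_len(dev, 0);
 *
 *	DRM_DEBUG("BAR0 at 0x%llx, %llu bytes\n",
 *		  (unsigned long long)base, (unsigned long long)len);
 */
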
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);
	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);

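/*
 * Illustrative sketch (assumption, not from this file): a typical in-kernel
 * caller maps its MMIO BAR with drm_addmap() and keeps the returned
 * drm_local_map pointer around until it calls drm_rmmap().  The BAR index,
 * the flag choice and the "regs" name are examples only.
 *
 *	struct drm_local_map *regs;
 *	int err;
 *
 *	err = drm_addmap(dev,
 *			 drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *	if (err)
 *		return err;
 */
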
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the API is already set */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;

			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

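/*
 * Usage note (editorial addition, not from the original source): drm_rmmap()
 * takes dev->struct_mutex itself, so a caller that already holds the mutex
 * must use drm_rmmap_locked() instead.  With "regs" standing for a map
 * pointer obtained earlier from drm_addmap(), the two patterns are:
 *
 *	drm_rmmap(dev, regs);
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = drm_rmmap_locked(dev, regs);
 *	mutex_unlock(&dev->struct_mutex);
 */
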
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

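/*
 * Illustrative sketch (assumption, not from this file): a driver requesting
 * AGP-backed DMA buffers fills a struct drm_buf_desc and calls
 * drm_addbufs_agp() directly.  The count, size and agp_start values below
 * are made-up example numbers.
 *
 *	struct drm_buf_desc req = {
 *		.count     = 32,
 *		.size      = 65536,
 *		.flags     = _DRM_AGP_BUFFER,
 *		.agp_start = 0,
 *	};
 *
 *	ret = drm_addbufs_agp(dev, &req);
 *	On success, req.count and req.size report what was actually allocated.
 */
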
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

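/*
 * Illustrative sketch (assumption, not from this file): consistent PCI
 * buffers are requested the same way as AGP buffers, just without
 * _DRM_AGP_BUFFER or _DRM_SG_BUFFER in the flags.  _DRM_PCI_BUFFER_RO is
 * optional and only marks the buffers read-only for clients; the numbers
 * below are example values.
 *
 *	struct drm_buf_desc req = {
 *		.count = 16,
 *		.size  = 4096,
 *		.flags = _DRM_PCI_BUFFER_RO,
 *	};
 *
 *	ret = drm_addbufs_pci(dev, &req);
 */
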
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);

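/*
 * Worked example (editorial note, not from the original source): drm_order()
 * returns the exponent of the smallest power of two >= size, so
 *
 *	drm_order(1)    == 0
 *	drm_order(4096) == 12
 *	drm_order(5000) == 13	(rounded up to the next power of two)
 *
 * which is why the addbufs functions compute size = 1 << order after
 * calling it.
 */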