/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $DragonFly: src/sys/dev/drm/drm_bufs.c,v 1.1 2008/04/05 18:12:29 hasso Exp $
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include <bus/pci/pcireg.h>

#include "dev/pci/pcireg.h"
/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1 << order))
		++order;

	return order;
}
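/*
 * Illustrative note (not from the original source): drm_order() returns
 * the base-2 logarithm of size, rounded up.  For example, with 4 KB pages,
 * drm_order(4096) == 12 and drm_order(4097) == 13, so a 4097-byte request
 * lands in the 8192-byte bucket of dma->bufs[] used later in this file.
 */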
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(drm_device_t *dev, int resource)
{
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	return 0;
}
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}
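/*
 * Usage sketch (illustrative, not from the original source): a driver can
 * locate a BAR without mapping it, treating 0 as failure in both calls:
 *
 *	unsigned long start = drm_get_resource_start(dev, 0);
 *	unsigned long len   = drm_get_resource_len(dev, 0);
 *	if (start == 0 || len == 0)
 *		return ENXIO;
 */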
int drm_addmap(drm_device_t *dev, unsigned long offset, unsigned long size,
    drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
	if (map == NULL) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, M_DRM, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, M_DRM);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, M_DRM);
				free(map, M_DRM);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, M_DRM);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, M_DRM);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset = map->offset + dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
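		/* Illustrative example (not from the original source): a
		 * 64 KB map keeps align = 0x10000 since it is already a
		 * power of two, while a 12 KB (0x3000) map fails the test
		 * above and falls back to PAGE_SIZE alignment. */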
		map->dmah = drm_pci_alloc(dev, map->size, align,
		    0xfffffffful);
		if (map->dmah == NULL) {
			free(map, M_DRM);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, M_DRM);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
int drm_addmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_map_t *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}
void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, M_DRM);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, M_DRM);
}
/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_local_map_t *map;
	drm_map_t *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, M_DRM);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, M_DRM);
		}
		free(entry->buflist, M_DRM);

		entry->buf_count = 0;
	}
}
static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid;*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
= dev
->agp
->base
+ request
->agp_start
;
429 DRM_DEBUG( "count: %d\n", count
);
430 DRM_DEBUG( "order: %d\n", order
);
431 DRM_DEBUG( "size: %d\n", size
);
432 DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset
);
433 DRM_DEBUG( "alignment: %d\n", alignment
);
434 DRM_DEBUG( "page_order: %d\n", page_order
);
435 DRM_DEBUG( "total: %d\n", total
);
437 /* Make sure buffers are located in AGP memory that we own */
438 /* Breaks MGA due to drm_alloc_agp not setting up entries for the
439 * memory. Safe to ignore for now because these ioctls are still
443 for (agp_entry = dev->agp->memory; agp_entry;
444 agp_entry = agp_entry->next) {
445 if ((agp_offset >= agp_entry->bound) &&
446 (agp_offset + total * count <=
447 agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
453 DRM_DEBUG("zone invalid\n");
457 entry
= &dma
->bufs
[order
];
459 entry
->buflist
= malloc(count
* sizeof(*entry
->buflist
), M_DRM
,
461 if ( !entry
->buflist
) {
465 entry
->buf_size
= size
;
466 entry
->page_order
= page_order
;
470 while ( entry
->buf_count
< count
) {
471 buf
= &entry
->buflist
[entry
->buf_count
];
472 buf
->idx
= dma
->buf_count
+ entry
->buf_count
;
473 buf
->total
= alignment
;
477 buf
->offset
= (dma
->byte_count
+ offset
);
478 buf
->bus_address
= agp_offset
+ offset
;
479 buf
->address
= (void *)(agp_offset
+ offset
);
482 buf
->file_priv
= NULL
;
484 buf
->dev_priv_size
= dev
->driver
.buf_priv_size
;
485 buf
->dev_private
= malloc(buf
->dev_priv_size
, M_DRM
,
487 if (buf
->dev_private
== NULL
) {
488 /* Set count correctly so we free the proper amount. */
489 entry
->buf_count
= count
;
490 drm_cleanup_buf_error(dev
, entry
);
496 byte_count
+= PAGE_SIZE
<< page_order
;
499 DRM_DEBUG( "byte_count: %d\n", byte_count
);
501 temp_buflist
= realloc(dma
->buflist
,
502 (dma
->buf_count
+ entry
->buf_count
) * sizeof(*dma
->buflist
), M_DRM
,
504 if (temp_buflist
== NULL
) {
505 /* Free the entry because it isn't valid */
506 drm_cleanup_buf_error(dev
, entry
);
509 dma
->buflist
= temp_buflist
;
511 for ( i
= 0 ; i
< entry
->buf_count
; i
++ ) {
512 dma
->buflist
[i
+ dma
->buf_count
] = &entry
->buflist
[i
];
515 dma
->buf_count
+= entry
->buf_count
;
516 dma
->byte_count
+= byte_count
;
518 DRM_DEBUG( "dma->buf_count : %d\n", dma
->buf_count
);
519 DRM_DEBUG( "entry->buf_count : %d\n", entry
->buf_count
);
521 request
->count
= entry
->buf_count
;
522 request
->size
= size
;
524 dma
->flags
= _DRM_DMA_USE_AGP
;
static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(entry->buflist, M_DRM);
		free(entry->seglist, M_DRM);
		free(temp_pagelist, M_DRM);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));
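	/* Note (illustrative): temp_pagelist is a staging copy of the page
	 * list; dma->pagelist is only swapped below once every allocation
	 * has succeeded, so a mid-loop failure leaves the old list intact. */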
582 DRM_DEBUG( "pagelist: %d entries\n",
583 dma
->page_count
+ (count
<< page_order
) );
585 entry
->buf_size
= size
;
586 entry
->page_order
= page_order
;
590 while ( entry
->buf_count
< count
) {
591 drm_dma_handle_t
*dmah
= drm_pci_alloc(dev
, size
, alignment
,
594 /* Set count correctly so we free the proper amount. */
595 entry
->buf_count
= count
;
596 entry
->seg_count
= count
;
597 drm_cleanup_buf_error(dev
, entry
);
598 free(temp_pagelist
, M_DRM
);
602 entry
->seglist
[entry
->seg_count
++] = dmah
;
603 for ( i
= 0 ; i
< (1 << page_order
) ; i
++ ) {
604 DRM_DEBUG( "page %d @ %p\n",
605 dma
->page_count
+ page_count
,
606 (char *)dmah
->vaddr
+ PAGE_SIZE
* i
);
607 temp_pagelist
[dma
->page_count
+ page_count
++] =
608 (long)dmah
->vaddr
+ PAGE_SIZE
* i
;
611 offset
+ size
<= total
&& entry
->buf_count
< count
;
612 offset
+= alignment
, ++entry
->buf_count
) {
613 buf
= &entry
->buflist
[entry
->buf_count
];
614 buf
->idx
= dma
->buf_count
+ entry
->buf_count
;
615 buf
->total
= alignment
;
618 buf
->offset
= (dma
->byte_count
+ byte_count
+ offset
);
619 buf
->address
= ((char *)dmah
->vaddr
+ offset
);
620 buf
->bus_address
= dmah
->busaddr
+ offset
;
623 buf
->file_priv
= NULL
;
625 buf
->dev_priv_size
= dev
->driver
.buf_priv_size
;
626 buf
->dev_private
= malloc(buf
->dev_priv_size
, M_DRM
,
628 if (buf
->dev_private
== NULL
) {
629 /* Set count correctly so we free the proper amount. */
630 entry
->buf_count
= count
;
631 entry
->seg_count
= count
;
632 drm_cleanup_buf_error(dev
, entry
);
633 free(temp_pagelist
, M_DRM
);
637 DRM_DEBUG( "buffer %d @ %p\n",
638 entry
->buf_count
, buf
->address
);
640 byte_count
+= PAGE_SIZE
<< page_order
;
643 temp_buflist
= realloc(dma
->buflist
,
644 (dma
->buf_count
+ entry
->buf_count
) * sizeof(*dma
->buflist
), M_DRM
,
646 if (temp_buflist
== NULL
) {
647 /* Free the entry because it isn't valid */
648 drm_cleanup_buf_error(dev
, entry
);
649 free(temp_pagelist
, M_DRM
);
652 dma
->buflist
= temp_buflist
;
654 for ( i
= 0 ; i
< entry
->buf_count
; i
++ ) {
655 dma
->buflist
[i
+ dma
->buf_count
] = &entry
->buflist
[i
];
658 /* No allocations failed, so now we can replace the orginal pagelist
661 free(dma
->pagelist
, M_DRM
);
662 dma
->pagelist
= temp_pagelist
;
664 dma
->buf_count
+= entry
->buf_count
;
665 dma
->seg_count
+= entry
->seg_count
;
666 dma
->page_count
+= entry
->seg_count
<< page_order
;
667 dma
->byte_count
+= PAGE_SIZE
* (entry
->seg_count
<< page_order
);
669 request
->count
= entry
->buf_count
;
670 request
->size
= size
;
static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver.buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (!DRM_SUSER(DRM_CURPROC)) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EACCES;
	}

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (!DRM_SUSER(DRM_CURPROC)) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EACCES;
	}

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_buf_desc_t *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
int drm_infobufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count],
				    &from, sizeof(drm_buf_desc_t)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i,
				    dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}
int drm_markbufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}
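/*
 * Note (illustrative, not from the original source): low_mark and high_mark
 * are freelist watermarks; e.g. marking the 16 KB bucket with low_mark = 4
 * and high_mark = 16 asks the freelist code to try to keep between 4 and 16
 * of those buffers available.  drm_markbufs() above only validates them
 * against buf_count; the freelist code consumes them.
 */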
int drm_freebufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}
int drm_mapbufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
#if defined(__FreeBSD__) || defined(__DragonFly__)
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	struct vnode *vn;
	voff_t foff;
	vsize_t size;
	vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */

	drm_buf_map_t *request = data;
	int i;

#if defined(__NetBSD__) || defined(__OpenBSD__)
	if (!vfinddev(kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 || defined(__DragonFly__)
	vms = DRM_CURPROC->td_proc->p_vmspace;
#else
	vms = DRM_CURPROC->p_vmspace;
#endif

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver.use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

#if defined(__FreeBSD__) || defined(__DragonFly__)
	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
	    foff);
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD */
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}