1 /* drm_bufs.h -- Generic buffer template -*- linux-c -*-
2 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 * $FreeBSD: src/sys/dev/drm/drm_bufs.h,v 1.5.2.1 2003/04/26 07:05:28 anholt Exp $
31 * $DragonFly: src/sys/dev/drm/drm_bufs.h,v 1.4 2004/02/13 02:23:57 joerg Exp $
34 #include "dev/drm/drmP.h"
36 #ifndef __HAVE_PCI_DMA
37 #define __HAVE_PCI_DMA 0
44 #ifndef DRIVER_BUF_PRIV_T
45 #define DRIVER_BUF_PRIV_T u32
47 #ifndef DRIVER_AGP_BUFFERS_MAP
48 #if __HAVE_AGP && __HAVE_DMA
49 #error "You must define DRIVER_AGP_BUFFERS_MAP()"
51 #define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
56 * Compute order. Can be made faster.
58 int DRM(order
)( unsigned long size
)
63 for ( order
= 0, tmp
= size
; tmp
>>= 1 ; ++order
);
65 if ( size
& ~(1 << order
) )
71 int DRM(addmap
)( DRM_IOCTL_ARGS
)
76 drm_map_list_entry_t
*list
;
78 if (!(dev
->flags
& (FREAD
|FWRITE
)))
79 return DRM_ERR(EACCES
); /* Require read/write */
81 DRM_COPY_FROM_USER_IOCTL( request
, (drm_map_t
*)data
, sizeof(drm_map_t
) );
83 map
= (drm_local_map_t
*) DRM(alloc
)( sizeof(*map
), DRM_MEM_MAPS
);
85 return DRM_ERR(ENOMEM
);
87 map
->offset
= request
.offset
;
88 map
->size
= request
.size
;
89 map
->type
= request
.type
;
90 map
->flags
= request
.flags
;
94 /* Only allow shared memory to be removable since we only keep enough
95 * book keeping information about shared memory to allow for removal
96 * when processes fork.
98 if ( (map
->flags
& _DRM_REMOVABLE
) && map
->type
!= _DRM_SHM
) {
99 DRM(free
)( map
, sizeof(*map
), DRM_MEM_MAPS
);
100 return DRM_ERR(EINVAL
);
102 DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
103 map
->offset
, map
->size
, map
->type
);
104 if ( (map
->offset
& PAGE_MASK
) || (map
->size
& PAGE_MASK
) ) {
105 DRM(free
)( map
, sizeof(*map
), DRM_MEM_MAPS
);
106 return DRM_ERR(EINVAL
);
109 switch ( map
->type
) {
111 case _DRM_FRAME_BUFFER
:
112 if ( map
->offset
+ map
->size
< map
->offset
) {
113 DRM(free
)( map
, sizeof(*map
), DRM_MEM_MAPS
);
114 return DRM_ERR(EINVAL
);
116 #if __REALLY_HAVE_MTRR
117 if ( map
->type
== _DRM_FRAME_BUFFER
||
118 (map
->flags
& _DRM_WRITE_COMBINING
) ) {
119 #if defined(__DragonFly__) || defined(__FreeBSD__)
120 int retcode
= 0, act
;
121 struct mem_range_desc mrdesc
;
122 mrdesc
.mr_base
= map
->offset
;
123 mrdesc
.mr_len
= map
->size
;
124 mrdesc
.mr_flags
= MDF_WRITECOMBINE
;
125 act
= MEMRANGE_SET_UPDATE
;
126 bcopy(DRIVER_NAME
, &mrdesc
.mr_owner
, strlen(DRIVER_NAME
));
127 retcode
= mem_range_attr_set(&mrdesc
, &act
);
129 #elif defined __NetBSD__
132 mtrrmap
.base
= map
->offset
;
133 mtrrmap
.len
= map
->size
;
134 mtrrmap
.type
= MTRR_TYPE_WC
;
135 mtrrmap
.flags
= MTRR_VALID
;
136 map
->mtrr
= mtrr_set( &mtrrmap
, &one
, p
, MTRR_GETSET_KERNEL
);
139 #endif /* __REALLY_HAVE_MTRR */
144 map
->handle
= (void *)DRM(alloc
)(map
->size
, DRM_MEM_SAREA
);
145 DRM_DEBUG( "%ld %d %p\n",
146 map
->size
, DRM(order
)( map
->size
), map
->handle
);
147 if ( !map
->handle
) {
148 DRM(free
)( map
, sizeof(*map
), DRM_MEM_MAPS
);
149 return DRM_ERR(ENOMEM
);
151 map
->offset
= (unsigned long)map
->handle
;
152 if ( map
->flags
& _DRM_CONTAINS_LOCK
) {
153 dev
->lock
.hw_lock
= map
->handle
; /* Pointer to lock */
156 #if __REALLY_HAVE_AGP
158 map
->offset
+= dev
->agp
->base
;
159 map
->mtrr
= dev
->agp
->agp_mtrr
; /* for getmap */
162 case _DRM_SCATTER_GATHER
:
164 DRM(free
)(map
, sizeof(*map
), DRM_MEM_MAPS
);
165 return DRM_ERR(EINVAL
);
167 map
->offset
= map
->offset
+ dev
->sg
->handle
;
171 DRM(free
)( map
, sizeof(*map
), DRM_MEM_MAPS
);
172 return DRM_ERR(EINVAL
);
175 list
= DRM(alloc
)(sizeof(*list
), DRM_MEM_MAPS
);
177 DRM(free
)(map
, sizeof(*map
), DRM_MEM_MAPS
);
178 return DRM_ERR(EINVAL
);
180 memset(list
, 0, sizeof(*list
));
184 TAILQ_INSERT_TAIL(dev
->maplist
, list
, link
);
187 request
.offset
= map
->offset
;
188 request
.size
= map
->size
;
189 request
.type
= map
->type
;
190 request
.flags
= map
->flags
;
191 request
.mtrr
= map
->mtrr
;
192 request
.handle
= map
->handle
;
194 if ( request
.type
!= _DRM_SHM
) {
195 request
.handle
= (void *)request
.offset
;
198 DRM_COPY_TO_USER_IOCTL( (drm_map_t
*)data
, request
, sizeof(drm_map_t
) );
204 /* Remove a map private from list and deallocate resources if the mapping
208 int DRM(rmmap
)( DRM_IOCTL_ARGS
)
211 drm_map_list_entry_t
*list
;
212 drm_local_map_t
*map
;
216 DRM_COPY_FROM_USER_IOCTL( request
, (drm_map_t
*)data
, sizeof(request
) );
219 TAILQ_FOREACH(list
, dev
->maplist
, link
) {
221 if(map
->handle
== request
.handle
&&
222 map
->flags
& _DRM_REMOVABLE
) break;
225 /* List has wrapped around to the head pointer, or its empty we didn't
230 return DRM_ERR(EINVAL
);
232 TAILQ_REMOVE(dev
->maplist
, list
, link
);
233 DRM(free
)(list
, sizeof(*list
), DRM_MEM_MAPS
);
239 case _DRM_FRAME_BUFFER
:
240 #if __REALLY_HAVE_MTRR
241 if (map
->mtrr
>= 0) {
243 #if defined(__DragonFly__) || defined(__FreeBSD__)
245 struct mem_range_desc mrdesc
;
246 mrdesc
.mr_base
= map
->offset
;
247 mrdesc
.mr_len
= map
->size
;
248 mrdesc
.mr_flags
= MDF_WRITECOMBINE
;
249 act
= MEMRANGE_SET_REMOVE
;
250 bcopy(DRIVER_NAME
, &mrdesc
.mr_owner
, strlen(DRIVER_NAME
));
251 retcode
= mem_range_attr_set(&mrdesc
, &act
);
252 #elif defined __NetBSD__
255 mtrrmap
.base
= map
->offset
;
256 mtrrmap
.len
= map
->size
;
259 mtrrmap
.owner
= p
->p_pid
;
260 retcode
= mtrr_set( &mtrrmap
, &one
, p
, MTRR_GETSET_KERNEL
);
261 DRM_DEBUG("mtrr_del = %d\n", retcode
);
265 DRM(ioremapfree
)( map
);
268 DRM(free
)( map
->handle
, map
->size
, DRM_MEM_SAREA
);
271 case _DRM_SCATTER_GATHER
:
274 DRM(free
)(map
, sizeof(*map
), DRM_MEM_MAPS
);
283 static void DRM(cleanup_buf_error
)(drm_buf_entry_t
*entry
)
287 if (entry
->seg_count
) {
288 for (i
= 0; i
< entry
->seg_count
; i
++) {
289 DRM(free
)((void *)entry
->seglist
[i
],
293 DRM(free
)(entry
->seglist
,
295 sizeof(*entry
->seglist
),
298 entry
->seg_count
= 0;
301 if(entry
->buf_count
) {
302 for(i
= 0; i
< entry
->buf_count
; i
++) {
303 if(entry
->buflist
[i
].dev_private
) {
304 DRM(free
)(entry
->buflist
[i
].dev_private
,
305 entry
->buflist
[i
].dev_priv_size
,
309 DRM(free
)(entry
->buflist
,
311 sizeof(*entry
->buflist
),
314 entry
->buf_count
= 0;
#if __REALLY_HAVE_AGP
/*
 * Allocate DMA buffers carved out of the AGP aperture for the given order.
 * May be called only once per order; on failure the partially built entry
 * is torn down with DRM(cleanup_buf_error).
 */
static int DRM(addbufs_agp)(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = DRM(order)(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count:      %d\n",  count );
	DRM_DEBUG( "order:      %d\n",  order );
	DRM_DEBUG( "size:       %d\n",  size );
	DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n",  alignment );
	DRM_DEBUG( "page_order: %d\n",  page_order );
	DRM_DEBUG( "total:      %d\n",  total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);

	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		/* AGP buffers live at fixed aperture offsets; no per-buffer
		 * allocation is needed for the data itself.
		 */
		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->filp    = NULL;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			return DRM_ERR(ENOMEM);
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}
#endif /* __REALLY_HAVE_AGP */
#if __HAVE_PCI_DMA
/*
 * Allocate DMA buffers from kernel memory for PCI DMA, one page-order
 * sized segment at a time, recording each page in dma->pagelist.  May be
 * called only once per order.
 */
static int DRM(addbufs_pci)(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long page;
	unsigned long offset;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	int alignment;
	int byte_count;
	int page_count;
	int i;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = DRM(order)(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
		   request->count, request->size, size, order );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
				     DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	/* Grow the global page list to make room for every page of every
	 * new segment; on failure nothing has been committed yet.
	 */
	temp_pagelist = DRM(realloc)( dma->pagelist,
				      dma->page_count * sizeof(*dma->pagelist),
				      (dma->page_count + (count << page_order))
				      * sizeof(*dma->pagelist),
				      DRM_MEM_PAGES );
	if (!temp_pagelist) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		DRM(free)( entry->seglist,
			   count * sizeof(*entry->seglist),
			   DRM_MEM_SEGS );
		return DRM_ERR(ENOMEM);
	}

	dma->pagelist = temp_pagelist;
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		/* One segment per iteration; carve buffers out of it. */
		page = (unsigned long)DRM(alloc)( size, DRM_MEM_DMA );
		if ( !page ) break;
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			dma->pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf	     = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next    = NULL;
			buf->pending = 0;
			buf->filp    = NULL;
			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}
#endif /* __HAVE_PCI_DMA */
#if __REALLY_HAVE_SG
/*
 * Allocate DMA buffers backed by the scatter/gather region for the given
 * order.  Mirrors addbufs_agp but addresses are relative to dev->sg->handle.
 * May be called only once per order.
 */
static int DRM(addbufs_sg)(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = DRM(order)(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count:      %d\n",  count );
	DRM_DEBUG( "order:      %d\n",  order );
	DRM_DEBUG( "size:       %d\n",  size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n",  alignment );
	DRM_DEBUG( "page_order: %d\n",  page_order );
	DRM_DEBUG( "total:      %d\n",  total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);

	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		/* Kernel-visible address goes through the SG handle. */
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->pending = 0;
		buf->filp    = NULL;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if(!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			return DRM_ERR(ENOMEM);
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
#endif /* __REALLY_HAVE_SG */
724 int DRM(addbufs
)( DRM_IOCTL_ARGS
)
727 drm_buf_desc_t request
;
730 DRM_COPY_FROM_USER_IOCTL( request
, (drm_buf_desc_t
*)data
, sizeof(request
) );
732 if (dev
->dma
== NULL
)
733 return DRM_ERR(EINVAL
);
735 if (request
.count
< 0 || request
.count
> 4096)
736 return DRM_ERR(EINVAL
);
738 DRM_SPINLOCK(&dev
->count_lock
);
740 DRM_SPINUNLOCK(&dev
->count_lock
);
741 return DRM_ERR(EBUSY
);
743 /* dev->buf_alloc acts as a lock to prevent infobufs/mapbufs from
744 * trying to read from the dma->bufs while buffers are being allocated */
746 DRM_SPINUNLOCK(&dev
->count_lock
);
749 #if __REALLY_HAVE_AGP
750 if ( request
.flags
& _DRM_AGP_BUFFER
)
751 err
= DRM(addbufs_agp
)(dev
, &request
);
755 if ( request
.flags
& _DRM_SG_BUFFER
)
756 err
= DRM(addbufs_sg
)(dev
, &request
);
760 err
= DRM(addbufs_pci
)(dev
, &request
);
762 err
= DRM_ERR(EINVAL
);
765 DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t
*)data
, request
, sizeof(request
));
767 DRM_SPINLOCK(&dev
->count_lock
);
769 DRM_SPINUNLOCK(&dev
->count_lock
);
774 int DRM(infobufs
)( DRM_IOCTL_ARGS
)
777 drm_device_dma_t
*dma
= dev
->dma
;
778 drm_buf_info_t request
;
782 if ( !dma
) return DRM_ERR(EINVAL
);
784 DRM_SPINLOCK( &dev
->count_lock
);
785 if (dev
->buf_alloc
!= 0) {
786 DRM_SPINUNLOCK( &dev
->count_lock
);
787 return DRM_ERR(EBUSY
);
789 ++dev
->buf_use
; /* Can't allocate more after this call */
790 DRM_SPINUNLOCK( &dev
->count_lock
);
792 DRM_COPY_FROM_USER_IOCTL( request
, (drm_buf_info_t
*)data
, sizeof(request
) );
794 for ( i
= 0, count
= 0 ; i
< DRM_MAX_ORDER
+ 1 ; i
++ ) {
795 if ( dma
->bufs
[i
].buf_count
) ++count
;
798 DRM_DEBUG( "count = %d\n", count
);
800 if ( request
.count
>= count
) {
801 for ( i
= 0, count
= 0 ; i
< DRM_MAX_ORDER
+ 1 ; i
++ ) {
802 if ( dma
->bufs
[i
].buf_count
) {
805 from
.count
= dma
->bufs
[i
].buf_count
;
806 from
.size
= dma
->bufs
[i
].buf_size
;
807 from
.low_mark
= dma
->bufs
[i
].freelist
.low_mark
;
808 from
.high_mark
= dma
->bufs
[i
].freelist
.high_mark
;
810 if (DRM_COPY_TO_USER(&request
.list
[count
], &from
,
811 sizeof(drm_buf_desc_t
)) != 0)
812 return DRM_ERR(EFAULT
);
814 DRM_DEBUG( "%d %d %d %d %d\n",
816 dma
->bufs
[i
].buf_count
,
817 dma
->bufs
[i
].buf_size
,
818 dma
->bufs
[i
].freelist
.low_mark
,
819 dma
->bufs
[i
].freelist
.high_mark
);
824 request
.count
= count
;
826 DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t
*)data
, request
, sizeof(request
) );
831 int DRM(markbufs
)( DRM_IOCTL_ARGS
)
834 drm_device_dma_t
*dma
= dev
->dma
;
835 drm_buf_desc_t request
;
837 drm_buf_entry_t
*entry
;
839 if ( !dma
) return DRM_ERR(EINVAL
);
841 DRM_COPY_FROM_USER_IOCTL( request
, (drm_buf_desc_t
*)data
, sizeof(request
) );
843 DRM_DEBUG( "%d, %d, %d\n",
844 request
.size
, request
.low_mark
, request
.high_mark
);
845 order
= DRM(order
)( request
.size
);
846 if ( order
< DRM_MIN_ORDER
|| order
> DRM_MAX_ORDER
)
847 return DRM_ERR(EINVAL
);
848 entry
= &dma
->bufs
[order
];
850 if ( request
.low_mark
< 0 || request
.low_mark
> entry
->buf_count
)
851 return DRM_ERR(EINVAL
);
852 if ( request
.high_mark
< 0 || request
.high_mark
> entry
->buf_count
)
853 return DRM_ERR(EINVAL
);
855 entry
->freelist
.low_mark
= request
.low_mark
;
856 entry
->freelist
.high_mark
= request
.high_mark
;
861 int DRM(freebufs
)( DRM_IOCTL_ARGS
)
864 drm_device_dma_t
*dma
= dev
->dma
;
865 drm_buf_free_t request
;
870 if ( !dma
) return DRM_ERR(EINVAL
);
872 DRM_COPY_FROM_USER_IOCTL( request
, (drm_buf_free_t
*)data
, sizeof(request
) );
874 DRM_DEBUG( "%d\n", request
.count
);
875 for ( i
= 0 ; i
< request
.count
; i
++ ) {
876 if ( DRM_COPY_FROM_USER( &idx
,
879 return DRM_ERR(EFAULT
);
880 if ( idx
< 0 || idx
>= dma
->buf_count
) {
881 DRM_ERROR( "Index %d (of %d max)\n",
882 idx
, dma
->buf_count
- 1 );
883 return DRM_ERR(EINVAL
);
885 buf
= dma
->buflist
[idx
];
886 if ( buf
->filp
!= filp
) {
887 DRM_ERROR("Process %d freeing buffer not owned\n",
889 return DRM_ERR(EINVAL
);
891 DRM(free_buffer
)( dev
, buf
);
897 int DRM(mapbufs
)( DRM_IOCTL_ARGS
)
900 drm_device_dma_t
*dma
= dev
->dma
;
903 vm_offset_t
virtual, address
;
904 #if defined(__DragonFly__) || defined(__FreeBSD__)
905 struct vmspace
*vms
= p
->td_proc
->p_vmspace
;
906 #endif /* __FreeBSD__ */
909 struct vmspace
*vms
= p
->p_vmspace
;
910 #endif /* __NetBSD__ */
912 drm_buf_map_t request
;
915 if ( !dma
) return DRM_ERR(EINVAL
);
917 DRM_SPINLOCK( &dev
->count_lock
);
918 if (dev
->buf_alloc
!= 0) {
919 DRM_SPINUNLOCK( &dev
->count_lock
);
920 return DRM_ERR(EBUSY
);
922 dev
->buf_use
++; /* Can't allocate more after this call */
923 DRM_SPINUNLOCK( &dev
->count_lock
);
925 DRM_COPY_FROM_USER_IOCTL( request
, (drm_buf_map_t
*)data
, sizeof(request
) );
928 if(!vfinddev(kdev
, VCHR
, &vn
))
929 return 0; /* FIXME: Shouldn't this be EINVAL or something? */
930 #endif /* __NetBSD__ */
932 if ( request
.count
>= dma
->buf_count
) {
933 if ( (__HAVE_AGP
&& (dma
->flags
& _DRM_DMA_USE_AGP
)) ||
934 (__HAVE_SG
&& (dma
->flags
& _DRM_DMA_USE_SG
)) ) {
935 drm_local_map_t
*map
= DRIVER_AGP_BUFFERS_MAP( dev
);
942 #if defined(__DragonFly__) || defined(__FreeBSD__)
943 virtual = round_page((vm_offset_t
)vms
->vm_daddr
+ MAXDSIZ
);
944 retcode
= vm_mmap(&vms
->vm_map
,
946 round_page(map
->size
),
947 PROT_READ
|PROT_WRITE
, VM_PROT_ALL
,
949 SLIST_FIRST(&kdev
->si_hlist
),
950 (unsigned long)map
->offset
);
951 #elif defined(__NetBSD__)
952 virtual = round_page((vaddr_t
)vms
->vm_daddr
+ MAXDSIZ
);
953 retcode
= uvm_mmap(&vms
->vm_map
,
955 round_page(map
->size
),
956 UVM_PROT_READ
| UVM_PROT_WRITE
,
957 UVM_PROT_ALL
, MAP_SHARED
,
958 &vn
->v_uobj
, map
->offset
,
959 p
->p_rlimit
[RLIMIT_MEMLOCK
].rlim_cur
);
960 #endif /* __NetBSD__ */
962 #if defined(__DragonFly__) || defined(__FreeBSD__)
963 virtual = round_page((vm_offset_t
)vms
->vm_daddr
+ MAXDSIZ
);
964 retcode
= vm_mmap(&vms
->vm_map
,
966 round_page(dma
->byte_count
),
967 PROT_READ
|PROT_WRITE
, VM_PROT_ALL
,
969 SLIST_FIRST(&kdev
->si_hlist
),
971 #elif defined(__NetBSD__)
972 virtual = round_page((vaddr_t
)vms
->vm_daddr
+ MAXDSIZ
);
973 retcode
= uvm_mmap(&vms
->vm_map
,
975 round_page(dma
->byte_count
),
976 UVM_PROT_READ
| UVM_PROT_WRITE
,
977 UVM_PROT_ALL
, MAP_SHARED
,
979 p
->p_rlimit
[RLIMIT_MEMLOCK
].rlim_cur
);
980 #endif /* __NetBSD__ */
984 request
.virtual = (void *)virtual;
986 for ( i
= 0 ; i
< dma
->buf_count
; i
++ ) {
987 if ( DRM_COPY_TO_USER( &request
.list
[i
].idx
,
988 &dma
->buflist
[i
]->idx
,
989 sizeof(request
.list
[0].idx
) ) ) {
993 if ( DRM_COPY_TO_USER( &request
.list
[i
].total
,
994 &dma
->buflist
[i
]->total
,
995 sizeof(request
.list
[0].total
) ) ) {
999 if ( DRM_COPY_TO_USER( &request
.list
[i
].used
,
1005 address
= virtual + dma
->buflist
[i
]->offset
; /* *** */
1006 if ( DRM_COPY_TO_USER( &request
.list
[i
].address
,
1008 sizeof(address
) ) ) {
1015 request
.count
= dma
->buf_count
;
1017 DRM_DEBUG( "%d buffers, retcode = %d\n", request
.count
, retcode
);
1019 DRM_COPY_TO_USER_IOCTL( (drm_buf_map_t
*)data
, request
, sizeof(request
) );
1021 return DRM_ERR(retcode
);
1024 #endif /* __HAVE_DMA */