2 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
27 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.19 2007/06/03 11:47:10 dillon Exp $
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
35 #include <sys/thread2.h>
36 #include <sys/bus_dma.h>
39 #include <vm/vm_page.h>
41 /* XXX needed for to access pmap to convert per-proc virtual to physical */
44 #include <vm/vm_map.h>
46 #include <machine/md_var.h>
48 #define MAX_BPAGES 128
56 bus_dma_filter_t
*filter
;
64 bus_dma_segment_t
*segments
;
68 vm_offset_t vaddr
; /* kva of bounce buffer */
69 bus_addr_t busaddr
; /* Physical address */
70 vm_offset_t datavaddr
; /* kva of client data */
71 bus_size_t datacount
; /* client data count */
72 STAILQ_ENTRY(bounce_page
) links
;
/* Set to 1 when a map gains its reserved pages and its deferred callback
 * must be run from the busdma software interrupt (see free_bounce_page). */
75 int busdma_swi_pending
;
/* List of free bounce pages available for reservation. */
77 static STAILQ_HEAD(bp_list
, bounce_page
) bounce_page_list
;
/* Pool accounting: free + reserved + active partitions total_bpages. */
78 static int free_bpages
;
79 static int reserved_bpages
;
80 static int active_bpages
;
81 static int total_bpages
;
/* Lowest "lowaddr" restriction the current pool satisfies; starts
 * unconstrained and is compared against new tags/maps before reuse. */
82 static bus_addr_t bounce_lowaddr
= BUS_SPACE_MAXADDR
;
85 struct bp_list bpages
;
89 void *buf
; /* unmapped buffer pointer */
90 bus_size_t buflen
; /* unmapped buffer length */
91 bus_dmamap_callback_t
*callback
;
93 STAILQ_ENTRY(bus_dmamap
) links
;
/* Maps waiting for bounce pages to become free (queued by
 * bus_dmamap_load when reserve_bounce_pages fails). */
96 static STAILQ_HEAD(, bus_dmamap
) bounce_map_waitinglist
;
/* Maps whose pages were granted; their callbacks run from the swi. */
97 static STAILQ_HEAD(, bus_dmamap
) bounce_map_callbacklist
;
/* Shared dummy map used when the caller passes a NULL map and no
 * bouncing is required (see bus_dmamap_load). */
98 static struct bus_dmamap nobounce_dmamap
;
/* Forward declarations for the private bounce-page pool helpers below. */
100 static int alloc_bounce_pages(bus_dma_tag_t dmat
, u_int numpages
);
101 static int reserve_bounce_pages(bus_dma_tag_t dmat
, bus_dmamap_t map
);
102 static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat
, bus_dmamap_t map
,
103 vm_offset_t vaddr
, bus_size_t size
);
104 static void free_bounce_page(bus_dma_tag_t dmat
, struct bounce_page
*bpage
);
105 static __inline
int run_filter(bus_dma_tag_t dmat
, bus_addr_t paddr
);
/*
 * run_filter() - decide whether a physical address must be bounced for
 * the given tag.  A page needs bouncing when it lies in the tag's
 * excluded window (above lowaddr, at or below highaddr) and the tag's
 * optional filter callback does not exempt it.  The do/while tail below
 * shows the test is repeated up the tag's parent chain.
 * NOTE(review): several interior lines of this function are missing from
 * this chunk (retval setup, loop head, return); code left byte-identical.
 */
108 run_filter(bus_dma_tag_t dmat
, bus_addr_t paddr
)
114 if (paddr
> dmat
->lowaddr
115 && paddr
<= dmat
->highaddr
116 && (dmat
->filter
== NULL
/* filter returning nonzero means "do bounce" for this paddr */
117 || (*dmat
->filter
)(dmat
->filterarg
, paddr
) != 0))
/* Walk up the inheritance chain until a verdict or the root tag. */
121 } while (retval
== 0 && dmat
!= NULL
);
/* Private tag flag (aliasing BUS_DMA_BUS4): the tag's minimum bounce-page
 * allocation has been completed (set in bus_dma_tag_create and
 * bus_dmamap_create once enough pages exist). */
125 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
127 * Allocate a device specific dma_tag.
/*
 * bus_dma_tag_create() - allocate and initialize a device-specific DMA
 * tag describing the device's addressing restrictions (alignment,
 * boundary, low/high address window, filter, segment limits).  Inherits
 * tighter restrictions from `parent` and, when BUS_DMA_ALLOCNOW is set
 * and physical memory extends above lowaddr, pre-allocates bounce pages.
 * NOTE(review): interior lines (NULL checks, error paths, final return)
 * are missing from this chunk; code left byte-identical.
 */
130 bus_dma_tag_create(bus_dma_tag_t parent
, bus_size_t alignment
,
131 bus_size_t boundary
, bus_addr_t lowaddr
,
132 bus_addr_t highaddr
, bus_dma_filter_t
*filter
,
133 void *filterarg
, bus_size_t maxsize
, int nsegments
,
134 bus_size_t maxsegsz
, int flags
, bus_dma_tag_t
*dmat
)
136 bus_dma_tag_t newtag
;
139 /* Return a NULL tag on failure */
142 newtag
= kmalloc(sizeof(*newtag
), M_DEVBUF
, M_INTWAIT
);
/* Copy the caller's constraints into the new tag. */
144 newtag
->parent
= parent
;
145 newtag
->alignment
= alignment
;
146 newtag
->boundary
= boundary
;
/* Round the address limits up to the last byte of their page. */
147 newtag
->lowaddr
= trunc_page((vm_paddr_t
)lowaddr
) + (PAGE_SIZE
- 1);
148 newtag
->highaddr
= trunc_page((vm_paddr_t
)highaddr
) + (PAGE_SIZE
- 1);
149 newtag
->filter
= filter
;
150 newtag
->filterarg
= filterarg
;
151 newtag
->maxsize
= maxsize
;
152 newtag
->nsegments
= nsegments
;
153 newtag
->maxsegsz
= maxsegsz
;
154 newtag
->flags
= flags
;
155 newtag
->ref_count
= 1; /* Count ourself */
156 newtag
->map_count
= 0;
/* Segment array is allocated lazily in bus_dmamap_create/dmamem_alloc. */
157 newtag
->segments
= NULL
;
159 /* Take into account any restrictions imposed by our parent tag */
160 if (parent
!= NULL
) {
161 newtag
->lowaddr
= MIN(parent
->lowaddr
, newtag
->lowaddr
);
162 newtag
->highaddr
= MAX(parent
->highaddr
, newtag
->highaddr
);
164 * XXX Not really correct??? Probably need to honor boundary
165 * all the way up the inheritence chain.
167 newtag
->boundary
= MAX(parent
->boundary
, newtag
->boundary
);
168 if (newtag
->filter
== NULL
) {
170 * Short circuit looking at our parent directly
171 * since we have encapsulated all of its information
173 newtag
->filter
= parent
->filter
;
174 newtag
->filterarg
= parent
->filterarg
;
175 newtag
->parent
= parent
->parent
;
177 if (newtag
->parent
!= NULL
) {
/* Bounce pages are only needed when RAM exists above lowaddr and the
 * caller asked for an up-front allocation. */
182 if (newtag
->lowaddr
< ptoa(Maxmem
) &&
183 (flags
& BUS_DMA_ALLOCNOW
) != 0) {
186 if (lowaddr
> bounce_lowaddr
) {
188 * Go through the pool and kill any pages
189 * that don't reside below lowaddr.
191 panic("bus_dma_tag_create: page reallocation "
194 if (ptoa(total_bpages
) < maxsize
) {
197 pages
= atop(maxsize
) - total_bpages
;
199 /* Add pages to our bounce pool */
200 if (alloc_bounce_pages(newtag
, pages
) < pages
)
203 /* Performed initial allocation */
204 newtag
->flags
|= BUS_DMA_MIN_ALLOC_COMP
;
/* Error path: release the partially built tag. */
208 kfree(newtag
, M_DEVBUF
);
/*
 * bus_dma_tag_destroy() - drop a reference on a tag and free it (and its
 * segment array) once its reference count reaches zero, walking up the
 * parent chain to release inherited references.  Fails while the tag
 * still has live maps (map_count != 0).
 * NOTE(review): the refcount decrement and return statements are among
 * the lines missing from this chunk; code left byte-identical.
 */
216 bus_dma_tag_destroy(bus_dma_tag_t dmat
)
/* Refuse to destroy a tag with outstanding maps. */
220 if (dmat
->map_count
!= 0)
223 while (dmat
!= NULL
) {
224 bus_dma_tag_t parent
;
226 parent
= dmat
->parent
;
228 if (dmat
->ref_count
== 0) {
229 if (dmat
->segments
!= NULL
)
230 kfree(dmat
->segments
, M_DEVBUF
);
231 kfree(dmat
, M_DEVBUF
);
233 * Last reference count, so
234 * release our reference
235 * count on our parent.
246 * Allocate a handle for mapping from kva/uva/physical
247 * address space into bus device space.
/*
 * bus_dmamap_create() - allocate a DMA map handle for the tag.  Lazily
 * allocates the tag's segment array on first use.  When the tag's
 * lowaddr excludes part of RAM, a real map structure is allocated and
 * the bounce-page pool is grown toward a per-tag limit (MAX_BPAGES).
 * NOTE(review): the no-bounce fast path and return statements are among
 * the lines missing from this chunk; code left byte-identical.
 */
250 bus_dmamap_create(bus_dma_tag_t dmat
, int flags
, bus_dmamap_t
*mapp
)
/* First map for this tag: allocate the shared segment array. */
256 if (dmat
->segments
== NULL
) {
257 KKASSERT(dmat
->nsegments
&& dmat
->nsegments
< 16384);
258 dmat
->segments
= kmalloc(sizeof(bus_dma_segment_t
) *
259 dmat
->nsegments
, M_DEVBUF
, M_INTWAIT
);
/* Tag may require bouncing: build a real map with a bounce-page list. */
262 if (dmat
->lowaddr
< ptoa(Maxmem
)) {
266 *mapp
= kmalloc(sizeof(**mapp
), M_DEVBUF
, M_INTWAIT
);
270 /* Initialize the new map */
271 bzero(*mapp
, sizeof(**mapp
));
272 STAILQ_INIT(&((*mapp
)->bpages
));
275 * Attempt to add pages to our pool on a per-instance
276 * basis up to a sane limit.
278 maxpages
= MIN(MAX_BPAGES
, Maxmem
- atop(dmat
->lowaddr
));
279 if ((dmat
->flags
& BUS_DMA_MIN_ALLOC_COMP
) == 0
280 || (dmat
->map_count
> 0
281 && total_bpages
< maxpages
)) {
/* Existing pool must already satisfy this tag's lowaddr. */
284 if (dmat
->lowaddr
> bounce_lowaddr
) {
286 * Go through the pool and kill any pages
287 * that don't reside below lowaddr.
289 panic("bus_dmamap_create: page reallocation "
292 pages
= atop(dmat
->maxsize
);
293 pages
= MIN(maxpages
- total_bpages
, pages
);
294 error
= alloc_bounce_pages(dmat
, pages
);
296 if ((dmat
->flags
& BUS_DMA_MIN_ALLOC_COMP
) == 0) {
298 dmat
->flags
|= BUS_DMA_MIN_ALLOC_COMP
;
312 * Destroy a handle for mapping from kva/uva/physical
313 * address space into bus device space.
/*
 * bus_dmamap_destroy() - release a map handle.  A map still holding
 * bounce pages cannot be destroyed (the visible check below guards the
 * kfree).  NOTE(review): the return statements are missing from this
 * chunk; code left byte-identical.
 */
316 bus_dmamap_destroy(bus_dma_tag_t dmat
, bus_dmamap_t map
)
319 if (STAILQ_FIRST(&map
->bpages
) != NULL
)
321 kfree(map
, M_DEVBUF
);
329 * Allocate a piece of memory that can be efficiently mapped into
330 * bus device space based on the constraints lited in the dma tag.
332 * mapp is degenerate. By definition this allocation should not require
333 * bounce buffers so do not allocate a dma map.
/*
 * bus_dmamem_alloc() - allocate memory suited to the tag's constraints.
 * Small (<= PAGE_SIZE) unconstrained allocations come from kmalloc, with
 * a power-of-2 retry if the result straddles a page boundary; everything
 * else falls back to contigmalloc honoring lowaddr and alignment.
 * No dmamap is produced: by construction the memory needs no bouncing.
 * NOTE(review): mflags setup and return paths are among the lines
 * missing from this chunk; code left byte-identical.
 */
336 bus_dmamem_alloc(bus_dma_tag_t dmat
, void** vaddr
, int flags
,
340 /* If we succeed, no mapping/bouncing will be required */
343 if (dmat
->segments
== NULL
) {
344 KKASSERT(dmat
->nsegments
< 16384);
345 dmat
->segments
= kmalloc(sizeof(bus_dma_segment_t
) *
346 dmat
->nsegments
, M_DEVBUF
, M_INTWAIT
);
/* Translate caller flags into kmalloc flags (lines partly missing). */
349 if (flags
& BUS_DMA_NOWAIT
)
353 if (flags
& BUS_DMA_ZERO
)
/* Fast path: one page or less and no address restriction below RAM top. */
356 if ((dmat
->maxsize
<= PAGE_SIZE
) &&
357 dmat
->lowaddr
>= ptoa(Maxmem
)) {
358 *vaddr
= kmalloc(dmat
->maxsize
, M_DEVBUF
, mflags
);
360 * XXX Check whether the allocation crossed a page boundary
361 * and retry with power-of-2 alignment in that case.
363 if ((((intptr_t)*vaddr
) & PAGE_MASK
) !=
364 (((intptr_t)*vaddr
+ dmat
->maxsize
) & PAGE_MASK
)) {
366 kfree(*vaddr
, M_DEVBUF
);
367 /* XXX check for overflow? */
368 for (size
= 1; size
<= dmat
->maxsize
; size
<<= 1)
370 *vaddr
= kmalloc(size
, M_DEVBUF
, mflags
);
374 * XXX Use Contigmalloc until it is merged into this facility
375 * and handles multi-seg allocations. Nobody is doing
376 * multi-seg allocations yet though.
378 *vaddr
= contigmalloc(dmat
->maxsize
, M_DEVBUF
, mflags
,
379 0ul, dmat
->lowaddr
, dmat
->alignment
? dmat
->alignment
: 1ul,
388 * Free a piece of memory and it's allociated dmamap, that was allocated
389 * via bus_dmamem_alloc. Make the same choice for free/contigfree.
/*
 * bus_dmamem_free() - release memory from bus_dmamem_alloc, using the
 * same size/lowaddr test to pick kfree vs. contigfree so the free path
 * mirrors the allocation path exactly.
 */
392 bus_dmamem_free(bus_dma_tag_t dmat
, void *vaddr
, bus_dmamap_t map
)
395 * dmamem does not need to be bounced, so the map should be
399 panic("bus_dmamem_free: Invalid map freed\n");
/* Must match the allocator choice made in bus_dmamem_alloc. */
400 if ((dmat
->maxsize
<= PAGE_SIZE
) &&
401 dmat
->lowaddr
>= ptoa(Maxmem
))
402 kfree(vaddr
, M_DEVBUF
);
404 contigfree(vaddr
, dmat
->maxsize
, M_DEVBUF
);
/* Upper bound on segments for a maximal transfer: one per page plus one
 * for a possible unaligned head. */
407 #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
410 * Map the buffer buf into bus space using the dmamap map.
/*
 * bus_dmamap_load() - map a kernel-virtual buffer for DMA and invoke
 * `callback` with the resulting segment list.  Counts bounce pages
 * needed, reserves them (queueing the map and returning EINPROGRESS if
 * the pool is exhausted), then walks the buffer page by page building
 * bus_dma_segments, substituting bounce pages where run_filter demands
 * and splitting segments on maxsegsz/boundary limits.
 * NOTE(review): many interior lines (loop heads, segment-advance code,
 * final return) are missing from this chunk; code left byte-identical.
 */
413 bus_dmamap_load(bus_dma_tag_t dmat
, bus_dmamap_t map
, void *buf
,
414 bus_size_t buflen
, bus_dmamap_callback_t
*callback
,
415 void *callback_arg
, int flags
)
419 bus_dma_segment_t
*sg
;
422 vm_paddr_t nextpaddr
;
/* NULL map from caller: use the shared no-bounce dummy map. */
426 map
= &nobounce_dmamap
;
430 * If we are being called during a callback, pagesneeded will
431 * be non-zero, so we can avoid doing the work twice.
433 if (dmat
->lowaddr
< ptoa(Maxmem
) &&
434 map
->pagesneeded
== 0) {
435 vm_offset_t vendaddr
;
438 * Count the number of bounce pages
439 * needed in order to complete this transfer
441 vaddr
= trunc_page((vm_offset_t
)buf
);
442 vendaddr
= (vm_offset_t
)buf
+ buflen
;
444 while (vaddr
< vendaddr
) {
445 paddr
= pmap_kextract(vaddr
);
446 if (run_filter(dmat
, paddr
) != 0) {
454 /* Reserve Necessary Bounce Pages */
455 if (map
->pagesneeded
!= 0) {
457 if (reserve_bounce_pages(dmat
, map
) != 0) {
459 /* Queue us for resources */
/* Stash the load request so the swi can replay it later. */
462 map
->buflen
= buflen
;
463 map
->callback
= callback
;
464 map
->callback_arg
= callback_arg
;
466 STAILQ_INSERT_TAIL(&bounce_map_waitinglist
, map
, links
);
469 return (EINPROGRESS
);
/* Begin the actual segment-building pass over the buffer. */
474 vaddr
= (vm_offset_t
)buf
;
479 bmask
= ~(dmat
->boundary
- 1); /* note: will be 0 if boundary is 0 */
481 /* force at least one segment */
488 paddr
= pmap_kextract(vaddr
);
/* Chunk size: remainder of the current physical page. */
489 size
= PAGE_SIZE
- (paddr
& PAGE_MASK
);
492 if (map
->pagesneeded
!= 0 && run_filter(dmat
, paddr
)) {
494 * note: this paddr has the same in-page offset
495 * as vaddr and thus the paddr above, so the
496 * size does not have to be recalculated
498 paddr
= add_bounce_page(dmat
, map
, vaddr
, size
);
502 * Fill in the bus_dma_segment
504 if (sg
->ds_len
== 0) {
/* Physically contiguous with the previous chunk: coalesce. */
507 } else if (paddr
== nextpaddr
) {
512 if (seg
> dmat
->nsegments
)
517 nextpaddr
= paddr
+ size
;
520 * Handle maxsegsz and boundary issues with a nested loop
526 * Limit to the boundary and maximum segment size
528 if ((nextpaddr
^ sg
->ds_addr
) & bmask
) {
529 tmpsize
= dmat
->boundary
-
530 (sg
->ds_addr
& ~bmask
);
531 if (tmpsize
> dmat
->maxsegsz
)
532 tmpsize
= dmat
->maxsegsz
;
533 KKASSERT(tmpsize
< sg
->ds_len
);
534 } else if (sg
->ds_len
> dmat
->maxsegsz
) {
535 tmpsize
= dmat
->maxsegsz
;
541 * Futz, split the data into a new segment.
543 if (seg
>= dmat
->nsegments
)
545 sg
[1].ds_len
= sg
[0].ds_len
- tmpsize
;
546 sg
[1].ds_addr
= sg
[0].ds_addr
+ tmpsize
;
547 sg
[0].ds_len
= tmpsize
;
557 } while (buflen
> 0);
561 kprintf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
/* Hand the finished segment list to the caller's callback. */
566 (*callback
)(callback_arg
, dmat
->segments
, seg
, error
);
572 * Utility function to load a linear buffer. lastaddrp holds state
573 * between invocations (for multiple-buffer loads). segp contains
574 * the starting segment on entrace, and the ending segment on exit.
575 * first indicates if this is the first invocation of this function.
/*
 * _bus_dmamap_load_buffer() - utility to load one linear buffer into the
 * tag's segment array, resumable across calls: *lastaddrp and *segp carry
 * the previous ending address and segment index so multi-buffer loads
 * (mbufs, uios) can coalesce across invocations.  Uses the process pmap
 * for user addresses (td->td_proc) and pmap_kextract otherwise.
 * Returns EFBIG if the buffer did not fit in nsegments segments.
 * NOTE(review): the `first` handling and loop-advance lines are missing
 * from this chunk; code left byte-identical.
 */
578 _bus_dmamap_load_buffer(bus_dma_tag_t dmat
,
579 void *buf
, bus_size_t buflen
,
582 vm_offset_t
*lastaddrp
,
586 bus_dma_segment_t
*segs
;
588 bus_addr_t curaddr
, lastaddr
, baddr
, bmask
;
589 vm_offset_t vaddr
= (vm_offset_t
)buf
;
/* User-space buffer: translate through the owning process's pmap. */
593 if (td
->td_proc
!= NULL
)
594 pmap
= vmspace_pmap(td
->td_proc
->p_vmspace
);
598 segs
= dmat
->segments
;
599 lastaddr
= *lastaddrp
;
600 bmask
= ~(dmat
->boundary
- 1);
602 for (seg
= *segp
; buflen
> 0 ; ) {
604 * Get the physical address for this segment.
607 curaddr
= pmap_extract(pmap
, vaddr
);
609 curaddr
= pmap_kextract(vaddr
);
612 * Compute the segment size, and adjust counts.
614 sgsize
= PAGE_SIZE
- ((u_long
)curaddr
& PAGE_MASK
);
619 * Make sure we don't cross any boundaries.
621 if (dmat
->boundary
> 0) {
/* Clip sgsize so the chunk stays inside its boundary window. */
622 baddr
= (curaddr
+ dmat
->boundary
) & bmask
;
623 if (sgsize
> (baddr
- curaddr
))
624 sgsize
= (baddr
- curaddr
);
628 * Insert chunk into a segment, coalescing with
629 * previous segment if possible.
632 segs
[seg
].ds_addr
= curaddr
;
633 segs
[seg
].ds_len
= sgsize
;
/* Contiguous with the previous chunk, within maxsegsz, and in the
 * same boundary window: extend the current segment. */
636 if (curaddr
== lastaddr
&&
637 (segs
[seg
].ds_len
+ sgsize
) <= dmat
->maxsegsz
&&
638 (dmat
->boundary
== 0 ||
639 (segs
[seg
].ds_addr
& bmask
) == (curaddr
& bmask
)))
640 segs
[seg
].ds_len
+= sgsize
;
642 if (++seg
>= dmat
->nsegments
)
644 segs
[seg
].ds_addr
= curaddr
;
645 segs
[seg
].ds_len
= sgsize
;
649 lastaddr
= curaddr
+ sgsize
;
/* Persist resume state for the next buffer in the chain. */
655 *lastaddrp
= lastaddr
;
660 return (buflen
!= 0 ? EFBIG
: 0); /* XXX better return value here? */
664 * Like _bus_dmamap_load(), but for mbufs.
/*
 * bus_dmamap_load_mbuf() - like bus_dmamap_load() but walks an mbuf
 * chain, loading each mbuf through _bus_dmamap_load_buffer with shared
 * lastaddr/nsegs state so segments coalesce across mbufs.  Bounce pages
 * are not supported here (asserted below).  The 2-arg callback receives
 * either (segments, 0, 0, error) on failure or the full segment count
 * and packet length on success.
 * NOTE(review): nsegs initialization and the oversize-packet error path
 * are among the lines missing from this chunk; code left byte-identical.
 */
667 bus_dmamap_load_mbuf(bus_dma_tag_t dmat
, bus_dmamap_t map
,
669 bus_dmamap_callback2_t
*callback
, void *callback_arg
,
674 KASSERT(dmat
->lowaddr
>= ptoa(Maxmem
) || map
!= NULL
,
675 ("bus_dmamap_load_mbuf: No support for bounce pages!"));
676 KASSERT(m0
->m_flags
& M_PKTHDR
,
677 ("bus_dmamap_load_mbuf: no packet header"));
/* Whole packet must fit within the tag's maximum transfer size. */
681 if (m0
->m_pkthdr
.len
<= dmat
->maxsize
) {
683 vm_offset_t lastaddr
= 0;
686 for (m
= m0
; m
!= NULL
&& error
== 0; m
= m
->m_next
) {
689 error
= _bus_dmamap_load_buffer(dmat
,
691 curthread
, flags
, &lastaddr
,
700 /* force "no valid mappings" in callback */
701 (*callback
)(callback_arg
, dmat
->segments
, 0, 0, error
);
703 (*callback
)(callback_arg
, dmat
->segments
,
704 nsegs
+1, m0
->m_pkthdr
.len
, error
);
710 * Like _bus_dmamap_load(), but for uios.
/*
 * bus_dmamap_load_uio() - like bus_dmamap_load() but walks a uio's iovec
 * array, loading each iovec (clipped to the remaining residual) through
 * _bus_dmamap_load_buffer.  For UIO_USERSPACE the owning thread must
 * have a process so user addresses can be translated.  Bounce pages are
 * not supported (asserted below).
 * NOTE(review): nsegs/error/first initialization and the td assignment
 * are among the lines missing from this chunk; code left byte-identical.
 */
713 bus_dmamap_load_uio(bus_dma_tag_t dmat
, bus_dmamap_t map
,
715 bus_dmamap_callback2_t
*callback
, void *callback_arg
,
718 vm_offset_t lastaddr
;
719 int nsegs
, error
, first
, i
;
722 struct thread
*td
= NULL
;
724 KASSERT(dmat
->lowaddr
>= ptoa(Maxmem
) || map
!= NULL
,
725 ("bus_dmamap_load_uio: No support for bounce pages!"));
727 resid
= uio
->uio_resid
;
730 if (uio
->uio_segflg
== UIO_USERSPACE
) {
732 KASSERT(td
!= NULL
&& td
->td_proc
!= NULL
,
733 ("bus_dmamap_load_uio: USERSPACE but no proc"));
739 for (i
= 0; i
< uio
->uio_iovcnt
&& resid
!= 0 && !error
; i
++) {
741 * Now at the first iovec to load. Load each iovec
742 * until we have exhausted the residual count.
/* Clip this iovec to the remaining residual byte count. */
745 resid
< iov
[i
].iov_len
? resid
: iov
[i
].iov_len
;
746 caddr_t addr
= (caddr_t
) iov
[i
].iov_base
;
748 error
= _bus_dmamap_load_buffer(dmat
,
750 td
, flags
, &lastaddr
, &nsegs
, first
);
757 /* force "no valid mappings" in callback */
758 (*callback
)(callback_arg
, dmat
->segments
, 0, 0, error
);
760 (*callback
)(callback_arg
, dmat
->segments
,
761 nsegs
+1, uio
->uio_resid
, error
);
767 * Release the mapping held by map.
/*
 * _bus_dmamap_unload() - release the mapping held by `map`: return every
 * bounce page on the map's list to the free pool (which may also wake a
 * waiting map — see free_bounce_page).
 */
770 _bus_dmamap_unload(bus_dma_tag_t dmat
, bus_dmamap_t map
)
772 struct bounce_page
*bpage
;
774 while ((bpage
= STAILQ_FIRST(&map
->bpages
)) != NULL
) {
775 STAILQ_REMOVE_HEAD(&map
->bpages
, links
);
776 free_bounce_page(dmat
, bpage
);
/*
 * _bus_dmamap_sync() - perform bounce-buffer data movement for the given
 * sync operation.  PREWRITE copies client data into the bounce pages
 * (before the device reads them); POSTREAD copies device-written bounce
 * data back to the client (after the device wrote them).  PREREAD and
 * POSTWRITE need no copying.  No-op when the map holds no bounce pages.
 * NOTE(review): the switch head, bcopy length arguments, and statistics
 * lines are missing from this chunk; code left byte-identical.
 */
781 _bus_dmamap_sync(bus_dma_tag_t dmat
, bus_dmamap_t map
, bus_dmasync_op_t op
)
783 struct bounce_page
*bpage
;
785 if ((bpage
= STAILQ_FIRST(&map
->bpages
)) != NULL
) {
788 * Handle data bouncing. We might also
789 * want to add support for invalidating
790 * the caches on broken hardware
793 case BUS_DMASYNC_PREWRITE
:
/* Copy client data -> bounce buffers before the device reads. */
794 while (bpage
!= NULL
) {
795 bcopy((void *)bpage
->datavaddr
,
796 (void *)bpage
->vaddr
,
798 bpage
= STAILQ_NEXT(bpage
, links
);
802 case BUS_DMASYNC_POSTREAD
:
/* Copy bounce buffers -> client data after the device wrote. */
803 while (bpage
!= NULL
) {
804 bcopy((void *)bpage
->vaddr
,
805 (void *)bpage
->datavaddr
,
807 bpage
= STAILQ_NEXT(bpage
, links
);
810 case BUS_DMASYNC_PREREAD
:
811 case BUS_DMASYNC_POSTWRITE
:
/*
 * alloc_bounce_pages() - grow the global bounce-page pool by up to
 * `numpages` pages.  Initializes the global lists on first use.  Each
 * page's storage comes from contigmalloc (so it satisfies the tag's
 * address limits) and its physical address is cached in bp->busaddr.
 * A failed contigmalloc frees the bookkeeping struct and stops early.
 * NOTE(review): the count/return and loop-decrement lines are missing
 * from this chunk; code left byte-identical.
 */
819 alloc_bounce_pages(bus_dma_tag_t dmat
, u_int numpages
)
/* First allocation ever: set up the global queues. */
824 if (total_bpages
== 0) {
825 STAILQ_INIT(&bounce_page_list
);
826 STAILQ_INIT(&bounce_map_waitinglist
);
827 STAILQ_INIT(&bounce_map_callbacklist
);
830 while (numpages
> 0) {
831 struct bounce_page
*bpage
;
833 bpage
= (struct bounce_page
*)kmalloc(sizeof(*bpage
), M_DEVBUF
,
838 bzero(bpage
, sizeof(*bpage
));
839 bpage
->vaddr
= (vm_offset_t
)contigmalloc(PAGE_SIZE
, M_DEVBUF
,
844 if (bpage
->vaddr
== NULL
) {
845 kfree(bpage
, M_DEVBUF
);
/* Cache the page's physical address once, at allocation time. */
848 bpage
->busaddr
= pmap_kextract(bpage
->vaddr
);
850 STAILQ_INSERT_TAIL(&bounce_page_list
, bpage
, links
);
/*
 * reserve_bounce_pages() - move as many free pages as possible into the
 * map's reservation (pagesreserved), updating the pool counters.  The
 * final `pages` value is the shortfall still outstanding; visible logic
 * indicates a nonzero shortfall means the caller must wait (compare
 * bus_dmamap_load's EINPROGRESS path).  NOTE(review): the return line is
 * missing from this chunk; code left byte-identical.
 */
861 reserve_bounce_pages(bus_dma_tag_t dmat
, bus_dmamap_t map
)
865 pages
= MIN(free_bpages
, map
->pagesneeded
- map
->pagesreserved
);
866 free_bpages
-= pages
;
867 reserved_bpages
+= pages
;
868 map
->pagesreserved
+= pages
;
/* Remaining shortfall after taking everything available. */
869 pages
= map
->pagesneeded
- map
->pagesreserved
;
/*
 * add_bounce_page() - take one previously-reserved page off the free
 * list, associate it with the client data at `vaddr` (length `size`),
 * queue it on the map, and return the page's bus/physical address for
 * use in the segment list.  Panics on accounting violations (no pages
 * needed/reserved, or an unexpectedly empty free list).
 * NOTE(review): counter updates between the visible lines are missing
 * from this chunk; code left byte-identical.
 */
875 add_bounce_page(bus_dma_tag_t dmat
, bus_dmamap_t map
, vm_offset_t vaddr
,
878 struct bounce_page
*bpage
;
880 if (map
->pagesneeded
== 0)
881 panic("add_bounce_page: map doesn't need any pages");
884 if (map
->pagesreserved
== 0)
885 panic("add_bounce_page: map doesn't need any pages");
/* Consume one unit of this map's reservation. */
886 map
->pagesreserved
--;
889 bpage
= STAILQ_FIRST(&bounce_page_list
);
891 panic("add_bounce_page: free page list is empty");
893 STAILQ_REMOVE_HEAD(&bounce_page_list
, links
);
/* Record where the client's data lives so _bus_dmamap_sync can copy. */
898 bpage
->datavaddr
= vaddr
;
899 bpage
->datacount
= size
;
900 STAILQ_INSERT_TAIL(&(map
->bpages
), bpage
, links
);
901 return (bpage
->busaddr
);
/*
 * free_bounce_page() - return a bounce page to the free pool and, if a
 * map is waiting for pages, try to complete its reservation; on success
 * move it to the callback list and flag the busdma software interrupt
 * (busdma_swi_pending) to replay its deferred load.
 * NOTE(review): counter updates and the swi scheduling call are among
 * the lines missing from this chunk; code left byte-identical.
 */
905 free_bounce_page(bus_dma_tag_t dmat
, struct bounce_page
*bpage
)
907 struct bus_dmamap
*map
;
/* Disassociate the page from its former client data. */
909 bpage
->datavaddr
= 0;
910 bpage
->datacount
= 0;
913 STAILQ_INSERT_HEAD(&bounce_page_list
, bpage
, links
);
916 if ((map
= STAILQ_FIRST(&bounce_map_waitinglist
)) != NULL
) {
/* Zero shortfall: the waiting map now has all its pages. */
917 if (reserve_bounce_pages(map
->dmat
, map
) == 0) {
918 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist
, links
);
919 STAILQ_INSERT_TAIL(&bounce_map_callbacklist
,
921 busdma_swi_pending
= 1;
/*
 * NOTE(review): the function header for this block is not visible in
 * this chunk; from the body it appears to be the busdma software
 * interrupt handler (see busdma_swi_pending above) — confirm against the
 * full file.  It drains bounce_map_callbacklist, replaying each deferred
 * bus_dmamap_load with the arguments stashed by the EINPROGRESS path.
 */
931 struct bus_dmamap
*map
;
934 while ((map
= STAILQ_FIRST(&bounce_map_callbacklist
)) != NULL
) {
935 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist
, links
);
937 bus_dmamap_load(map
->dmat
, map
, map
->buf
, map
->buflen
,
938 map
->callback
, map
->callback_arg
, /*flags*/0);