2 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
27 * $DragonFly: src/sys/platform/vkernel/platform/busdma_machdep.c,v 1.3 2008/06/06 13:19:25 swildner Exp $
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
35 #include <sys/thread2.h>
36 #include <sys/bus_dma.h>
39 #include <vm/vm_page.h>
41 /* XXX needed to access pmap to convert per-proc virtual to physical */
44 #include <vm/vm_map.h>
46 #include <machine/md_var.h>
48 #define MAX_BPAGES 128
56 bus_dma_filter_t
*filter
;
64 bus_dma_segment_t
*segments
;
68 vm_offset_t vaddr
; /* kva of bounce buffer */
69 bus_addr_t busaddr
; /* Physical address */
70 vm_offset_t datavaddr
; /* kva of client data */
71 bus_size_t datacount
; /* client data count */
72 STAILQ_ENTRY(bounce_page
) links
;
75 int busdma_swi_pending
;
77 static STAILQ_HEAD(bp_list
, bounce_page
) bounce_page_list
;
78 static int free_bpages
;
79 static int reserved_bpages
;
80 static int active_bpages
;
81 static int total_bpages
;
82 static bus_addr_t bounce_lowaddr
= BUS_SPACE_MAXADDR
;
85 struct bp_list bpages
;
89 void *buf
; /* unmapped buffer pointer */
90 bus_size_t buflen
; /* unmapped buffer length */
91 bus_dmamap_callback_t
*callback
;
93 STAILQ_ENTRY(bus_dmamap
) links
;
/*
 * Bounce-map bookkeeping: maps waiting for bounce pages, maps whose
 * deferred callbacks are ready to run, and a shared dummy map used when
 * no bouncing is required.
 * NOTE(review): the stray leading digits on these lines appear to be
 * line-number residue from a mangled extraction, not C tokens; verify
 * against the pristine source.
 */
/* Maps queued until enough bounce pages are free. */
96 static STAILQ_HEAD(, bus_dmamap
) bounce_map_waitinglist
;
/* Maps whose load callbacks will be invoked from the busdma SWI. */
97 static STAILQ_HEAD(, bus_dmamap
) bounce_map_callbacklist
;
/* Placeholder map handed to loads that need no bounce buffering. */
98 static struct bus_dmamap nobounce_dmamap
;
/*
 * Forward declarations for the file-local bounce-page pool helpers
 * defined later in this file.
 */
100 static int alloc_bounce_pages(bus_dma_tag_t dmat
, u_int numpages
);
101 static int reserve_bounce_pages(bus_dma_tag_t dmat
, bus_dmamap_t map
);
102 static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat
, bus_dmamap_t map
,
103 vm_offset_t vaddr
, bus_size_t size
);
104 static void free_bounce_page(bus_dma_tag_t dmat
, struct bounce_page
*bpage
);
105 static __inline
int run_filter(bus_dma_tag_t dmat
, bus_addr_t paddr
);
/*
 * run_filter - decide whether a physical address must be bounced for
 * this tag.  The visible logic tests paddr against the tag's exclusion
 * window (above lowaddr, at or below highaddr) and consults the
 * optional client filter callback; the trailing do/while condition
 * walks up the parent-tag chain until a verdict is reached.
 * NOTE(review): several lines of this function (retval initialization,
 * the do { opener, the final return) are missing from this view --
 * confirm against the complete source before editing.
 */
108 run_filter(bus_dma_tag_t dmat
, bus_addr_t paddr
)
114 if (paddr
> dmat
->lowaddr
115 && paddr
<= dmat
->highaddr
116 && (dmat
->filter
== NULL
117 || (*dmat
->filter
)(dmat
->filterarg
, paddr
) != 0))
121 } while (retval
== 0 && dmat
!= NULL
);
125 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
127 * Allocate a device specific dma_tag.
/*
 * bus_dma_tag_create - allocate and initialize a DMA tag describing a
 * device's addressing restrictions (alignment, boundary, low/high
 * address limits, optional filter callback, segment limits).  The new
 * tag inherits/tightens restrictions from its parent tag, and when the
 * tag cannot address all of physical memory and BUS_DMA_ALLOCNOW is
 * set, bounce pages are pre-allocated.
 * NOTE(review): error paths, the return statement, and several braces
 * are missing from this view; the kfree() near the end presumably
 * belongs to a failure path -- confirm against the complete source.
 */
130 bus_dma_tag_create(bus_dma_tag_t parent
, bus_size_t alignment
,
131 bus_size_t boundary
, bus_addr_t lowaddr
,
132 bus_addr_t highaddr
, bus_dma_filter_t
*filter
,
133 void *filterarg
, bus_size_t maxsize
, int nsegments
,
134 bus_size_t maxsegsz
, int flags
, bus_dma_tag_t
*dmat
)
136 bus_dma_tag_t newtag
;
139 /* Return a NULL tag on failure */
142 newtag
= kmalloc(sizeof(*newtag
), M_DEVBUF
, M_INTWAIT
);
144 newtag
->parent
= parent
;
145 newtag
->alignment
= alignment
;
146 newtag
->boundary
= boundary
;
/* Round the address limits up to page granularity. */
147 newtag
->lowaddr
= trunc_page((vm_paddr_t
)lowaddr
) + (PAGE_SIZE
- 1);
148 newtag
->highaddr
= trunc_page((vm_paddr_t
)highaddr
) + (PAGE_SIZE
- 1);
149 newtag
->filter
= filter
;
150 newtag
->filterarg
= filterarg
;
151 newtag
->maxsize
= maxsize
;
152 newtag
->nsegments
= nsegments
;
153 newtag
->maxsegsz
= maxsegsz
;
154 newtag
->flags
= flags
;
155 newtag
->ref_count
= 1; /* Count ourself */
156 newtag
->map_count
= 0;
/* Segment array is allocated lazily on first map/alloc. */
157 newtag
->segments
= NULL
;
159 /* Take into account any restrictions imposed by our parent tag */
160 if (parent
!= NULL
) {
161 newtag
->lowaddr
= MIN(parent
->lowaddr
, newtag
->lowaddr
);
162 newtag
->highaddr
= MAX(parent
->highaddr
, newtag
->highaddr
);
164 * XXX Not really correct??? Probably need to honor boundary
165 * all the way up the inheritance chain.
167 newtag
->boundary
= MAX(parent
->boundary
, newtag
->boundary
);
168 if (newtag
->filter
== NULL
) {
170 * Short circuit looking at our parent directly
171 * since we have encapsulated all of its information
173 newtag
->filter
= parent
->filter
;
174 newtag
->filterarg
= parent
->filterarg
;
175 newtag
->parent
= parent
->parent
;
177 if (newtag
->parent
!= NULL
) {
/* Tag cannot reach all of RAM and caller wants resources now:
 * make sure the bounce-page pool can cover maxsize. */
182 if (newtag
->lowaddr
< ptoa(Maxmem
) &&
183 (flags
& BUS_DMA_ALLOCNOW
) != 0) {
186 if (lowaddr
> bounce_lowaddr
) {
188 * Go through the pool and kill any pages
189 * that don't reside below lowaddr.
191 panic("bus_dma_tag_create: page reallocation "
194 if (ptoa(total_bpages
) < maxsize
) {
197 pages
= atop(maxsize
) - total_bpages
;
199 /* Add pages to our bounce pool */
200 if (alloc_bounce_pages(newtag
, pages
) < pages
)
203 /* Performed initial allocation */
204 newtag
->flags
|= BUS_DMA_MIN_ALLOC_COMP
;
208 kfree(newtag
, M_DEVBUF
);
/*
 * bus_dma_tag_destroy - release a DMA tag.  Refuses (visible early-out
 * guard) while maps are still outstanding, then walks up the parent
 * chain dropping references; a tag whose ref_count reaches zero has
 * its segment array and itself freed.
 * NOTE(review): the ref_count decrement and loop-advance lines are
 * missing from this view -- confirm against the complete source.
 */
216 bus_dma_tag_destroy(bus_dma_tag_t dmat
)
220 if (dmat
->map_count
!= 0)
223 while (dmat
!= NULL
) {
224 bus_dma_tag_t parent
;
226 parent
= dmat
->parent
;
228 if (dmat
->ref_count
== 0) {
229 if (dmat
->segments
!= NULL
)
230 kfree(dmat
->segments
, M_DEVBUF
);
231 kfree(dmat
, M_DEVBUF
);
233 * Last reference count, so
234 * release our reference
235 * count on our parent.
246 * Allocate a handle for mapping from kva/uva/physical
247 * address space into bus device space.
/*
 * bus_dmamap_create - allocate a DMA map handle for the tag.  Lazily
 * allocates the tag's segment array on first use.  When the tag cannot
 * address all of physical memory a real (zeroed) map is allocated, its
 * bounce-page list initialized, and the global bounce pool is grown on
 * a per-instance basis up to MAX_BPAGES.
 * NOTE(review): the no-bounce branch (returning a NULL/placeholder
 * map), error returns, and map_count accounting are missing from this
 * view -- confirm against the complete source.
 */
250 bus_dmamap_create(bus_dma_tag_t dmat
, int flags
, bus_dmamap_t
*mapp
)
256 if (dmat
->segments
== NULL
) {
257 KKASSERT(dmat
->nsegments
&& dmat
->nsegments
< 16384);
258 dmat
->segments
= kmalloc(sizeof(bus_dma_segment_t
) *
259 dmat
->nsegments
, M_DEVBUF
, M_INTWAIT
);
262 if (dmat
->lowaddr
< ptoa(Maxmem
)) {
266 *mapp
= kmalloc(sizeof(**mapp
), M_DEVBUF
, M_INTWAIT
| M_ZERO
);
269 /* Initialize the new map */
270 STAILQ_INIT(&((*mapp
)->bpages
));
272 * Attempt to add pages to our pool on a per-instance
273 * basis up to a sane limit.
275 maxpages
= MIN(MAX_BPAGES
, Maxmem
- atop(dmat
->lowaddr
));
276 if ((dmat
->flags
& BUS_DMA_MIN_ALLOC_COMP
) == 0
277 || (dmat
->map_count
> 0
278 && total_bpages
< maxpages
)) {
281 if (dmat
->lowaddr
> bounce_lowaddr
) {
283 * Go through the pool and kill any pages
284 * that don't reside below lowaddr.
286 panic("bus_dmamap_create: page reallocation "
289 pages
= atop(dmat
->maxsize
);
290 pages
= MIN(maxpages
- total_bpages
, pages
);
291 error
= alloc_bounce_pages(dmat
, pages
);
293 if ((dmat
->flags
& BUS_DMA_MIN_ALLOC_COMP
) == 0) {
295 dmat
->flags
|= BUS_DMA_MIN_ALLOC_COMP
;
309 * Destroy a handle for mapping from kva/uva/physical
310 * address space into bus device space.
/*
 * bus_dmamap_destroy - free a DMA map.  The visible guard refuses to
 * destroy a map that still holds bounce pages; otherwise the map
 * structure is freed.
 * NOTE(review): the error return for the busy case and map_count
 * decrement are missing from this view.
 */
313 bus_dmamap_destroy(bus_dma_tag_t dmat
, bus_dmamap_t map
)
316 if (STAILQ_FIRST(&map
->bpages
) != NULL
)
318 kfree(map
, M_DEVBUF
);
326 * Allocate a piece of memory that can be efficiently mapped into
327 * bus device space based on the constraints listed in the dma tag.
329 * mapp is degenerate. By definition this allocation should not require
330 * bounce buffers so do not allocate a dma map.
/*
 * bus_dmamem_alloc - allocate DMA-safe memory for the tag.  Small
 * allocations (<= PAGE_SIZE) with no address restriction use kmalloc,
 * retrying with a power-of-2 size if the result straddles a page
 * boundary; everything else falls back to contigmalloc honoring the
 * tag's lowaddr/alignment constraints.  Lazily allocates the tag's
 * segment array, and translates BUS_DMA_NOWAIT/BUS_DMA_ZERO into
 * kmalloc flags (the mflags setup lines are missing from this view).
 * NOTE(review): the trailing parameter(s) after `flags`, failure
 * returns, and the contigmalloc boundary argument are not visible --
 * confirm against the complete source.
 */
333 bus_dmamem_alloc(bus_dma_tag_t dmat
, void** vaddr
, int flags
,
337 /* If we succeed, no mapping/bouncing will be required */
340 if (dmat
->segments
== NULL
) {
341 KKASSERT(dmat
->nsegments
< 16384);
342 dmat
->segments
= kmalloc(sizeof(bus_dma_segment_t
) *
343 dmat
->nsegments
, M_DEVBUF
, M_INTWAIT
);
346 if (flags
& BUS_DMA_NOWAIT
)
350 if (flags
& BUS_DMA_ZERO
)
353 if ((dmat
->maxsize
<= PAGE_SIZE
) &&
354 dmat
->lowaddr
>= ptoa(Maxmem
)) {
355 *vaddr
= kmalloc(dmat
->maxsize
, M_DEVBUF
, mflags
);
357 * XXX Check whether the allocation crossed a page boundary
358 * and retry with power-of-2 alignment in that case.
360 if ((((intptr_t)*vaddr
) & PAGE_MASK
) !=
361 (((intptr_t)*vaddr
+ dmat
->maxsize
) & PAGE_MASK
)) {
363 kfree(*vaddr
, M_DEVBUF
);
364 /* XXX check for overflow? */
365 for (size
= 1; size
<= dmat
->maxsize
; size
<<= 1)
367 *vaddr
= kmalloc(size
, M_DEVBUF
, mflags
);
371 * XXX Use Contigmalloc until it is merged into this facility
372 * and handles multi-seg allocations. Nobody is doing
373 * multi-seg allocations yet though.
375 *vaddr
= contigmalloc(dmat
->maxsize
, M_DEVBUF
, mflags
,
376 0ul, dmat
->lowaddr
, dmat
->alignment
? dmat
->alignment
: 1ul,
385 * Free a piece of memory and its associated dmamap, that was allocated
386 * via bus_dmamem_alloc. Make the same choice for free/contigfree.
/*
 * bus_dmamem_free - release memory obtained from bus_dmamem_alloc,
 * mirroring its allocation choice: kfree for the small/unrestricted
 * case, contigfree otherwise.  The visible panic guards against being
 * handed a real map (dmamem never bounces, so no map is expected).
 */
389 bus_dmamem_free(bus_dma_tag_t dmat
, void *vaddr
, bus_dmamap_t map
)
392 * dmamem does not need to be bounced, so the map should be
396 panic("bus_dmamem_free: Invalid map freed\n");
397 if ((dmat
->maxsize
<= PAGE_SIZE
) &&
398 dmat
->lowaddr
>= ptoa(Maxmem
))
399 kfree(vaddr
, M_DEVBUF
);
401 contigfree(vaddr
, dmat
->maxsize
, M_DEVBUF
);
404 #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
407 * Map the buffer buf into bus space using the dmamap map.
/*
 * bus_dmamap_load - map a kernel buffer into bus space and hand the
 * resulting segment list to `callback`.  Visible flow: count bounce
 * pages needed (skipped if pagesneeded is already nonzero, i.e. we are
 * re-entering from the deferred-callback path); try to reserve them,
 * queueing the map on bounce_map_waitinglist and returning EINPROGRESS
 * when the pool is exhausted; then walk the buffer page by page,
 * substituting bounce pages where run_filter() demands and coalescing
 * physically-contiguous chunks into segments.
 * NOTE(review): many lines are missing from this view (pagesneeded
 * increments, the do { opener matching `} while (buflen > 0)`, segment
 * bookkeeping, the final return) -- confirm against the full source.
 */
410 bus_dmamap_load(bus_dma_tag_t dmat
, bus_dmamap_t map
, void *buf
,
411 bus_size_t buflen
, bus_dmamap_callback_t
*callback
,
412 void *callback_arg
, int flags
)
416 bus_dma_segment_t
*sg
;
419 vm_paddr_t nextpaddr
;
/* NULL map means the caller needs no bouncing: use the shared dummy. */
422 map
= &nobounce_dmamap
;
426 * If we are being called during a callback, pagesneeded will
427 * be non-zero, so we can avoid doing the work twice.
429 if (dmat
->lowaddr
< ptoa(Maxmem
) &&
430 map
->pagesneeded
== 0) {
431 vm_offset_t vendaddr
;
434 * Count the number of bounce pages
435 * needed in order to complete this transfer
437 vaddr
= trunc_page((vm_offset_t
)buf
);
438 vendaddr
= (vm_offset_t
)buf
+ buflen
;
440 while (vaddr
< vendaddr
) {
441 paddr
= pmap_kextract(vaddr
);
442 if (run_filter(dmat
, paddr
) != 0) {
450 /* Reserve Necessary Bounce Pages */
451 if (map
->pagesneeded
!= 0) {
453 if (reserve_bounce_pages(dmat
, map
) != 0) {
455 /* Queue us for resources */
458 map
->buflen
= buflen
;
459 map
->callback
= callback
;
460 map
->callback_arg
= callback_arg
;
462 STAILQ_INSERT_TAIL(&bounce_map_waitinglist
, map
, links
);
/* Deferred: free_bounce_page() will restart the load later. */
465 return (EINPROGRESS
);
470 vaddr
= (vm_offset_t
)buf
;
479 paddr
= pmap_kextract(vaddr
);
/* Clamp each chunk to the end of its physical page. */
480 size
= PAGE_SIZE
- (paddr
& PAGE_MASK
);
484 if (map
->pagesneeded
!= 0 && run_filter(dmat
, paddr
)) {
485 paddr
= add_bounce_page(dmat
, map
, vaddr
, size
);
488 if (sg
->ds_len
== 0) {
491 } else if (paddr
== nextpaddr
) {
494 /* Go to the next segment */
497 if (seg
> dmat
->nsegments
)
503 nextpaddr
= paddr
+ size
;
505 } while (buflen
> 0);
508 kprintf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
513 (*callback
)(callback_arg
, dmat
->segments
, seg
, error
);
519 * Utility function to load a linear buffer. lastaddrp holds state
520 * between invocations (for multiple-buffer loads). segp contains
521 * the starting segment on entrance, and the ending segment on exit.
522 * first indicates if this is the first invocation of this function.
/*
 * _bus_dmamap_load_buffer - core segment builder shared by the mbuf
 * and uio loaders.  Translates a linear buffer into physical segments
 * (pmap_extract for per-process user addresses, pmap_kextract for
 * kernel addresses), clamps chunks at page and tag-boundary limits,
 * and coalesces with the previous segment when physically contiguous,
 * within maxsegsz, and on the same boundary-masked region.
 * *lastaddrp carries contiguity state between calls; returns EFBIG if
 * the buffer did not fit in the tag's segment limit.
 * NOTE(review): parameter list (thread, flags, segp, first) and parts
 * of the loop body (buflen/vaddr advance, `first` handling) are
 * missing from this view -- confirm against the full source.
 */
525 _bus_dmamap_load_buffer(bus_dma_tag_t dmat
,
526 void *buf
, bus_size_t buflen
,
529 vm_offset_t
*lastaddrp
,
533 bus_dma_segment_t
*segs
;
535 bus_addr_t curaddr
, lastaddr
, baddr
, bmask
;
536 vm_offset_t vaddr
= (vm_offset_t
)buf
;
540 if (td
->td_proc
!= NULL
)
541 pmap
= vmspace_pmap(td
->td_proc
->p_vmspace
);
545 segs
= dmat
->segments
;
546 lastaddr
= *lastaddrp
;
/* NOTE(review): bmask underflows to all-ones when boundary == 0; the
 * coalescing test below explicitly special-cases boundary == 0. */
547 bmask
= ~(dmat
->boundary
- 1);
549 for (seg
= *segp
; buflen
> 0 ; ) {
551 * Get the physical address for this segment.
554 curaddr
= pmap_extract(pmap
, vaddr
);
556 curaddr
= pmap_kextract(vaddr
);
559 * Compute the segment size, and adjust counts.
561 sgsize
= PAGE_SIZE
- ((u_long
)curaddr
& PAGE_MASK
);
566 * Make sure we don't cross any boundaries.
568 if (dmat
->boundary
> 0) {
569 baddr
= (curaddr
+ dmat
->boundary
) & bmask
;
570 if (sgsize
> (baddr
- curaddr
))
571 sgsize
= (baddr
- curaddr
);
575 * Insert chunk into a segment, coalescing with
576 * previous segment if possible.
579 segs
[seg
].ds_addr
= curaddr
;
580 segs
[seg
].ds_len
= sgsize
;
583 if (curaddr
== lastaddr
&&
584 (segs
[seg
].ds_len
+ sgsize
) <= dmat
->maxsegsz
&&
585 (dmat
->boundary
== 0 ||
586 (segs
[seg
].ds_addr
& bmask
) == (curaddr
& bmask
)))
587 segs
[seg
].ds_len
+= sgsize
;
589 if (++seg
>= dmat
->nsegments
)
591 segs
[seg
].ds_addr
= curaddr
;
592 segs
[seg
].ds_len
= sgsize
;
596 lastaddr
= curaddr
+ sgsize
;
602 *lastaddrp
= lastaddr
;
607 return (buflen
!= 0 ? EFBIG
: 0); /* XXX better return value here? */
611 * Like _bus_dmamap_load(), but for mbufs.
/*
 * bus_dmamap_load_mbuf - load an mbuf chain (must carry a packet
 * header) by feeding each mbuf's data to _bus_dmamap_load_buffer,
 * threading contiguity state through `lastaddr`.  Bounce pages are not
 * supported on this path (asserted).  On error the callback is invoked
 * with zero segments; on success with the accumulated segment list and
 * the packet length.
 * NOTE(review): the EINVAL path for oversized packets and parts of the
 * parameter list are missing from this view.
 */
614 bus_dmamap_load_mbuf(bus_dma_tag_t dmat
, bus_dmamap_t map
,
616 bus_dmamap_callback2_t
*callback
, void *callback_arg
,
621 KASSERT(dmat
->lowaddr
>= ptoa(Maxmem
) || map
!= NULL
,
622 ("bus_dmamap_load_mbuf: No support for bounce pages!"));
623 KASSERT(m0
->m_flags
& M_PKTHDR
,
624 ("bus_dmamap_load_mbuf: no packet header"));
628 if (m0
->m_pkthdr
.len
<= dmat
->maxsize
) {
630 vm_offset_t lastaddr
= 0;
633 for (m
= m0
; m
!= NULL
&& error
== 0; m
= m
->m_next
) {
636 error
= _bus_dmamap_load_buffer(dmat
,
638 curthread
, flags
, &lastaddr
,
647 /* force "no valid mappings" in callback */
648 (*callback
)(callback_arg
, dmat
->segments
, 0, 0, error
);
650 (*callback
)(callback_arg
, dmat
->segments
,
651 nsegs
+1, m0
->m_pkthdr
.len
, error
);
657 * Like _bus_dmamap_load(), but for uios.
/*
 * bus_dmamap_load_uio - load the iovecs of a uio, clamping each to the
 * remaining residual count and delegating to _bus_dmamap_load_buffer.
 * USERSPACE uios require a current thread with a process (asserted) so
 * user addresses can be resolved via the process pmap.  Bounce pages
 * are not supported on this path.  Error/success callback convention
 * matches bus_dmamap_load_mbuf.
 * NOTE(review): the assignment of `td` for the USERSPACE case, the
 * per-iteration residual decrement, and the minlen declaration are
 * missing from this view.
 */
660 bus_dmamap_load_uio(bus_dma_tag_t dmat
, bus_dmamap_t map
,
662 bus_dmamap_callback2_t
*callback
, void *callback_arg
,
665 vm_offset_t lastaddr
;
666 int nsegs
, error
, first
, i
;
669 struct thread
*td
= NULL
;
671 KASSERT(dmat
->lowaddr
>= ptoa(Maxmem
) || map
!= NULL
,
672 ("bus_dmamap_load_uio: No support for bounce pages!"));
674 resid
= uio
->uio_resid
;
677 if (uio
->uio_segflg
== UIO_USERSPACE
) {
679 KASSERT(td
!= NULL
&& td
->td_proc
!= NULL
,
680 ("bus_dmamap_load_uio: USERSPACE but no proc"));
686 for (i
= 0; i
< uio
->uio_iovcnt
&& resid
!= 0 && !error
; i
++) {
688 * Now at the first iovec to load. Load each iovec
689 * until we have exhausted the residual count.
692 resid
< iov
[i
].iov_len
? resid
: iov
[i
].iov_len
;
693 caddr_t addr
= (caddr_t
) iov
[i
].iov_base
;
695 error
= _bus_dmamap_load_buffer(dmat
,
697 td
, flags
, &lastaddr
, &nsegs
, first
);
704 /* force "no valid mappings" in callback */
705 (*callback
)(callback_arg
, dmat
->segments
, 0, 0, error
);
707 (*callback
)(callback_arg
, dmat
->segments
,
708 nsegs
+1, uio
->uio_resid
, error
);
714 * Release the mapping held by map.
/*
 * _bus_dmamap_unload - release a mapping by returning every bounce
 * page held by the map to the free pool.
 */
717 _bus_dmamap_unload(bus_dma_tag_t dmat
, bus_dmamap_t map
)
719 struct bounce_page
*bpage
;
721 while ((bpage
= STAILQ_FIRST(&map
->bpages
)) != NULL
) {
722 STAILQ_REMOVE_HEAD(&map
->bpages
, links
);
723 free_bounce_page(dmat
, bpage
);
/*
 * _bus_dmamap_sync - synchronize bounce buffers with client data.
 * PREWRITE copies client data into each bounce page (CPU -> device);
 * POSTREAD copies bounce pages back to the client (device -> CPU).
 * PREREAD/POSTWRITE need no copying.  Maps with no bounce pages are a
 * no-op (outer if).
 * NOTE(review): the switch(op) opener, the bcopy length argument
 * (presumably bpage->datacount), and break statements are missing from
 * this view -- confirm against the complete source.
 */
728 _bus_dmamap_sync(bus_dma_tag_t dmat
, bus_dmamap_t map
, bus_dmasync_op_t op
)
730 struct bounce_page
*bpage
;
732 if ((bpage
= STAILQ_FIRST(&map
->bpages
)) != NULL
) {
735 * Handle data bouncing. We might also
736 * want to add support for invalidating
737 * the caches on broken hardware
740 case BUS_DMASYNC_PREWRITE
:
741 while (bpage
!= NULL
) {
742 bcopy((void *)bpage
->datavaddr
,
743 (void *)bpage
->vaddr
,
745 bpage
= STAILQ_NEXT(bpage
, links
);
749 case BUS_DMASYNC_POSTREAD
:
750 while (bpage
!= NULL
) {
751 bcopy((void *)bpage
->vaddr
,
752 (void *)bpage
->datavaddr
,
754 bpage
= STAILQ_NEXT(bpage
, links
);
757 case BUS_DMASYNC_PREREAD
:
758 case BUS_DMASYNC_POSTWRITE
:
/*
 * alloc_bounce_pages - grow the global bounce pool by up to `numpages`
 * pages.  Initializes the global lists on first use.  Each page gets a
 * tracking structure (kmalloc) and one page of DMA-safe memory
 * (contigmalloc); a page whose backing allocation fails is unwound and
 * the loop presumably stops.
 * NOTE(review): allocation flags/constraint arguments, the counter
 * updates (total/free_bpages), the numpages decrement, and the return
 * value are missing from this view -- confirm against the full source.
 */
766 alloc_bounce_pages(bus_dma_tag_t dmat
, u_int numpages
)
771 if (total_bpages
== 0) {
772 STAILQ_INIT(&bounce_page_list
);
773 STAILQ_INIT(&bounce_map_waitinglist
);
774 STAILQ_INIT(&bounce_map_callbacklist
);
777 while (numpages
> 0) {
778 struct bounce_page
*bpage
;
780 bpage
= (struct bounce_page
*)kmalloc(sizeof(*bpage
), M_DEVBUF
,
785 bpage
->vaddr
= (vm_offset_t
)contigmalloc(PAGE_SIZE
, M_DEVBUF
,
790 if (bpage
->vaddr
== 0) {
791 kfree(bpage
, M_DEVBUF
);
794 bpage
->busaddr
= pmap_kextract(bpage
->vaddr
);
796 STAILQ_INSERT_TAIL(&bounce_page_list
, bpage
, links
);
/*
 * reserve_bounce_pages - move as many free pages as possible from the
 * global free pool onto this map's reservation, updating the global
 * free/reserved counters.  The final `pages` value is the shortfall
 * still needed; the (missing) return presumably reports it so callers
 * can defer the load when nonzero.
 */
807 reserve_bounce_pages(bus_dma_tag_t dmat
, bus_dmamap_t map
)
811 pages
= MIN(free_bpages
, map
->pagesneeded
- map
->pagesreserved
);
812 free_bpages
-= pages
;
813 reserved_bpages
+= pages
;
814 map
->pagesreserved
+= pages
;
815 pages
= map
->pagesneeded
- map
->pagesreserved
;
/*
 * add_bounce_page - take one previously-reserved page off the free
 * list, record the client's virtual address and byte count in it
 * (consumed later by _bus_dmamap_sync), queue it on the map, and
 * return the page's bus address for use in the segment list.
 * Panics if the map never asked for or reserved pages, or if the free
 * list is unexpectedly empty.
 * NOTE(review): the `size` parameter declaration and the counter
 * updates (pagesneeded/active/reserved) are missing from this view.
 */
821 add_bounce_page(bus_dma_tag_t dmat
, bus_dmamap_t map
, vm_offset_t vaddr
,
824 struct bounce_page
*bpage
;
826 if (map
->pagesneeded
== 0)
827 panic("add_bounce_page: map doesn't need any pages");
830 if (map
->pagesreserved
== 0)
831 panic("add_bounce_page: map doesn't need any pages");
832 map
->pagesreserved
--;
835 bpage
= STAILQ_FIRST(&bounce_page_list
);
837 panic("add_bounce_page: free page list is empty");
839 STAILQ_REMOVE_HEAD(&bounce_page_list
, links
);
844 bpage
->datavaddr
= vaddr
;
845 bpage
->datacount
= size
;
846 STAILQ_INSERT_TAIL(&(map
->bpages
), bpage
, links
);
847 return (bpage
->busaddr
);
/*
 * free_bounce_page - return a bounce page to the free pool (clearing
 * its client-data bookkeeping first), then, if a map is waiting for
 * pages, retry its reservation; a now-satisfied map is moved to the
 * callback list and the busdma software interrupt is flagged so its
 * deferred load callback runs.
 * NOTE(review): the counter updates and the setsofttty/SWI trigger
 * call are missing from this view; the panic on a still-unsatisfied
 * reservation ("uncoded") marks an unimplemented partial-reserve path.
 */
851 free_bounce_page(bus_dma_tag_t dmat
, struct bounce_page
*bpage
)
853 struct bus_dmamap
*map
;
855 bpage
->datavaddr
= 0;
856 bpage
->datacount
= 0;
859 STAILQ_INSERT_HEAD(&bounce_page_list
, bpage
, links
);
862 if ((map
= STAILQ_FIRST(&bounce_map_waitinglist
)) != NULL
) {
863 if (reserve_bounce_pages(map
->dmat
, map
) == 0) {
864 panic("free_bounce_pages: uncoded\n");
866 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist
, links
);
867 STAILQ_INSERT_TAIL(&bounce_map_callbacklist
,
869 busdma_swi_pending
= 1;
882 struct bus_dmamap
*map
;
885 while ((map
= STAILQ_FIRST(&bounce_map_callbacklist
)) != NULL
) {
886 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist
, links
);
888 bus_dmamap_load(map
->dmat
, map
, map
->buf
, map
->buflen
,
889 map
->callback
, map
->callback_arg
, /*flags*/0);