/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.16.2.2 2003/01/23 00:55:27 scottl Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.17 2006/10/25 20:55:51 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>

#include <vm/vm_page.h>
/* XXX needed to access the pmap to convert per-proc virtual to physical */
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES 128
        bus_dma_filter_t *filter;

        bus_dma_segment_t *segments;
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
        struct bp_list  bpages;

        void            *buf;           /* unmapped buffer pointer */
        bus_size_t      buflen;         /* unmapped buffer length */
        bus_dmamap_callback_t *callback;

        STAILQ_ENTRY(bus_dmamap) links;
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
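/*
 * Illustrative sketch (an assumption, not text from the original file): a
 * driver-supplied filter passed to bus_dma_tag_create() has the
 * bus_dma_filter_t shape consumed above; run_filter() treats a non-zero
 * return as "this physical address is not reachable, bounce it".  The
 * function name and address window below are hypothetical.
 *
 *      static int
 *      example_dma_filter(void *arg, bus_addr_t paddr)
 *      {
 *              return (paddr >= 0x00f00000 && paddr < 0x01000000);
 *      }
 */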
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

/*
 * Allocate a device specific dma_tag.
 */
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
        bus_dma_tag_t newtag;

        /* Return a NULL tag on failure */

        newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);
        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        newtag->segments = NULL;
        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 * all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                if (newtag->parent != NULL) {

        if (newtag->lowaddr < ptoa(Maxmem) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "

                if (ptoa(total_bpages) < maxsize) {

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)

                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;

                kfree(newtag, M_DEVBUF);
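/*
 * Usage sketch (an assumption, not part of the original source): a driver
 * limited to 32-bit DMA addresses might create its tag roughly like this,
 * matching the parameter order above.  Names are hypothetical and error
 * handling is omitted.
 *
 *      bus_dma_tag_t tag;
 *      int error;
 *
 *      error = bus_dma_tag_create(NULL, 1, 0,
 *                                 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *                                 NULL, NULL, MAXPHYS, 1, MAXPHYS,
 *                                 0, &tag);
 */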
bus_dma_tag_destroy(bus_dma_tag_t dmat)

        if (dmat->map_count != 0)

        while (dmat != NULL) {
                bus_dma_tag_t parent;

                parent = dmat->parent;

                if (dmat->ref_count == 0) {
                        if (dmat->segments != NULL)
                                kfree(dmat->segments, M_DEVBUF);
                        kfree(dmat, M_DEVBUF);
                        /*
                         * Last reference count, so
                         * release our reference
                         * count on our parent.
                         */
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                         dmat->nsegments, M_DEVBUF, M_INTWAIT);

        if (dmat->lowaddr < ptoa(Maxmem)) {

                *mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT);

                /* Initialize the new map */
                bzero(*mapp, sizeof(**mapp));
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "

                        pages = atop(dmat->maxsize);
                        pages = MIN(maxpages - total_bpages, pages);
                        error = alloc_bounce_pages(dmat, pages);

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {

                                dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
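/*
 * Usage sketch (an assumption, not part of the original source): once a tag
 * exists, a driver typically allocates one map per in-flight buffer; the
 * names are hypothetical.
 *
 *      bus_dmamap_t map;
 *
 *      error = bus_dmamap_create(tag, 0, &map);
 */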
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)

        if (STAILQ_FIRST(&map->bpages) != NULL)

        kfree(map, M_DEVBUF);
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,

        /* If we succeed, no mapping/bouncing will be required */

        if (dmat->segments == NULL) {
                KKASSERT(dmat->nsegments < 16384);
                dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
                                         dmat->nsegments, M_DEVBUF, M_INTWAIT);

        if (flags & BUS_DMA_NOWAIT)

        if (flags & BUS_DMA_ZERO)

        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa(Maxmem)) {
                *vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

                /*
                 * XXX Check whether the allocation crossed a page boundary
                 * and retry with power-of-2 alignment in that case.
                 */
                if ((((intptr_t)*vaddr) & PAGE_MASK) !=
                    (((intptr_t)*vaddr + dmat->maxsize) & PAGE_MASK)) {

                        kfree(*vaddr, M_DEVBUF);
                        /* XXX check for overflow? */
                        for (size = 1; size <= dmat->maxsize; size <<= 1)
                                ;
                        *vaddr = kmalloc(size, M_DEVBUF, mflags);

                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 * and handles multi-seg allocations.  Nobody is doing
                 * multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                                      0ul, dmat->lowaddr,
                                      dmat->alignment ? dmat->alignment : 1ul,
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)

        /*
         * dmamem does not need to be bounced, so the map should be NULL
         */
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa(Maxmem))
                kfree(vaddr, M_DEVBUF);

                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
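/*
 * Usage sketch (an assumption, not taken from the original file): a driver
 * pairing the two calls above for a small descriptor ring; "sc" and "ring"
 * are hypothetical.
 *
 *      void *ring;
 *      bus_dmamap_t map;
 *
 *      error = bus_dmamem_alloc(sc->ring_tag, &ring, BUS_DMA_ZERO, &map);
 *      ...
 *      bus_dmamem_free(sc->ring_tag, ring, map);
 */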
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)

        bus_dma_segment_t *sg;

        vm_paddr_t nextpaddr;

                map = &nobounce_dmamap;

        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if (dmat->lowaddr < ptoa(Maxmem) &&
            map->pagesneeded == 0) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {

                if (reserve_bounce_pages(dmat, map) != 0) {

                        /* Queue us for resources */

                        map->buflen = buflen;
                        map->callback = callback;
                        map->callback_arg = callback_arg;

                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);

                        return (EINPROGRESS);

        vaddr = (vm_offset_t)buf;

                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);

                if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
                        paddr = add_bounce_page(dmat, map, vaddr, size);

                if (sg->ds_len == 0) {

                } else if (paddr == nextpaddr) {

                        /* Go to the next segment */

                        if (seg > dmat->nsegments)

                nextpaddr = paddr + size;

        } while (buflen > 0);

                printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",

        (*callback)(callback_arg, dmat->segments, seg, error);
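/*
 * Usage sketch (an assumption, not part of the original source): the usual
 * load pattern is a callback that records the segments, plus a load call
 * that tolerates EINPROGRESS; the names are hypothetical.
 *
 *      static void
 *      example_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *                           int error)
 *      {
 *              if (error == 0)
 *                      *(bus_addr_t *)arg = segs[0].ds_addr;
 *      }
 *
 *      bus_addr_t busaddr;
 *
 *      error = bus_dmamap_load(tag, map, buf, len,
 *                              example_dma_callback, &busaddr, 0);
 *
 * A return of EINPROGRESS means the map was queued and the callback will run
 * later, once bounce pages become available.
 */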
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        void *buf, bus_size_t buflen,

                        vm_offset_t *lastaddrp,

        bus_dma_segment_t *segs;

        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;

        if (td->td_proc != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);

        segs = dmat->segments;
        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                        curaddr = pmap_extract(pmap, vaddr);

                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;

                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;

                                if (++seg >= dmat->nsegments)

                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;

                lastaddr = curaddr + sgsize;

        *lastaddrp = lastaddr;

        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,

                     bus_dmamap_callback2_t *callback, void *callback_arg,

        KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
                ("bus_dmamap_load_mbuf: No support for bounce pages!"));
        KASSERT(m0->m_flags & M_PKTHDR,
                ("bus_dmamap_load_mbuf: no packet header"));

        if (m0->m_pkthdr.len <= dmat->maxsize) {

                vm_offset_t lastaddr = 0;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {

                        error = _bus_dmamap_load_buffer(dmat,

                                        curthread, flags, &lastaddr,

                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);

                (*callback)(callback_arg, dmat->segments,
                            nsegs+1, m0->m_pkthdr.len, error);
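/*
 * Usage sketch (an assumption, not in the original file): a network driver
 * transmit path typically hands a packet chain straight to this routine;
 * "txdone_cb" and "sc" are hypothetical.
 *
 *      error = bus_dmamap_load_mbuf(sc->tx_tag, sc->tx_map, m_head,
 *                                   txdone_cb, sc, BUS_DMA_NOWAIT);
 */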
/*
 * Like _bus_dmamap_load(), but for uios.
 */
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,

                    bus_dmamap_callback2_t *callback, void *callback_arg,

        vm_offset_t lastaddr;
        int nsegs, error, first, i;

        struct thread *td = NULL;

        KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
                ("bus_dmamap_load_uio: No support for bounce pages!"));

        resid = uio->uio_resid;

        if (uio->uio_segflg == UIO_USERSPACE) {

                KASSERT(td != NULL && td->td_proc != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));

        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                        resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                error = _bus_dmamap_load_buffer(dmat,

                                td, flags, &lastaddr, &nsegs, first);

                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dmat->segments, 0, 0, error);

                (*callback)(callback_arg, dmat->segments,
                            nsegs+1, uio->uio_resid, error);
/*
 * Release the mapping held by map.
 */
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)

        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)

        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */

                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,

                                bpage = STAILQ_NEXT(bpage, links);

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,

                                bpage = STAILQ_NEXT(bpage, links);

                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
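/*
 * Usage sketch (an assumption, not part of the original source): the
 * PREWRITE/POSTREAD cases above are what a driver exercises when it
 * brackets device I/O with sync calls, e.g.:
 *
 *      bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *      (start the DMA to the device and wait for it to complete)
 *      bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
 */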
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)

        if (total_bpages == 0) {
                STAILQ_INIT(&bounce_page_list);
                STAILQ_INIT(&bounce_map_waitinglist);
                STAILQ_INIT(&bounce_map_callbacklist);

        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)kmalloc(sizeof(*bpage), M_DEVBUF,

                bzero(bpage, sizeof(*bpage));
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,

                if (bpage->vaddr == NULL) {
                        kfree(bpage, M_DEVBUF);

                bpage->busaddr = pmap_kextract(bpage->vaddr);

                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)

        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,

        struct bounce_page *bpage;

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesreserved--;

        bpage = STAILQ_FIRST(&bounce_page_list);

                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)

        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);

        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,

                        busdma_swi_pending = 1;
        struct bus_dmamap *map;

        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);

                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);