/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/spinlock2.h>

#include <vm/vm_page.h>

/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>
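
/*
 * Cap on the number of bounce pages kept in a bounce zone.  Bounce
 * pages are low physical memory pages that data is copied through when
 * a device cannot DMA directly to or from the client buffer.
 */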
#define MAX_BPAGES	1024

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)
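
/*
 * A bus_dmamem allocation can be served by plain kmalloc() when it fits
 * in one page, needs at most page alignment, and the tag's low address
 * limit covers all of physical memory, so the memory can never need
 * bouncing.
 */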
#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_addr_t	lowaddr;
	bus_size_t	alignment;
	char		zoneid[8];
	char		lowaddrid[20];
	struct spinlock	spin;
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

#define BZ_LOCK(bz)	spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock(&(bz)->spin)

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1;	/* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
		    vm_offset_t, bus_size_t);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
static void add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD,
	   &total_bounce_pages, 0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD,
	   &max_bounce_pages, 0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
	   &bounce_alignment, 0, "Obey alignment constraint");
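
/*
 * Return non-zero if the given physical address must be bounced for
 * this tag or any of its parents, either because it lies in the
 * bounce range or because it violates the alignment constraint.
 */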
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL ||
			dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return retval;
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	if (alignment & (alignment - 1))
		panic("alignment must be power of 2");
	if (boundary & (boundary - 1))
		panic("boundary must be power of 2");
	if (boundary < maxsegsz) {
		kprintf("boundary < maxsegsz:\n");
		maxsegsz = boundary;
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error) {
			kfree(newtag, M_DEVBUF);
			return error;
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}

	if (error)
		kfree(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return error;
}
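
#if 0
/*
 * Illustrative sketch only, not part of the original file: a typical
 * driver-side use of bus_dma_tag_create().  The function name here
 * (example_create_tag) and the chosen constraint values are
 * hypothetical.
 */
static int
example_create_tag(bus_dma_tag_t *dtagp)
{
	return bus_dma_tag_create(NULL,			/* parent */
				  1,			/* alignment */
				  0,			/* boundary */
				  BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
				  BUS_SPACE_MAXADDR,	/* highaddr */
				  NULL, NULL,		/* filter, filterarg */
				  MAXBSIZE,		/* maxsize */
				  1,			/* nsegments */
				  MAXBSIZE,		/* maxsegsz */
				  BUS_DMA_ALLOCNOW,	/* flags */
				  dtagp);
}
#endif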

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat->map_count != 0)
		return (EBUSY);

	while (dmat != NULL) {
		bus_dma_tag_t parent;

		parent = dmat->parent;
		dmat->ref_count--;
		if (dmat->ref_count == 0) {
			if (dmat->segments != NULL)
				kfree(dmat->segments, M_DEVBUF);
			kfree(dmat, M_DEVBUF);
			/*
			 * Last reference count, so
			 * release our reference
			 * count on our parent.
			 */
			dmat = parent;
		} else {
			dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (dmat->map_count > 0 &&
		     bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}

	if (!error)
		dmat->map_count++;
	return error;
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}
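
/*
 * Check a kmalloc()'ed buffer against the tag's constraints: report a
 * page boundary crossing or a misalignment and return the size to use
 * for a power-of-2 aligned retry, or 0 if the buffer is acceptable.
 */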
static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		kprintf("boundary check failed\n");
		if (verify)
			print_backtrace(-1);	/* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		kprintf("alignment check failed\n");
		if (verify)
			print_backtrace(-1);	/* XXX panic */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing
		 * multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
				      0ul, dmat->lowaddr,
				      dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return ENOMEM;
	return 0;
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
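
/*
 * Return the physical address backing vaddr, through the given user
 * pmap if one is supplied, otherwise through the kernel pmap.
 */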
static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr, NULL);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments, int nsegments,
			pmap_t pmap, int flags,
			vm_paddr_t *lastpaddrp, int *segp, int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - (vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		vaddr += size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
fail:
		error = EFBIG;
	}

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
					dmat->segments, dmat->nsegments,
					NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS)
		return error;

	callback(callback_arg, dmat->segments, nsegs, error);
	return 0;
}
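
#if 0
/*
 * Illustrative sketch only, not part of the original file: the callback
 * pattern used with bus_dmamap_load().  Because a load may complete
 * asynchronously (EINPROGRESS), the segment list is only valid inside
 * the callback.  The function name (example_callback) is hypothetical.
 */
static void
example_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;	/* hand to the device */
}
#endif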

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	/*
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			dmat->segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
		KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
	} else {
		*nsegs = 0;
		error = EFBIG;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	/*
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				dmat->segments, dmat->nsegments,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 (bus_size_t)uio->uio_resid, error);
	}
	return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
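
/*
 * Synchronize bounced data with the client buffer: before a write to
 * the device (PREWRITE) copy the client data into the bounce pages;
 * after a read from the device (POSTREAD) copy the bounce pages back
 * to the client data.
 */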
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			break;
		}
	}
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&bounce_zone_tok);

			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

	spin_init(&bz->spin, "allocbouncezone");
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&bounce_zone_tok);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}
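
/*
 * Give a map's reserved bounce pages back to its zone and, if another
 * map is waiting for pages, kick off its deferred callback.
 */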
static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
	/* XXX callbacklist is not MPSAFE */
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	panic("%s uncoded", __func__);
}

void
busdma_swi(void)
{
	bus_dmamap_t map;

	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
	}
}