/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 * $DragonFly: src/sys/platform/pc32/i386/busdma_machdep.c,v 1.23 2008/06/05 18:06:32 swildner Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/spinlock2.h>

#include <vm/vm_page.h>
/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <vm/vm_map.h>

#include <machine/md_var.h>
#define MAX_BPAGES	1024

/*
 * 16 x N declared on stack.
 */
#define BUS_DMA_CACHE_SEGMENTS	8
struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
	struct spinlock	spin;
};
/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)
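/*
 * Added note (not in the original source): BUS_DMAMEM_KMALLOC() below is
 * true when a tag's constraints can be satisfied by a plain kmalloc()
 * allocation, i.e. the allocation fits in one page, needs at most page
 * alignment, and may live anywhere in physical memory (lowaddr covers all
 * of it).  Otherwise bus_dmamem_alloc() falls back to contigmalloc().
 */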
#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	struct spinlock	spin;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_addr_t	lowaddr;
	bus_size_t	alignment;
	char		zoneid[8];
	char		lowaddrid[18];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};
#ifdef SMP
#define BZ_LOCK(bz)	spin_lock_wr(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock_wr(&(bz)->spin)
#else
#define BZ_LOCK(bz)	crit_enter()
#define BZ_UNLOCK(bz)	crit_exit()
#endif
static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_tok);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1;	/* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);
struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;
static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
		vm_offset_t, bus_size_t);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
static void add_map_callback(bus_dmamap_t);
SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
	   0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
	   0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
	   &bounce_alignment, 0, "Obey alignment constraint");
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		     (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL ||
		     dmat->filter(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
static bus_dma_segment_t *
bus_dma_tag_lock(bus_dma_tag_t tag, bus_dma_segment_t *cache)
{
	if (tag->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		return(cache);
#ifdef SMP
	spin_lock_wr(&tag->spin);
#endif
	return(tag->segments);
}

static __inline void
bus_dma_tag_unlock(bus_dma_tag_t tag)
{
#ifdef SMP
	if (tag->nsegments > BUS_DMA_CACHE_SEGMENTS)
		spin_unlock_wr(&tag->spin);
#endif
}
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	if (alignment & (alignment - 1))
		panic("alignment must be power of 2\n");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2\n");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace();
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT | M_ZERO);

	spin_init(&newtag->spin);
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;
	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			parent->ref_count++;
	}
	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error) {
		kfree(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return error;
}
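/*
 * Example (hypothetical driver code, not part of this file): a device with
 * a 32-bit DMA engine might create its tag like this.  The sizes shown are
 * illustrative assumptions only.
 *
 *	bus_dma_tag_t tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
 *				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *				   NULL, NULL, MAXBSIZE, 1, MAXBSIZE,
 *				   0, &tag);
 *
 * On a machine with memory above 4GB this tag's lowaddr is below
 * ptoa(Maxmem), so BUS_DMA_BOUNCE_LOWADDR is set and loads through the tag
 * may use bounce pages.
 */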
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					kfree(dmat->segments, M_DEVBUF);
				kfree(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else {
				dmat = NULL;
			}
		}
	}
	return (0);
}

bus_size_t
bus_dma_tag_getmaxsize(bus_dma_tag_t tag)
{
	return(tag->maxsize);
}
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error)
		dmat->map_count++;
	return error;
}
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}
static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		kprintf("boundary check failed\n");
		if (verify)
			print_backtrace();	/* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		kprintf("alignment check failed\n");
		if (verify)
			print_backtrace();	/* XXX panic */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
				      0ul, dmat->lowaddr,
				      dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
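/*
 * Example (hypothetical, not in the original file): allocating a zeroed
 * descriptor ring and releasing it again.  "ring_tag" is assumed to be a
 * tag created earlier with nsegments == 1.
 *
 *	void *ring;
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamem_alloc(ring_tag, &ring, BUS_DMA_ZERO, &map);
 *	...
 *	bus_dmamem_free(ring_tag, ring, map);
 *
 * Note that this implementation always returns a NULL map from
 * bus_dmamem_alloc(), and bus_dmamem_free() panics on a non-NULL one.
 */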
static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr);
	else
		return pmap_kextract(vaddr);
}
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}
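/*
 * Added worked example (not in the original): with boundary = 0x10000,
 * bmask = ~0xffff.  A segment that starts at ds_addr = 0xf000 and has grown
 * to ds_len = 0x2000 (nextpaddr = 0x11000) satisfies
 * ((nextpaddr - 1) ^ ds_addr) & bmask, so it crosses the 64KB line.
 * tmpsize = 0x10000 - (0xf000 & 0xffff) = 0x1000, and the sg[1]/sg[0]
 * fixup above splits it into [0xf000, len 0x1000] and [0x10000, len 0x1000].
 */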
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustment".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
					segments, dmat->nsegments,
					NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS) {
		bus_dma_tag_unlock(dmat);
		return error;
	}
	callback(callback_arg, segments, nsegs, error);
	bus_dma_tag_unlock(dmat);
	return 0;
}
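/*
 * Example (hypothetical driver code): loading a buffer and capturing the
 * resulting segments in a callback.  All "xx" names are assumptions for
 * illustration only.
 *
 *	static void
 *	xx_map_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		if (error == 0)
 *			sc->xx_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->xx_tag, sc->xx_map, sc->xx_buf,
 *				XX_BUFSIZE, xx_map_callback, sc, 0);
 *
 * A return value of EINPROGRESS means bounce pages were unavailable; the
 * map was queued and the callback will run later from busdma_swi().
 */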
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustment".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	segments = bus_dma_tag_lock(dmat, cache_segments);
	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	bus_dma_tag_unlock(dmat);
	return error;
}
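/*
 * Example (hypothetical): mapping an outgoing packet for a NIC transmit
 * ring.  "sc" and "txd" are illustrative names only.
 *
 *	error = bus_dmamap_load_mbuf(sc->tx_tag, txd->tx_map, m,
 *				     xx_txbuf_callback, txd, BUS_DMA_NOWAIT);
 *
 * On failure the callback2 runs with nseg == 0; on success it receives the
 * segment array plus m->m_pkthdr.len as the mapped size.
 */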
int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d\n", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only support %d segments\n",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported\n"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
	} else {
		*nsegs = 0;
		error = EFBIG;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;
	bus_dma_segment_t cache_segments[BUS_DMA_CACHE_SEGMENTS];
	bus_dma_segment_t *segments;
	bus_dma_segment_t *segs;
	int nsegs_left;

	if (dmat->nsegments <= BUS_DMA_CACHE_SEGMENTS)
		segments = cache_segments;
	else
		segments = kmalloc(sizeof(bus_dma_segment_t) * dmat->nsegments,
				   M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustment".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	segs = segments;
	nsegs_left = dmat->nsegments;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				segs, nsegs_left,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
		if (error == 0) {
			segs += nsegs;
			nsegs_left -= nsegs;
		}
	}

	/*
	 * Minimum one DMA segment, even if 0-length buffer.
	 */
	if (nsegs_left == dmat->nsegments)
		--nsegs_left;

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, segments, 0,
			 0, error);
	} else {
		callback(callback_arg, segments, dmat->nsegments - nsegs_left,
			 (bus_size_t)uio->uio_resid, error);
	}
	if (dmat->nsegments > BUS_DMA_CACHE_SEGMENTS)
		kfree(segments, M_DEVBUF);
	return error;
}
/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			break;
		}
	}
}
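/*
 * Example (hypothetical): typical sync ordering around a DMA transaction
 * when the map may be using bounce pages.
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	(start device read of the buffer, wait for completion)
 *
 *	(start device write into the buffer, wait for completion)
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *
 * PREWRITE copies client data out to the bounce pages before the device
 * sees it; POSTREAD copies device-written data back to the client buffer.
 */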
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;
	lwkt_tokref ref;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned\n"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&ref, &bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&ref);

			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

#ifdef SMP
	spin_init(&bz->spin);
#endif
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&ref);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that were failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	return 0;
}
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}
/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}
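/*
 * Added worked example (not in the original): suppose a map needs 4 pages,
 * has 1 already reserved, and the zone has 2 free.  pages = MIN(2, 3) = 2.
 * Without commit, 4 > 1 + 2, so nothing is taken and the shortfall (1) is
 * returned as a failure.  With commit, the 2 free pages are reserved and
 * the remaining shortfall of 1 is returned; the waiting map stays queued
 * until a later call returns 0.
 */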
static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (wait_map != NULL)
		add_map_callback(wait_map);
}
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}
/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}
static void
add_map_callback(bus_dmamap_t map)
{
	/* XXX callbacklist is not MPSAFE */
	crit_enter();
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	crit_exit();
}

void
busdma_swi(void)
{
	bus_dmamap_t map;

	crit_enter();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}