2 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
34 #include <sys/bus_dma.h>
35 #include <sys/kernel.h>
36 #include <sys/sysctl.h>
39 #include <sys/thread2.h>
40 #include <sys/spinlock2.h>
41 #include <sys/mplock2.h>
44 #include <vm/vm_page.h>
46 /* XXX needed for to access pmap to convert per-proc virtual to physical */
48 #include <vm/vm_map.h>
50 #include <machine/md_var.h>
52 #include <bus/cam/cam.h>
53 #include <bus/cam/cam_ccb.h>
55 #define MAX_BPAGES 1024
58 * 16 x N declared on stack.
60 #define BUS_DMA_CACHE_SEGMENTS 8
71 bus_dma_filter_t
*filter
;
79 bus_dma_segment_t
*segments
;
80 struct bounce_zone
*bounce_zone
;
85 * bus_dma_tag private flags
87 #define BUS_DMA_BOUNCE_ALIGN BUS_DMA_BUS2
88 #define BUS_DMA_BOUNCE_LOWADDR BUS_DMA_BUS3
89 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
91 #define BUS_DMA_COULD_BOUNCE (BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)
93 #define BUS_DMAMEM_KMALLOC(dmat) \
94 ((dmat)->maxsize <= PAGE_SIZE && \
95 (dmat)->alignment <= PAGE_SIZE && \
96 (dmat)->lowaddr >= ptoa(Maxmem))
99 vm_offset_t vaddr
; /* kva of bounce buffer */
100 bus_addr_t busaddr
; /* Physical address */
101 vm_offset_t datavaddr
; /* kva of client data */
102 bus_size_t datacount
; /* client data count */
103 STAILQ_ENTRY(bounce_page
) links
;
107 STAILQ_ENTRY(bounce_zone
) links
;
108 STAILQ_HEAD(bp_list
, bounce_page
) bounce_page_list
;
109 STAILQ_HEAD(, bus_dmamap
) bounce_map_waitinglist
;
110 struct spinlock spin
;
118 bus_size_t alignment
;
122 struct sysctl_ctx_list sysctl_ctx
;
123 struct sysctl_oid
*sysctl_tree
;
126 #define BZ_LOCK(bz) spin_lock(&(bz)->spin)
127 #define BZ_UNLOCK(bz) spin_unlock(&(bz)->spin)
129 static struct lwkt_token bounce_zone_tok
=
130 LWKT_TOKEN_INITIALIZER(bounce_zone_token
);
131 static int busdma_zonecount
;
132 static STAILQ_HEAD(, bounce_zone
) bounce_zone_list
=
133 STAILQ_HEAD_INITIALIZER(bounce_zone_list
);
135 static int busdma_priv_zonecount
= -1;
137 int busdma_swi_pending
;
138 static int total_bounce_pages
;
139 static int max_bounce_pages
= MAX_BPAGES
;
140 static int bounce_alignment
= 1; /* XXX temporary */
142 TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages
);
143 TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment
);
146 struct bp_list bpages
;
150 void *buf
; /* unmapped buffer pointer */
151 bus_size_t buflen
; /* unmapped buffer length */
152 bus_dmamap_callback_t
*callback
;
154 STAILQ_ENTRY(bus_dmamap
) links
;
157 static STAILQ_HEAD(, bus_dmamap
) bounce_map_callbacklist
=
158 STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist
);
159 static struct spinlock bounce_map_list_spin
=
160 SPINLOCK_INITIALIZER(&bounce_map_list_spin
, "bounce_map_list_spin");
162 static struct bus_dmamap nobounce_dmamap
;
164 static int alloc_bounce_zone(bus_dma_tag_t
);
165 static int alloc_bounce_pages(bus_dma_tag_t
, u_int
, int);
166 static void free_bounce_pages_all(bus_dma_tag_t
);
167 static void free_bounce_zone(bus_dma_tag_t
);
168 static int reserve_bounce_pages(bus_dma_tag_t
, bus_dmamap_t
, int);
169 static void return_bounce_pages(bus_dma_tag_t
, bus_dmamap_t
);
170 static bus_addr_t
add_bounce_page(bus_dma_tag_t
, bus_dmamap_t
,
171 vm_offset_t
, bus_size_t
*);
172 static void free_bounce_page(bus_dma_tag_t
, struct bounce_page
*);
174 static bus_dmamap_t
get_map_waiting(bus_dma_tag_t
);
175 static void add_map_callback(bus_dmamap_t
);
177 SYSCTL_NODE(_hw
, OID_AUTO
, busdma
, CTLFLAG_RD
, 0, "Busdma parameters");
178 SYSCTL_INT(_hw_busdma
, OID_AUTO
, total_bpages
, CTLFLAG_RD
, &total_bounce_pages
,
179 0, "Total bounce pages");
180 SYSCTL_INT(_hw_busdma
, OID_AUTO
, max_bpages
, CTLFLAG_RD
, &max_bounce_pages
,
181 0, "Max bounce pages per bounce zone");
182 SYSCTL_INT(_hw_busdma
, OID_AUTO
, bounce_alignment
, CTLFLAG_RD
,
183 &bounce_alignment
, 0, "Obey alignment constraint");
186 run_filter(bus_dma_tag_t dmat
, bus_addr_t paddr
)
192 if (((paddr
> dmat
->lowaddr
&& paddr
<= dmat
->highaddr
) ||
193 (bounce_alignment
&& (paddr
& (dmat
->alignment
- 1)) != 0))
194 && (dmat
->filter
== NULL
||
195 dmat
->filter(dmat
->filterarg
, paddr
) != 0))
199 } while (retval
== 0 && dmat
!= NULL
);
205 bus_dma_tag_lock(bus_dma_tag_t tag
, bus_dma_segment_t
*cache
)
207 if (tag
->flags
& BUS_DMA_PROTECTED
)
208 return(tag
->segments
);
210 if (tag
->nsegments
<= BUS_DMA_CACHE_SEGMENTS
)
212 spin_lock(&tag
->spin
);
213 return(tag
->segments
);
218 bus_dma_tag_unlock(bus_dma_tag_t tag
)
220 if (tag
->flags
& BUS_DMA_PROTECTED
)
223 if (tag
->nsegments
> BUS_DMA_CACHE_SEGMENTS
)
224 spin_unlock(&tag
->spin
);
228 * Allocate a device specific dma_tag.
231 bus_dma_tag_create(bus_dma_tag_t parent
, bus_size_t alignment
,
232 bus_size_t boundary
, bus_addr_t lowaddr
,
233 bus_addr_t highaddr
, bus_dma_filter_t
*filter
,
234 void *filterarg
, bus_size_t maxsize
, int nsegments
,
235 bus_size_t maxsegsz
, int flags
, bus_dma_tag_t
*dmat
)
237 bus_dma_tag_t newtag
;
246 if (alignment
& (alignment
- 1))
247 panic("alignment must be power of 2");
250 if (boundary
& (boundary
- 1))
251 panic("boundary must be power of 2");
252 if (boundary
< maxsegsz
) {
253 kprintf("boundary < maxsegsz:\n");
259 /* Return a NULL tag on failure */
262 newtag
= kmalloc(sizeof(*newtag
), M_DEVBUF
, M_INTWAIT
| M_ZERO
);
264 spin_init(&newtag
->spin
, "busdmacreate");
265 newtag
->parent
= parent
;
266 newtag
->alignment
= alignment
;
267 newtag
->boundary
= boundary
;
268 newtag
->lowaddr
= trunc_page((vm_paddr_t
)lowaddr
) + (PAGE_SIZE
- 1);
269 newtag
->highaddr
= trunc_page((vm_paddr_t
)highaddr
) + (PAGE_SIZE
- 1);
270 newtag
->filter
= filter
;
271 newtag
->filterarg
= filterarg
;
272 newtag
->maxsize
= maxsize
;
273 newtag
->nsegments
= nsegments
;
274 newtag
->maxsegsz
= maxsegsz
;
275 newtag
->flags
= flags
;
276 newtag
->ref_count
= 1; /* Count ourself */
277 newtag
->map_count
= 0;
278 newtag
->segments
= NULL
;
279 newtag
->bounce_zone
= NULL
;
281 /* Take into account any restrictions imposed by our parent tag */
282 if (parent
!= NULL
) {
283 newtag
->lowaddr
= MIN(parent
->lowaddr
, newtag
->lowaddr
);
284 newtag
->highaddr
= MAX(parent
->highaddr
, newtag
->highaddr
);
286 if (newtag
->boundary
== 0) {
287 newtag
->boundary
= parent
->boundary
;
288 } else if (parent
->boundary
!= 0) {
289 newtag
->boundary
= MIN(parent
->boundary
,
294 newtag
->alignment
= MAX(parent
->alignment
, newtag
->alignment
);
297 if (newtag
->filter
== NULL
) {
299 * Short circuit looking at our parent directly
300 * since we have encapsulated all of its information
302 newtag
->filter
= parent
->filter
;
303 newtag
->filterarg
= parent
->filterarg
;
304 newtag
->parent
= parent
->parent
;
306 if (newtag
->parent
!= NULL
)
310 if (newtag
->lowaddr
< ptoa(Maxmem
))
311 newtag
->flags
|= BUS_DMA_BOUNCE_LOWADDR
;
312 if (bounce_alignment
&& newtag
->alignment
> 1 &&
313 !(newtag
->flags
& BUS_DMA_ALIGNED
))
314 newtag
->flags
|= BUS_DMA_BOUNCE_ALIGN
;
316 if ((newtag
->flags
& BUS_DMA_COULD_BOUNCE
) &&
317 (flags
& BUS_DMA_ALLOCNOW
) != 0) {
318 struct bounce_zone
*bz
;
322 error
= alloc_bounce_zone(newtag
);
325 bz
= newtag
->bounce_zone
;
327 if ((newtag
->flags
& BUS_DMA_ALLOCALL
) == 0 &&
328 ptoa(bz
->total_bpages
) < maxsize
) {
331 if (flags
& BUS_DMA_ONEBPAGE
) {
334 pages
= atop(round_page(maxsize
)) -
336 pages
= MAX(pages
, 1);
339 /* Add pages to our bounce pool */
340 if (alloc_bounce_pages(newtag
, pages
, flags
) < pages
)
343 /* Performed initial allocation */
344 newtag
->flags
|= BUS_DMA_MIN_ALLOC_COMP
;
349 free_bounce_zone(newtag
);
350 kfree(newtag
, M_DEVBUF
);
358 bus_dma_tag_destroy(bus_dma_tag_t dmat
)
361 if (dmat
->map_count
!= 0)
364 while (dmat
!= NULL
) {
365 bus_dma_tag_t parent
;
367 parent
= dmat
->parent
;
369 if (dmat
->ref_count
== 0) {
370 free_bounce_zone(dmat
);
371 if (dmat
->segments
!= NULL
)
372 kfree(dmat
->segments
, M_DEVBUF
);
373 kfree(dmat
, M_DEVBUF
);
375 * Last reference count, so
376 * release our reference
377 * count on our parent.
388 bus_dma_tag_getmaxsize(bus_dma_tag_t tag
)
390 return(tag
->maxsize
);
394 * Allocate a handle for mapping from kva/uva/physical
395 * address space into bus device space.
398 bus_dmamap_create(bus_dma_tag_t dmat
, int flags
, bus_dmamap_t
*mapp
)
404 if (dmat
->segments
== NULL
) {
405 KKASSERT(dmat
->nsegments
&& dmat
->nsegments
< 16384);
406 dmat
->segments
= kmalloc(sizeof(bus_dma_segment_t
) *
407 dmat
->nsegments
, M_DEVBUF
, M_INTWAIT
);
410 if (dmat
->flags
& BUS_DMA_COULD_BOUNCE
) {
411 struct bounce_zone
*bz
;
416 if (dmat
->bounce_zone
== NULL
) {
417 error
= alloc_bounce_zone(dmat
);
421 bz
= dmat
->bounce_zone
;
423 *mapp
= kmalloc(sizeof(**mapp
), M_DEVBUF
, M_INTWAIT
| M_ZERO
);
425 /* Initialize the new map */
426 STAILQ_INIT(&((*mapp
)->bpages
));
429 * Attempt to add pages to our pool on a per-instance
430 * basis up to a sane limit.
432 if (dmat
->flags
& BUS_DMA_ALLOCALL
) {
433 maxpages
= Maxmem
- atop(dmat
->lowaddr
);
434 } else if (dmat
->flags
& BUS_DMA_BOUNCE_ALIGN
) {
435 maxpages
= max_bounce_pages
;
437 maxpages
= MIN(max_bounce_pages
,
438 Maxmem
- atop(dmat
->lowaddr
));
440 if ((dmat
->flags
& BUS_DMA_MIN_ALLOC_COMP
) == 0 ||
441 (dmat
->map_count
> 0 && bz
->total_bpages
< maxpages
)) {
444 if (flags
& BUS_DMA_ONEBPAGE
) {
447 pages
= atop(round_page(dmat
->maxsize
));
448 pages
= MIN(maxpages
- bz
->total_bpages
, pages
);
449 pages
= MAX(pages
, 1);
451 if (alloc_bounce_pages(dmat
, pages
, flags
) < pages
)
454 if ((dmat
->flags
& BUS_DMA_MIN_ALLOC_COMP
) == 0) {
456 (dmat
->flags
& BUS_DMA_ALLOCALL
) == 0)
457 dmat
->flags
|= BUS_DMA_MIN_ALLOC_COMP
;
468 kfree(*mapp
, M_DEVBUF
);
475 * Destroy a handle for mapping from kva/uva/physical
476 * address space into bus device space.
479 bus_dmamap_destroy(bus_dma_tag_t dmat
, bus_dmamap_t map
)
482 if (STAILQ_FIRST(&map
->bpages
) != NULL
)
484 kfree(map
, M_DEVBUF
);
490 static __inline bus_size_t
491 check_kmalloc(bus_dma_tag_t dmat
, const void *vaddr0
, int verify
)
493 bus_size_t maxsize
= 0;
494 uintptr_t vaddr
= (uintptr_t)vaddr0
;
496 if ((vaddr
^ (vaddr
+ dmat
->maxsize
- 1)) & ~PAGE_MASK
) {
498 panic("boundary check failed\n");
500 kprintf("boundary check failed\n");
501 maxsize
= dmat
->maxsize
;
503 if (vaddr
& (dmat
->alignment
- 1)) {
505 panic("alignment check failed\n");
507 kprintf("alignment check failed\n");
508 if (dmat
->maxsize
< dmat
->alignment
)
509 maxsize
= dmat
->alignment
;
511 maxsize
= dmat
->maxsize
;
517 * Allocate a piece of memory that can be efficiently mapped into
518 * bus device space based on the constraints lited in the dma tag.
520 * mapp is degenerate. By definition this allocation should not require
521 * bounce buffers so do not allocate a dma map.
524 bus_dmamem_alloc(bus_dma_tag_t dmat
, void **vaddr
, int flags
,
530 /* If we succeed, no mapping/bouncing will be required */
533 if (dmat
->segments
== NULL
) {
534 KKASSERT(dmat
->nsegments
< 16384);
535 dmat
->segments
= kmalloc(sizeof(bus_dma_segment_t
) *
536 dmat
->nsegments
, M_DEVBUF
, M_INTWAIT
);
539 if (flags
& BUS_DMA_NOWAIT
)
543 if (flags
& BUS_DMA_ZERO
)
545 if (flags
& BUS_DMA_NOCACHE
)
546 attr
= VM_MEMATTR_UNCACHEABLE
;
548 attr
= VM_MEMATTR_DEFAULT
;
550 /* XXX must alloc with correct mem attribute here */
551 if (BUS_DMAMEM_KMALLOC(dmat
)) {
554 *vaddr
= kmalloc(dmat
->maxsize
, M_DEVBUF
, mflags
);
558 * Check whether the allocation
559 * - crossed a page boundary
561 * Retry with power-of-2 alignment in the above cases.
563 maxsize
= check_kmalloc(dmat
, *vaddr
, 0);
565 kfree(*vaddr
, M_DEVBUF
);
566 *vaddr
= kmalloc(maxsize
, M_DEVBUF
,
567 mflags
| M_POWEROF2
);
568 check_kmalloc(dmat
, *vaddr
, 1);
572 * XXX Use Contigmalloc until it is merged into this facility
573 * and handles multi-seg allocations. Nobody is doing
574 * multi-seg allocations yet though.
576 *vaddr
= contigmalloc(dmat
->maxsize
, M_DEVBUF
, mflags
,
577 0ul, dmat
->lowaddr
, dmat
->alignment
, dmat
->boundary
);
582 if (attr
!= VM_MEMATTR_DEFAULT
)
583 pmap_change_attr((vm_offset_t
)(*vaddr
), dmat
->maxsize
/ PAGE_SIZE
, attr
);
588 * Free a piece of memory and it's allociated dmamap, that was allocated
589 * via bus_dmamem_alloc. Make the same choice for free/contigfree.
592 bus_dmamem_free(bus_dma_tag_t dmat
, void *vaddr
, bus_dmamap_t map
)
595 * dmamem does not need to be bounced, so the map should be
599 panic("bus_dmamem_free: Invalid map freed");
600 if (BUS_DMAMEM_KMALLOC(dmat
))
601 kfree(vaddr
, M_DEVBUF
);
603 contigfree(vaddr
, dmat
->maxsize
, M_DEVBUF
);
606 static __inline vm_paddr_t
607 _bus_dma_extract(pmap_t pmap
, vm_offset_t vaddr
)
610 return pmap_extract(pmap
, vaddr
, NULL
);
612 return pmap_kextract(vaddr
);
616 * Utility function to load a linear buffer. lastaddrp holds state
617 * between invocations (for multiple-buffer loads). segp contains
618 * the segment following the starting one on entrace, and the ending
619 * segment on exit. first indicates if this is the first invocation
623 _bus_dmamap_load_buffer(bus_dma_tag_t dmat
,
625 void *buf
, bus_size_t buflen
,
626 bus_dma_segment_t
*segments
,
630 vm_paddr_t
*lastpaddrp
,
635 vm_paddr_t paddr
, nextpaddr
;
636 bus_dma_segment_t
*sg
;
641 map
= &nobounce_dmamap
;
644 if (dmat
->flags
& BUS_DMA_ALIGNED
)
645 KKASSERT(((uintptr_t)buf
& (dmat
->alignment
- 1)) == 0);
649 * If we are being called during a callback, pagesneeded will
650 * be non-zero, so we can avoid doing the work twice.
652 if ((dmat
->flags
& BUS_DMA_COULD_BOUNCE
) &&
653 map
!= &nobounce_dmamap
&& map
->pagesneeded
== 0) {
654 vm_offset_t vendaddr
;
657 * Count the number of bounce pages
658 * needed in order to complete this transfer
660 vaddr
= (vm_offset_t
)buf
;
661 vendaddr
= (vm_offset_t
)buf
+ buflen
;
663 while (vaddr
< vendaddr
) {
664 paddr
= _bus_dma_extract(pmap
, vaddr
);
665 if (run_filter(dmat
, paddr
) != 0)
667 vaddr
+= (PAGE_SIZE
- (vaddr
& PAGE_MASK
));
671 /* Reserve Necessary Bounce Pages */
672 if (map
->pagesneeded
!= 0) {
673 struct bounce_zone
*bz
;
675 bz
= dmat
->bounce_zone
;
677 if (flags
& BUS_DMA_NOWAIT
) {
678 if (reserve_bounce_pages(dmat
, map
, 0) != 0) {
684 if (reserve_bounce_pages(dmat
, map
, 1) != 0) {
685 /* Queue us for resources */
688 map
->buflen
= buflen
;
691 &dmat
->bounce_zone
->bounce_map_waitinglist
,
695 return (EINPROGRESS
);
701 KKASSERT(*segp
>= 1 && *segp
<= nsegments
);
703 sg
= &segments
[seg
- 1];
705 vaddr
= (vm_offset_t
)buf
;
706 nextpaddr
= *lastpaddrp
;
707 bmask
= ~(dmat
->boundary
- 1); /* note: will be 0 if boundary is 0 */
709 /* force at least one segment */
716 paddr
= _bus_dma_extract(pmap
, vaddr
);
717 size
= PAGE_SIZE
- (paddr
& PAGE_MASK
);
720 if (map
->pagesneeded
!= 0 && run_filter(dmat
, paddr
)) {
722 * NOTE: paddr may have different in-page offset,
723 * unless BUS_DMA_KEEP_PG_OFFSET is set.
725 paddr
= add_bounce_page(dmat
, map
, vaddr
, &size
);
729 * Fill in the bus_dma_segment
735 } else if (paddr
== nextpaddr
) {
745 nextpaddr
= paddr
+ size
;
748 * Handle maxsegsz and boundary issues with a nested loop
754 * Limit to the boundary and maximum segment size
756 if (((nextpaddr
- 1) ^ sg
->ds_addr
) & bmask
) {
757 tmpsize
= dmat
->boundary
-
758 (sg
->ds_addr
& ~bmask
);
759 if (tmpsize
> dmat
->maxsegsz
)
760 tmpsize
= dmat
->maxsegsz
;
761 KKASSERT(tmpsize
< sg
->ds_len
);
762 } else if (sg
->ds_len
> dmat
->maxsegsz
) {
763 tmpsize
= dmat
->maxsegsz
;
769 * Futz, split the data into a new segment.
771 if (seg
>= nsegments
)
773 sg
[1].ds_len
= sg
[0].ds_len
- tmpsize
;
774 sg
[1].ds_addr
= sg
[0].ds_addr
+ tmpsize
;
775 sg
[0].ds_len
= tmpsize
;
785 } while (buflen
> 0);
791 *lastpaddrp
= nextpaddr
;
794 if (error
&& (dmat
->flags
& BUS_DMA_COULD_BOUNCE
) &&
795 map
!= &nobounce_dmamap
) {
796 _bus_dmamap_unload(dmat
, map
);
797 return_bounce_pages(dmat
, map
);
803 * Map the buffer buf into bus space using the dmamap map.
806 bus_dmamap_load(bus_dma_tag_t dmat
, bus_dmamap_t map
, void *buf
,
807 bus_size_t buflen
, bus_dmamap_callback_t
*callback
,
808 void *callback_arg
, int flags
)
810 bus_dma_segment_t cache_segments
[BUS_DMA_CACHE_SEGMENTS
];
811 bus_dma_segment_t
*segments
;
812 vm_paddr_t lastaddr
= 0;
813 int error
, nsegs
= 1;
818 * Follow old semantics. Once all of the callers are fixed,
819 * we should get rid of these internal flag "adjustment".
821 flags
&= ~BUS_DMA_NOWAIT
;
822 flags
|= BUS_DMA_WAITOK
;
824 map
->callback
= callback
;
825 map
->callback_arg
= callback_arg
;
828 segments
= bus_dma_tag_lock(dmat
, cache_segments
);
829 error
= _bus_dmamap_load_buffer(dmat
, map
, buf
, buflen
,
830 segments
, dmat
->nsegments
,
831 NULL
, flags
, &lastaddr
, &nsegs
, 1);
832 if (error
== EINPROGRESS
) {
833 KKASSERT((dmat
->flags
&
834 (BUS_DMA_PRIVBZONE
| BUS_DMA_ALLOCALL
)) !=
835 (BUS_DMA_PRIVBZONE
| BUS_DMA_ALLOCALL
));
837 if (dmat
->flags
& BUS_DMA_PROTECTED
)
838 panic("protected dmamap callback will be defered");
840 bus_dma_tag_unlock(dmat
);
843 callback(callback_arg
, segments
, nsegs
, error
);
844 bus_dma_tag_unlock(dmat
);
849 bus_dmamap_load_ccb(bus_dma_tag_t dmat
, bus_dmamap_t map
, union ccb
*ccb
,
850 bus_dmamap_callback_t
*callback
, void *callback_arg
, int flags
)
852 const struct ccb_scsiio
*csio
;
854 KASSERT(ccb
->ccb_h
.func_code
== XPT_SCSI_IO
||
855 ccb
->ccb_h
.func_code
== XPT_CONT_TARGET_IO
,
856 ("invalid ccb func_code %u", ccb
->ccb_h
.func_code
));
859 return (bus_dmamap_load(dmat
, map
, csio
->data_ptr
, csio
->dxfer_len
,
860 callback
, callback_arg
, flags
));
864 * Like _bus_dmamap_load(), but for mbufs.
867 bus_dmamap_load_mbuf(bus_dma_tag_t dmat
, bus_dmamap_t map
,
869 bus_dmamap_callback2_t
*callback
, void *callback_arg
,
872 bus_dma_segment_t cache_segments
[BUS_DMA_CACHE_SEGMENTS
];
873 bus_dma_segment_t
*segments
;
878 * Follow old semantics. Once all of the callers are fixed,
879 * we should get rid of these internal flag "adjustment".
881 flags
&= ~BUS_DMA_WAITOK
;
882 flags
|= BUS_DMA_NOWAIT
;
884 segments
= bus_dma_tag_lock(dmat
, cache_segments
);
885 error
= bus_dmamap_load_mbuf_segment(dmat
, map
, m0
,
886 segments
, dmat
->nsegments
, &nsegs
, flags
);
888 /* force "no valid mappings" in callback */
889 callback(callback_arg
, segments
, 0,
892 callback(callback_arg
, segments
, nsegs
,
893 m0
->m_pkthdr
.len
, error
);
895 bus_dma_tag_unlock(dmat
);
900 bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat
, bus_dmamap_t map
,
902 bus_dma_segment_t
*segs
, int maxsegs
,
903 int *nsegs
, int flags
)
909 KASSERT(maxsegs
>= 1, ("invalid maxsegs %d", maxsegs
));
910 KASSERT(maxsegs
<= dmat
->nsegments
,
911 ("%d too many segments, dmat only supports %d segments",
912 maxsegs
, dmat
->nsegments
));
913 KASSERT(flags
& BUS_DMA_NOWAIT
,
914 ("only BUS_DMA_NOWAIT is supported"));
916 if (m0
->m_pkthdr
.len
<= dmat
->maxsize
) {
918 vm_paddr_t lastaddr
= 0;
923 for (m
= m0
; m
!= NULL
&& error
== 0; m
= m
->m_next
) {
927 error
= _bus_dmamap_load_buffer(dmat
, map
,
930 NULL
, flags
, &lastaddr
,
932 if (error
== ENOMEM
&& !first
) {
934 * Out of bounce pages due to too many
935 * fragments in the mbuf chain; return
945 KKASSERT(*nsegs
<= maxsegs
&& *nsegs
>= 1);
951 KKASSERT(error
!= EINPROGRESS
);
956 * Like _bus_dmamap_load(), but for uios.
959 bus_dmamap_load_uio(bus_dma_tag_t dmat
, bus_dmamap_t map
,
961 bus_dmamap_callback2_t
*callback
, void *callback_arg
,
965 int nsegs
, error
, first
, i
;
969 bus_dma_segment_t cache_segments
[BUS_DMA_CACHE_SEGMENTS
];
970 bus_dma_segment_t
*segments
;
971 bus_dma_segment_t
*segs
;
974 if (dmat
->nsegments
<= BUS_DMA_CACHE_SEGMENTS
)
975 segments
= cache_segments
;
977 segments
= kmalloc(sizeof(bus_dma_segment_t
) * dmat
->nsegments
,
978 M_DEVBUF
, M_WAITOK
| M_ZERO
);
982 * Follow old semantics. Once all of the callers are fixed,
983 * we should get rid of these internal flag "adjustment".
985 flags
&= ~BUS_DMA_WAITOK
;
986 flags
|= BUS_DMA_NOWAIT
;
988 resid
= (bus_size_t
)uio
->uio_resid
;
992 nsegs_left
= dmat
->nsegments
;
994 if (uio
->uio_segflg
== UIO_USERSPACE
) {
998 KASSERT(td
!= NULL
&& td
->td_proc
!= NULL
,
999 ("bus_dmamap_load_uio: USERSPACE but no proc"));
1000 pmap
= vmspace_pmap(td
->td_proc
->p_vmspace
);
1009 for (i
= 0; i
< uio
->uio_iovcnt
&& resid
!= 0 && !error
; i
++) {
1011 * Now at the first iovec to load. Load each iovec
1012 * until we have exhausted the residual count.
1015 resid
< iov
[i
].iov_len
? resid
: iov
[i
].iov_len
;
1016 caddr_t addr
= (caddr_t
) iov
[i
].iov_base
;
1018 error
= _bus_dmamap_load_buffer(dmat
, map
, addr
, minlen
,
1020 pmap
, flags
, &lastaddr
, &nsegs
, first
);
1025 nsegs_left
-= nsegs
;
1031 * Minimum one DMA segment, even if 0-length buffer.
1033 if (nsegs_left
== dmat
->nsegments
)
1037 /* force "no valid mappings" in callback */
1038 callback(callback_arg
, segments
, 0,
1041 callback(callback_arg
, segments
, dmat
->nsegments
- nsegs_left
,
1042 (bus_size_t
)uio
->uio_resid
, error
);
1044 if (dmat
->nsegments
> BUS_DMA_CACHE_SEGMENTS
)
1045 kfree(segments
, M_DEVBUF
);
1050 * Release the mapping held by map.
1053 _bus_dmamap_unload(bus_dma_tag_t dmat
, bus_dmamap_t map
)
1055 struct bounce_page
*bpage
;
1057 while ((bpage
= STAILQ_FIRST(&map
->bpages
)) != NULL
) {
1058 STAILQ_REMOVE_HEAD(&map
->bpages
, links
);
1059 free_bounce_page(dmat
, bpage
);
1064 _bus_dmamap_sync(bus_dma_tag_t dmat
, bus_dmamap_t map
, bus_dmasync_op_t op
)
1066 struct bounce_page
*bpage
;
1068 if ((bpage
= STAILQ_FIRST(&map
->bpages
)) != NULL
) {
1070 * Handle data bouncing. We might also
1071 * want to add support for invalidating
1072 * the caches on broken hardware
1074 if (op
& BUS_DMASYNC_PREWRITE
) {
1075 while (bpage
!= NULL
) {
1076 bcopy((void *)bpage
->datavaddr
,
1077 (void *)bpage
->vaddr
,
1079 bpage
= STAILQ_NEXT(bpage
, links
);
1082 dmat
->bounce_zone
->total_bounced
++;
1084 if (op
& BUS_DMASYNC_POSTREAD
) {
1086 while (bpage
!= NULL
) {
1087 bcopy((void *)bpage
->vaddr
,
1088 (void *)bpage
->datavaddr
,
1090 bpage
= STAILQ_NEXT(bpage
, links
);
1092 dmat
->bounce_zone
->total_bounced
++;
1094 /* BUS_DMASYNC_PREREAD - no operation on intel */
1095 /* BUS_DMASYNC_POSTWRITE - no operation on intel */
1100 alloc_bounce_zone(bus_dma_tag_t dmat
)
1102 struct bounce_zone
*bz
, *new_bz
;
1104 KASSERT(dmat
->bounce_zone
== NULL
,
1105 ("bounce zone was already assigned"));
1107 new_bz
= kmalloc(sizeof(*new_bz
), M_DEVBUF
, M_INTWAIT
| M_ZERO
);
1109 lwkt_gettoken(&bounce_zone_tok
);
1111 if ((dmat
->flags
& BUS_DMA_PRIVBZONE
) == 0) {
1113 * For shared bounce zone, check to see
1114 * if we already have a suitable zone
1116 STAILQ_FOREACH(bz
, &bounce_zone_list
, links
) {
1117 if (dmat
->alignment
<= bz
->alignment
&&
1118 dmat
->lowaddr
>= bz
->lowaddr
) {
1119 lwkt_reltoken(&bounce_zone_tok
);
1121 dmat
->bounce_zone
= bz
;
1122 kfree(new_bz
, M_DEVBUF
);
1129 spin_init(&bz
->spin
, "allocbouncezone");
1130 STAILQ_INIT(&bz
->bounce_page_list
);
1131 STAILQ_INIT(&bz
->bounce_map_waitinglist
);
1132 bz
->free_bpages
= 0;
1133 bz
->reserved_bpages
= 0;
1134 bz
->active_bpages
= 0;
1135 bz
->lowaddr
= dmat
->lowaddr
;
1136 bz
->alignment
= round_page(dmat
->alignment
);
1137 ksnprintf(bz
->lowaddrid
, 18, "%#jx", (uintmax_t)bz
->lowaddr
);
1139 if ((dmat
->flags
& BUS_DMA_PRIVBZONE
) == 0) {
1140 ksnprintf(bz
->zoneid
, 8, "zone%d", busdma_zonecount
);
1142 STAILQ_INSERT_TAIL(&bounce_zone_list
, bz
, links
);
1144 ksnprintf(bz
->zoneid
, 8, "zone%d", busdma_priv_zonecount
);
1145 busdma_priv_zonecount
--;
1148 lwkt_reltoken(&bounce_zone_tok
);
1150 dmat
->bounce_zone
= bz
;
1152 sysctl_ctx_init(&bz
->sysctl_ctx
);
1153 bz
->sysctl_tree
= SYSCTL_ADD_NODE(&bz
->sysctl_ctx
,
1154 SYSCTL_STATIC_CHILDREN(_hw_busdma
), OID_AUTO
, bz
->zoneid
,
1156 if (bz
->sysctl_tree
== NULL
) {
1157 sysctl_ctx_free(&bz
->sysctl_ctx
);
1158 return 0; /* XXX error code? */
1161 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1162 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1163 "total_bpages", CTLFLAG_RD
, &bz
->total_bpages
, 0,
1164 "Total bounce pages");
1165 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1166 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1167 "free_bpages", CTLFLAG_RD
, &bz
->free_bpages
, 0,
1168 "Free bounce pages");
1169 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1170 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1171 "reserved_bpages", CTLFLAG_RD
, &bz
->reserved_bpages
, 0,
1172 "Reserved bounce pages");
1173 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1174 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1175 "active_bpages", CTLFLAG_RD
, &bz
->active_bpages
, 0,
1176 "Active bounce pages");
1177 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1178 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1179 "total_bounced", CTLFLAG_RD
, &bz
->total_bounced
, 0,
1180 "Total bounce requests");
1181 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1182 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1183 "total_deferred", CTLFLAG_RD
, &bz
->total_deferred
, 0,
1184 "Total bounce requests that were deferred");
1185 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1186 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1187 "reserve_failed", CTLFLAG_RD
, &bz
->reserve_failed
, 0,
1188 "Total bounce page reservations that were failed");
1189 SYSCTL_ADD_STRING(&bz
->sysctl_ctx
,
1190 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1191 "lowaddr", CTLFLAG_RD
, bz
->lowaddrid
, 0, "");
1192 SYSCTL_ADD_INT(&bz
->sysctl_ctx
,
1193 SYSCTL_CHILDREN(bz
->sysctl_tree
), OID_AUTO
,
1194 "alignment", CTLFLAG_RD
, &bz
->alignment
, 0, "");
1200 alloc_bounce_pages(bus_dma_tag_t dmat
, u_int numpages
, int flags
)
1202 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1203 int count
= 0, mflags
;
1205 if (flags
& BUS_DMA_NOWAIT
)
1210 while (numpages
> 0) {
1211 struct bounce_page
*bpage
;
1213 bpage
= kmalloc(sizeof(*bpage
), M_DEVBUF
, M_INTWAIT
| M_ZERO
);
1215 bpage
->vaddr
= (vm_offset_t
)contigmalloc(PAGE_SIZE
, M_DEVBUF
,
1219 if (bpage
->vaddr
== 0) {
1220 kfree(bpage
, M_DEVBUF
);
1223 bpage
->busaddr
= pmap_kextract(bpage
->vaddr
);
1226 STAILQ_INSERT_TAIL(&bz
->bounce_page_list
, bpage
, links
);
1227 total_bounce_pages
++;
1239 free_bounce_pages_all(bus_dma_tag_t dmat
)
1241 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1242 struct bounce_page
*bpage
;
1246 while ((bpage
= STAILQ_FIRST(&bz
->bounce_page_list
)) != NULL
) {
1247 STAILQ_REMOVE_HEAD(&bz
->bounce_page_list
, links
);
1249 KKASSERT(total_bounce_pages
> 0);
1250 total_bounce_pages
--;
1252 KKASSERT(bz
->total_bpages
> 0);
1255 KKASSERT(bz
->free_bpages
> 0);
1259 contigfree((void *)bpage
->vaddr
, PAGE_SIZE
, M_DEVBUF
);
1260 kfree(bpage
, M_DEVBUF
);
1263 if (bz
->total_bpages
) {
1264 kprintf("#%d bounce pages are still in use\n",
1266 print_backtrace(-1);
1273 free_bounce_zone(bus_dma_tag_t dmat
)
1275 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1280 if ((dmat
->flags
& BUS_DMA_PRIVBZONE
) == 0)
1283 free_bounce_pages_all(dmat
);
1284 dmat
->bounce_zone
= NULL
;
1286 if (bz
->sysctl_tree
!= NULL
)
1287 sysctl_ctx_free(&bz
->sysctl_ctx
);
1288 kfree(bz
, M_DEVBUF
);
1291 /* Assume caller holds bounce zone spinlock */
1293 reserve_bounce_pages(bus_dma_tag_t dmat
, bus_dmamap_t map
, int commit
)
1295 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1298 pages
= MIN(bz
->free_bpages
, map
->pagesneeded
- map
->pagesreserved
);
1299 if (!commit
&& map
->pagesneeded
> (map
->pagesreserved
+ pages
)) {
1300 bz
->reserve_failed
++;
1301 return (map
->pagesneeded
- (map
->pagesreserved
+ pages
));
1304 bz
->free_bpages
-= pages
;
1306 bz
->reserved_bpages
+= pages
;
1307 KKASSERT(bz
->reserved_bpages
<= bz
->total_bpages
);
1309 map
->pagesreserved
+= pages
;
1310 pages
= map
->pagesneeded
- map
->pagesreserved
;
1316 return_bounce_pages(bus_dma_tag_t dmat
, bus_dmamap_t map
)
1318 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1319 int reserved
= map
->pagesreserved
;
1320 bus_dmamap_t wait_map
;
1322 map
->pagesreserved
= 0;
1323 map
->pagesneeded
= 0;
1330 bz
->free_bpages
+= reserved
;
1331 KKASSERT(bz
->free_bpages
<= bz
->total_bpages
);
1333 KKASSERT(bz
->reserved_bpages
>= reserved
);
1334 bz
->reserved_bpages
-= reserved
;
1336 wait_map
= get_map_waiting(dmat
);
1340 if (wait_map
!= NULL
)
1341 add_map_callback(map
);
1345 add_bounce_page(bus_dma_tag_t dmat
, bus_dmamap_t map
, vm_offset_t vaddr
,
1348 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1349 struct bounce_page
*bpage
;
1352 KASSERT(map
->pagesneeded
> 0, ("map doesn't need any pages"));
1355 KASSERT(map
->pagesreserved
> 0, ("map doesn't reserve any pages"));
1356 map
->pagesreserved
--;
1360 bpage
= STAILQ_FIRST(&bz
->bounce_page_list
);
1361 KASSERT(bpage
!= NULL
, ("free page list is empty"));
1362 STAILQ_REMOVE_HEAD(&bz
->bounce_page_list
, links
);
1364 KKASSERT(bz
->reserved_bpages
> 0);
1365 bz
->reserved_bpages
--;
1367 bz
->active_bpages
++;
1368 KKASSERT(bz
->active_bpages
<= bz
->total_bpages
);
1372 if (dmat
->flags
& BUS_DMA_KEEP_PG_OFFSET
) {
1374 * Page offset needs to be preserved. No size adjustments
1377 bpage
->vaddr
|= vaddr
& PAGE_MASK
;
1378 bpage
->busaddr
|= vaddr
& PAGE_MASK
;
1382 * Realign to bounce page base address, reduce size if
1383 * necessary. Bounce pages are typically already
1386 size
= PAGE_SIZE
- (bpage
->busaddr
& PAGE_MASK
);
1387 if (size
< *sizep
) {
1394 bpage
->datavaddr
= vaddr
;
1395 bpage
->datacount
= size
;
1396 STAILQ_INSERT_TAIL(&map
->bpages
, bpage
, links
);
1397 return bpage
->busaddr
;
1401 free_bounce_page(bus_dma_tag_t dmat
, struct bounce_page
*bpage
)
1403 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1406 bpage
->datavaddr
= 0;
1407 bpage
->datacount
= 0;
1409 if (dmat
->flags
& BUS_DMA_KEEP_PG_OFFSET
) {
1411 * Reset the bounce page to start at offset 0. Other uses
1412 * of this bounce page may need to store a full page of
1413 * data and/or assume it starts on a page boundary.
1415 bpage
->vaddr
&= ~PAGE_MASK
;
1416 bpage
->busaddr
&= ~PAGE_MASK
;
1421 STAILQ_INSERT_HEAD(&bz
->bounce_page_list
, bpage
, links
);
1424 KKASSERT(bz
->free_bpages
<= bz
->total_bpages
);
1426 KKASSERT(bz
->active_bpages
> 0);
1427 bz
->active_bpages
--;
1429 map
= get_map_waiting(dmat
);
1434 add_map_callback(map
);
1437 /* Assume caller holds bounce zone spinlock */
1439 get_map_waiting(bus_dma_tag_t dmat
)
1441 struct bounce_zone
*bz
= dmat
->bounce_zone
;
1444 map
= STAILQ_FIRST(&bz
->bounce_map_waitinglist
);
1446 if (reserve_bounce_pages(map
->dmat
, map
, 1) == 0) {
1447 STAILQ_REMOVE_HEAD(&bz
->bounce_map_waitinglist
, links
);
1448 bz
->total_deferred
++;
1457 add_map_callback(bus_dmamap_t map
)
1459 spin_lock(&bounce_map_list_spin
);
1460 STAILQ_INSERT_TAIL(&bounce_map_callbacklist
, map
, links
);
1461 busdma_swi_pending
= 1;
1463 spin_unlock(&bounce_map_list_spin
);
1471 spin_lock(&bounce_map_list_spin
);
1472 while ((map
= STAILQ_FIRST(&bounce_map_callbacklist
)) != NULL
) {
1473 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist
, links
);
1474 spin_unlock(&bounce_map_list_spin
);
1475 bus_dmamap_load(map
->dmat
, map
, map
->buf
, map
->buflen
,
1476 map
->callback
, map
->callback_arg
, /*flags*/0);
1477 spin_lock(&bounce_map_list_spin
);
1479 spin_unlock(&bounce_map_list_spin
);