// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */
#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
#define OFFSET(val,align) ((unsigned long)	\
	( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
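
/*
 * For illustration (assuming the usual IO_TLB_SHIFT of 11, i.e. 2 KB slabs,
 * and a 4 KB PAGE_SIZE): SLABS_PER_PAGE works out to 1 << (12 - 11) = 2
 * slabs per page, and IO_TLB_MIN_SLABS to (1 << 20) >> 11 = 512 slabs,
 * matching the contiguous 1 MB mentioned above.
 */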
enum swiotlb_force swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * The number of used IO TLB block
 */
static unsigned long io_tlb_used;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
unsigned int max_segment;
/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force")) {
		swiotlb_force = SWIOTLB_FORCE;
	} else if (!strcmp(str, "noforce")) {
		swiotlb_force = SWIOTLB_NO_FORCE;
		io_tlb_nslabs = 1;
	}

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
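
/*
 * Example (illustrative): the parser above handles boot parameters such as
 * "swiotlb=65536,force", which requests 65536 slabs (128 MB with 2 KB slabs)
 * and forces bouncing of all DMA, while "swiotlb=noforce" disables bounce
 * buffering wherever possible.
 */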
static bool no_iotlb_memory;

unsigned long swiotlb_nr_tbl(void)
{
	return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
unsigned int swiotlb_max_segment(void)
{
	return unlikely(no_iotlb_memory) ? 0 : max_segment;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);
void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}
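
/*
 * With the 64 MB default and 2 KB slabs (assuming IO_TLB_SHIFT == 11), the
 * default pool works out to (64UL << 20) >> 11 = 32768 slabs.
 */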
void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	if (no_iotlb_memory) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
		(unsigned long long)io_tlb_start,
		(unsigned long long)io_tlb_end,
		bytes >> 20);
}
/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	void *vaddr;
	unsigned long bytes;

	if (no_iotlb_memory || late_alloc)
		return;

	vaddr = phys_to_virt(io_tlb_start);
	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}
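
/*
 * The decryption step above matters when memory encryption (e.g. AMD SME/SEV)
 * is active: the bounce buffer must be mapped unencrypted (shared) so that
 * devices can DMA into it.  swiotlb_late_init_with_tbl() below performs the
 * same set_memory_decrypted() call for the late-allocated case.
 */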
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	unsigned long i, bytes;
	size_t alloc_size;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int));
	io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!io_tlb_list)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t));
	io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!io_tlb_orig_addr)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	if (verbose)
		swiotlb_print_info();

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
	return 0;
}
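
/*
 * A sketch of the free-list encoding used above (assuming IO_TLB_SEGSIZE is
 * the usual 128): after initialization io_tlb_list reads 128, 127, ..., 2, 1,
 * repeating every segment, so each entry records how many free slabs remain
 * up to the next IO_TLB_SEGSIZE boundary.  The allocator therefore only has
 * to inspect one entry to know whether 'nslots' contiguous slabs start there.
 */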
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		memblock_free_early(io_tlb_start,
				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate buffer");
	no_iotlb_memory = true;
}
/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}
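
/*
 * The retry loop above backs off by one order at a time: if, say, a 64 MB
 * allocation (order 14 with 4 KB pages) cannot be satisfied, it retries with
 * 32 MB, 16 MB, ... until the pool would drop below IO_TLB_MIN_SLABS (1 MB).
 * A caller might use it roughly like this (illustrative sketch only):
 *
 *	if (swiotlb_late_init_with_default_size(64UL << 20))
 *		pr_warn("late swiotlb init failed\n");
 */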
static void swiotlb_cleanup(void)
{
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	max_segment = 0;
}
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	memset(tlb, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
				get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	swiotlb_print_info();

	late_alloc = 1;

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
							 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	swiotlb_cleanup();
	return -ENOMEM;
}
void __init swiotlb_exit(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		memblock_free_late(__pa(io_tlb_orig_addr),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		memblock_free_late(__pa(io_tlb_list),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		memblock_free_late(io_tlb_start,
				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	swiotlb_cleanup();
}
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}
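
/*
 * Note on the highmem path above: because a highmem page has no permanent
 * kernel mapping, the copy is done page by page under kmap_atomic() with
 * interrupts disabled, restarting at offset 0 for each following page.
 */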
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr,
				   size_t mapping_size,
				   size_t alloc_size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;
	unsigned long tmp_io_tlb_used;

	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (mem_encrypt_active())
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than or equal to a page, we limit the stride
	 * (and hence alignment) to a page size.
	 */
	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (alloc_size >= PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);

	if (unlikely(nslots > io_tlb_nslabs - io_tlb_used))
		goto not_found;

	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1;
			     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
			     io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	tmp_io_tlb_used = io_tlb_used;

	spin_unlock_irqrestore(&io_tlb_lock, flags);
	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
			 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
	return (phys_addr_t)DMA_MAPPING_ERROR;

found:
	io_tlb_used += nslots;
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index + i] = orig_addr + (i << IO_TLB_SHIFT);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);

	return tlb_addr;
}
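
/*
 * Worked example for the search above (assuming 2 KB slabs, i.e.
 * IO_TLB_SHIFT == 11, and a 4 KB PAGE_SIZE): an alloc_size of 5000 bytes
 * needs nslots = ALIGN(5000, 2048) / 2048 = 3 slabs, and because
 * alloc_size >= PAGE_SIZE the search stride (and hence the alignment of the
 * returned buffer) is 2 slabs, i.e. one page.
 */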
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t mapping_size, size_t alloc_size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr != INVALID_PHYS_ADDR &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);

		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--) {
			io_tlb_list[i] = ++count;
			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
		}
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1;
		     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
		     io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;

		io_tlb_used -= nslots;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
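
/*
 * Example of the merge logic above (assuming IO_TLB_SEGSIZE == 128): freeing
 * slots 10..12 while slot 13 already advertises 5 free slabs makes count
 * start at 5, so slots 12, 11 and 10 are rewritten to 6, 7 and 8, and any
 * contiguous free slots below index 10 in the same segment are then bumped
 * the same way.
 */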
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	if (orig_addr == INVALID_PHYS_ADDR)
		return;
	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
/*
 * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
 * to the device copy the data into it as well.
 */
bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);

	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
		dev_warn_ratelimited(dev,
			"Cannot do DMA to address %pa\n", phys);
		return false;
	}

	/* Oh well, have to allocate and map a bounce buffer. */
	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
			*phys, size, size, dir, attrs);
	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
		return false;

	/* Ensure that the address returned is DMA'ble */
	*dma_addr = __phys_to_dma(dev, *phys);
	if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return false;
	}

	return true;
}
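
/*
 * For reference, this helper is what the direct-mapping code falls back to
 * when a buffer is not addressable by the device; a caller would look
 * roughly like this (illustrative sketch, not the exact dma-direct code):
 *
 *	if (unlikely(swiotlb_force == SWIOTLB_FORCE) ||
 *	    !dma_capable(dev, dma_addr, size, true)) {
 *		if (!swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs))
 *			return DMA_MAPPING_ERROR;
 *	}
 */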
size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
}
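
/*
 * With 2 KB slabs (IO_TLB_SHIFT == 11) and IO_TLB_SEGSIZE == 128 this caps a
 * single mapping at 256 KB, which upper layers can query through
 * dma_max_mapping_size() to size their requests accordingly.
 */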
bool is_swiotlb_active(void)
{
	/*
	 * When SWIOTLB is initialized, even if io_tlb_start points to physical
	 * address zero, io_tlb_end surely doesn't.
	 */
	return io_tlb_end != 0;
}
#ifdef CONFIG_DEBUG_FS

static int __init swiotlb_create_debugfs(void)
{
	struct dentry *root;

	root = debugfs_create_dir("swiotlb", NULL);
	debugfs_create_ulong("io_tlb_nslabs", 0400, root, &io_tlb_nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, root, &io_tlb_used);
	return 0;
}

late_initcall(swiotlb_create_debugfs);

#endif
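
/*
 * When CONFIG_DEBUG_FS is enabled, the two counters above appear as
 * io_tlb_nslabs and io_tlb_used under /sys/kernel/debug/swiotlb/, assuming
 * debugfs is mounted at the usual /sys/kernel/debug.
 */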