// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */
11 #include <linux/acpi_iort.h>
12 #include <linux/atomic.h>
13 #include <linux/crash_dump.h>
14 #include <linux/device.h>
15 #include <linux/dma-direct.h>
16 #include <linux/dma-map-ops.h>
17 #include <linux/gfp.h>
18 #include <linux/huge_mm.h>
19 #include <linux/iommu.h>
20 #include <linux/iommu-dma.h>
21 #include <linux/iova.h>
22 #include <linux/irq.h>
23 #include <linux/list_sort.h>
24 #include <linux/memremap.h>
26 #include <linux/mutex.h>
27 #include <linux/of_iommu.h>
28 #include <linux/pci.h>
29 #include <linux/scatterlist.h>
30 #include <linux/spinlock.h>
31 #include <linux/swiotlb.h>
32 #include <linux/vmalloc.h>
33 #include <trace/events/swiotlb.h>
35 #include "dma-iommu.h"
36 #include "iommu-pages.h"
38 struct iommu_dma_msi_page
{
39 struct list_head list
;
/* Which flavour of allocator backs an iommu_dma_cookie */
enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,	/* full IOVA allocator for DMA-API domains */
	IOMMU_DMA_MSI_COOKIE,	/* trivial linear allocator for MSI-only use */
};
/* Flush-queue layout: one queue per CPU, or one shared queue */
enum iommu_dma_queue_type {
	IOMMU_DMA_OPTS_PER_CPU_QUEUE,
	IOMMU_DMA_OPTS_SINGLE_QUEUE,
};
54 struct iommu_dma_options
{
55 enum iommu_dma_queue_type qt
;
57 unsigned int fq_timeout
;
60 struct iommu_dma_cookie
{
61 enum iommu_dma_cookie_type type
;
63 /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
65 struct iova_domain iovad
;
68 struct iova_fq
*single_fq
;
69 struct iova_fq __percpu
*percpu_fq
;
71 /* Number of TLB flushes that have been started */
72 atomic64_t fq_flush_start_cnt
;
73 /* Number of TLB flushes that have been finished */
74 atomic64_t fq_flush_finish_cnt
;
75 /* Timer to regularily empty the flush queues */
76 struct timer_list fq_timer
;
77 /* 1 when timer is active, 0 when not */
80 /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
83 struct list_head msi_page_list
;
85 /* Domain for flush queue callback; NULL if flush queue not in use */
86 struct iommu_domain
*fq_domain
;
87 /* Options for dma-iommu use */
88 struct iommu_dma_options options
;
92 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled
);
93 bool iommu_dma_forcedac __read_mostly
;
95 static int __init
iommu_dma_forcedac_setup(char *str
)
97 int ret
= kstrtobool(str
, &iommu_dma_forcedac
);
99 if (!ret
&& iommu_dma_forcedac
)
100 pr_info("Forcing DAC for PCI devices\n");
103 early_param("iommu.forcedac", iommu_dma_forcedac_setup
);
105 /* Number of entries per flush queue */
106 #define IOVA_DEFAULT_FQ_SIZE 256
107 #define IOVA_SINGLE_FQ_SIZE 32768
109 /* Timeout (in ms) after which entries are flushed from the queue */
110 #define IOVA_DEFAULT_FQ_TIMEOUT 10
111 #define IOVA_SINGLE_FQ_TIMEOUT 1000
113 /* Flush queue entry for deferred flushing */
114 struct iova_fq_entry
{
115 unsigned long iova_pfn
;
117 struct list_head freelist
;
118 u64 counter
; /* Flush counter when this entry was added */
121 /* Per-CPU flush queue structure */
124 unsigned int head
, tail
;
125 unsigned int mod_mask
;
126 struct iova_fq_entry entries
[];
129 #define fq_ring_for_each(i, fq) \
130 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
132 static inline bool fq_full(struct iova_fq
*fq
)
134 assert_spin_locked(&fq
->lock
);
135 return (((fq
->tail
+ 1) & fq
->mod_mask
) == fq
->head
);
138 static inline unsigned int fq_ring_add(struct iova_fq
*fq
)
140 unsigned int idx
= fq
->tail
;
142 assert_spin_locked(&fq
->lock
);
144 fq
->tail
= (idx
+ 1) & fq
->mod_mask
;
149 static void fq_ring_free_locked(struct iommu_dma_cookie
*cookie
, struct iova_fq
*fq
)
151 u64 counter
= atomic64_read(&cookie
->fq_flush_finish_cnt
);
154 assert_spin_locked(&fq
->lock
);
156 fq_ring_for_each(idx
, fq
) {
158 if (fq
->entries
[idx
].counter
>= counter
)
161 iommu_put_pages_list(&fq
->entries
[idx
].freelist
);
162 free_iova_fast(&cookie
->iovad
,
163 fq
->entries
[idx
].iova_pfn
,
164 fq
->entries
[idx
].pages
);
166 fq
->head
= (fq
->head
+ 1) & fq
->mod_mask
;
170 static void fq_ring_free(struct iommu_dma_cookie
*cookie
, struct iova_fq
*fq
)
174 spin_lock_irqsave(&fq
->lock
, flags
);
175 fq_ring_free_locked(cookie
, fq
);
176 spin_unlock_irqrestore(&fq
->lock
, flags
);
179 static void fq_flush_iotlb(struct iommu_dma_cookie
*cookie
)
181 atomic64_inc(&cookie
->fq_flush_start_cnt
);
182 cookie
->fq_domain
->ops
->flush_iotlb_all(cookie
->fq_domain
);
183 atomic64_inc(&cookie
->fq_flush_finish_cnt
);
186 static void fq_flush_timeout(struct timer_list
*t
)
188 struct iommu_dma_cookie
*cookie
= from_timer(cookie
, t
, fq_timer
);
191 atomic_set(&cookie
->fq_timer_on
, 0);
192 fq_flush_iotlb(cookie
);
194 if (cookie
->options
.qt
== IOMMU_DMA_OPTS_SINGLE_QUEUE
) {
195 fq_ring_free(cookie
, cookie
->single_fq
);
197 for_each_possible_cpu(cpu
)
198 fq_ring_free(cookie
, per_cpu_ptr(cookie
->percpu_fq
, cpu
));
202 static void queue_iova(struct iommu_dma_cookie
*cookie
,
203 unsigned long pfn
, unsigned long pages
,
204 struct list_head
*freelist
)
211 * Order against the IOMMU driver's pagetable update from unmapping
212 * @pte, to guarantee that fq_flush_iotlb() observes that if called
213 * from a different CPU before we release the lock below. Full barrier
214 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
215 * written fq state here.
219 if (cookie
->options
.qt
== IOMMU_DMA_OPTS_SINGLE_QUEUE
)
220 fq
= cookie
->single_fq
;
222 fq
= raw_cpu_ptr(cookie
->percpu_fq
);
224 spin_lock_irqsave(&fq
->lock
, flags
);
227 * First remove all entries from the flush queue that have already been
228 * flushed out on another CPU. This makes the fq_full() check below less
231 fq_ring_free_locked(cookie
, fq
);
234 fq_flush_iotlb(cookie
);
235 fq_ring_free_locked(cookie
, fq
);
238 idx
= fq_ring_add(fq
);
240 fq
->entries
[idx
].iova_pfn
= pfn
;
241 fq
->entries
[idx
].pages
= pages
;
242 fq
->entries
[idx
].counter
= atomic64_read(&cookie
->fq_flush_start_cnt
);
243 list_splice(freelist
, &fq
->entries
[idx
].freelist
);
245 spin_unlock_irqrestore(&fq
->lock
, flags
);
247 /* Avoid false sharing as much as possible. */
248 if (!atomic_read(&cookie
->fq_timer_on
) &&
249 !atomic_xchg(&cookie
->fq_timer_on
, 1))
250 mod_timer(&cookie
->fq_timer
,
251 jiffies
+ msecs_to_jiffies(cookie
->options
.fq_timeout
));
254 static void iommu_dma_free_fq_single(struct iova_fq
*fq
)
258 fq_ring_for_each(idx
, fq
)
259 iommu_put_pages_list(&fq
->entries
[idx
].freelist
);
263 static void iommu_dma_free_fq_percpu(struct iova_fq __percpu
*percpu_fq
)
267 /* The IOVAs will be torn down separately, so just free our queued pages */
268 for_each_possible_cpu(cpu
) {
269 struct iova_fq
*fq
= per_cpu_ptr(percpu_fq
, cpu
);
271 fq_ring_for_each(idx
, fq
)
272 iommu_put_pages_list(&fq
->entries
[idx
].freelist
);
275 free_percpu(percpu_fq
);
278 static void iommu_dma_free_fq(struct iommu_dma_cookie
*cookie
)
280 if (!cookie
->fq_domain
)
283 del_timer_sync(&cookie
->fq_timer
);
284 if (cookie
->options
.qt
== IOMMU_DMA_OPTS_SINGLE_QUEUE
)
285 iommu_dma_free_fq_single(cookie
->single_fq
);
287 iommu_dma_free_fq_percpu(cookie
->percpu_fq
);
290 static void iommu_dma_init_one_fq(struct iova_fq
*fq
, size_t fq_size
)
296 fq
->mod_mask
= fq_size
- 1;
298 spin_lock_init(&fq
->lock
);
300 for (i
= 0; i
< fq_size
; i
++)
301 INIT_LIST_HEAD(&fq
->entries
[i
].freelist
);
304 static int iommu_dma_init_fq_single(struct iommu_dma_cookie
*cookie
)
306 size_t fq_size
= cookie
->options
.fq_size
;
307 struct iova_fq
*queue
;
309 queue
= vmalloc(struct_size(queue
, entries
, fq_size
));
312 iommu_dma_init_one_fq(queue
, fq_size
);
313 cookie
->single_fq
= queue
;
318 static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie
*cookie
)
320 size_t fq_size
= cookie
->options
.fq_size
;
321 struct iova_fq __percpu
*queue
;
324 queue
= __alloc_percpu(struct_size(queue
, entries
, fq_size
),
325 __alignof__(*queue
));
329 for_each_possible_cpu(cpu
)
330 iommu_dma_init_one_fq(per_cpu_ptr(queue
, cpu
), fq_size
);
331 cookie
->percpu_fq
= queue
;
335 /* sysfs updates are serialised by the mutex of the group owning @domain */
336 int iommu_dma_init_fq(struct iommu_domain
*domain
)
338 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
341 if (cookie
->fq_domain
)
344 atomic64_set(&cookie
->fq_flush_start_cnt
, 0);
345 atomic64_set(&cookie
->fq_flush_finish_cnt
, 0);
347 if (cookie
->options
.qt
== IOMMU_DMA_OPTS_SINGLE_QUEUE
)
348 rc
= iommu_dma_init_fq_single(cookie
);
350 rc
= iommu_dma_init_fq_percpu(cookie
);
353 pr_warn("iova flush queue initialization failed\n");
357 timer_setup(&cookie
->fq_timer
, fq_flush_timeout
, 0);
358 atomic_set(&cookie
->fq_timer_on
, 0);
360 * Prevent incomplete fq state being observable. Pairs with path from
361 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
364 WRITE_ONCE(cookie
->fq_domain
, domain
);
368 static inline size_t cookie_msi_granule(struct iommu_dma_cookie
*cookie
)
370 if (cookie
->type
== IOMMU_DMA_IOVA_COOKIE
)
371 return cookie
->iovad
.granule
;
375 static struct iommu_dma_cookie
*cookie_alloc(enum iommu_dma_cookie_type type
)
377 struct iommu_dma_cookie
*cookie
;
379 cookie
= kzalloc(sizeof(*cookie
), GFP_KERNEL
);
381 INIT_LIST_HEAD(&cookie
->msi_page_list
);
388 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
389 * @domain: IOMMU domain to prepare for DMA-API usage
391 int iommu_get_dma_cookie(struct iommu_domain
*domain
)
393 if (domain
->iova_cookie
)
396 domain
->iova_cookie
= cookie_alloc(IOMMU_DMA_IOVA_COOKIE
);
397 if (!domain
->iova_cookie
)
400 mutex_init(&domain
->iova_cookie
->mutex
);
405 * iommu_get_msi_cookie - Acquire just MSI remapping resources
406 * @domain: IOMMU domain to prepare
407 * @base: Start address of IOVA region for MSI mappings
409 * Users who manage their own IOVA allocation and do not want DMA API support,
410 * but would still like to take advantage of automatic MSI remapping, can use
411 * this to initialise their own domain appropriately. Users should reserve a
412 * contiguous IOVA region, starting at @base, large enough to accommodate the
413 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
414 * used by the devices attached to @domain.
416 int iommu_get_msi_cookie(struct iommu_domain
*domain
, dma_addr_t base
)
418 struct iommu_dma_cookie
*cookie
;
420 if (domain
->type
!= IOMMU_DOMAIN_UNMANAGED
)
423 if (domain
->iova_cookie
)
426 cookie
= cookie_alloc(IOMMU_DMA_MSI_COOKIE
);
430 cookie
->msi_iova
= base
;
431 domain
->iova_cookie
= cookie
;
434 EXPORT_SYMBOL(iommu_get_msi_cookie
);
437 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
438 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
439 * iommu_get_msi_cookie()
441 void iommu_put_dma_cookie(struct iommu_domain
*domain
)
443 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
444 struct iommu_dma_msi_page
*msi
, *tmp
;
449 if (cookie
->type
== IOMMU_DMA_IOVA_COOKIE
&& cookie
->iovad
.granule
) {
450 iommu_dma_free_fq(cookie
);
451 put_iova_domain(&cookie
->iovad
);
454 list_for_each_entry_safe(msi
, tmp
, &cookie
->msi_page_list
, list
) {
455 list_del(&msi
->list
);
459 domain
->iova_cookie
= NULL
;
463 * iommu_dma_get_resv_regions - Reserved region driver helper
464 * @dev: Device from iommu_get_resv_regions()
465 * @list: Reserved region list from iommu_get_resv_regions()
467 * IOMMU drivers can use this to implement their .get_resv_regions callback
468 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
469 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
472 void iommu_dma_get_resv_regions(struct device
*dev
, struct list_head
*list
)
475 if (!is_of_node(dev_iommu_fwspec_get(dev
)->iommu_fwnode
))
476 iort_iommu_get_resv_regions(dev
, list
);
479 of_iommu_get_resv_regions(dev
, list
);
481 EXPORT_SYMBOL(iommu_dma_get_resv_regions
);
483 static int cookie_init_hw_msi_region(struct iommu_dma_cookie
*cookie
,
484 phys_addr_t start
, phys_addr_t end
)
486 struct iova_domain
*iovad
= &cookie
->iovad
;
487 struct iommu_dma_msi_page
*msi_page
;
490 start
-= iova_offset(iovad
, start
);
491 num_pages
= iova_align(iovad
, end
- start
) >> iova_shift(iovad
);
493 for (i
= 0; i
< num_pages
; i
++) {
494 msi_page
= kmalloc(sizeof(*msi_page
), GFP_KERNEL
);
498 msi_page
->phys
= start
;
499 msi_page
->iova
= start
;
500 INIT_LIST_HEAD(&msi_page
->list
);
501 list_add(&msi_page
->list
, &cookie
->msi_page_list
);
502 start
+= iovad
->granule
;
508 static int iommu_dma_ranges_sort(void *priv
, const struct list_head
*a
,
509 const struct list_head
*b
)
511 struct resource_entry
*res_a
= list_entry(a
, typeof(*res_a
), node
);
512 struct resource_entry
*res_b
= list_entry(b
, typeof(*res_b
), node
);
514 return res_a
->res
->start
> res_b
->res
->start
;
517 static int iova_reserve_pci_windows(struct pci_dev
*dev
,
518 struct iova_domain
*iovad
)
520 struct pci_host_bridge
*bridge
= pci_find_host_bridge(dev
->bus
);
521 struct resource_entry
*window
;
522 unsigned long lo
, hi
;
523 phys_addr_t start
= 0, end
;
525 resource_list_for_each_entry(window
, &bridge
->windows
) {
526 if (resource_type(window
->res
) != IORESOURCE_MEM
)
529 lo
= iova_pfn(iovad
, window
->res
->start
- window
->offset
);
530 hi
= iova_pfn(iovad
, window
->res
->end
- window
->offset
);
531 reserve_iova(iovad
, lo
, hi
);
534 /* Get reserved DMA windows from host bridge */
535 list_sort(NULL
, &bridge
->dma_ranges
, iommu_dma_ranges_sort
);
536 resource_list_for_each_entry(window
, &bridge
->dma_ranges
) {
537 end
= window
->res
->start
- window
->offset
;
540 lo
= iova_pfn(iovad
, start
);
541 hi
= iova_pfn(iovad
, end
);
542 reserve_iova(iovad
, lo
, hi
);
543 } else if (end
< start
) {
544 /* DMA ranges should be non-overlapping */
546 "Failed to reserve IOVA [%pa-%pa]\n",
551 start
= window
->res
->end
- window
->offset
+ 1;
552 /* If window is last entry */
553 if (window
->node
.next
== &bridge
->dma_ranges
&&
554 end
!= ~(phys_addr_t
)0) {
555 end
= ~(phys_addr_t
)0;
563 static int iova_reserve_iommu_regions(struct device
*dev
,
564 struct iommu_domain
*domain
)
566 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
567 struct iova_domain
*iovad
= &cookie
->iovad
;
568 struct iommu_resv_region
*region
;
569 LIST_HEAD(resv_regions
);
572 if (dev_is_pci(dev
)) {
573 ret
= iova_reserve_pci_windows(to_pci_dev(dev
), iovad
);
578 iommu_get_resv_regions(dev
, &resv_regions
);
579 list_for_each_entry(region
, &resv_regions
, list
) {
580 unsigned long lo
, hi
;
582 /* We ARE the software that manages these! */
583 if (region
->type
== IOMMU_RESV_SW_MSI
)
586 lo
= iova_pfn(iovad
, region
->start
);
587 hi
= iova_pfn(iovad
, region
->start
+ region
->length
- 1);
588 reserve_iova(iovad
, lo
, hi
);
590 if (region
->type
== IOMMU_RESV_MSI
)
591 ret
= cookie_init_hw_msi_region(cookie
, region
->start
,
592 region
->start
+ region
->length
);
596 iommu_put_resv_regions(dev
, &resv_regions
);
601 static bool dev_is_untrusted(struct device
*dev
)
603 return dev_is_pci(dev
) && to_pci_dev(dev
)->untrusted
;
606 static bool dev_use_swiotlb(struct device
*dev
, size_t size
,
607 enum dma_data_direction dir
)
609 return IS_ENABLED(CONFIG_SWIOTLB
) &&
610 (dev_is_untrusted(dev
) ||
611 dma_kmalloc_needs_bounce(dev
, size
, dir
));
614 static bool dev_use_sg_swiotlb(struct device
*dev
, struct scatterlist
*sg
,
615 int nents
, enum dma_data_direction dir
)
617 struct scatterlist
*s
;
620 if (!IS_ENABLED(CONFIG_SWIOTLB
))
623 if (dev_is_untrusted(dev
))
627 * If kmalloc() buffers are not DMA-safe for this device and
628 * direction, check the individual lengths in the sg list. If any
629 * element is deemed unsafe, use the swiotlb for bouncing.
631 if (!dma_kmalloc_safe(dev
, dir
)) {
632 for_each_sg(sg
, s
, nents
, i
)
633 if (!dma_kmalloc_size_aligned(s
->length
))
641 * iommu_dma_init_options - Initialize dma-iommu options
642 * @options: The options to be initialized
643 * @dev: Device the options are set for
645 * This allows tuning dma-iommu specific to device properties
647 static void iommu_dma_init_options(struct iommu_dma_options
*options
,
650 /* Shadowing IOTLB flushes do better with a single large queue */
651 if (dev
->iommu
->shadow_on_flush
) {
652 options
->qt
= IOMMU_DMA_OPTS_SINGLE_QUEUE
;
653 options
->fq_timeout
= IOVA_SINGLE_FQ_TIMEOUT
;
654 options
->fq_size
= IOVA_SINGLE_FQ_SIZE
;
656 options
->qt
= IOMMU_DMA_OPTS_PER_CPU_QUEUE
;
657 options
->fq_size
= IOVA_DEFAULT_FQ_SIZE
;
658 options
->fq_timeout
= IOVA_DEFAULT_FQ_TIMEOUT
;
663 * iommu_dma_init_domain - Initialise a DMA mapping domain
664 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
665 * @dev: Device the domain is being initialised for
667 * If the geometry and dma_range_map include address 0, we reserve that page
668 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
669 * any change which could make prior IOVAs invalid will fail.
671 static int iommu_dma_init_domain(struct iommu_domain
*domain
, struct device
*dev
)
673 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
674 const struct bus_dma_region
*map
= dev
->dma_range_map
;
675 unsigned long order
, base_pfn
;
676 struct iova_domain
*iovad
;
679 if (!cookie
|| cookie
->type
!= IOMMU_DMA_IOVA_COOKIE
)
682 iovad
= &cookie
->iovad
;
684 /* Use the smallest supported page size for IOVA granularity */
685 order
= __ffs(domain
->pgsize_bitmap
);
688 /* Check the domain allows at least some access to the device... */
690 if (dma_range_map_min(map
) > domain
->geometry
.aperture_end
||
691 dma_range_map_max(map
) < domain
->geometry
.aperture_start
) {
692 pr_warn("specified DMA range outside IOMMU capability\n");
696 /* ...then finally give it a kicking to make sure it fits */
697 base_pfn
= max_t(unsigned long, base_pfn
,
698 domain
->geometry
.aperture_start
>> order
);
700 /* start_pfn is always nonzero for an already-initialised domain */
701 mutex_lock(&cookie
->mutex
);
702 if (iovad
->start_pfn
) {
703 if (1UL << order
!= iovad
->granule
||
704 base_pfn
!= iovad
->start_pfn
) {
705 pr_warn("Incompatible range for DMA domain\n");
714 init_iova_domain(iovad
, 1UL << order
, base_pfn
);
715 ret
= iova_domain_init_rcaches(iovad
);
719 iommu_dma_init_options(&cookie
->options
, dev
);
721 /* If the FQ fails we can simply fall back to strict mode */
722 if (domain
->type
== IOMMU_DOMAIN_DMA_FQ
&&
723 (!device_iommu_capable(dev
, IOMMU_CAP_DEFERRED_FLUSH
) || iommu_dma_init_fq(domain
)))
724 domain
->type
= IOMMU_DOMAIN_DMA
;
726 ret
= iova_reserve_iommu_regions(dev
, domain
);
729 mutex_unlock(&cookie
->mutex
);
734 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
736 * @dir: Direction of DMA transfer
737 * @coherent: Is the DMA master cache-coherent?
738 * @attrs: DMA attributes for the mapping
740 * Return: corresponding IOMMU API page protection flags
742 static int dma_info_to_prot(enum dma_data_direction dir
, bool coherent
,
745 int prot
= coherent
? IOMMU_CACHE
: 0;
747 if (attrs
& DMA_ATTR_PRIVILEGED
)
751 case DMA_BIDIRECTIONAL
:
752 return prot
| IOMMU_READ
| IOMMU_WRITE
;
754 return prot
| IOMMU_READ
;
755 case DMA_FROM_DEVICE
:
756 return prot
| IOMMU_WRITE
;
762 static dma_addr_t
iommu_dma_alloc_iova(struct iommu_domain
*domain
,
763 size_t size
, u64 dma_limit
, struct device
*dev
)
765 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
766 struct iova_domain
*iovad
= &cookie
->iovad
;
767 unsigned long shift
, iova_len
, iova
;
769 if (cookie
->type
== IOMMU_DMA_MSI_COOKIE
) {
770 cookie
->msi_iova
+= size
;
771 return cookie
->msi_iova
- size
;
774 shift
= iova_shift(iovad
);
775 iova_len
= size
>> shift
;
777 dma_limit
= min_not_zero(dma_limit
, dev
->bus_dma_limit
);
779 if (domain
->geometry
.force_aperture
)
780 dma_limit
= min(dma_limit
, (u64
)domain
->geometry
.aperture_end
);
783 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
784 * DAC reasoning loses relevance with PCIe, but enough hardware and
785 * firmware bugs are still lurking out there that it's safest not to
786 * venture into the 64-bit space until necessary.
788 * If your device goes wrong after seeing the notice then likely either
789 * its driver is not setting DMA masks accurately, the hardware has
790 * some inherent bug in handling >32-bit addresses, or not all the
791 * expected address bits are wired up between the device and the IOMMU.
793 if (dma_limit
> DMA_BIT_MASK(32) && dev
->iommu
->pci_32bit_workaround
) {
794 iova
= alloc_iova_fast(iovad
, iova_len
,
795 DMA_BIT_MASK(32) >> shift
, false);
799 dev
->iommu
->pci_32bit_workaround
= false;
800 dev_notice(dev
, "Using %d-bit DMA addresses\n", bits_per(dma_limit
));
803 iova
= alloc_iova_fast(iovad
, iova_len
, dma_limit
>> shift
, true);
805 return (dma_addr_t
)iova
<< shift
;
808 static void iommu_dma_free_iova(struct iommu_dma_cookie
*cookie
,
809 dma_addr_t iova
, size_t size
, struct iommu_iotlb_gather
*gather
)
811 struct iova_domain
*iovad
= &cookie
->iovad
;
813 /* The MSI case is only ever cleaning up its most recent allocation */
814 if (cookie
->type
== IOMMU_DMA_MSI_COOKIE
)
815 cookie
->msi_iova
-= size
;
816 else if (gather
&& gather
->queued
)
817 queue_iova(cookie
, iova_pfn(iovad
, iova
),
818 size
>> iova_shift(iovad
),
821 free_iova_fast(iovad
, iova_pfn(iovad
, iova
),
822 size
>> iova_shift(iovad
));
825 static void __iommu_dma_unmap(struct device
*dev
, dma_addr_t dma_addr
,
828 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
829 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
830 struct iova_domain
*iovad
= &cookie
->iovad
;
831 size_t iova_off
= iova_offset(iovad
, dma_addr
);
832 struct iommu_iotlb_gather iotlb_gather
;
835 dma_addr
-= iova_off
;
836 size
= iova_align(iovad
, size
+ iova_off
);
837 iommu_iotlb_gather_init(&iotlb_gather
);
838 iotlb_gather
.queued
= READ_ONCE(cookie
->fq_domain
);
840 unmapped
= iommu_unmap_fast(domain
, dma_addr
, size
, &iotlb_gather
);
841 WARN_ON(unmapped
!= size
);
843 if (!iotlb_gather
.queued
)
844 iommu_iotlb_sync(domain
, &iotlb_gather
);
845 iommu_dma_free_iova(cookie
, dma_addr
, size
, &iotlb_gather
);
848 static dma_addr_t
__iommu_dma_map(struct device
*dev
, phys_addr_t phys
,
849 size_t size
, int prot
, u64 dma_mask
)
851 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
852 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
853 struct iova_domain
*iovad
= &cookie
->iovad
;
854 size_t iova_off
= iova_offset(iovad
, phys
);
857 if (static_branch_unlikely(&iommu_deferred_attach_enabled
) &&
858 iommu_deferred_attach(dev
, domain
))
859 return DMA_MAPPING_ERROR
;
861 /* If anyone ever wants this we'd need support in the IOVA allocator */
862 if (dev_WARN_ONCE(dev
, dma_get_min_align_mask(dev
) > iova_mask(iovad
),
863 "Unsupported alignment constraint\n"))
864 return DMA_MAPPING_ERROR
;
866 size
= iova_align(iovad
, size
+ iova_off
);
868 iova
= iommu_dma_alloc_iova(domain
, size
, dma_mask
, dev
);
870 return DMA_MAPPING_ERROR
;
872 if (iommu_map(domain
, iova
, phys
- iova_off
, size
, prot
, GFP_ATOMIC
)) {
873 iommu_dma_free_iova(cookie
, iova
, size
, NULL
);
874 return DMA_MAPPING_ERROR
;
876 return iova
+ iova_off
;
/* Free @count single pages and the page-pointer array itself */
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
886 static struct page
**__iommu_dma_alloc_pages(struct device
*dev
,
887 unsigned int count
, unsigned long order_mask
, gfp_t gfp
)
890 unsigned int i
= 0, nid
= dev_to_node(dev
);
892 order_mask
&= GENMASK(MAX_PAGE_ORDER
, 0);
896 pages
= kvcalloc(count
, sizeof(*pages
), GFP_KERNEL
);
900 /* IOMMU can map any pages, so himem can also be used here */
901 gfp
|= __GFP_NOWARN
| __GFP_HIGHMEM
;
904 struct page
*page
= NULL
;
905 unsigned int order_size
;
908 * Higher-order allocations are a convenience rather
909 * than a necessity, hence using __GFP_NORETRY until
910 * falling back to minimum-order allocations.
912 for (order_mask
&= GENMASK(__fls(count
), 0);
913 order_mask
; order_mask
&= ~order_size
) {
914 unsigned int order
= __fls(order_mask
);
915 gfp_t alloc_flags
= gfp
;
917 order_size
= 1U << order
;
918 if (order_mask
> order_size
)
919 alloc_flags
|= __GFP_NORETRY
;
920 page
= alloc_pages_node(nid
, alloc_flags
, order
);
924 split_page(page
, order
);
928 __iommu_dma_free_pages(pages
, i
);
939 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
940 * but an IOMMU which supports smaller pages might not map the whole thing.
942 static struct page
**__iommu_dma_alloc_noncontiguous(struct device
*dev
,
943 size_t size
, struct sg_table
*sgt
, gfp_t gfp
, unsigned long attrs
)
945 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
946 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
947 struct iova_domain
*iovad
= &cookie
->iovad
;
948 bool coherent
= dev_is_dma_coherent(dev
);
949 int ioprot
= dma_info_to_prot(DMA_BIDIRECTIONAL
, coherent
, attrs
);
950 unsigned int count
, min_size
, alloc_sizes
= domain
->pgsize_bitmap
;
955 if (static_branch_unlikely(&iommu_deferred_attach_enabled
) &&
956 iommu_deferred_attach(dev
, domain
))
959 min_size
= alloc_sizes
& -alloc_sizes
;
960 if (min_size
< PAGE_SIZE
) {
961 min_size
= PAGE_SIZE
;
962 alloc_sizes
|= PAGE_SIZE
;
964 size
= ALIGN(size
, min_size
);
966 if (attrs
& DMA_ATTR_ALLOC_SINGLE_PAGES
)
967 alloc_sizes
= min_size
;
969 count
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
970 pages
= __iommu_dma_alloc_pages(dev
, count
, alloc_sizes
>> PAGE_SHIFT
,
975 size
= iova_align(iovad
, size
);
976 iova
= iommu_dma_alloc_iova(domain
, size
, dev
->coherent_dma_mask
, dev
);
981 * Remove the zone/policy flags from the GFP - these are applied to the
982 * __iommu_dma_alloc_pages() but are not used for the supporting
983 * internal allocations that follow.
985 gfp
&= ~(__GFP_DMA
| __GFP_DMA32
| __GFP_HIGHMEM
| __GFP_COMP
);
987 if (sg_alloc_table_from_pages(sgt
, pages
, count
, 0, size
, gfp
))
990 if (!(ioprot
& IOMMU_CACHE
)) {
991 struct scatterlist
*sg
;
994 for_each_sg(sgt
->sgl
, sg
, sgt
->orig_nents
, i
)
995 arch_dma_prep_coherent(sg_page(sg
), sg
->length
);
998 ret
= iommu_map_sg(domain
, iova
, sgt
->sgl
, sgt
->orig_nents
, ioprot
,
1000 if (ret
< 0 || ret
< size
)
1003 sgt
->sgl
->dma_address
= iova
;
1004 sgt
->sgl
->dma_length
= size
;
1010 iommu_dma_free_iova(cookie
, iova
, size
, NULL
);
1012 __iommu_dma_free_pages(pages
, count
);
1016 static void *iommu_dma_alloc_remap(struct device
*dev
, size_t size
,
1017 dma_addr_t
*dma_handle
, gfp_t gfp
, unsigned long attrs
)
1019 struct page
**pages
;
1020 struct sg_table sgt
;
1022 pgprot_t prot
= dma_pgprot(dev
, PAGE_KERNEL
, attrs
);
1024 pages
= __iommu_dma_alloc_noncontiguous(dev
, size
, &sgt
, gfp
, attrs
);
1027 *dma_handle
= sgt
.sgl
->dma_address
;
1028 sg_free_table(&sgt
);
1029 vaddr
= dma_common_pages_remap(pages
, size
, prot
,
1030 __builtin_return_address(0));
1036 __iommu_dma_unmap(dev
, *dma_handle
, size
);
1037 __iommu_dma_free_pages(pages
, PAGE_ALIGN(size
) >> PAGE_SHIFT
);
1042 * This is the actual return value from the iommu_dma_alloc_noncontiguous.
1044 * The users of the DMA API should only care about the sg_table, but to make
1045 * the DMA-API internal vmaping and freeing easier we stash away the page
1046 * array as well (except for the fallback case). This can go away any time,
1047 * e.g. when a vmap-variant that takes a scatterlist comes along.
1049 struct dma_sgt_handle
{
1050 struct sg_table sgt
;
1051 struct page
**pages
;
1053 #define sgt_handle(sgt) \
1054 container_of((sgt), struct dma_sgt_handle, sgt)
1056 struct sg_table
*iommu_dma_alloc_noncontiguous(struct device
*dev
, size_t size
,
1057 enum dma_data_direction dir
, gfp_t gfp
, unsigned long attrs
)
1059 struct dma_sgt_handle
*sh
;
1061 sh
= kmalloc(sizeof(*sh
), gfp
);
1065 sh
->pages
= __iommu_dma_alloc_noncontiguous(dev
, size
, &sh
->sgt
, gfp
, attrs
);
1073 void iommu_dma_free_noncontiguous(struct device
*dev
, size_t size
,
1074 struct sg_table
*sgt
, enum dma_data_direction dir
)
1076 struct dma_sgt_handle
*sh
= sgt_handle(sgt
);
1078 __iommu_dma_unmap(dev
, sgt
->sgl
->dma_address
, size
);
1079 __iommu_dma_free_pages(sh
->pages
, PAGE_ALIGN(size
) >> PAGE_SHIFT
);
1080 sg_free_table(&sh
->sgt
);
1084 void *iommu_dma_vmap_noncontiguous(struct device
*dev
, size_t size
,
1085 struct sg_table
*sgt
)
1087 unsigned long count
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1089 return vmap(sgt_handle(sgt
)->pages
, count
, VM_MAP
, PAGE_KERNEL
);
1092 int iommu_dma_mmap_noncontiguous(struct device
*dev
, struct vm_area_struct
*vma
,
1093 size_t size
, struct sg_table
*sgt
)
1095 unsigned long count
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1097 if (vma
->vm_pgoff
>= count
|| vma_pages(vma
) > count
- vma
->vm_pgoff
)
1099 return vm_map_pages(vma
, sgt_handle(sgt
)->pages
, count
);
1102 void iommu_dma_sync_single_for_cpu(struct device
*dev
, dma_addr_t dma_handle
,
1103 size_t size
, enum dma_data_direction dir
)
1107 if (dev_is_dma_coherent(dev
) && !dev_use_swiotlb(dev
, size
, dir
))
1110 phys
= iommu_iova_to_phys(iommu_get_dma_domain(dev
), dma_handle
);
1111 if (!dev_is_dma_coherent(dev
))
1112 arch_sync_dma_for_cpu(phys
, size
, dir
);
1114 swiotlb_sync_single_for_cpu(dev
, phys
, size
, dir
);
1117 void iommu_dma_sync_single_for_device(struct device
*dev
, dma_addr_t dma_handle
,
1118 size_t size
, enum dma_data_direction dir
)
1122 if (dev_is_dma_coherent(dev
) && !dev_use_swiotlb(dev
, size
, dir
))
1125 phys
= iommu_iova_to_phys(iommu_get_dma_domain(dev
), dma_handle
);
1126 swiotlb_sync_single_for_device(dev
, phys
, size
, dir
);
1128 if (!dev_is_dma_coherent(dev
))
1129 arch_sync_dma_for_device(phys
, size
, dir
);
1132 void iommu_dma_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sgl
,
1133 int nelems
, enum dma_data_direction dir
)
1135 struct scatterlist
*sg
;
1138 if (sg_dma_is_swiotlb(sgl
))
1139 for_each_sg(sgl
, sg
, nelems
, i
)
1140 iommu_dma_sync_single_for_cpu(dev
, sg_dma_address(sg
),
1142 else if (!dev_is_dma_coherent(dev
))
1143 for_each_sg(sgl
, sg
, nelems
, i
)
1144 arch_sync_dma_for_cpu(sg_phys(sg
), sg
->length
, dir
);
1147 void iommu_dma_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sgl
,
1148 int nelems
, enum dma_data_direction dir
)
1150 struct scatterlist
*sg
;
1153 if (sg_dma_is_swiotlb(sgl
))
1154 for_each_sg(sgl
, sg
, nelems
, i
)
1155 iommu_dma_sync_single_for_device(dev
,
1158 else if (!dev_is_dma_coherent(dev
))
1159 for_each_sg(sgl
, sg
, nelems
, i
)
1160 arch_sync_dma_for_device(sg_phys(sg
), sg
->length
, dir
);
1163 dma_addr_t
iommu_dma_map_page(struct device
*dev
, struct page
*page
,
1164 unsigned long offset
, size_t size
, enum dma_data_direction dir
,
1165 unsigned long attrs
)
1167 phys_addr_t phys
= page_to_phys(page
) + offset
;
1168 bool coherent
= dev_is_dma_coherent(dev
);
1169 int prot
= dma_info_to_prot(dir
, coherent
, attrs
);
1170 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
1171 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
1172 struct iova_domain
*iovad
= &cookie
->iovad
;
1173 dma_addr_t iova
, dma_mask
= dma_get_mask(dev
);
1176 * If both the physical buffer start address and size are
1177 * page aligned, we don't need to use a bounce page.
1179 if (dev_use_swiotlb(dev
, size
, dir
) &&
1180 iova_offset(iovad
, phys
| size
)) {
1181 if (!is_swiotlb_active(dev
)) {
1182 dev_warn_once(dev
, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
1183 return DMA_MAPPING_ERROR
;
1186 trace_swiotlb_bounced(dev
, phys
, size
);
1188 phys
= swiotlb_tbl_map_single(dev
, phys
, size
,
1189 iova_mask(iovad
), dir
, attrs
);
1191 if (phys
== DMA_MAPPING_ERROR
)
1192 return DMA_MAPPING_ERROR
;
1195 * Untrusted devices should not see padding areas with random
1196 * leftover kernel data, so zero the pre- and post-padding.
1197 * swiotlb_tbl_map_single() has initialized the bounce buffer
1198 * proper to the contents of the original memory buffer.
1200 if (dev_is_untrusted(dev
)) {
1201 size_t start
, virt
= (size_t)phys_to_virt(phys
);
1204 start
= iova_align_down(iovad
, virt
);
1205 memset((void *)start
, 0, virt
- start
);
1208 start
= virt
+ size
;
1209 memset((void *)start
, 0,
1210 iova_align(iovad
, start
) - start
);
1214 if (!coherent
&& !(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
1215 arch_sync_dma_for_device(phys
, size
, dir
);
1217 iova
= __iommu_dma_map(dev
, phys
, size
, prot
, dma_mask
);
1218 if (iova
== DMA_MAPPING_ERROR
)
1219 swiotlb_tbl_unmap_single(dev
, phys
, size
, dir
, attrs
);
1223 void iommu_dma_unmap_page(struct device
*dev
, dma_addr_t dma_handle
,
1224 size_t size
, enum dma_data_direction dir
, unsigned long attrs
)
1226 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
1229 phys
= iommu_iova_to_phys(domain
, dma_handle
);
1233 if (!(attrs
& DMA_ATTR_SKIP_CPU_SYNC
) && !dev_is_dma_coherent(dev
))
1234 arch_sync_dma_for_cpu(phys
, size
, dir
);
1236 __iommu_dma_unmap(dev
, dma_handle
, size
);
1238 swiotlb_tbl_unmap_single(dev
, phys
, size
, dir
, attrs
);
1242 * Prepare a successfully-mapped scatterlist to give back to the caller.
1244 * At this point the segments are already laid out by iommu_dma_map_sg() to
1245 * avoid individually crossing any boundaries, so we merely need to check a
1246 * segment's start address to avoid concatenating across one.
1248 static int __finalise_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1249 dma_addr_t dma_addr
)
1251 struct scatterlist
*s
, *cur
= sg
;
1252 unsigned long seg_mask
= dma_get_seg_boundary(dev
);
1253 unsigned int cur_len
= 0, max_len
= dma_get_max_seg_size(dev
);
1256 for_each_sg(sg
, s
, nents
, i
) {
1257 /* Restore this segment's original unaligned fields first */
1258 dma_addr_t s_dma_addr
= sg_dma_address(s
);
1259 unsigned int s_iova_off
= sg_dma_address(s
);
1260 unsigned int s_length
= sg_dma_len(s
);
1261 unsigned int s_iova_len
= s
->length
;
1263 sg_dma_address(s
) = DMA_MAPPING_ERROR
;
1266 if (sg_dma_is_bus_address(s
)) {
1270 sg_dma_unmark_bus_address(s
);
1271 sg_dma_address(cur
) = s_dma_addr
;
1272 sg_dma_len(cur
) = s_length
;
1273 sg_dma_mark_bus_address(cur
);
1279 s
->offset
+= s_iova_off
;
1280 s
->length
= s_length
;
1283 * Now fill in the real DMA data. If...
1284 * - there is a valid output segment to append to
1285 * - and this segment starts on an IOVA page boundary
1286 * - but doesn't fall at a segment boundary
1287 * - and wouldn't make the resulting output segment too long
1289 if (cur_len
&& !s_iova_off
&& (dma_addr
& seg_mask
) &&
1290 (max_len
- cur_len
>= s_length
)) {
1291 /* ...then concatenate it with the previous one */
1292 cur_len
+= s_length
;
1294 /* Otherwise start the next output segment */
1300 sg_dma_address(cur
) = dma_addr
+ s_iova_off
;
1303 sg_dma_len(cur
) = cur_len
;
1304 dma_addr
+= s_iova_len
;
1306 if (s_length
+ s_iova_off
< s_iova_len
)
1313 * If mapping failed, then just restore the original list,
1314 * but making sure the DMA fields are invalidated.
1316 static void __invalidate_sg(struct scatterlist
*sg
, int nents
)
1318 struct scatterlist
*s
;
1321 for_each_sg(sg
, s
, nents
, i
) {
1322 if (sg_dma_is_bus_address(s
)) {
1323 sg_dma_unmark_bus_address(s
);
1325 if (sg_dma_address(s
) != DMA_MAPPING_ERROR
)
1326 s
->offset
+= sg_dma_address(s
);
1328 s
->length
= sg_dma_len(s
);
1330 sg_dma_address(s
) = DMA_MAPPING_ERROR
;
1335 static void iommu_dma_unmap_sg_swiotlb(struct device
*dev
, struct scatterlist
*sg
,
1336 int nents
, enum dma_data_direction dir
, unsigned long attrs
)
1338 struct scatterlist
*s
;
1341 for_each_sg(sg
, s
, nents
, i
)
1342 iommu_dma_unmap_page(dev
, sg_dma_address(s
),
1343 sg_dma_len(s
), dir
, attrs
);
1346 static int iommu_dma_map_sg_swiotlb(struct device
*dev
, struct scatterlist
*sg
,
1347 int nents
, enum dma_data_direction dir
, unsigned long attrs
)
1349 struct scatterlist
*s
;
1352 sg_dma_mark_swiotlb(sg
);
1354 for_each_sg(sg
, s
, nents
, i
) {
1355 sg_dma_address(s
) = iommu_dma_map_page(dev
, sg_page(s
),
1356 s
->offset
, s
->length
, dir
, attrs
);
1357 if (sg_dma_address(s
) == DMA_MAPPING_ERROR
)
1359 sg_dma_len(s
) = s
->length
;
1365 iommu_dma_unmap_sg_swiotlb(dev
, sg
, i
, dir
, attrs
| DMA_ATTR_SKIP_CPU_SYNC
);
1370 * The DMA API client is passing in a scatterlist which could describe
1371 * any old buffer layout, but the IOMMU API requires everything to be
1372 * aligned to IOMMU pages. Hence the need for this complicated bit of
1373 * impedance-matching, to be able to hand off a suitably-aligned list,
1374 * but still preserve the original offsets and sizes for the caller.
1376 int iommu_dma_map_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1377 enum dma_data_direction dir
, unsigned long attrs
)
1379 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
1380 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
1381 struct iova_domain
*iovad
= &cookie
->iovad
;
1382 struct scatterlist
*s
, *prev
= NULL
;
1383 int prot
= dma_info_to_prot(dir
, dev_is_dma_coherent(dev
), attrs
);
1384 struct pci_p2pdma_map_state p2pdma_state
= {};
1385 enum pci_p2pdma_map_type map
;
1387 size_t iova_len
= 0;
1388 unsigned long mask
= dma_get_seg_boundary(dev
);
1392 if (static_branch_unlikely(&iommu_deferred_attach_enabled
)) {
1393 ret
= iommu_deferred_attach(dev
, domain
);
1398 if (dev_use_sg_swiotlb(dev
, sg
, nents
, dir
))
1399 return iommu_dma_map_sg_swiotlb(dev
, sg
, nents
, dir
, attrs
);
1401 if (!(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
1402 iommu_dma_sync_sg_for_device(dev
, sg
, nents
, dir
);
1405 * Work out how much IOVA space we need, and align the segments to
1406 * IOVA granules for the IOMMU driver to handle. With some clever
1407 * trickery we can modify the list in-place, but reversibly, by
1408 * stashing the unaligned parts in the as-yet-unused DMA fields.
1410 for_each_sg(sg
, s
, nents
, i
) {
1411 size_t s_iova_off
= iova_offset(iovad
, s
->offset
);
1412 size_t s_length
= s
->length
;
1413 size_t pad_len
= (mask
- iova_len
+ 1) & mask
;
1415 if (is_pci_p2pdma_page(sg_page(s
))) {
1416 map
= pci_p2pdma_map_segment(&p2pdma_state
, dev
, s
);
1418 case PCI_P2PDMA_MAP_BUS_ADDR
:
1420 * iommu_map_sg() will skip this segment as
1421 * it is marked as a bus address,
1422 * __finalise_sg() will copy the dma address
1423 * into the output segment.
1426 case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE
:
1428 * Mapping through host bridge should be
1429 * mapped with regular IOVAs, thus we
1430 * do nothing here and continue below.
1435 goto out_restore_sg
;
1439 sg_dma_address(s
) = s_iova_off
;
1440 sg_dma_len(s
) = s_length
;
1441 s
->offset
-= s_iova_off
;
1442 s_length
= iova_align(iovad
, s_length
+ s_iova_off
);
1443 s
->length
= s_length
;
1446 * Due to the alignment of our single IOVA allocation, we can
1447 * depend on these assumptions about the segment boundary mask:
1448 * - If mask size >= IOVA size, then the IOVA range cannot
1449 * possibly fall across a boundary, so we don't care.
1450 * - If mask size < IOVA size, then the IOVA range must start
1451 * exactly on a boundary, therefore we can lay things out
1452 * based purely on segment lengths without needing to know
1453 * the actual addresses beforehand.
1454 * - The mask must be a power of 2, so pad_len == 0 if
1455 * iova_len == 0, thus we cannot dereference prev the first
1456 * time through here (i.e. before it has a meaningful value).
1458 if (pad_len
&& pad_len
< s_length
- 1) {
1459 prev
->length
+= pad_len
;
1460 iova_len
+= pad_len
;
1463 iova_len
+= s_length
;
1468 return __finalise_sg(dev
, sg
, nents
, 0);
1470 iova
= iommu_dma_alloc_iova(domain
, iova_len
, dma_get_mask(dev
), dev
);
1473 goto out_restore_sg
;
1477 * We'll leave any physical concatenation to the IOMMU driver's
1478 * implementation - it knows better than we do.
1480 ret
= iommu_map_sg(domain
, iova
, sg
, nents
, prot
, GFP_ATOMIC
);
1481 if (ret
< 0 || ret
< iova_len
)
1484 return __finalise_sg(dev
, sg
, nents
, iova
);
1487 iommu_dma_free_iova(cookie
, iova
, iova_len
, NULL
);
1489 __invalidate_sg(sg
, nents
);
1491 if (ret
!= -ENOMEM
&& ret
!= -EREMOTEIO
)
1496 void iommu_dma_unmap_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1497 enum dma_data_direction dir
, unsigned long attrs
)
1499 dma_addr_t end
= 0, start
;
1500 struct scatterlist
*tmp
;
1503 if (sg_dma_is_swiotlb(sg
)) {
1504 iommu_dma_unmap_sg_swiotlb(dev
, sg
, nents
, dir
, attrs
);
1508 if (!(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
1509 iommu_dma_sync_sg_for_cpu(dev
, sg
, nents
, dir
);
1512 * The scatterlist segments are mapped into a single
1513 * contiguous IOVA allocation, the start and end points
1514 * just have to be determined.
1516 for_each_sg(sg
, tmp
, nents
, i
) {
1517 if (sg_dma_is_bus_address(tmp
)) {
1518 sg_dma_unmark_bus_address(tmp
);
1522 if (sg_dma_len(tmp
) == 0)
1525 start
= sg_dma_address(tmp
);
1530 for_each_sg(tmp
, tmp
, nents
, i
) {
1531 if (sg_dma_is_bus_address(tmp
)) {
1532 sg_dma_unmark_bus_address(tmp
);
1536 if (sg_dma_len(tmp
) == 0)
1539 end
= sg_dma_address(tmp
) + sg_dma_len(tmp
);
1543 __iommu_dma_unmap(dev
, start
, end
- start
);
1546 dma_addr_t
iommu_dma_map_resource(struct device
*dev
, phys_addr_t phys
,
1547 size_t size
, enum dma_data_direction dir
, unsigned long attrs
)
1549 return __iommu_dma_map(dev
, phys
, size
,
1550 dma_info_to_prot(dir
, false, attrs
) | IOMMU_MMIO
,
1554 void iommu_dma_unmap_resource(struct device
*dev
, dma_addr_t handle
,
1555 size_t size
, enum dma_data_direction dir
, unsigned long attrs
)
1557 __iommu_dma_unmap(dev
, handle
, size
);
1560 static void __iommu_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
)
1562 size_t alloc_size
= PAGE_ALIGN(size
);
1563 int count
= alloc_size
>> PAGE_SHIFT
;
1564 struct page
*page
= NULL
, **pages
= NULL
;
1566 /* Non-coherent atomic allocation? Easy */
1567 if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP
) &&
1568 dma_free_from_pool(dev
, cpu_addr
, alloc_size
))
1571 if (is_vmalloc_addr(cpu_addr
)) {
1573 * If it the address is remapped, then it's either non-coherent
1574 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1576 pages
= dma_common_find_pages(cpu_addr
);
1578 page
= vmalloc_to_page(cpu_addr
);
1579 dma_common_free_remap(cpu_addr
, alloc_size
);
1581 /* Lowmem means a coherent atomic or CMA allocation */
1582 page
= virt_to_page(cpu_addr
);
1586 __iommu_dma_free_pages(pages
, count
);
1588 dma_free_contiguous(dev
, page
, alloc_size
);
1591 void iommu_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
,
1592 dma_addr_t handle
, unsigned long attrs
)
1594 __iommu_dma_unmap(dev
, handle
, size
);
1595 __iommu_dma_free(dev
, size
, cpu_addr
);
1598 static void *iommu_dma_alloc_pages(struct device
*dev
, size_t size
,
1599 struct page
**pagep
, gfp_t gfp
, unsigned long attrs
)
1601 bool coherent
= dev_is_dma_coherent(dev
);
1602 size_t alloc_size
= PAGE_ALIGN(size
);
1603 int node
= dev_to_node(dev
);
1604 struct page
*page
= NULL
;
1607 page
= dma_alloc_contiguous(dev
, alloc_size
, gfp
);
1609 page
= alloc_pages_node(node
, gfp
, get_order(alloc_size
));
1613 if (!coherent
|| PageHighMem(page
)) {
1614 pgprot_t prot
= dma_pgprot(dev
, PAGE_KERNEL
, attrs
);
1616 cpu_addr
= dma_common_contiguous_remap(page
, alloc_size
,
1617 prot
, __builtin_return_address(0));
1619 goto out_free_pages
;
1622 arch_dma_prep_coherent(page
, size
);
1624 cpu_addr
= page_address(page
);
1628 memset(cpu_addr
, 0, alloc_size
);
1631 dma_free_contiguous(dev
, page
, alloc_size
);
1635 void *iommu_dma_alloc(struct device
*dev
, size_t size
, dma_addr_t
*handle
,
1636 gfp_t gfp
, unsigned long attrs
)
1638 bool coherent
= dev_is_dma_coherent(dev
);
1639 int ioprot
= dma_info_to_prot(DMA_BIDIRECTIONAL
, coherent
, attrs
);
1640 struct page
*page
= NULL
;
1645 if (gfpflags_allow_blocking(gfp
) &&
1646 !(attrs
& DMA_ATTR_FORCE_CONTIGUOUS
)) {
1647 return iommu_dma_alloc_remap(dev
, size
, handle
, gfp
, attrs
);
1650 if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP
) &&
1651 !gfpflags_allow_blocking(gfp
) && !coherent
)
1652 page
= dma_alloc_from_pool(dev
, PAGE_ALIGN(size
), &cpu_addr
,
1655 cpu_addr
= iommu_dma_alloc_pages(dev
, size
, &page
, gfp
, attrs
);
1659 *handle
= __iommu_dma_map(dev
, page_to_phys(page
), size
, ioprot
,
1660 dev
->coherent_dma_mask
);
1661 if (*handle
== DMA_MAPPING_ERROR
) {
1662 __iommu_dma_free(dev
, size
, cpu_addr
);
1669 int iommu_dma_mmap(struct device
*dev
, struct vm_area_struct
*vma
,
1670 void *cpu_addr
, dma_addr_t dma_addr
, size_t size
,
1671 unsigned long attrs
)
1673 unsigned long nr_pages
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1674 unsigned long pfn
, off
= vma
->vm_pgoff
;
1677 vma
->vm_page_prot
= dma_pgprot(dev
, vma
->vm_page_prot
, attrs
);
1679 if (dma_mmap_from_dev_coherent(dev
, vma
, cpu_addr
, size
, &ret
))
1682 if (off
>= nr_pages
|| vma_pages(vma
) > nr_pages
- off
)
1685 if (is_vmalloc_addr(cpu_addr
)) {
1686 struct page
**pages
= dma_common_find_pages(cpu_addr
);
1689 return vm_map_pages(vma
, pages
, nr_pages
);
1690 pfn
= vmalloc_to_pfn(cpu_addr
);
1692 pfn
= page_to_pfn(virt_to_page(cpu_addr
));
1695 return remap_pfn_range(vma
, vma
->vm_start
, pfn
+ off
,
1696 vma
->vm_end
- vma
->vm_start
,
1700 int iommu_dma_get_sgtable(struct device
*dev
, struct sg_table
*sgt
,
1701 void *cpu_addr
, dma_addr_t dma_addr
, size_t size
,
1702 unsigned long attrs
)
1707 if (is_vmalloc_addr(cpu_addr
)) {
1708 struct page
**pages
= dma_common_find_pages(cpu_addr
);
1711 return sg_alloc_table_from_pages(sgt
, pages
,
1712 PAGE_ALIGN(size
) >> PAGE_SHIFT
,
1713 0, size
, GFP_KERNEL
);
1716 page
= vmalloc_to_page(cpu_addr
);
1718 page
= virt_to_page(cpu_addr
);
1721 ret
= sg_alloc_table(sgt
, 1, GFP_KERNEL
);
1723 sg_set_page(sgt
->sgl
, page
, PAGE_ALIGN(size
), 0);
1727 unsigned long iommu_dma_get_merge_boundary(struct device
*dev
)
1729 struct iommu_domain
*domain
= iommu_get_dma_domain(dev
);
1731 return (1UL << __ffs(domain
->pgsize_bitmap
)) - 1;
1734 size_t iommu_dma_opt_mapping_size(void)
1736 return iova_rcache_range();
1739 size_t iommu_dma_max_mapping_size(struct device
*dev
)
1741 if (dev_is_untrusted(dev
))
1742 return swiotlb_max_mapping_size(dev
);
1747 void iommu_setup_dma_ops(struct device
*dev
)
1749 struct iommu_domain
*domain
= iommu_get_domain_for_dev(dev
);
1751 if (dev_is_pci(dev
))
1752 dev
->iommu
->pci_32bit_workaround
= !iommu_dma_forcedac
;
1754 dev
->dma_iommu
= iommu_is_dma_domain(domain
);
1755 if (dev
->dma_iommu
&& iommu_dma_init_domain(domain
, dev
))
1760 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
1762 dev
->dma_iommu
= false;
1765 static struct iommu_dma_msi_page
*iommu_dma_get_msi_page(struct device
*dev
,
1766 phys_addr_t msi_addr
, struct iommu_domain
*domain
)
1768 struct iommu_dma_cookie
*cookie
= domain
->iova_cookie
;
1769 struct iommu_dma_msi_page
*msi_page
;
1771 int prot
= IOMMU_WRITE
| IOMMU_NOEXEC
| IOMMU_MMIO
;
1772 size_t size
= cookie_msi_granule(cookie
);
1774 msi_addr
&= ~(phys_addr_t
)(size
- 1);
1775 list_for_each_entry(msi_page
, &cookie
->msi_page_list
, list
)
1776 if (msi_page
->phys
== msi_addr
)
1779 msi_page
= kzalloc(sizeof(*msi_page
), GFP_KERNEL
);
1783 iova
= iommu_dma_alloc_iova(domain
, size
, dma_get_mask(dev
), dev
);
1787 if (iommu_map(domain
, iova
, msi_addr
, size
, prot
, GFP_KERNEL
))
1790 INIT_LIST_HEAD(&msi_page
->list
);
1791 msi_page
->phys
= msi_addr
;
1792 msi_page
->iova
= iova
;
1793 list_add(&msi_page
->list
, &cookie
->msi_page_list
);
1797 iommu_dma_free_iova(cookie
, iova
, size
, NULL
);
1804 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1805 * @desc: MSI descriptor, will store the MSI page
1806 * @msi_addr: MSI target address to be mapped
1808 * Return: 0 on success or negative error code if the mapping failed.
1810 int iommu_dma_prepare_msi(struct msi_desc
*desc
, phys_addr_t msi_addr
)
1812 struct device
*dev
= msi_desc_to_dev(desc
);
1813 struct iommu_domain
*domain
= iommu_get_domain_for_dev(dev
);
1814 struct iommu_dma_msi_page
*msi_page
;
1815 static DEFINE_MUTEX(msi_prepare_lock
); /* see below */
1817 if (!domain
|| !domain
->iova_cookie
) {
1818 desc
->iommu_cookie
= NULL
;
1823 * In fact the whole prepare operation should already be serialised by
1824 * irq_domain_mutex further up the callchain, but that's pretty subtle
1825 * on its own, so consider this locking as failsafe documentation...
1827 mutex_lock(&msi_prepare_lock
);
1828 msi_page
= iommu_dma_get_msi_page(dev
, msi_addr
, domain
);
1829 mutex_unlock(&msi_prepare_lock
);
1831 msi_desc_set_iommu_cookie(desc
, msi_page
);
1839 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1840 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
1841 * @msg: MSI message containing target physical address
1843 void iommu_dma_compose_msi_msg(struct msi_desc
*desc
, struct msi_msg
*msg
)
1845 struct device
*dev
= msi_desc_to_dev(desc
);
1846 const struct iommu_domain
*domain
= iommu_get_domain_for_dev(dev
);
1847 const struct iommu_dma_msi_page
*msi_page
;
1849 msi_page
= msi_desc_get_iommu_cookie(desc
);
1851 if (!domain
|| !domain
->iova_cookie
|| WARN_ON(!msi_page
))
1854 msg
->address_hi
= upper_32_bits(msi_page
->iova
);
1855 msg
->address_lo
&= cookie_msi_granule(domain
->iova_cookie
) - 1;
1856 msg
->address_lo
+= lower_32_bits(msi_page
->iova
);
1859 static int iommu_dma_init(void)
1861 if (is_kdump_kernel())
1862 static_branch_enable(&iommu_deferred_attach_enabled
);
1864 return iova_cache_get();
1866 arch_initcall(iommu_dma_init
);