/*
 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address, int end_lvl,
		      u64 **pte_page, gfp_t gfp);
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages);
static void reset_iommu_command_buffer(struct amd_iommu *iommu);
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address, int map_size);
static void update_domain(struct protection_domain *domain);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static inline u16 get_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return calc_devid(pdev->bus->number, pdev->devfn);
}

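/*
 * Illustrative note (not from the original file): assuming the usual
 * calc_devid() definition from amd_iommu_types.h, the requestor ID is
 * devid = (bus << 8) | devfn.  For example, a device at 01:05.2 has
 * devfn = (5 << 3) | 2 = 0x2a and therefore devid = 0x012a; this value
 * indexes amd_iommu_dev_table[], amd_iommu_rlookup_table[] and the other
 * per-device tables used below.
 */
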
#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);

static struct dentry *stats_dir;
static struct dentry *de_isolate;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
					 (u32 *)&amd_iommu_isolate);

	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
					 (u32 *)&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 8; ++i)
		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	u32 *event = __evt;
	int type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		reset_iommu_command_buffer(iommu);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = true;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function waits until an IOMMU has completed a completion
 * wait command
 */
static void __iommu_wait_for_completion(struct amd_iommu *iommu)
{
	int ready = 0;
	unsigned status = 0;
	unsigned long i = 0;

	INC_STATS_COUNTER(compl_wait);

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely(i == EXIT_LOOP_COUNT)) {
		spin_unlock(&iommu->lock);
		reset_iommu_command_buffer(iommu);
		spin_lock(&iommu->lock);
	}
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int __iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	return __iommu_queue_command(iommu, &cmd);
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)
		goto out;

	ret = __iommu_completion_wait(iommu);

	iommu->need_sync = false;

	if (ret)
		goto out;

	__iommu_wait_for_completion(iommu);

out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

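/*
 * Illustrative sketch (not part of the original driver): the usual caller
 * pattern is to queue one or more commands and then synchronize once.
 * The 'iommu' and 'devid' parameters are assumed to come from
 * amd_iommu_rlookup_table[]; this mirrors iommu_queue_inv_dev_entry() below.
 */
#if 0
static void example_invalidate_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	iommu_queue_command(iommu, &cmd);	/* marks iommu->need_sync */
	iommu_completion_wait(iommu);		/* drains the command buffer */
}
#endif
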
static void iommu_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
					  u16 domid, int pde, int s)
{
	memset(cmd, 0, sizeof(*cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	cmd->data[1] |= domid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	__iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __iommu_flush_pages(struct protection_domain *domain,
				u64 address, size_t size, int pde)
{
	int s = 0, i;
	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		iommu_queue_inv_iommu_pages(amd_iommus[i], address,
					    domain->id, pde, s);
	}
}

static void iommu_flush_pages(struct protection_domain *domain,
			     u64 address, size_t size)
{
	__iommu_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct protection_domain *domain)
{
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void iommu_flush_tlb_pde(struct protection_domain *domain)
{
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

/*
 * This function flushes all domains that have devices on the given IOMMU
 */
static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
	struct protection_domain *domain;
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);

	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		if (domain->dev_iommu[iommu->index] == 0)
			continue;

		spin_lock(&domain->lock);
		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
		iommu_flush_complete(domain);
		spin_unlock(&domain->lock);
	}

	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
void amd_iommu_flush_all_domains(void)
{
	struct protection_domain *domain;
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);

	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		spin_lock(&domain->lock);
		iommu_flush_tlb_pde(domain);
		iommu_flush_complete(domain);
		spin_unlock(&domain->lock);
	}

	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
{
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if (iommu != amd_iommu_rlookup_table[i])
			continue;

		iommu_queue_inv_dev_entry(iommu, i);
		iommu_completion_wait(iommu);
	}
}

static void flush_devices_by_domain(struct protection_domain *domain)
{
	struct amd_iommu *iommu;
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
		    (amd_iommu_pd_table[i] != domain))
			continue;

		iommu = amd_iommu_rlookup_table[i];
		if (!iommu)
			continue;

		iommu_queue_inv_dev_entry(iommu, i);
		iommu_completion_wait(iommu);
	}
}

static void reset_iommu_command_buffer(struct amd_iommu *iommu)
{
	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");

	if (iommu->reset_in_progress)
		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");

	iommu->reset_in_progress = true;

	amd_iommu_reset_cmd_buffer(iommu);
	flush_all_devices_for_iommu(iommu);
	flush_all_domains_on_iommu(iommu);

	iommu->reset_in_progress = false;
}

void amd_iommu_flush_all_devices(void)
{
	flush_devices_by_domain(NULL);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  int prot,
			  int map_size)
{
	u64 __pte, *pte;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	BUG_ON(!PM_ALIGNED(map_size, bus_addr));
	BUG_ON(!PM_ALIGNED(map_size, phys_addr));

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	update_domain(dom);

	return 0;
}

static void iommu_unmap_page(struct protection_domain *dom,
			     unsigned long bus_addr, int map_size)
{
	u64 *pte = fetch_pte(dom, bus_addr, map_size);

	if (pte)
		*pte = 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf;
	int i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default domain DMA of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
				     PM_MAP_4k);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT,
				  dma_dom->aperture[0]->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core functions.
 *
 * called with domain->lock held
 */

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address, int map_size)
{
	int level;
	u64 *pte;

	level =  domain->mode - 1;
	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > map_size) {
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];

		if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
			pte = NULL;
			break;
		}
	}

	return pte;
}

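/*
 * Illustrative note (not from the original file): assuming the definitions
 * in amd_iommu_types.h, PM_LEVEL_INDEX(level, address) extracts a 9-bit
 * table index per level above the 4kb page offset, i.e. bits
 * [12 + 9*level + 8 : 12 + 9*level] of the DMA address.  For a 3-level
 * page table (mode 3) the walk above therefore uses
 * (address >> 30) & 0x1ff, then (address >> 21) & 0x1ff, and finally
 * (address >> 12) & 0x1ff to reach the PTE.
 */
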
/*
 * This function is used to add a new aperture range to an existing
 * aperture in case of dma_ops domain allocation or address allocation
 * failure.
 */
static int alloc_new_range(struct dma_ops_domain *dma_dom,
			   bool populate, gfp_t gfp)
{
	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
	struct amd_iommu *iommu;
	unsigned long i;

#ifdef CONFIG_IOMMU_STRESS
	populate = false;
#endif

	if (index >= APERTURE_MAX_RANGES)
		return -ENOMEM;

	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
	if (!dma_dom->aperture[index])
		return -ENOMEM;

	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
	if (!dma_dom->aperture[index]->bitmap)
		goto out_free;

	dma_dom->aperture[index]->offset = dma_dom->aperture_size;

	if (populate) {
		unsigned long address = dma_dom->aperture_size;
		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
		u64 *pte, *pte_page;

		for (i = 0; i < num_ptes; ++i) {
			pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
					&pte_page, gfp);
			if (!pte)
				goto out_free;

			dma_dom->aperture[index]->pte_pages[i] = pte_page;

			address += APERTURE_RANGE_SIZE / 64;
		}
	}

	dma_dom->aperture_size += APERTURE_RANGE_SIZE;

	/* Initialize the exclusion range if necessary */
	for_each_iommu(iommu) {
		if (iommu->exclusion_start &&
		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
		    && iommu->exclusion_start < dma_dom->aperture_size) {
			unsigned long startpage;
			int pages = iommu_num_pages(iommu->exclusion_start,
						    iommu->exclusion_length,
						    PAGE_SIZE);
			startpage = iommu->exclusion_start >> PAGE_SHIFT;
			dma_ops_reserve_addresses(dma_dom, startpage, pages);
		}
	}

	/*
	 * Check for areas already mapped as present in the new aperture
	 * range and mark those pages as reserved in the allocator. Such
	 * mappings may already exist as a result of requested unity
	 * mappings for devices.
	 */
	for (i = dma_dom->aperture[index]->offset;
	     i < dma_dom->aperture_size;
	     i += PAGE_SIZE) {
		u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
		if (!pte || !IOMMU_PTE_PRESENT(*pte))
			continue;

		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
	}

	update_domain(&dma_dom->domain);

	return 0;

out_free:
	update_domain(&dma_dom->domain);

	free_page((unsigned long)dma_dom->aperture[index]->bitmap);

	kfree(dma_dom->aperture[index]);
	dma_dom->aperture[index] = NULL;

	return -ENOMEM;
}

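/*
 * Illustrative note (not from the original file): assuming the values in
 * amd_iommu_types.h (APERTURE_RANGE_SHIFT == 27), each range added by
 * alloc_new_range() covers 1 << 27 bytes = 128 MB, i.e.
 * APERTURE_RANGE_PAGES == 32768 4kb pages tracked by one bitmap page.
 * This is the 128 MB growth step the retry path in __map_single() below
 * refers to.
 */
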
static unsigned long dma_ops_area_alloc(struct device *dev,
					struct dma_ops_domain *dom,
					unsigned int pages,
					unsigned long align_mask,
					u64 dma_mask,
					unsigned long start)
{
	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i = start >> APERTURE_RANGE_SHIFT;
	unsigned long boundary_size;
	unsigned long address = -1;
	unsigned long limit;

	next_bit >>= PAGE_SHIFT;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;

	for (;i < max_index; ++i) {
		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

		if (dom->aperture[i]->offset >= dma_mask)
			break;

		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
					       dma_mask >> PAGE_SHIFT);

		address = iommu_area_alloc(dom->aperture[i]->bitmap,
					   limit, next_bit, pages, 0,
					   boundary_size, align_mask);
		if (address != -1) {
			address = dom->aperture[i]->offset +
				  (address << PAGE_SHIFT);
			dom->next_address = address + (pages << PAGE_SHIFT);
			break;
		}

		next_bit = 0;
	}

	return address;
}

static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long address;

#ifdef CONFIG_IOMMU_STRESS
	dom->next_address = 0;
	dom->need_flush = true;
#endif

	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
				     dma_mask, dom->next_address);

	if (address == -1) {
		dom->next_address = 0;
		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
					     dma_mask, 0);
		dom->need_flush = true;
	}

	if (unlikely(address == -1))
		address = DMA_ERROR_CODE;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	unsigned i = address >> APERTURE_RANGE_SHIFT;
	struct aperture_range *range = dom->aperture[i];

	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);

#ifdef CONFIG_IOMMU_STRESS
	if (i < 4)
		return;
#endif

	if (address >= dom->next_address)
		dom->need_flush = true;

	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

	iommu_area_free(range->bitmap, address, pages);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

/*
 * This function adds a protection domain to the global protection domain list
 */
static void add_domain_to_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_add(&domain->list, &amd_iommu_pd_list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function removes a protection domain from the global
 * protection domain list
 */
static void del_domain_from_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_del(&domain->list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

static void domain_id_free(int id)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		int index = i / APERTURE_RANGE_PAGES;
		int page  = i % APERTURE_RANGE_PAGES;
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}

static void free_pagetable(struct protection_domain *domain)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = domain->pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);

	domain->pt_root = NULL;
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	int i;

	if (!dom)
		return;

	del_domain_from_list(&dom->domain);

	free_pagetable(&dom->domain);

	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
		if (!dom->aperture[i])
			continue;
		free_page((unsigned long)dom->aperture[i]->bitmap);
		kfree(dom->aperture[i]);
	}

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
{
	struct dma_ops_domain *dma_dom;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.flags = PD_DMA_OPS_MASK;
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	add_domain_to_list(&dma_dom->domain);

	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
		goto free_dma_dom;

	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->aperture[0]->bitmap[0] = 1;
	dma_dom->next_address = 0;

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * little helper function to check whether a given protection domain is a
 * dma_ops domain
 */
static bool dma_ops_domain(struct protection_domain *domain)
{
	return domain->flags & PD_DMA_OPS_MASK;
}

static void set_dte_entry(u16 devid, struct protection_domain *domain)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
	u64 pte_root = virt_to_phys(domain->pt_root);

	BUG_ON(amd_iommu_pd_table[devid] != NULL);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	amd_iommu_dev_table[devid].data[2] = domain->id;
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);

	amd_iommu_pd_table[devid] = domain;

	/* Do reference counting */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt                 += 1;

	/* Flush the changed DTE entry */
	iommu_queue_inv_dev_entry(iommu, devid);
}

static void clear_dte_entry(u16 devid)
{
	struct protection_domain *domain = amd_iommu_pd_table[devid];
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	BUG_ON(domain == NULL);

	/* remove domain from the lookup table */
	amd_iommu_pd_table[devid] = NULL;

	/* remove entry from the device table seen by the hardware */
	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
	amd_iommu_dev_table[devid].data[1] = 0;
	amd_iommu_dev_table[devid].data[2] = 0;

	amd_iommu_apply_erratum_63(devid);

	/* decrease reference counters */
	domain->dev_iommu[iommu->index] -= 1;
	domain->dev_cnt                 -= 1;

	iommu_queue_inv_dev_entry(iommu, devid);
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the domain and makes it visible to the hardware
 */
static int __attach_device(struct device *dev,
			   struct protection_domain *domain)
{
	u16 devid = get_device_id(dev);
	u16 alias = amd_iommu_alias_table[devid];

	/* lock domain */
	spin_lock(&domain->lock);

	/* Some sanity checks */
	if (amd_iommu_pd_table[alias] != NULL &&
	    amd_iommu_pd_table[alias] != domain)
		return -EBUSY;

	if (amd_iommu_pd_table[devid] != NULL &&
	    amd_iommu_pd_table[devid] != domain)
		return -EBUSY;

	/* Do real assignment */
	if (alias != devid &&
	    amd_iommu_pd_table[alias] == NULL)
		set_dte_entry(alias, domain);

	if (amd_iommu_pd_table[devid] == NULL)
		set_dte_entry(devid, domain);

	/* ready */
	spin_unlock(&domain->lock);

	return 0;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the domain and makes it visible to the hardware
 */
static int attach_device(struct device *dev,
			 struct protection_domain *domain)
{
	unsigned long flags;
	int ret;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	ret = __attach_device(dev, domain);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	iommu_flush_tlb_pde(domain);

	return ret;
}

/*
 * Removes a device from a protection domain (unlocked)
 */
static void __detach_device(struct device *dev)
{
	u16 devid = get_device_id(dev);
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	BUG_ON(!iommu);

	clear_dte_entry(devid);

	/*
	 * If we run in passthrough mode the device must be assigned to the
	 * passthrough domain if it is detached from any other domain
	 */
	if (iommu_pass_through)
		__attach_device(dev, pt_domain);
}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct device *dev)
{
	unsigned long flags;

	/* lock device table */
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__detach_device(dev);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(struct device *dev)
{
	struct protection_domain *dom;
	unsigned long flags;
	u16 devid, alias;

	devid = get_device_id(dev);
	alias = amd_iommu_alias_table[devid];

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	if (dom == NULL &&
	    amd_iommu_pd_table[alias] != NULL) {
		__attach_device(dev, amd_iommu_pd_table[alias]);
		dom = amd_iommu_pd_table[devid];
	}

	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

static int device_change_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
	struct protection_domain *domain;
	struct dma_ops_domain *dma_domain;
	struct amd_iommu *iommu;
	unsigned long flags;

	if (devid > amd_iommu_last_bdf)
		goto out;

	devid = amd_iommu_alias_table[devid];

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		goto out;

	domain = domain_for_device(dev);

	if (domain && !dma_ops_domain(domain))
		WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
			  "to a non-dma-ops domain\n", dev_name(dev));

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		if (!domain)
			goto out;
		if (iommu_pass_through)
			break;
		detach_device(dev);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		/* allocate a protection domain if a device is added */
		dma_domain = find_protection_domain(devid);
		if (dma_domain)
			goto out;
		dma_domain = dma_ops_domain_alloc(iommu);
		if (!dma_domain)
			goto out;
		dma_domain->target_dev = devid;

		spin_lock_irqsave(&iommu_pd_list_lock, flags);
		list_add_tail(&dma_domain->list, &iommu_pd_list);
		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

		break;
	default:
		goto out;
	}

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);

out:
	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_change_notifier,
};

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	u16 bdf;
	struct pci_dev *pcidev;

	if (!dev || !dev->dma_mask)
		return false;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return false;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[bdf] == NULL)
		return false;

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;
	u16 alias = amd_iommu_alias_table[devid];

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid ||
		    entry->target_dev == alias) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static struct protection_domain *get_domain(struct device *dev)
{
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	u16 devid = get_device_id(dev);

	if (!check_device(dev))
		return ERR_PTR(-EINVAL);

	domain = domain_for_device(dev);
	if (domain != NULL && !dma_ops_domain(domain))
		return ERR_PTR(-EBUSY);

	if (domain != NULL)
		return domain;

	/* Device not bound yet - bind it */
	dma_dom = find_protection_domain(devid);
	if (!dma_dom)
		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
	attach_device(dev, &dma_dom->domain);
	DUMP_printk("Using protection domain %d for device %s\n",
		    dma_dom->domain.id, dev_name(dev));

	return &dma_dom->domain;
}

static void update_device_table(struct protection_domain *domain)
{
	unsigned long flags;
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if (amd_iommu_pd_table[i] != domain)
			continue;
		write_lock_irqsave(&amd_iommu_devtable_lock, flags);
		set_dte_entry(i, domain);
		write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
	}
}

static void update_domain(struct protection_domain *domain)
{
	if (!domain->updated)
		return;

	update_device_table(domain);
	flush_devices_by_domain(domain);
	iommu_flush_tlb_pde(domain);

	domain->updated = false;
}

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      int end_lvl,
		      u64 **pte_page,
		      gfp_t gfp)
{
	u64 *pte, *page;
	int level;

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level =  domain->mode - 1;
	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > end_lvl) {
		if (!IOMMU_PTE_PRESENT(*pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;
			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
		}

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function fetches the PTE for a given address in the aperture
 */
static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
			    unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte, *pte_page;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return NULL;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte) {
		pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
				GFP_ATOMIC);
		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
	} else
		pte += PM_LEVEL_INDEX(0, address);

	update_domain(&dom->domain);

	return pte;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dma_ops_get_pte(dom, address);
	if (!pte)
		return DMA_ERROR_CODE;

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
				 unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return;

	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte)
		return;

	pte += PM_LEVEL_INDEX(0, address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start, ret;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	INC_STATS_COUNTER(total_map_requests);

	if (pages > 1)
		INC_STATS_COUNTER(cross_page);

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

retry:
	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == DMA_ERROR_CODE)) {
		/*
		 * setting next_address here will let the address
		 * allocator only scan the new allocated range in the
		 * first run. This is a small optimization.
		 */
		dma_dom->next_address = dma_dom->aperture_size;

		if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
			goto out;

		/*
		 * aperture was successfully enlarged by 128 MB, try
		 * allocation again
		 */
		goto retry;
	}

	start = address;
	for (i = 0; i < pages; ++i) {
		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
		if (ret == DMA_ERROR_CODE)
			goto out_unmap;

		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	ADD_STATS_COUNTER(alloced_io_mem, size);

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(&dma_dom->domain);
		dma_dom->need_flush = false;
	} else if (unlikely(amd_iommu_np_cache))
		iommu_flush_pages(&dma_dom->domain, address, size);

out:
	return address;

out_unmap:

	for (--i; i >= 0; --i) {
		start -= PAGE_SIZE;
		dma_ops_domain_unmap(dma_dom, start);
	}

	dma_ops_free_addresses(dma_dom, address, pages);

	return DMA_ERROR_CODE;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == DMA_ERROR_CODE) ||
	    (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(dma_dom, start);
		start += PAGE_SIZE;
	}

	SUB_STATS_COUNTER(alloced_io_mem, size);

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		iommu_flush_pages(&dma_dom->domain, dma_addr, size);
		dma_dom->need_flush = false;
	}
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
			   unsigned long offset, size_t size,
			   enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	dma_addr_t addr;
	u64 dma_mask;
	phys_addr_t paddr = page_to_phys(page) + offset;

	INC_STATS_COUNTER(cnt_map_single);

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL)
		return (dma_addr_t)paddr;
	else if (IS_ERR(domain))
		return DMA_ERROR_CODE;

	dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == DMA_ERROR_CODE)
		goto out;

	iommu_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;

	INC_STATS_COUNTER(cnt_unmap_single);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(domain->priv, dma_addr, size, dir);

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	INC_STATS_COUNTER(cnt_map_sg);

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL)
		return map_sg_no_iommu(dev, sglist, nelems, dir);
	else if (IS_ERR(domain))
		return 0;

	dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	iommu_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	struct scatterlist *s;
	int i;

	INC_STATS_COUNTER(cnt_unmap_sg);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct protection_domain *domain;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL) {
		virt_addr = (void *)__get_free_pages(flag, get_order(size));
		*dma_addr = __pa(virt_addr);
		return virt_addr;
	} else if (IS_ERR(domain))
		return NULL;

	dma_mask  = dev->coherent_dma_mask;
	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	flag     |= __GFP_ZERO;

	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == DMA_ERROR_CODE) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:

	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct protection_domain *domain;

	INC_STATS_COUNTER(cnt_free_coherent);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	return check_device(dev);
}

/*
 * The function for pre-allocating protection domains.
 *
 * If the driver core informs the DMA layer when a driver grabs a device
 * we don't need to preallocate the protection domains anymore.
 * For now we have to.
 */
static void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	u16 devid, __devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		__devid = devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(&dev->dev))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		attach_device(&dev->dev, &dma_dom->domain);

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}

static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_page = map_page,
	.unmap_page = unmap_page,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};

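/*
 * Illustrative sketch (not part of the original driver): once dma_ops points
 * at amd_iommu_dma_ops (see amd_iommu_init_dma_ops() below), an ordinary PCI
 * driver reaches map_page()/unmap_page() above through the usual DMA-API
 * calls.  'pdev', 'buf' and 'len' are hypothetical driver variables.
 */
#if 0
static int example_dma_usage(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;

	/* ... let the device DMA from 'handle' ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);

	return 0;
}
#endif
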
/*
 * The function which clues the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	swiotlb = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make the driver finally visible to the drivers */
	dma_ops = &amd_iommu_dma_ops;

	register_iommu(&amd_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	amd_iommu_stats_init();

	return 0;

free_domains:

	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}

/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/

static void cleanup_domain(struct protection_domain *domain)
{
	unsigned long flags;
	u16 devid;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		if (amd_iommu_pd_table[devid] == domain)
			clear_dte_entry(devid);

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	del_domain_from_list(domain);

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}

static struct protection_domain *protection_domain_alloc(void)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;

	add_domain_to_list(domain);

	return domain;

out_err:
	kfree(domain);

	return NULL;
}

static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = protection_domain_alloc();
	if (!domain)
		goto out_free;

	domain->mode    = PAGE_MODE_3_LEVEL;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	dom->priv = domain;

	return 0;

out_free:
	protection_domain_free(domain);

	return -ENOMEM;
}

static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	free_pagetable(domain);

	domain_id_free(domain->id);

	kfree(domain);

	dom->priv = NULL;
}

static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid > 0)
		detach_device(dev);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);
}

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct protection_domain *old_domain;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	int ret;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return -EINVAL;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid >= amd_iommu_last_bdf ||
			devid != amd_iommu_alias_table[devid])
		return -EINVAL;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -EINVAL;

	old_domain = amd_iommu_pd_table[devid];
	if (old_domain)
		detach_device(dev);

	ret = attach_device(dev, domain);

	iommu_completion_wait(iommu);

	return ret;
}

static int amd_iommu_map_range(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i,  npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
		if (ret)
			return ret;

		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}

static void amd_iommu_unmap_range(struct iommu_domain *dom,
				  unsigned long iova, size_t size)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i,  npages = iommu_num_pages(iova, size, PAGE_SIZE);

	iova  &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova, PM_MAP_4k);
		iova  += PAGE_SIZE;
	}

	iommu_flush_tlb_pde(domain);
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset = iova & ~PAGE_MASK;
	phys_addr_t paddr;
	u64 *pte;

	pte = fetch_pte(domain, iova, PM_MAP_4k);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	paddr  = *pte & IOMMU_PAGE_MASK;
	paddr |= offset;

	return paddr;
}

static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static struct iommu_ops amd_iommu_ops = {
	.domain_init = amd_iommu_domain_init,
	.domain_destroy = amd_iommu_domain_destroy,
	.attach_dev = amd_iommu_attach_device,
	.detach_dev = amd_iommu_detach_device,
	.map = amd_iommu_map_range,
	.unmap = amd_iommu_unmap_range,
	.iova_to_phys = amd_iommu_iova_to_phys,
	.domain_has_cap = amd_iommu_domain_has_cap,
};

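/*
 * Illustrative sketch (not part of the original driver): these callbacks back
 * the generic IOMMU API of this kernel generation (wired up via
 * register_iommu() in amd_iommu_init_dma_ops()), which is what KVM device
 * assignment uses.  'dev', 'iova', 'paddr' and 'size' are hypothetical.
 */
#if 0
static int example_iommu_api_usage(struct device *dev, unsigned long iova,
				   phys_addr_t paddr, size_t size)
{
	struct iommu_domain *dom = iommu_domain_alloc();

	if (!dom)
		return -ENOMEM;

	if (iommu_attach_device(dom, dev)) {
		iommu_domain_free(dom);
		return -ENODEV;
	}

	iommu_map_range(dom, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	/* ... the device now translates iova -> paddr ... */
	iommu_unmap_range(dom, iova, size);

	iommu_detach_device(dom, dev);
	iommu_domain_free(dom);

	return 0;
}
#endif
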
2437 /*****************************************************************************
2439 * The next functions do a basic initialization of IOMMU for pass through
2442 * In passthrough mode the IOMMU is initialized and enabled but not used for
2443 * DMA-API translation.
2445 *****************************************************************************/
2447 int __init
amd_iommu_init_passthrough(void)
2449 struct amd_iommu
*iommu
;
2450 struct pci_dev
*dev
= NULL
;
2453 /* allocate passthroug domain */
2454 pt_domain
= protection_domain_alloc();
2458 pt_domain
->mode
|= PAGE_MODE_NONE
;
2460 while ((dev
= pci_get_device(PCI_ANY_ID
, PCI_ANY_ID
, dev
)) != NULL
) {
2462 devid
= calc_devid(dev
->bus
->number
, dev
->devfn
);
2463 if (devid
> amd_iommu_last_bdf
)
2466 iommu
= amd_iommu_rlookup_table
[devid
];
2470 attach_device(&dev
->dev
, pt_domain
);
2473 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");