qemu-kvm/amd-iommu.git / hw / amd_iommu.c
blob e72f0c080c198e152c4371c7e654d29d33e45631 (commit: "ac97: IOMMU support")
/*
 * AMD IOMMU emulation
 *
 * Copyright (c) 2010 Eduard-Gabriel Munteanu
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "pc.h"
#include "hw.h"
#include "pci.h"
#include "iommu.h"

/* Capability registers */
#define CAPAB_HEADER            0x00
#define CAPAB_REV_TYPE          0x02
#define CAPAB_FLAGS             0x03
#define CAPAB_BAR_LOW           0x04
#define CAPAB_BAR_HIGH          0x08
#define CAPAB_RANGE             0x0C
#define CAPAB_MISC              0x10

#define CAPAB_SIZE              0x14

/* Capability header data */
#define CAPAB_FLAG_IOTLBSUP     (1 << 0)
#define CAPAB_FLAG_HTTUNNEL     (1 << 1)
#define CAPAB_FLAG_NPCACHE      (1 << 2)
#define CAPAB_INIT_REV          (1 << 3)
#define CAPAB_INIT_TYPE         3
#define CAPAB_INIT_REV_TYPE     (CAPAB_INIT_REV | CAPAB_INIT_TYPE)
#define CAPAB_INIT_FLAGS        (CAPAB_FLAG_NPCACHE | CAPAB_FLAG_HTTUNNEL)
#define CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
#define CAPAB_BAR_MASK          (~((1UL << 14) - 1))

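/*
 * The IOMMU announces itself through a PCI capability block (registered
 * below with PCI_CAP_ID_SEC). CAPAB_BAR_LOW/HIGH hold the 64-bit base of
 * the MMIO register window; the low 14 bits are control/reserved bits,
 * so CAPAB_BAR_MASK yields a 16KB-aligned address, matching MMIO_SIZE.
 * CAPAB_INIT_MISC packs VaSize = 64 (bits 21:15) and PaSize = 48
 * (bits 14:8): 64-bit virtual, 48-bit physical addressing.
 */
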
/* MMIO registers */
#define MMIO_DEVICE_TABLE       0x0000
#define MMIO_COMMAND_BASE       0x0008
#define MMIO_EVENT_BASE         0x0010
#define MMIO_CONTROL            0x0018
#define MMIO_EXCL_BASE          0x0020
#define MMIO_EXCL_LIMIT         0x0028
#define MMIO_COMMAND_HEAD       0x2000
#define MMIO_COMMAND_TAIL       0x2008
#define MMIO_EVENT_HEAD         0x2010
#define MMIO_EVENT_TAIL         0x2018
#define MMIO_STATUS             0x2020

#define MMIO_SIZE               0x4000

#define MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
#define MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~MMIO_DEVTAB_SIZE_MASK)
#define MMIO_DEVTAB_ENTRY_SIZE  32
#define MMIO_DEVTAB_SIZE_UNIT   4096

#define MMIO_CMDBUF_SIZE_BYTE       (MMIO_COMMAND_BASE + 7)
#define MMIO_CMDBUF_SIZE_MASK       0x0F
#define MMIO_CMDBUF_BASE_MASK       MMIO_DEVTAB_BASE_MASK
#define MMIO_CMDBUF_DEFAULT_SIZE    8
#define MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
#define MMIO_CMDBUF_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK

#define MMIO_EVTLOG_SIZE_BYTE       (MMIO_EVENT_BASE + 7)
#define MMIO_EVTLOG_SIZE_MASK       MMIO_CMDBUF_SIZE_MASK
#define MMIO_EVTLOG_BASE_MASK       MMIO_CMDBUF_BASE_MASK
#define MMIO_EVTLOG_DEFAULT_SIZE    MMIO_CMDBUF_DEFAULT_SIZE
#define MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
#define MMIO_EVTLOG_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK

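/*
 * The size byte in the command buffer and event log base registers
 * encodes the ring length as a power of two: len = 2^n entries of 16
 * bytes each. The default of 8 therefore means 256 entries (4KB).
 */
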
#define MMIO_EXCL_BASE_MASK         MMIO_DEVTAB_BASE_MASK
#define MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
#define MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
#define MMIO_EXCL_LIMIT_MASK        MMIO_DEVTAB_BASE_MASK
#define MMIO_EXCL_LIMIT_LOW         0xFFF

#define MMIO_CONTROL_IOMMUEN        (1ULL << 0)
#define MMIO_CONTROL_HTTUNEN        (1ULL << 1)
#define MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
#define MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
#define MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
#define MMIO_CONTROL_CMDBUFEN       (1ULL << 12)

#define MMIO_STATUS_EVTLOG_OF       (1ULL << 0)
#define MMIO_STATUS_EVTLOG_INTR     (1ULL << 1)
#define MMIO_STATUS_COMWAIT_INTR    (1ULL << 2)
#define MMIO_STATUS_EVTLOG_RUN      (1ULL << 3)
#define MMIO_STATUS_CMDBUF_RUN      (1ULL << 4)

#define CMDBUF_ID_BYTE              0x07
#define CMDBUF_ID_RSHIFT            4
#define CMDBUF_ENTRY_SIZE           0x10

#define CMD_COMPLETION_WAIT         0x01
#define CMD_INVAL_DEVTAB_ENTRY      0x02
#define CMD_INVAL_IOMMU_PAGES       0x03
#define CMD_INVAL_IOTLB_PAGES       0x04
#define CMD_INVAL_INTR_TABLE        0x05

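/*
 * Of these commands only COMPLETION_WAIT needs real work here: the
 * emulated IOMMU keeps no IOTLB state, so the invalidation commands can
 * complete as no-ops (see the switch in amd_iommu_cmdbuf_run below).
 */
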
#define DEVTAB_ENTRY_SIZE           32

/* Device table entry bits 0:63 */
#define DEV_VALID                   (1ULL << 0)
#define DEV_TRANSLATION_VALID       (1ULL << 1)
#define DEV_MODE_MASK               0x7
#define DEV_MODE_RSHIFT             9
#define DEV_PT_ROOT_MASK            0xFFFFFFFFFF000ULL
#define DEV_PT_ROOT_RSHIFT          12
#define DEV_PERM_SHIFT              61
#define DEV_PERM_READ               (1ULL << 61)
#define DEV_PERM_WRITE              (1ULL << 62)

/* Device table entry bits 64:127 */
#define DEV_DOMAIN_ID_MASK          ((1ULL << 16) - 1)
#define DEV_IOTLB_SUPPORT           (1ULL << 17)
#define DEV_SUPPRESS_PF             (1ULL << 18)
#define DEV_SUPPRESS_ALL_PF         (1ULL << 19)
#define DEV_IOCTL_MASK              (~3)
#define DEV_IOCTL_RSHIFT            20
#define DEV_IOCTL_DENY              0
#define DEV_IOCTL_PASSTHROUGH       1
#define DEV_IOCTL_TRANSLATE         2
#define DEV_CACHE                   (1ULL << 37)
#define DEV_SNOOP_DISABLE           (1ULL << 38)
#define DEV_EXCL                    (1ULL << 39)

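/*
 * Only the low 128 bits of the 256-bit device table entry are used:
 * bits 0 and 1 validate the entry and its translation information, the
 * mode field gives the I/O page table depth (0 = no translation) and
 * pt_root points at its root, while the second quadword carries the
 * 16-bit protection domain ID used to tag translations.
 */
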
struct amd_iommu_state {
    PCIDevice dev;

    int capab_offset;
    unsigned char *capab;

    int mmio_index;
    target_phys_addr_t mmio_addr;
    unsigned char *mmio_buf;
    int mmio_enabled;

    int enabled;
    int ats_enabled;

    target_phys_addr_t devtab;
    size_t devtab_len;

    target_phys_addr_t cmdbuf;
    int cmdbuf_enabled;
    size_t cmdbuf_len;
    size_t cmdbuf_head;
    size_t cmdbuf_tail;
    int completion_wait_intr;

    target_phys_addr_t evtlog;
    int evtlog_enabled;
    int evtlog_intr;
    size_t evtlog_len;
    size_t evtlog_head;
    size_t evtlog_tail;

    target_phys_addr_t excl_base;
    target_phys_addr_t excl_limit;
    int excl_enabled;
    int excl_allow;
};

static void amd_iommu_completion_wait(struct amd_iommu_state *st,
                                      uint8_t *cmd)
{
    uint64_t addr;

    /* Bit 0 (S) requests a store of the 64-bit completion data. */
    if (cmd[0] & 1) {
        addr = le64_to_cpu(*(uint64_t *) cmd) & 0xFFFFFFFFFFFF8ULL;
        cpu_physical_memory_write(addr, cmd + 8, 8);
    }

    /* Bit 1 (I) requests a completion-wait interrupt. */
    if (cmd[0] & 2)
        st->mmio_buf[MMIO_STATUS] |= MMIO_STATUS_COMWAIT_INTR;
}

static void amd_iommu_cmdbuf_run(struct amd_iommu_state *st)
{
    uint8_t cmd[16];
    int type;

    if (!st->cmdbuf_enabled)
        return;

    st->mmio_buf[MMIO_STATUS] |= MMIO_STATUS_CMDBUF_RUN;
    st->mmio_buf[MMIO_STATUS] |= MMIO_STATUS_EVTLOG_RUN;
    st->mmio_buf[MMIO_STATUS] |= MMIO_STATUS_COMWAIT_INTR;

    if (st->cmdbuf_head == st->cmdbuf_tail)
        return;

    /* Fetch the command at the head pointer, not at the buffer base. */
    cpu_physical_memory_read(st->cmdbuf + st->cmdbuf_head, cmd, 16);
    type = cmd[CMDBUF_ID_BYTE] >> CMDBUF_ID_RSHIFT;
    switch (type) {
    case CMD_COMPLETION_WAIT:
        amd_iommu_completion_wait(st, cmd);
        break;
    case CMD_INVAL_DEVTAB_ENTRY:
    case CMD_INVAL_IOMMU_PAGES:
    case CMD_INVAL_IOTLB_PAGES:
    case CMD_INVAL_INTR_TABLE:
    default:
        /* Nothing cached to invalidate in this model. */
        break;
    }

    st->cmdbuf_head += CMDBUF_ENTRY_SIZE;
}

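/*
 * The guest signals new commands by writing the command buffer
 * registers: amd_iommu_update_mmio() invokes amd_iommu_cmdbuf_run() on
 * every CONTROL, COMMAND_BASE, COMMAND_HEAD and COMMAND_TAIL write, and
 * each invocation consumes at most one 16-byte entry at the head.
 */
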
static uint32_t amd_iommu_mmio_buf_read(struct amd_iommu_state *st,
                                        size_t offset,
                                        size_t size)
{
    ssize_t i;
    uint32_t ret;

    if (!size)
        return 0;

    /* Assemble the value from the little-endian register file. */
    ret = st->mmio_buf[offset + size - 1];
    for (i = size - 2; i >= 0; i--) {
        ret <<= 8;
        ret |= st->mmio_buf[offset + i];
    }

    return ret;
}

static void amd_iommu_mmio_buf_write(struct amd_iommu_state *st,
                                     size_t offset,
                                     size_t size,
                                     uint32_t val)
{
    size_t i;

    /* Store the least-significant byte first: the buffer is little-endian. */
    for (i = 0; i < size; i++) {
        st->mmio_buf[offset + i] = val & 0xFF;
        val >>= 8;
    }
}

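/*
 * The register file is kept as raw bytes in guest (little-endian) order,
 * so 1-, 2- and 4-byte guest accesses at arbitrary offsets need no
 * special casing; amd_iommu_update_mmio() below reparses the containing
 * quadword whenever any part of it is written.
 */
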
static void amd_iommu_update_mmio(struct amd_iommu_state *st,
                                  target_phys_addr_t addr)
{
    size_t reg = addr & ~0x07;
    uint64_t *base = (uint64_t *) &st->mmio_buf[reg];
    uint64_t val = le64_to_cpu(*base);

    switch (reg) {
    case MMIO_CONTROL:
        st->enabled = !!(val & MMIO_CONTROL_IOMMUEN);
        st->ats_enabled = !!(val & MMIO_CONTROL_HTTUNEN);
        st->evtlog_enabled = !!(val & MMIO_CONTROL_EVENTLOGEN);
        st->evtlog_intr = !!(val & MMIO_CONTROL_EVENTINTEN);
        st->completion_wait_intr = !!(val & MMIO_CONTROL_COMWAITINTEN);
        st->cmdbuf_enabled = !!(val & MMIO_CONTROL_CMDBUFEN);
        amd_iommu_cmdbuf_run(st);
        break;
    case MMIO_DEVICE_TABLE:
        st->devtab = (target_phys_addr_t) (val & MMIO_DEVTAB_BASE_MASK);
        st->devtab_len = ((val & MMIO_DEVTAB_SIZE_MASK) + 1) *
                         (MMIO_DEVTAB_SIZE_UNIT / MMIO_DEVTAB_ENTRY_SIZE);
        break;
    case MMIO_COMMAND_BASE:
        st->cmdbuf = (target_phys_addr_t) (val & MMIO_CMDBUF_BASE_MASK);
        st->cmdbuf_len = 1UL << (st->mmio_buf[MMIO_CMDBUF_SIZE_BYTE] &
                                 MMIO_CMDBUF_SIZE_MASK);
        amd_iommu_cmdbuf_run(st);
        break;
    case MMIO_COMMAND_HEAD:
        st->cmdbuf_head = val & MMIO_CMDBUF_HEAD_MASK;
        amd_iommu_cmdbuf_run(st);
        break;
    case MMIO_COMMAND_TAIL:
        st->cmdbuf_tail = val & MMIO_CMDBUF_TAIL_MASK;
        amd_iommu_cmdbuf_run(st);
        break;
    case MMIO_EVENT_BASE:
        st->evtlog = (target_phys_addr_t) (val & MMIO_EVTLOG_BASE_MASK);
        st->evtlog_len = 1UL << (st->mmio_buf[MMIO_EVTLOG_SIZE_BYTE] &
                                 MMIO_EVTLOG_SIZE_MASK);
        break;
    case MMIO_EVENT_HEAD:
        st->evtlog_head = val & MMIO_EVTLOG_HEAD_MASK;
        break;
    case MMIO_EVENT_TAIL:
        st->evtlog_tail = val & MMIO_EVTLOG_TAIL_MASK;
        break;
    case MMIO_EXCL_BASE:
        st->excl_base = (target_phys_addr_t) (val & MMIO_EXCL_BASE_MASK);
        st->excl_enabled = val & MMIO_EXCL_ENABLED_MASK;
        st->excl_allow = val & MMIO_EXCL_ALLOW_MASK;
        break;
    case MMIO_EXCL_LIMIT:
        st->excl_limit = (target_phys_addr_t) ((val & MMIO_EXCL_LIMIT_MASK) |
                                               MMIO_EXCL_LIMIT_LOW);
        break;
    default:
        break;
    }
}

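/*
 * Worked example for the device table math above: the size field holds
 * (table length in 4KB units) - 1, so a value of 1 means an 8KB table,
 * i.e. (1 + 1) * (4096 / 32) = 256 entries of 32 bytes, covering devfns
 * 0..255.
 */
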
static uint32_t amd_iommu_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    struct amd_iommu_state *st = opaque;

    return amd_iommu_mmio_buf_read(st, addr, 1);
}

static uint32_t amd_iommu_mmio_readw(void *opaque, target_phys_addr_t addr)
{
    struct amd_iommu_state *st = opaque;

    return amd_iommu_mmio_buf_read(st, addr, 2);
}

static uint32_t amd_iommu_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    struct amd_iommu_state *st = opaque;

    return amd_iommu_mmio_buf_read(st, addr, 4);
}

static void amd_iommu_mmio_writeb(void *opaque,
                                  target_phys_addr_t addr,
                                  uint32_t val)
{
    struct amd_iommu_state *st = opaque;

    amd_iommu_mmio_buf_write(st, addr, 1, val);
    amd_iommu_update_mmio(st, addr);
}

static void amd_iommu_mmio_writew(void *opaque,
                                  target_phys_addr_t addr,
                                  uint32_t val)
{
    struct amd_iommu_state *st = opaque;

    amd_iommu_mmio_buf_write(st, addr, 2, val);
    amd_iommu_update_mmio(st, addr);
}

static void amd_iommu_mmio_writel(void *opaque,
                                  target_phys_addr_t addr,
                                  uint32_t val)
{
    struct amd_iommu_state *st = opaque;

    amd_iommu_mmio_buf_write(st, addr, 4, val);
    amd_iommu_update_mmio(st, addr);
}

static CPUReadMemoryFunc * const amd_iommu_mmio_read[] = {
    amd_iommu_mmio_readb,
    amd_iommu_mmio_readw,
    amd_iommu_mmio_readl,
};

static CPUWriteMemoryFunc * const amd_iommu_mmio_write[] = {
    amd_iommu_mmio_writeb,
    amd_iommu_mmio_writew,
    amd_iommu_mmio_writel,
};

static void amd_iommu_init_mmio(struct amd_iommu_state *st)
{
    st->mmio_buf[MMIO_CMDBUF_SIZE_BYTE] = MMIO_CMDBUF_DEFAULT_SIZE;
    st->mmio_buf[MMIO_EVTLOG_SIZE_BYTE] = MMIO_EVTLOG_DEFAULT_SIZE;
}

static void amd_iommu_enable_mmio(struct amd_iommu_state *st)
{
    target_phys_addr_t addr;

    st->mmio_index = cpu_register_io_memory(amd_iommu_mmio_read,
                                            amd_iommu_mmio_write, st);
    if (st->mmio_index < 0)
        return;

    addr = le64_to_cpu(*(uint64_t *) &st->capab[CAPAB_BAR_LOW]) & CAPAB_BAR_MASK;
    cpu_register_physical_memory(addr, MMIO_SIZE, st->mmio_index);

    st->mmio_addr = addr;
    st->mmio_buf = qemu_mallocz(MMIO_SIZE);
    st->mmio_enabled = 1;
    amd_iommu_init_mmio(st);
}

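/*
 * The MMIO window is mapped lazily: the guest first programs the BAR
 * through the capability (CAPAB_BAR_LOW/HIGH) and then sets the enable
 * bit (bit 0 of BAR_LOW, tested in amd_iommu_write_capab below), at
 * which point the 16KB register region appears at that address.
 */
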
static uint32_t amd_iommu_read_capab(PCIDevice *pci_dev,
                                     uint32_t addr, int len)
{
    return pci_default_cap_read_config(pci_dev, addr, len);
}

static void amd_iommu_write_capab(PCIDevice *dev,
                                  uint32_t addr, uint32_t val, int len)
{
    struct amd_iommu_state *st;
    unsigned char *capab;
    int reg;

    st = DO_UPCAST(struct amd_iommu_state, dev, dev);
    capab = st->capab;
    reg = (addr - 0x40) & ~0x3;     /* Get the 32-bit register. */

    switch (reg) {
    case CAPAB_HEADER:
    case CAPAB_MISC:
        /* Read-only. */
        return;
    case CAPAB_BAR_LOW:
    case CAPAB_BAR_HIGH:
    case CAPAB_RANGE:
        /* Lock the BAR once the MMIO region has been mapped. */
        if (st->mmio_enabled)
            return;
        pci_default_cap_write_config(dev, addr, val, len);
        break;
    default:
        return;
    }

    /* Bit 0 of BAR_LOW enables the MMIO register window. */
    if (capab[CAPAB_BAR_LOW] & 0x1)
        amd_iommu_enable_mmio(st);
}

static int amd_iommu_init_capab(PCIDevice *dev)
{
    struct amd_iommu_state *st;
    unsigned char *capab;

    st = DO_UPCAST(struct amd_iommu_state, dev, dev);
    capab = st->dev.config + st->capab_offset;

    capab[CAPAB_REV_TYPE] = CAPAB_INIT_REV_TYPE;
    capab[CAPAB_FLAGS] = CAPAB_INIT_FLAGS;
    capab[CAPAB_BAR_LOW] = 0;
    capab[CAPAB_BAR_HIGH] = 0;
    capab[CAPAB_RANGE] = 0;
    *((uint32_t *) &capab[CAPAB_MISC]) = cpu_to_le32(CAPAB_INIT_MISC);

    st->capab = capab;
    st->dev.cap.length = CAPAB_SIZE;

    return 0;
}

static int amd_iommu_translate(struct iommu *iommu,
                               DeviceState *dev,
                               target_phys_addr_t addr,
                               target_phys_addr_t *paddr,
                               int *len,
                               unsigned perms);

static int amd_iommu_pci_initfn(PCIDevice *dev)
{
    struct amd_iommu_state *st;
    struct iommu *iommu;
    int err;

    st = DO_UPCAST(struct amd_iommu_state, dev, dev);

    pci_config_set_vendor_id(st->dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_device_id(st->dev.config, PCI_DEVICE_ID_AMD_IOMMU);
    pci_config_set_class(st->dev.config, PCI_CLASS_SYSTEM_IOMMU);

    st->capab_offset = pci_add_capability(&st->dev,
                                          PCI_CAP_ID_SEC,
                                          CAPAB_SIZE);
    err = pci_enable_capability_support(&st->dev, st->capab_offset,
                                        amd_iommu_read_capab,
                                        amd_iommu_write_capab,
                                        amd_iommu_init_capab);
    if (err)
        return err;

    iommu = qemu_mallocz(sizeof(struct iommu));
    iommu->opaque = st;
    iommu->translate = amd_iommu_translate;
    st->dev.qdev.parent_bus->iommu = iommu;

    return 0;
}

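/*
 * Installing the translate hook on the parent bus routes all DMA issued
 * by devices on that bus through amd_iommu_translate(), via the generic
 * iommu layer behind iommu.h.
 */
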
static const VMStateDescription vmstate_amd_iommu = {
    .name = "amd-iommu",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, struct amd_iommu_state),
        VMSTATE_END_OF_LIST()
    }
};

static PCIDeviceInfo amd_iommu_pci_info = {
    .qdev.name = "amd-iommu",
    .qdev.desc = "AMD IOMMU",
    .qdev.size = sizeof(struct amd_iommu_state),
    .qdev.vmsd = &vmstate_amd_iommu,
    .init = amd_iommu_pci_initfn,
};

void amd_iommu_init(PCIBus *bus)
{
    pci_create_simple(bus, -1, "amd-iommu");
}

static void amd_iommu_register(void)
{
    pci_qdev_register(&amd_iommu_pci_info);
}

device_init(amd_iommu_register);

static void amd_iommu_page_fault(struct amd_iommu_state *st,
                                 int devfn,
                                 unsigned domid,
                                 target_phys_addr_t addr,
                                 int present,
                                 int is_write)
{
    uint16_t entry[8];
    uint64_t *entry_addr = (uint64_t *) &entry[4];

    entry[0] = cpu_to_le16(devfn);
    entry[1] = 0;
    entry[2] = cpu_to_le16(domid);
    entry[3] = cpu_to_le16((2UL << 12) | (!!present << 4) | (!!is_write << 5));
    *entry_addr = cpu_to_le64(addr);

    /* Event log entries are 128 bits (16 bytes), not 128 bytes. */
    cpu_physical_memory_write(st->evtlog + st->evtlog_tail,
                              (uint8_t *) entry, sizeof(entry));
    st->evtlog_tail += sizeof(entry);
}

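/*
 * This builds a 16-byte IO_PAGE_FAULT event log entry: event code 2 in
 * the top nibble of the first quadword, plus the device ID, domain ID,
 * present/write flags and the faulting device virtual address in the
 * second quadword.
 */
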
static int amd_iommu_qdev_to_devfn(DeviceState *dev)
{
    PCIDevice *pci_dev = DO_UPCAST(PCIDevice, qdev, dev);

    return pci_dev->devfn;
}

static inline uint64_t amd_iommu_get_perms(uint64_t entry)
{
    return (entry & (DEV_PERM_READ | DEV_PERM_WRITE)) >> DEV_PERM_SHIFT;
}

static int amd_iommu_translate(struct iommu *iommu,
                               DeviceState *dev,
                               target_phys_addr_t addr,
                               target_phys_addr_t *paddr,
                               int *len,
                               unsigned perms)
{
    int devfn, present;
    target_phys_addr_t entry_addr, pte_addr;
    uint64_t entry[4], pte, page_offset, pte_perms;
    unsigned level, domid;
    struct amd_iommu_state *st = iommu->opaque;

    if (!st->enabled)
        goto no_translation;

    /* Get the device table entry. */
    devfn = amd_iommu_qdev_to_devfn(dev);
    entry_addr = st->devtab + devfn * DEVTAB_ENTRY_SIZE;
    cpu_physical_memory_read(entry_addr, (uint8_t *) entry, 32);

    pte = entry[0];
    if (!(pte & DEV_VALID) || !(pte & DEV_TRANSLATION_VALID)) {
        goto no_translation;
    }
    domid = entry[1] & DEV_DOMAIN_ID_MASK;
    level = (pte >> DEV_MODE_RSHIFT) & DEV_MODE_MASK;
    while (level > 0) {
        /*
         * Check permissions: the bitwise
         * implication perms -> pte_perms must be true.
         */
        pte_perms = amd_iommu_get_perms(pte);
        present = pte & 1;
        if (!present || perms != (perms & pte_perms)) {
            amd_iommu_page_fault(st, devfn, domid, addr,
                                 present, !!(perms & IOMMU_PERM_WRITE));
            return -EPERM;
        }

        /* Go to the next lower level. */
        pte_addr = pte & DEV_PT_ROOT_MASK;
        pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
        pte = ldq_phys(pte_addr);
        level = (pte >> DEV_MODE_RSHIFT) & DEV_MODE_MASK;
    }
    page_offset = addr & 4095;
    *paddr = (pte & DEV_PT_ROOT_MASK) + page_offset;
    *len = 4096 - page_offset;

    return 0;

no_translation:
    return iommu_nop_translate(iommu, dev, addr, paddr, len, perms);
}

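/*
 * Walk arithmetic in the loop above, e.g. for a 3-level table (mode = 3):
 *   level 3 index = (addr >> 30) & 0x1FF   (bits 38:30)
 *   level 2 index = (addr >> 21) & 0x1FF   (bits 29:21)
 *   level 1 index = (addr >> 12) & 0x1FF   (bits 20:12)
 * Each 9-bit index is shifted left by 3 to form a byte offset into the
 * table (PTEs are 8 bytes), and the mode field of each fetched PTE drives
 * the next iteration, so the walk terminates when it reaches 0, i.e. when
 * pte points at a 4KB page. A translation is valid to the end of that
 * page, which is why *len is 4096 minus the page offset.
 */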