/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/cpu_ldst.h"
#endif
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/* Get a backtrace for the guest code. */
const char *qemu_sprint_backtrace(char *buffer, size_t length);
/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
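/*
 * Illustrative sketch (not part of this header): a target implementing an
 * interruptible, iterative instruction might poll this between steps and
 * bail out to the main loop. The helper name and the per-step functions
 * below are hypothetical.
 *
 *     void helper_interruptible_op(CPUArchState *env)
 *     {
 *         while (more_steps_pending(env)) {
 *             do_one_step(env);   // architectural state written back per step
 *             if (cpu_loop_exit_requested(env_cpu(env))) {
 *                 cpu_loop_exit_restore(env_cpu(env), GETPC());
 *             }
 *         }
 *     }
 */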
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
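/*
 * Illustrative sketch (not part of the API): @idxmap is a bitmask of MMU
 * index numbers. The index names below are hypothetical, target-specific
 * constants.
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */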
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of the virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                        vaddr len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);
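/*
 * Illustrative sketch (not part of the API): flushing a 64 KiB region for a
 * single, hypothetical MMU index, matching all significant address bits.
 *
 *     tlb_flush_range_by_mmuidx(cpu, base_addr, 64 * KiB,
 *                               1 << MMU_KERNEL_IDX, TARGET_LONG_BITS);
 */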
/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
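/*
 * Illustrative sketch (not part of the API): a target's tlb_fill hook
 * typically ends a successful page table walk with a call such as the one
 * below. The names phys_addr, prot and page_size are hypothetical results
 * of that walk.
 *
 *     tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys_addr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, page_size);
 */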
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     vaddr addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          vaddr addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      vaddr addr,
                                                      vaddr len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
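/*
 * Illustrative sketch (not part of the API): probing without faulting,
 * e.g. for a hypothetical "test address" style instruction.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // no valid translation; report failure to the guest rather
 *         // than raising an exception
 *     }
 */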
#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu() - Like probe_access_full except cannot fault and
 * doesn't trigger instrumentation.
 *
 * @env: CPUArchState
 * @vaddr: virtual address to probe
 * @size: size of the probe
 * @access_type: read, write or execute permission
 * @mmu_idx: softmmu index
 * @phost: ptr to return value host address or NULL
 * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * Returns: TLB flags as per probe_access_flags()
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);

#endif
static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte.  When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly.  The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page.  See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}
/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ 2
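/*
 * Illustrative sketch (not part of the API): a helper called from generated
 * code can pass GETPC() down so that a fault can be unwound back to the
 * guest instruction that caused it. The helper name is hypothetical.
 *
 *     uint64_t helper_example_load(CPUArchState *env, uint64_t addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 */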
#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to.  @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif
/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}
#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

static inline void mmap_unlock_guard(void *unused)
{
    mmap_unlock();
}

#define WITH_MMAP_LOCK_GUARD() \
    for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
         = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
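/*
 * Illustrative sketch (not part of the API): the guard macro takes the mmap
 * lock for the attached statement and, via the cleanup attribute, releases
 * it on any exit path out of that scope, including an early return.
 *
 *     WITH_MMAP_LOCK_GUARD() {
 *         // inspect or modify the user-only page mappings here
 *     }
 */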
/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding.  Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);
/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
#define WITH_MMAP_LOCK_GUARD()

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, vaddr addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif