/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;
/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
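
/*
 * Illustrative usage (not from this file): targets typically request a
 * full flush when the guest reconfigures its address space. A
 * hypothetical page-table-base write helper might look like:
 *
 *     void helper_set_ptbase(CPUArchState *env, target_ulong val)
 *     {
 *         env->ptbase = val;              // hypothetical CPU field
 *         tlb_flush(ENV_GET_CPU(env), 1); // drop all cached translations
 *     }
 */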
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(cpu, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}
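
/*
 * Added commentary: the TLB_* flags occupy low bits of the entry's
 * address fields, below TARGET_PAGE_BITS, so they can be checked by the
 * same masked compare that matches the page address. An addr_write with
 * none of TLB_INVALID_MASK, TLB_MMIO or TLB_NOTDIRTY set therefore
 * denotes a plain writable RAM page that is already marked dirty.
 */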
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page recorded yet; start a region covering this one. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu->as, paddr,
                                                &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
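
/*
 * Added commentary on the addend trick: te->addend holds
 * (host_ram_ptr - vaddr), so the fast path turns a guest virtual address
 * into a host address with a single add, host = vaddr + addend. For
 * example, if guest page 0x00010000 maps to host pointer 0x7f55aa000000,
 * then addend = 0x7f55a9ff0000 and a guest access at 0x00010404 resolves
 * to host 0x7f55aa000404.
 *
 * Illustrative caller (not from this file): a target's tlb_fill() walks
 * the guest page tables and, on success, installs the translation:
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */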
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        /* TLB miss: a byte load faults the entry in via the target's
           tlb_fill() */
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu->as, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"