accel/tcg: add size parameter in tlb_fill()
[qemu.git] / target/i386/excp_helper.c
blob cb4d1b7d336155a261538902eb0fd6814cfe416b
/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);
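
    /*
     * The "contributory" class above follows the Intel SDM double-fault
     * rules: vector 0 (#DE) and vectors 10-13 (#TS, #NP, #SS, #GP).
     * Two contributory exceptions in a row, or a contributory exception
     * or #PF raised while delivering #PF, escalate to #DF below.
     */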
    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
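    /*
     * cpu_loop_exit_restore() uses retaddr (a host return address inside
     * the current translation block, or 0) to re-synchronise the guest
     * CPU state before longjmp-ing back to the main execution loop.
     */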
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}

#if defined(CONFIG_USER_ONLY)
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
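    /*
     * is_write1 carries the full access type (0 = data read, 1 = data
     * write, 2 = instruction fetch), so (1 << is_write1) can be matched
     * against PAGE_READ/PAGE_WRITE/PAGE_EXEC below; is_write keeps only
     * the write bit for the #PF error code.  The size argument added by
     * the tlb_fill() change is not needed by this walk and is unused.
     */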
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;
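
            /*
             * In long mode the address must be canonical: bits 63..48
             * (63..57 with LA57) must all equal the highest implemented
             * address bit.  A non-canonical address raises #GP, not #PF.
             */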
            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                              (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }
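
            /*
             * ptep accumulates the AND of the R/W, U/S and NX permissions
             * across all paging levels; NX is XOR-flipped first so that
             * "execute allowed" can be intersected like the other bits.
             */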
            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                          (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
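        /* legacy 32-bit entries have no NX bit: set the (flipped) NX
         * bit in ptep so the page is always treated as executable */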
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;
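
    /* ptep now holds the combined permissions, with NX flipped back to
     * its architectural polarity (bit set again means no-execute) */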

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
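
    /*
     * Protection keys: PTE bits 62:59 select one of 16 two-bit PKRU
     * fields.  The AD bit disables all data access to user pages; the
     * WD bit disables writes (from user mode always, from kernel mode
     * only when CR0.WP is set).  Fetches are never blocked by PKRU.
     */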
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }
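
    /*
     * Dropping PAGE_WRITE from a clean page makes the next guest store
     * miss the TLB and fault back into this function, which then sets
     * PG_DIRTY_MASK and re-installs the entry with write permission.
     */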

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even with 4 MB pages, we map only one 4 KB page in the TLB to
       avoid filling it too quickly */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
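    /*
     * The I/D flag (bit 4) of the #PF error code is only defined when
     * NXE (with PAE) or SMEP is enabled; only then is an instruction
     * fetch fault reported as such.
     */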
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif