/*
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "hw/sh_intc.h"

#if defined(CONFIG_USER_ONLY)
void do_interrupt(CPUState *env)
{
    env->exception_index = -1;
}
int cpu_sh4_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    env->tea = address;
    env->exception_index = 0;
    switch (rw) {
    case 0:                     /* data read */
        env->exception_index = 0x0a0;
        break;
    case 1:                     /* data write */
        env->exception_index = 0x0c0;
        break;
    case 2:                     /* instruction fetch */
        env->exception_index = 0x0a0;
        break;
    }
    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    /* User-mode addresses are not translated. */
    return addr;
}
int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
    /* For user mode, only the U0 area is cacheable. */
    return !(addr & 0x80000000);
}

#else /* !CONFIG_USER_ONLY */
#define MMU_OK                   0
#define MMU_ITLB_MISS            (-1)
#define MMU_ITLB_MULTIPLE        (-2)
#define MMU_ITLB_VIOLATION       (-3)
#define MMU_DTLB_MISS_READ       (-4)
#define MMU_DTLB_MISS_WRITE      (-5)
#define MMU_DTLB_INITIAL_WRITE   (-6)
#define MMU_DTLB_VIOLATION_READ  (-7)
#define MMU_DTLB_VIOLATION_WRITE (-8)
#define MMU_DTLB_MULTIPLE        (-9)
#define MMU_DTLB_MISS            (-10)
#define MMU_IADDR_ERROR          (-11)
#define MMU_DADDR_ERROR_READ     (-12)
#define MMU_DADDR_ERROR_WRITE    (-13)
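/* These codes are internal to the translation helpers below.  They are all
   negative so that they can never collide with a TLB entry index (>= 0);
   anything other than MMU_OK is turned into an SH-4 exception vector in
   cpu_sh4_handle_mmu_fault(). */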
void do_interrupt(CPUState *env)
{
    int do_irq = env->interrupt_request & CPU_INTERRUPT_HARD;
    int do_exp, irq_vector = env->exception_index;

    /* prioritize exceptions over interrupts */

    do_exp = env->exception_index != -1;
    do_irq = do_irq && (env->exception_index == -1);

    if (env->sr & SR_BL) {
        if (do_exp && env->exception_index != 0x1e0) {
            env->exception_index = 0x000; /* masked exception -> reset */
        }
        if (do_irq && !env->intr_at_halt) {
            return; /* masked */
        }
        env->intr_at_halt = 0;
    }

    if (do_irq) {
        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
                                                (env->sr >> 4) & 0xf);
        if (irq_vector == -1) {
            return; /* masked */
        }
    }

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *expname;
        switch (env->exception_index) {
        case 0x0e0:
            expname = "addr_error";
            break;
        case 0x040:
            expname = "tlb_miss";
            break;
        case 0x0a0:
            expname = "tlb_violation";
            break;
        case 0x180:
            expname = "illegal_instruction";
            break;
        case 0x1a0:
            expname = "slot_illegal_instruction";
            break;
        case 0x800:
            expname = "fpu_disable";
            break;
        case 0x820:
            expname = "slot_fpu";
            break;
        case 0x100:
            expname = "data_write";
            break;
        case 0x060:
            expname = "dtlb_miss_write";
            break;
        case 0x0c0:
            expname = "dtlb_violation_write";
            break;
        case 0x120:
            expname = "fpu_exception";
            break;
        case 0x080:
            expname = "initial_page_write";
            break;
        default:
            expname = do_irq ? "interrupt" : "???";
            break;
        }
        qemu_log("exception 0x%03x [%s] raised\n",
                 irq_vector, expname);
        log_cpu_state(env, 0);
    }

    env->ssr = env->sr;
    env->spc = env->pc;
    env->sgr = env->gregs[15];
    env->sr |= SR_BL | SR_MD | SR_RB;

    if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* Branch instruction should be executed again before delay slot. */
        env->spc -= 2;
        /* Clear flags for exception/interrupt routine. */
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE);
    }
    if (env->flags & DELAY_SLOT_CLEARME)
        env->flags = 0;

    if (do_exp) {
        env->expevt = env->exception_index;
        switch (env->exception_index) {
        case 0x000:
        case 0x020:
        case 0x140:
            env->sr |= 0xf << 4; /* IMASK */
            env->pc = 0xa0000000;
            break;
        case 0x040:
        case 0x060:
            env->pc = env->vbr + 0x400;
            break;
        case 0x160:
            env->spc += 2; /* special case for TRAPA */
            /* fall through */
        default:
            env->pc = env->vbr + 0x100;
            break;
        }
    }

    if (do_irq) {
        env->intevt = irq_vector;
        env->pc = env->vbr + 0x600;
    }
}
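/* The two helpers below implement the ITLB replacement policy.  The ordering
   information lives in the top byte of MMUCR (the SH7750 LRUI bits): each bit
   records which of a pair of ITLB entries was used more recently, so
   update_itlb_use() marks one entry as most recently used and
   itlb_replacement() decodes the same bits to pick the least recently used
   entry as the refill victim. */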
static void update_itlb_use(CPUState *env, int itlbnb)
{
    uint8_t or_mask = 0, and_mask = (uint8_t) -1;

    switch (itlbnb) {
    case 0:
        and_mask = 0x1f;
        break;
    case 1:
        and_mask = 0xe7;
        or_mask = 0x80;
        break;
    case 2:
        and_mask = 0xfb;
        or_mask = 0x50;
        break;
    case 3:
        or_mask = 0x2c;
        break;
    }

    env->mmucr &= (and_mask << 24) | 0x00ffffff;
    env->mmucr |= (or_mask << 24);
}
static int itlb_replacement(CPUState *env)
{
    if ((env->mmucr & 0xe0000000) == 0xe0000000)
        return 0;
    if ((env->mmucr & 0x98000000) == 0x18000000)
        return 1;
    if ((env->mmucr & 0x54000000) == 0x04000000)
        return 2;
    if ((env->mmucr & 0x2c000000) == 0x00000000)
        return 3;
    /* Inconsistent LRU bits: should not happen. */
    assert(0);
    return 0;
}
/* Find the corresponding entry in the right TLB
   Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
static int find_tlb_entry(CPUState *env, target_ulong address,
                          tlb_t *entries, uint8_t nbtlb, int use_asid)
{
    int match = MMU_DTLB_MISS;
    uint32_t start, end;
    uint8_t asid;
    int i;

    asid = env->pteh & 0xff;

    for (i = 0; i < nbtlb; i++) {
        if (!entries[i].v)
            continue;           /* Invalid entry */
        if (!entries[i].sh && use_asid && entries[i].asid != asid)
            continue;           /* Bad ASID */
#if 0
        /* entries[i].size is precomputed from the sz field when the entry is
           loaded, so this decoding is kept only for reference. */
        switch (entries[i].sz) {
        case 0:
            size = 1024;        /* 1kB */
            break;
        case 1:
            size = 4 * 1024;    /* 4kB */
            break;
        case 2:
            size = 64 * 1024;   /* 64kB */
            break;
        case 3:
            size = 1024 * 1024; /* 1MB */
            break;
        }
#endif

        start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
        end = start + entries[i].size - 1;

        if (address >= start && address <= end) {   /* Match */
            if (match != MMU_DTLB_MISS)
                return MMU_DTLB_MULTIPLE;   /* Multiple match */
            match = i;
        }
    }
    return match;
}
static int same_tlb_entry_exists(const tlb_t *haystack, uint8_t nbtlb,
                                 const tlb_t *needle)
{
    int i;

    for (i = 0; i < nbtlb; i++)
        if (!memcmp(&haystack[i], needle, sizeof(tlb_t)))
            return 1;
    return 0;
}
static void increment_urc(CPUState *env)
{
    uint8_t urb, urc;

    /* Increment URC */
    urb = ((env->mmucr) >> 18) & 0x3f;
    urc = ((env->mmucr) >> 10) & 0x3f;
    urc++;
    if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1))
        urc = 0;
    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
}
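/* URC (MMUCR bits 15:10) is the UTLB index that cpu_load_tlb(), i.e. the
   LDTLB instruction, will overwrite next.  It is bumped on every UTLB lookup
   and wraps at URB (MMUCR bits 23:18, if non-zero) or at UTLB_SIZE - 1, which
   is what provides the pseudo-random replacement behaviour. */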
/* Find itlb entry - update itlb from utlb if necessary and asked for
   Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
   Update the itlb from utlb if update is not 0
*/
static int find_itlb_entry(CPUState *env, target_ulong address,
                           int use_asid, int update)
{
    int e, n;

    e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
    if (e == MMU_DTLB_MULTIPLE) {
        e = MMU_ITLB_MULTIPLE;
    } else if (e == MMU_DTLB_MISS && update) {
        /* ITLB miss: try to refill from the UTLB. */
        e = find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
        if (e >= 0) {
            tlb_t *ientry;
            n = itlb_replacement(env);
            ientry = &env->itlb[n];
            if (ientry->v) {
                if (!same_tlb_entry_exists(env->utlb, UTLB_SIZE, ientry))
                    tlb_flush_page(env, ientry->vpn << 10);
            }
            *ientry = env->utlb[e];
            e = n;
        } else if (e == MMU_DTLB_MISS)
            e = MMU_ITLB_MISS;
    } else if (e == MMU_DTLB_MISS)
        e = MMU_ITLB_MISS;
    if (e >= 0)
        update_itlb_use(env, e);
    return e;
}
/* Find utlb entry
   Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
static int find_utlb_entry(CPUState *env, target_ulong address, int use_asid)
{
    /* per utlb access */
    increment_urc(env);

    return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
}
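/* Note on conventions: rw is 0 for a data read, 1 for a data write and 2 for
   an instruction fetch.  Fetches are looked up in the ITLB (refilling it from
   the UTLB on a miss), while data accesses only consult the UTLB. */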
/* Match address against MMU
   Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
   MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
   MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
   MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
   MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUState *env, target_ulong *physical,
                           int *prot, target_ulong address,
                           int rw, int access_type)
{
    int use_asid, n;
    tlb_t *matching = NULL;

    use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;

    if (rw == 2) {
        /* Instruction fetch: look in the ITLB, refilling it if needed. */
        n = find_itlb_entry(env, address, use_asid, 1);
        if (n >= 0) {
            matching = &env->itlb[n];
            if (!(env->sr & SR_MD) && !(matching->pr & 2))
                n = MMU_ITLB_VIOLATION;
            else
                *prot = PAGE_READ;
        }
    } else {
        n = find_utlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->utlb[n];
            /* Decode access rights from the PR field and the processor mode. */
            switch ((matching->pr << 1) | ((env->sr & SR_MD) ? 1 : 0)) {
            case 0:             /* 000 */
            case 2:             /* 010 */
                n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                    MMU_DTLB_VIOLATION_READ;
                break;
            case 1:             /* 001 */
            case 4:             /* 100 */
            case 5:             /* 101 */
                if (rw == 1)
                    n = MMU_DTLB_VIOLATION_WRITE;
                else
                    *prot = PAGE_READ;
                break;
            case 3:             /* 011 */
            case 6:             /* 110 */
            case 7:             /* 111 */
                *prot = (rw == 1) ? PAGE_WRITE : PAGE_READ;
                break;
            }
        } else if (n == MMU_DTLB_MISS) {
            n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
                MMU_DTLB_MISS_READ;
        }
    }
    if (n >= 0) {
        *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
            (address & (matching->size - 1));
        if ((rw == 1) && !matching->d)
            n = MMU_DTLB_INITIAL_WRITE;
        else
            n = MMU_OK;
    }
    return n;
}
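/* Address map assumed here: P0/U0 (0x00000000-0x7fffffff) and P3
   (0xc0000000-0xdfffffff) are translated through the TLB when MMUCR.AT is
   set; P1 (0x80000000-) and P2 (0xa0000000-) map straight onto the low 29
   bits of the physical address space; P4 (0xe0000000-) is control space, of
   which only the store queue window (0xe0000000-0xe3ffffff) may be touched
   from user mode. */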
static int get_physical_address(CPUState *env, target_ulong *physical,
                                int *prot, target_ulong address,
                                int rw, int access_type)
{
    /* P1, P2 and P4 areas do not use translation */
    if ((address >= 0x80000000 && address < 0xc0000000) ||
        address >= 0xe0000000) {
        if (!(env->sr & SR_MD)
            && (address < 0xe0000000 || address > 0xe4000000)) {
            /* Unauthorized access in user mode (only store queues are available) */
            fprintf(stderr, "Unauthorized access\n");
            if (rw == 0)
                return MMU_DADDR_ERROR_READ;
            else if (rw == 1)
                return MMU_DADDR_ERROR_WRITE;
            else
                return MMU_IADDR_ERROR;
        }
        if (address >= 0x80000000 && address < 0xc0000000) {
            /* Mask upper 3 bits for P1 and P2 areas */
            *physical = address & 0x1fffffff;
        } else {
            *physical = address;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        return MMU_OK;
    }

    /* If MMU is disabled, return the corresponding physical page */
    if (!(env->mmucr & MMUCR_AT)) {
        *physical = address & 0x1FFFFFFF;
        *prot = PAGE_READ | PAGE_WRITE;
        return MMU_OK;
    }

    /* We need to resort to the MMU */
    return get_mmu_address(env, physical, prot, address, rw, access_type);
}
int cpu_sh4_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    target_ulong physical, page_offset, page_size;
    int prot, ret, access_type;

    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw,
                               access_type);

    if (ret != MMU_OK) {
        env->tea = address;
        switch (ret) {
        case MMU_ITLB_MISS:
        case MMU_DTLB_MISS_READ:
            env->exception_index = 0x040;
            break;
        case MMU_DTLB_MULTIPLE:
        case MMU_ITLB_MULTIPLE:
            env->exception_index = 0x140;
            break;
        case MMU_ITLB_VIOLATION:
            env->exception_index = 0x0a0;
            break;
        case MMU_DTLB_MISS_WRITE:
            env->exception_index = 0x060;
            break;
        case MMU_DTLB_INITIAL_WRITE:
            env->exception_index = 0x080;
            break;
        case MMU_DTLB_VIOLATION_READ:
            env->exception_index = 0x0a0;
            break;
        case MMU_DTLB_VIOLATION_WRITE:
            env->exception_index = 0x0c0;
            break;
        case MMU_IADDR_ERROR:
        case MMU_DADDR_ERROR_READ:
            env->exception_index = 0x0c0;
            break;
        case MMU_DADDR_ERROR_WRITE:
            env->exception_index = 0x100;
            break;
        }
        return 1;
    }

    page_size = TARGET_PAGE_SIZE;
    page_offset =
        (address - (address & TARGET_PAGE_MASK)) & ~(page_size - 1);
    address = (address & TARGET_PAGE_MASK) + page_offset;
    physical = (physical & TARGET_PAGE_MASK) + page_offset;

    return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong physical;
    int prot;

    get_physical_address(env, &physical, &prot, addr, 0, 0);
    return physical;
}
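/* cpu_load_tlb() implements the LDTLB instruction: the current contents of
   the PTEH/PTEL/PTEA registers are copied into the UTLB entry selected by
   MMUCR.URC. */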
void cpu_load_tlb(CPUSH4State *env)
{
    int n = cpu_mmucr_urc(env->mmucr);
    tlb_t *entry = &env->utlb[n];

    if (entry->v) {
        /* Overwriting valid entry in utlb. */
        target_ulong address = entry->vpn << 10;
        if (!same_tlb_entry_exists(env->itlb, ITLB_SIZE, entry)) {
            tlb_flush_page(env, address);
        }
    }

    /* Take values into cpu status from registers. */
    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
    entry->vpn = cpu_pteh_vpn(env->pteh);
    entry->v = (uint8_t)cpu_ptel_v(env->ptel);
    entry->ppn = cpu_ptel_ppn(env->ptel);
    entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
    switch (entry->sz) {
    case 0: /* 00 */
        entry->size = 1024; /* 1K */
        break;
    case 1: /* 01 */
        entry->size = 1024 * 4; /* 4K */
        break;
    case 2: /* 10 */
        entry->size = 1024 * 64; /* 64K */
        break;
    case 3: /* 11 */
        entry->size = 1024 * 1024; /* 1M */
        break;
    }
    entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
    entry->c = (uint8_t)cpu_ptel_c(env->ptel);
    entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
    entry->d = (uint8_t)cpu_ptel_d(env->ptel);
    entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
    entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
    entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
}
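/* Handles a store to the memory-mapped UTLB address array (P4 area).  Bit 7
   of the address selects associative mode: when set, the UTLB (and ITLB) are
   searched for a matching VPN/ASID and essentially only the V and D bits are
   updated, with a multiple-hit exception on more than one match; when clear,
   bits 13:8 of the address index the UTLB entry to rewrite directly. */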
void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, target_phys_addr_t addr,
                                    uint32_t mem_value)
{
    int associate = addr & 0x0000080;
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
    int use_asid = (s->mmucr & MMUCR_SV) == 0 || (s->sr & SR_MD) == 0;

    if (associate) {
        int i;
        tlb_t *utlb_match_entry = NULL;
        int needs_tlb_flush = 0;

        /* search UTLB */
        for (i = 0; i < UTLB_SIZE; i++) {
            tlb_t *entry = &s->utlb[i];
            if (!entry->v)
                continue;

            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (utlb_match_entry) {
                    /* Multiple TLB Exception */
                    s->exception_index = 0x140;
                    break;
                }
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                entry->v = v;
                entry->d = d;
                utlb_match_entry = entry;
            }
            increment_urc(s); /* per utlb access */
        }

        /* search ITLB */
        for (i = 0; i < ITLB_SIZE; i++) {
            tlb_t *entry = &s->itlb[i];
            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                if (utlb_match_entry)
                    *entry = *utlb_match_entry;
                else
                    entry->v = v;
                break;
            }
        }

        if (needs_tlb_flush)
            tlb_flush_page(s, vpn << 10);
    } else {
        int index = (addr & 0x00003f00) >> 8;
        tlb_t *entry = &s->utlb[index];
        if (entry->v) {
            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            if (!same_tlb_entry_exists(s->itlb, ITLB_SIZE, entry)) {
                tlb_flush_page(s, address);
            }
        }
        entry->asid = asid;
        entry->vpn = vpn;
        entry->d = d;
        entry->v = v;
        increment_urc(s);
    }
}
int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
    int n;
    int use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;

    /* check area */
    if (env->sr & SR_MD) {
        /* For privileged mode, the P2 and P4 areas are not cacheable. */
        if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
            return 0;
    } else {
        /* For user mode, only the U0 area is cacheable. */
        if (0x80000000 <= addr)
            return 0;
    }

    /*
     * TODO : Evaluate CCR and check if the cache is on or off.
     *        Now CCR is not in CPUSH4State, but in SH7750State.
     *        When you move the ccr into CPUSH4State, the code will be
     *        as follows.
     */
#if 0
    /* check if operand cache is enabled or not. */
    if (!(env->ccr & 1)) {
        return 0;
    }
#endif

    /* if MMU is off, no check for TLB. */
    if ((env->mmucr & MMUCR_AT) == 0)
        return 1;

    /* check TLB */
    n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid);
    if (n >= 0)
        return env->itlb[n].c;

    n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid);
    if (n >= 0)
        return env->utlb[n].c;

    return 0;
}

#endif /* !CONFIG_USER_ONLY */