/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/traps.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
			   struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
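
/*
 * For illustration (added; not part of the original source): DO_ERROR(13,
 * SIGILL, "illegal slot instruction", illegal_slot_inst, current) expands to
 *
 *	asmlinkage void do_illegal_slot_inst(unsigned long error_code,
 *					     struct pt_regs *regs)
 *	{
 *		do_unhandled_exception(13, SIGILL, "illegal slot instruction",
 *				       "illegal_slot_inst", error_code, regs,
 *				       current);
 *	}
 *
 * i.e. each DO_ERROR() use below stamps out one asmlinkage trap handler
 * whose body just forwards to do_unhandled_exception().
 */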
static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %lx\n", str, (err & 0xffffff));
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
static void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}
DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */
static int misaligned_fixup(struct pt_regs *regs);
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				       "do_address_error_load",
				       error_code, regs, current);
	}
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				       "do_address_error_store",
				       error_code, regs, current);
	}
}
#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3
/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
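
/*
 * Worked example (added for illustration): take the guaranteed-reserved
 * opcode 0x6ff4fff0 used below.  major = (0x6ff4fff0 >> 26) & 0x3f = 0x1b
 * and minor = (0x6ff4fff0 >> 16) & 0xf = 0x4, so the entry consulted is
 * shmedia_opcode_table[0x1b] = 0xc0009495 and the selected bit-pair is
 * (0xc0009495 >> (0x4 << 1)) & 0x3 = 0, i.e. OPCODE_INVALID, as a reserved
 * opcode should be.
 */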
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	/* Workaround SH5-101 cut2 silicon defect #2815 :
	   in some situations, inter-mode branches from SHcompact -> SHmedia
	   which should take ITLBMISS or EXECPROT exceptions at the target
	   falsely take RESINST at the target instead. */

	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";
	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;

			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;

				switch (opcode_state) {
				case OPCODE_INVALID:
					/* Trap. */
					break;
				case OPCODE_USER_VALID:
					/* Restart the instruction : the branch to the instruction will now be from an RTE
					   not from SHcompact so the silicon defect won't be triggered. */
					return;
				case OPCODE_PRIV_VALID:
					if (!user_mode(regs)) {
						/* Should only ever get here if a module has
						   SHcompact code inside it.  If so, the same fix up is needed. */
						return; /* same reason */
					}
					/* Otherwise, user mode trying to execute a privileged instruction -
					   fall through to trap. */
					break;
				case OPCODE_CTRL_REG:
					/* If in privileged mode, return as above. */
					if (!user_mode(regs)) return;
					/* In user mode ... */
					if (combined == 0x9f) { /* GETCON */
						unsigned long regno = (opcode >> 20) & 0x3f;

						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else if (combined == 0x1bf) { /* PUTCON */
						unsigned long regno = (opcode >> 4) & 0x3f;

						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					}
					break;
				default:
					/* Fall through to trap. */
					break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__FUNCTION__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}
int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return scId;
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}
void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(unsigned long *) aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact. */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}
static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
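
/*
 * Illustration (added; not from the original source): with NEFF==32 an
 * effective address is valid only if bits 63:32 replicate bit 31.  E.g.
 * 0xffffffff80000000 passes the check (truncate to 0x80000000, sign extend,
 * and the same value comes back), whereas 0x0000000180000000 fails, catching
 * a wild address before any load/store is attempted with it.
 */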
static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;

		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
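		/*
		 * Illustration (added): the 10-bit field is sign extended by
		 * shifting it up to bits 63:54 and arithmetically shifting
		 * back down, e.g. 0x3ff -> 0xffc0000000000000 -> all-ones,
		 * i.e. -1.
		 */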
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;

		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}
	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}
#endif

	*address = addr;
	return 0;
}
/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;

	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	*q++ = *p++;
	*q++ = *p++;

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;

	x = (__u16) value;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	*p++ = *q++;
	*p++ = *q++;
}
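
/*
 * Design note (added): the two helpers above move the 16-bit datum one byte
 * at a time on purpose.  Byte accesses can never be misaligned, so copying
 * through 'x' sidesteps the very alignment trap that is being fixed up, at
 * the cost of two bus accesses instead of one.
 */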
static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}
	destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}

		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
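		/*
		 * Note (added): ldlo.l fetches the bytes of the misaligned
		 * word below the next 4-byte boundary and ldhi.l (addressed
		 * at +3) the bytes above it; each leaves the other's byte
		 * lanes clear, so OR-ing the two halves rebuilds the full
		 * value without ever issuing a misaligned access.  The
		 * quad-word pair below works the same way at +7.
		 */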
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}
static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}
	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			       __u32 opcode,
			       int displacement_not_indexed,
			       int width_shift,
			       int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}
	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			save_fpu(current, regs);
			last_task_used_math = NULL;
		}

		buflo = *(__u32 *) &buffer;
		bufhi = *(1 + (__u32 *) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}

	return 0;
}
static int misaligned_fpu_store(struct pt_regs *regs,
				__u32 opcode,
				int displacement_not_indexed,
				int width_shift,
				int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}
	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo = 0xffffffffUL, bufhi = 0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			save_fpu(current, regs);
			last_task_used_math = NULL;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.fpu.hard.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32 *) &buffer = buflo;
		*(1 + (__u32 *) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else {
		die ("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}

	return 0;
}
#endif
static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Never fixup user mode misaligned accesses without this option enabled. */
	if (user_mode(regs)) return -1;
#else
	if (!user_mode_unaligned_fixup_enable) return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;
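
	/*
	 * Note (added): the case labels in the switch below are written as
	 * (byte >> 2) so they can be read against the instruction encodings:
	 * e.g. LD.W's top opcode byte is 0x84, and (0x84 >> 2) == 0x21 is
	 * the value left in the 6-bit 'major' field extracted above.
	 */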
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
	}
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
		}
	}

	switch (major) {
	case (0x84>>2): /* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2): /* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2): /* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2): /* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2): /* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2): /* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2): /* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2): /* indexed loads */
		switch (minor) {
		case 0x1: /* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5: /* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2: /* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3: /* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2): /* indexed stores */
		switch (minor) {
		case 0x1: /* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2: /* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3: /* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	case (0x94>>2): /* FLD.S */
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2): /* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2): /* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;

	case (0x1c>>2): /* floating indexed loads */
		switch (minor) {
		case 0x8: /* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0xb4>>2): /* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2): /* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2): /* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;

	case (0x3c>>2): /* floating indexed stores */
		switch (minor) {
		case 0x8: /* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
#endif

	default:
		/* Not a misaligned access we can fix up - fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	}

	regs->pc += 4; /* Skip the instruction that's just been emulated */
	return 0;
}
static ctl_table unaligned_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#endif
	{}
};
static ctl_table unaligned_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};
static ctl_table sh64_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};

static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}
__initcall(init_sysctl);
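
/*
 * Usage note (added; assumes the "sh64" root procname reconstructed above):
 * once registered, the knobs appear under /proc/sys/sh64/unaligned_fixup/.
 * Writing 0 to user_enable turns user-mode fixups off entirely, while
 * kernel_reports and user_reports hold the remaining number of fixups that
 * will still be logged, so writing a new value replenishes the report budget.
 */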
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;

	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}

	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}