/*
 * Source provenance (from the tomato.git web viewer):
 *   RT-AC56 3.0.0.4.374.37 core
 *   release/src-rt-6.x.4708/linux/linux-2.6.36/arch/mips/kernel/unaligned.c
 *   blob 1e6123f635d716cf3e4a71fef9000a677b964507
 */
/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains exception handler for address error exception with the
 * special capability to execute faulting instructions in software.  The
 * handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data to unaligned addresses is a bad practice even on Intel where
 * only the performance is affected.  Much worse is that such code is non-
 * portable.  Due to several programs that die on MIPS due to alignment
 * problems I decided to implement this handler anyway though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this somewhen in the future when the alignment
 * problems with user programs have been fixed.  For programmers this is the
 * right way to go.
 *
 * Fixing address errors is a per process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I discourage the use of the software
 * emulation strongly - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 for disabling software emulation, enabled otherwise.
 *
 * Below a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for(i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in the practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
75 #include <linux/mm.h>
76 #include <linux/module.h>
77 #include <linux/signal.h>
78 #include <linux/smp.h>
79 #include <linux/sched.h>
80 #include <linux/debugfs.h>
81 #include <asm/asm.h>
82 #include <asm/branch.h>
83 #include <asm/byteorder.h>
84 #include <asm/cop2.h>
85 #include <asm/inst.h>
86 #include <asm/uaccess.h>
87 #include <asm/system.h>
88 #include <asm/fpu.h>
89 #include <asm/fpu_emulator.h>
91 #define STR(x) __STR(x)
92 #define __STR(x) #x
/*
 * Possible values of the debugfs "unaligned_action" control.  The code
 * that acts on SIGNAL/SHOW is outside this chunk; within it, only the
 * instruction counter below is updated after successful emulation.
 */
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};

#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;	/* count of emulated accesses */
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
#ifdef __BIG_ENDIAN

/*
 * Byte-/partial-word accessors for unaligned addresses, big-endian layout.
 *
 * Each macro performs the access with instructions that are legal at any
 * alignment and sets "res" to 0 on success or -EFAULT if a constituent
 * access faulted (routed through the .fixup/__ex_table pair to label 4).
 * Halfword variants use $at as scratch, hence .set noat/.set at.
 */
#define LoadHW(addr, value, res)                              \
		__asm__ __volatile__ (".set\tnoat\n"          \
			"1:\tlb\t%0, 0(%2)\n"                 \
			"2:\tlbu\t$1, 1(%2)\n\t"              \
			"sll\t%0, 0x8\n\t"                    \
			"or\t%0, $1\n\t"                      \
			"li\t%1, 0\n"                         \
			"3:\t.set\tat\n\t"                    \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned 32-bit load via lwl/lwr pair. */
#define LoadW(addr, value, res)                               \
		__asm__ __volatile__ (                        \
			"1:\tlwl\t%0, (%2)\n"                 \
			"2:\tlwr\t%0, 3(%2)\n\t"              \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned zero-extended halfword load. */
#define LoadHWU(addr, value, res)                             \
		__asm__ __volatile__ (                        \
			".set\tnoat\n"                        \
			"1:\tlbu\t%0, 0(%2)\n"                \
			"2:\tlbu\t$1, 1(%2)\n\t"              \
			"sll\t%0, 0x8\n\t"                    \
			"or\t%0, $1\n\t"                      \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".set\tat\n\t"                        \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned zero-extended word load (64-bit kernels): lwl/lwr then
 * clear the upper 32 bits with a dsll/dsrl pair. */
#define LoadWU(addr, value, res)                              \
		__asm__ __volatile__ (                        \
			"1:\tlwl\t%0, (%2)\n"                 \
			"2:\tlwr\t%0, 3(%2)\n\t"              \
			"dsll\t%0, %0, 32\n\t"                \
			"dsrl\t%0, %0, 32\n\t"                \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			"\t.section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned 64-bit load via ldl/ldr pair. */
#define LoadDW(addr, value, res)                              \
		__asm__ __volatile__ (                        \
			"1:\tldl\t%0, (%2)\n"                 \
			"2:\tldr\t%0, 7(%2)\n\t"              \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			"\t.section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned halfword store: low byte then high byte. */
#define StoreHW(addr, value, res)                             \
		__asm__ __volatile__ (                        \
			".set\tnoat\n"                        \
			"1:\tsb\t%1, 1(%2)\n\t"               \
			"srl\t$1, %1, 0x8\n"                  \
			"2:\tsb\t$1, 0(%2)\n\t"               \
			".set\tat\n\t"                        \
			"li\t%0, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%0, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=r" (res)                          \
			: "r" (value), "r" (addr), "i" (-EFAULT));

/* Unaligned 32-bit store via swl/swr pair. */
#define StoreW(addr, value, res)                              \
		__asm__ __volatile__ (                        \
			"1:\tswl\t%1,(%2)\n"                  \
			"2:\tswr\t%1, 3(%2)\n\t"              \
			"li\t%0, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%0, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=r" (res)                          \
			: "r" (value), "r" (addr), "i" (-EFAULT));

/* Unaligned 64-bit store via sdl/sdr pair. */
#define StoreDW(addr, value, res)                             \
		__asm__ __volatile__ (                        \
			"1:\tsdl\t%1,(%2)\n"                  \
			"2:\tsdr\t%1, 7(%2)\n\t"              \
			"li\t%0, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%0, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=r" (res)                          \
			: "r" (value), "r" (addr), "i" (-EFAULT));
#endif
#ifdef __LITTLE_ENDIAN

/*
 * Little-endian counterparts of the accessors above: identical contract
 * ("res" is 0 on success or -EFAULT via the .fixup/__ex_table pair),
 * with the byte offsets and lwl/lwr-style offsets mirrored.
 */
#define LoadHW(addr, value, res)                              \
		__asm__ __volatile__ (".set\tnoat\n"          \
			"1:\tlb\t%0, 1(%2)\n"                 \
			"2:\tlbu\t$1, 0(%2)\n\t"              \
			"sll\t%0, 0x8\n\t"                    \
			"or\t%0, $1\n\t"                      \
			"li\t%1, 0\n"                         \
			"3:\t.set\tat\n\t"                    \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned 32-bit load via lwl/lwr pair. */
#define LoadW(addr, value, res)                               \
		__asm__ __volatile__ (                        \
			"1:\tlwl\t%0, 3(%2)\n"                \
			"2:\tlwr\t%0, (%2)\n\t"               \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned zero-extended halfword load. */
#define LoadHWU(addr, value, res)                             \
		__asm__ __volatile__ (                        \
			".set\tnoat\n"                        \
			"1:\tlbu\t%0, 1(%2)\n"                \
			"2:\tlbu\t$1, 0(%2)\n\t"              \
			"sll\t%0, 0x8\n\t"                    \
			"or\t%0, $1\n\t"                      \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".set\tat\n\t"                        \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned zero-extended word load (64-bit kernels). */
#define LoadWU(addr, value, res)                              \
		__asm__ __volatile__ (                        \
			"1:\tlwl\t%0, 3(%2)\n"                \
			"2:\tlwr\t%0, (%2)\n\t"               \
			"dsll\t%0, %0, 32\n\t"                \
			"dsrl\t%0, %0, 32\n\t"                \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			"\t.section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned 64-bit load via ldl/ldr pair. */
#define LoadDW(addr, value, res)                              \
		__asm__ __volatile__ (                        \
			"1:\tldl\t%0, 7(%2)\n"                \
			"2:\tldr\t%0, (%2)\n\t"               \
			"li\t%1, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			"\t.section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=&r" (value), "=r" (res)           \
			: "r" (addr), "i" (-EFAULT));

/* Unaligned halfword store: low byte then high byte. */
#define StoreHW(addr, value, res)                             \
		__asm__ __volatile__ (                        \
			".set\tnoat\n"                        \
			"1:\tsb\t%1, 0(%2)\n\t"               \
			"srl\t$1,%1, 0x8\n"                   \
			"2:\tsb\t$1, 1(%2)\n\t"               \
			".set\tat\n\t"                        \
			"li\t%0, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%0, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=r" (res)                          \
			: "r" (value), "r" (addr), "i" (-EFAULT));

/* Unaligned 32-bit store via swl/swr pair. */
#define StoreW(addr, value, res)                              \
		__asm__ __volatile__ (                        \
			"1:\tswl\t%1, 3(%2)\n"                \
			"2:\tswr\t%1, (%2)\n\t"               \
			"li\t%0, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%0, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=r" (res)                          \
			: "r" (value), "r" (addr), "i" (-EFAULT));

/* Unaligned 64-bit store via sdl/sdr pair. */
#define StoreDW(addr, value, res)                             \
		__asm__ __volatile__ (                        \
			"1:\tsdl\t%1, 7(%2)\n"                \
			"2:\tsdr\t%1, (%2)\n\t"               \
			"li\t%0, 0\n"                         \
			"3:\n\t"                              \
			".insn\n\t"                           \
			".section\t.fixup,\"ax\"\n\t"         \
			"4:\tli\t%0, %3\n\t"                  \
			"j\t3b\n\t"                           \
			".previous\n\t"                       \
			".section\t__ex_table,\"a\"\n\t"      \
			STR(PTR)"\t1b, 4b\n\t"                \
			STR(PTR)"\t2b, 4b\n\t"                \
			".previous"                           \
			: "=r" (res)                          \
			: "r" (value), "r" (addr), "i" (-EFAULT));
#endif
421 static void emulate_load_store_insn(struct pt_regs *regs,
422 void __user *addr,
423 unsigned int __user *pc)
425 union mips_instruction insn;
426 unsigned long value;
427 unsigned int res;
428 unsigned long origpc;
429 unsigned long orig31;
430 void __user *fault_addr = NULL;
432 origpc = (unsigned long)pc;
433 orig31 = regs->regs[31];
436 * This load never faults.
438 __get_user(insn.word, pc);
440 switch (insn.i_format.opcode) {
442 * These are instructions that a compiler doesn't generate. We
443 * can assume therefore that the code is MIPS-aware and
444 * really buggy. Emulating these instructions would break the
445 * semantics anyway.
447 case ll_op:
448 case lld_op:
449 case sc_op:
450 case scd_op:
453 * For these instructions the only way to create an address
454 * error is an attempted access to kernel/supervisor address
455 * space.
457 case ldl_op:
458 case ldr_op:
459 case lwl_op:
460 case lwr_op:
461 case sdl_op:
462 case sdr_op:
463 case swl_op:
464 case swr_op:
465 case lb_op:
466 case lbu_op:
467 case sb_op:
468 goto sigbus;
471 * The remaining opcodes are the ones that are really of
472 * interest.
474 case lh_op:
475 if (!access_ok(VERIFY_READ, addr, 2))
476 goto sigbus;
478 LoadHW(addr, value, res);
479 if (res)
480 goto fault;
481 compute_return_epc(regs);
482 regs->regs[insn.i_format.rt] = value;
483 break;
485 case lw_op:
486 if (!access_ok(VERIFY_READ, addr, 4))
487 goto sigbus;
489 LoadW(addr, value, res);
490 if (res)
491 goto fault;
492 compute_return_epc(regs);
493 regs->regs[insn.i_format.rt] = value;
494 break;
496 case lhu_op:
497 if (!access_ok(VERIFY_READ, addr, 2))
498 goto sigbus;
500 LoadHWU(addr, value, res);
501 if (res)
502 goto fault;
503 compute_return_epc(regs);
504 regs->regs[insn.i_format.rt] = value;
505 break;
507 case lwu_op:
508 #ifdef CONFIG_64BIT
510 * A 32-bit kernel might be running on a 64-bit processor. But
511 * if we're on a 32-bit processor and an i-cache incoherency
512 * or race makes us see a 64-bit instruction here the sdl/sdr
513 * would blow up, so for now we don't handle unaligned 64-bit
514 * instructions on 32-bit kernels.
516 if (!access_ok(VERIFY_READ, addr, 4))
517 goto sigbus;
519 LoadWU(addr, value, res);
520 if (res)
521 goto fault;
522 compute_return_epc(regs);
523 regs->regs[insn.i_format.rt] = value;
524 break;
525 #endif /* CONFIG_64BIT */
527 /* Cannot handle 64-bit instructions in 32-bit kernel */
528 goto sigill;
530 case ld_op:
531 #ifdef CONFIG_64BIT
533 * A 32-bit kernel might be running on a 64-bit processor. But
534 * if we're on a 32-bit processor and an i-cache incoherency
535 * or race makes us see a 64-bit instruction here the sdl/sdr
536 * would blow up, so for now we don't handle unaligned 64-bit
537 * instructions on 32-bit kernels.
539 if (!access_ok(VERIFY_READ, addr, 8))
540 goto sigbus;
542 LoadDW(addr, value, res);
543 if (res)
544 goto fault;
545 compute_return_epc(regs);
546 regs->regs[insn.i_format.rt] = value;
547 break;
548 #endif /* CONFIG_64BIT */
550 /* Cannot handle 64-bit instructions in 32-bit kernel */
551 goto sigill;
553 case sh_op:
554 if (!access_ok(VERIFY_WRITE, addr, 2))
555 goto sigbus;
557 compute_return_epc(regs);
558 value = regs->regs[insn.i_format.rt];
559 StoreHW(addr, value, res);
560 if (res)
561 goto fault;
562 break;
564 case sw_op:
565 if (!access_ok(VERIFY_WRITE, addr, 4))
566 goto sigbus;
568 compute_return_epc(regs);
569 value = regs->regs[insn.i_format.rt];
570 StoreW(addr, value, res);
571 if (res)
572 goto fault;
573 break;
575 case sd_op:
576 #ifdef CONFIG_64BIT
578 * A 32-bit kernel might be running on a 64-bit processor. But
579 * if we're on a 32-bit processor and an i-cache incoherency
580 * or race makes us see a 64-bit instruction here the sdl/sdr
581 * would blow up, so for now we don't handle unaligned 64-bit
582 * instructions on 32-bit kernels.
584 if (!access_ok(VERIFY_WRITE, addr, 8))
585 goto sigbus;
587 compute_return_epc(regs);
588 value = regs->regs[insn.i_format.rt];
589 StoreDW(addr, value, res);
590 if (res)
591 goto fault;
592 break;
593 #endif /* CONFIG_64BIT */
595 /* Cannot handle 64-bit instructions in 32-bit kernel */
596 goto sigill;
598 case lwc1_op:
599 case ldc1_op:
600 case swc1_op:
601 case sdc1_op:
602 die_if_kernel("Unaligned FP access in kernel code", regs);
603 BUG_ON(!used_math());
604 BUG_ON(!is_fpu_owner());
606 lose_fpu(1); /* save the FPU state for the emulator */
607 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
608 &fault_addr);
609 own_fpu(1); /* restore FPU state */
611 /* If something went wrong, signal */
612 process_fpemu_return(res, fault_addr);
614 if (res == 0)
615 break;
616 return;
619 * COP2 is available to implementor for application specific use.
620 * It's up to applications to register a notifier chain and do
621 * whatever they have to do, including possible sending of signals.
623 case lwc2_op:
624 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
625 break;
627 case ldc2_op:
628 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
629 break;
631 case swc2_op:
632 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
633 break;
635 case sdc2_op:
636 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
637 break;
639 default:
641 * Pheeee... We encountered an yet unknown instruction or
642 * cache coherence problem. Die sucker, die ...
644 goto sigill;
647 #ifdef CONFIG_DEBUG_FS
648 unaligned_instructions++;
649 #endif
651 return;
653 fault:
654 /* roll back jump/branch */
655 regs->cp0_epc = origpc;
656 regs->regs[31] = orig31;
657 /* Did we have an exception handler installed? */
658 if (fixup_exception(regs))
659 return;
661 die_if_kernel("Unhandled kernel unaligned access", regs);
662 force_sig(SIGSEGV, current);
664 return;
666 sigbus:
667 die_if_kernel("Unhandled kernel unaligned access", regs);
668 force_sig(SIGBUS, current);
670 return;
672 sigill:
673 die_if_kernel
674 ("Unhandled kernel unaligned access or invalid instruction", regs);
675 force_sig(SIGILL, current);
/* recode table from micromips register notation to GPR */
static int mmreg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* recode table from micromips STORE register notation to GPR */
static int mmreg16to32_st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
684 void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
686 unsigned long value;
687 unsigned int res;
688 int i;
689 unsigned int reg = 0, rvar;
690 unsigned long orig31;
691 u16 __user *pc16;
692 u16 halfword;
693 unsigned int word;
694 unsigned long origpc, contpc;
695 union mips_instruction insn;
696 struct decoded_instn mminst;
697 void __user *fault_addr = NULL;
699 origpc = regs->cp0_epc;
700 orig31 = regs->regs[31];
702 mminst.micro_mips_mode = 1;
705 * This load never faults.
707 pc16 = (unsigned short __user *)(regs->cp0_epc & ~MIPS_ISA_MODE);
708 __get_user(halfword, pc16);
709 pc16++;
710 contpc = regs->cp0_epc + 2;
711 word = ((unsigned int)halfword << 16);
712 mminst.pc_inc = 2;
714 if (!mm_is16bit(halfword)) {
715 __get_user(halfword, pc16);
716 pc16++;
717 contpc = regs->cp0_epc + 4;
718 mminst.pc_inc = 4;
719 word |= halfword;
721 mminst.insn = word;
723 if (get_user(halfword, pc16))
724 goto fault;
725 mminst.next_pc_inc = 2;
726 word = ((unsigned int)halfword << 16);
728 if (!mm_is16bit(halfword)) {
729 pc16++;
730 if (get_user(halfword, pc16))
731 goto fault;
732 mminst.next_pc_inc = 4;
733 word |= halfword;
735 mminst.next_insn = word;
737 insn = (union mips_instruction)(mminst.insn);
738 if (mm_isBranchInstr(regs, mminst, &contpc))
739 insn = (union mips_instruction)(mminst.next_insn);
741 /* Parse instruction to find what to do */
743 switch (insn.mm_i_format.opcode) {
745 case mm_pool32a_op:
746 switch (insn.mm_x_format.func) {
747 case mm_lwxs32_func:
748 reg = insn.mm_x_format.rd;
749 goto loadW;
752 goto sigbus;
754 case mm_pool32b_op:
755 switch (insn.mm_m_format.func) {
756 case mm_lwp32_func:
757 reg = insn.mm_m_format.rd;
758 if (reg == 31)
759 goto sigbus;
761 if (!access_ok(VERIFY_READ, addr, 8))
762 goto sigbus;
764 LoadW(addr, value, res);
765 if (res)
766 goto fault;
767 regs->regs[reg] = value;
768 addr += 4;
769 LoadW(addr, value, res);
770 if (res)
771 goto fault;
772 regs->regs[reg + 1] = value;
773 goto success;
775 case mm_swp32_func:
776 reg = insn.mm_m_format.rd;
777 if (reg == 31)
778 goto sigbus;
780 if (!access_ok(VERIFY_WRITE, addr, 8))
781 goto sigbus;
783 value = regs->regs[reg];
784 StoreW(addr, value, res);
785 if (res)
786 goto fault;
787 addr += 4;
788 value = regs->regs[reg + 1];
789 StoreW(addr, value, res);
790 if (res)
791 goto fault;
792 goto success;
794 case mm_ldp32_func:
795 #ifdef CONFIG_64BIT
796 reg = insn.mm_m_format.rd;
797 if (reg == 31)
798 goto sigbus;
800 if (!access_ok(VERIFY_READ, addr, 16))
801 goto sigbus;
803 LoadDW(addr, value, res);
804 if (res)
805 goto fault;
806 regs->regs[reg] = value;
807 addr += 8;
808 LoadDW(addr, value, res);
809 if (res)
810 goto fault;
811 regs->regs[reg + 1] = value;
812 goto success;
813 #endif /* CONFIG_64BIT */
815 goto sigill;
817 case mm_sdp32_func:
818 #ifdef CONFIG_64BIT
819 reg = insn.mm_m_format.rd;
820 if (reg == 31)
821 goto sigbus;
823 if (!access_ok(VERIFY_WRITE, addr, 16))
824 goto sigbus;
826 value = regs->regs[reg];
827 StoreDW(addr, value, res);
828 if (res)
829 goto fault;
830 addr += 8;
831 value = regs->regs[reg + 1];
832 StoreDW(addr, value, res);
833 if (res)
834 goto fault;
835 goto success;
836 #endif /* CONFIG_64BIT */
838 goto sigill;
840 case mm_lwm32_func:
841 reg = insn.mm_m_format.rd;
842 rvar = reg & 0xf;
843 if ((rvar > 9) || !reg)
844 goto sigill;
845 if (reg & 0x10) {
846 if (!access_ok
847 (VERIFY_READ, addr, 4 * (rvar + 1)))
848 goto sigbus;
849 } else {
850 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
851 goto sigbus;
853 if (rvar == 9)
854 rvar = 8;
855 for (i = 16; rvar; rvar--, i++) {
856 LoadW(addr, value, res);
857 if (res)
858 goto fault;
859 addr += 4;
860 regs->regs[i] = value;
862 if ((reg & 0xf) == 9) {
863 LoadW(addr, value, res);
864 if (res)
865 goto fault;
866 addr += 4;
867 regs->regs[30] = value;
869 if (reg & 0x10) {
870 LoadW(addr, value, res);
871 if (res)
872 goto fault;
873 regs->regs[31] = value;
875 goto success;
877 case mm_swm32_func:
878 reg = insn.mm_m_format.rd;
879 rvar = reg & 0xf;
880 if ((rvar > 9) || !reg)
881 goto sigill;
882 if (reg & 0x10) {
883 if (!access_ok
884 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
885 goto sigbus;
886 } else {
887 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
888 goto sigbus;
890 if (rvar == 9)
891 rvar = 8;
892 for (i = 16; rvar; rvar--, i++) {
893 value = regs->regs[i];
894 StoreW(addr, value, res);
895 if (res)
896 goto fault;
897 addr += 4;
899 if ((reg & 0xf) == 9) {
900 value = regs->regs[30];
901 StoreW(addr, value, res);
902 if (res)
903 goto fault;
904 addr += 4;
906 if (reg & 0x10) {
907 value = regs->regs[31];
908 StoreW(addr, value, res);
909 if (res)
910 goto fault;
912 goto success;
914 case mm_ldm32_func:
915 #ifdef CONFIG_64BIT
916 reg = insn.mm_m_format.rd;
917 rvar = reg & 0xf;
918 if ((rvar > 9) || !reg)
919 goto sigill;
920 if (reg & 0x10) {
921 if (!access_ok
922 (VERIFY_READ, addr, 8 * (rvar + 1)))
923 goto sigbus;
924 } else {
925 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
926 goto sigbus;
928 if (rvar == 9)
929 rvar = 8;
931 for (i = 16; rvar; rvar--, i++) {
932 LoadDW(addr, value, res);
933 if (res)
934 goto fault;
935 addr += 4;
936 regs->regs[i] = value;
938 if ((reg & 0xf) == 9) {
939 LoadDW(addr, value, res);
940 if (res)
941 goto fault;
942 addr += 8;
943 regs->regs[30] = value;
945 if (reg & 0x10) {
946 LoadDW(addr, value, res);
947 if (res)
948 goto fault;
949 regs->regs[31] = value;
951 goto success;
952 #endif /* CONFIG_64BIT */
954 goto sigill;
956 case mm_sdm32_func:
957 #ifdef CONFIG_64BIT
958 reg = insn.mm_m_format.rd;
959 rvar = reg & 0xf;
960 if ((rvar > 9) || !reg)
961 goto sigill;
962 if (reg & 0x10) {
963 if (!access_ok
964 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
965 goto sigbus;
966 } else {
967 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
968 goto sigbus;
970 if (rvar == 9)
971 rvar = 8;
973 for (i = 16; rvar; rvar--, i++) {
974 value = regs->regs[i];
975 StoreDW(addr, value, res);
976 if (res)
977 goto fault;
978 addr += 8;
980 if ((reg & 0xf) == 9) {
981 value = regs->regs[30];
982 StoreDW(addr, value, res);
983 if (res)
984 goto fault;
985 addr += 8;
987 if (reg & 0x10) {
988 value = regs->regs[31];
989 StoreDW(addr, value, res);
990 if (res)
991 goto fault;
993 goto success;
994 #endif /* CONFIG_64BIT */
996 goto sigill;
998 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1001 goto sigbus;
1003 case mm_pool32c_op:
1004 switch (insn.mm_m_format.func) {
1005 case mm_lwu32_func:
1006 reg = insn.mm_m_format.rd;
1007 goto loadWU;
1010 /* LL,SC,LLD,SCD are not serviced */
1011 goto sigbus;
1013 case mm_pool32f_op:
1014 switch (insn.mm_x_format.func) {
1015 case mm_lwxc1_func:
1016 case mm_swxc1_func:
1017 case mm_ldxc1_func:
1018 case mm_sdxc1_func:
1019 goto fpu_emul;
1022 goto sigbus;
1024 case mm_ldc132_op:
1025 case mm_sdc132_op:
1026 case mm_lwc132_op:
1027 case mm_swc132_op:
1028 fpu_emul:
1029 /* roll back jump/branch */
1030 regs->cp0_epc = origpc;
1031 regs->regs[31] = orig31;
1033 die_if_kernel("Unaligned FP access in kernel code", regs);
1034 BUG_ON(!used_math());
1035 BUG_ON(!is_fpu_owner());
1037 lose_fpu(1); /* save the FPU state for the emulator */
1038 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1039 &fault_addr);
1040 own_fpu(1); /* restore FPU state */
1042 /* If something went wrong, signal */
1043 process_fpemu_return(res, fault_addr);
1045 if (res == 0)
1046 goto success;
1047 return;
1049 case mm_lh32_op:
1050 reg = insn.mm_i_format.rt;
1051 goto loadHW;
1053 case mm_lhu32_op:
1054 reg = insn.mm_i_format.rt;
1055 goto loadHWU;
1057 case mm_lw32_op:
1058 reg = insn.mm_i_format.rt;
1059 goto loadW;
1061 case mm_sh32_op:
1062 reg = insn.mm_i_format.rt;
1063 goto storeHW;
1065 case mm_sw32_op:
1066 reg = insn.mm_i_format.rt;
1067 goto storeW;
1069 case mm_ld32_op:
1070 reg = insn.mm_i_format.rt;
1071 goto loadDW;
1073 case mm_sd32_op:
1074 reg = insn.mm_i_format.rt;
1075 goto storeDW;
1077 case mm_pool16c_op:
1078 switch (insn.mm16_m_format.func) {
1079 case mm_lwm16_func:
1080 reg = insn.mm16_m_format.rlist;
1081 rvar = reg + 1;
1082 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1083 goto sigbus;
1085 for (i = 16; rvar; rvar--, i++) {
1086 LoadW(addr, value, res);
1087 if (res)
1088 goto fault;
1089 addr += 4;
1090 regs->regs[i] = value;
1092 LoadW(addr, value, res);
1093 if (res)
1094 goto fault;
1095 regs->regs[31] = value;
1097 goto success;
1099 case mm_swm16_func:
1100 reg = insn.mm16_m_format.rlist;
1101 rvar = reg + 1;
1102 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1103 goto sigbus;
1105 for (i = 16; rvar; rvar--, i++) {
1106 value = regs->regs[i];
1107 StoreW(addr, value, res);
1108 if (res)
1109 goto fault;
1110 addr += 4;
1112 value = regs->regs[31];
1113 StoreW(addr, value, res);
1114 if (res)
1115 goto fault;
1117 goto success;
1121 goto sigbus;
1123 case mm_lhu16_op:
1124 reg = mmreg16to32[insn.mm16_rb_format.rt];
1125 goto loadHWU;
1127 case mm_lw16_op:
1128 reg = mmreg16to32[insn.mm16_rb_format.rt];
1129 goto loadW;
1131 case mm_sh16_op:
1132 reg = mmreg16to32_st[insn.mm16_rb_format.rt];
1133 goto storeHW;
1135 case mm_sw16_op:
1136 reg = mmreg16to32_st[insn.mm16_rb_format.rt];
1137 goto storeW;
1139 case mm_lwsp16_op:
1140 reg = insn.mm16_r5_format.rt;
1141 goto loadW;
1143 case mm_swsp16_op:
1144 reg = insn.mm16_r5_format.rt;
1145 goto storeW;
1147 case mm_lwgp16_op:
1148 reg = mmreg16to32[insn.mm16_r3_format.rt];
1149 goto loadW;
1151 default:
1152 goto sigill;
1155 loadHW:
1156 if (!access_ok(VERIFY_READ, addr, 2))
1157 goto sigbus;
1159 LoadHW(addr, value, res);
1160 if (res)
1161 goto fault;
1162 regs->regs[reg] = value;
1163 goto success;
1165 loadHWU:
1166 if (!access_ok(VERIFY_READ, addr, 2))
1167 goto sigbus;
1169 LoadHWU(addr, value, res);
1170 if (res)
1171 goto fault;
1172 regs->regs[reg] = value;
1173 goto success;
1175 loadW:
1176 if (!access_ok(VERIFY_READ, addr, 4))
1177 goto sigbus;
1179 LoadW(addr, value, res);
1180 if (res)
1181 goto fault;
1182 regs->regs[reg] = value;
1183 goto success;
1185 loadWU:
1186 #ifdef CONFIG_64BIT
1188 * A 32-bit kernel might be running on a 64-bit processor. But
1189 * if we're on a 32-bit processor and an i-cache incoherency
1190 * or race makes us see a 64-bit instruction here the sdl/sdr
1191 * would blow up, so for now we don't handle unaligned 64-bit
1192 * instructions on 32-bit kernels.
1194 if (!access_ok(VERIFY_READ, addr, 4))
1195 goto sigbus;
1197 LoadWU(addr, value, res);
1198 if (res)
1199 goto fault;
1200 regs->regs[reg] = value;
1201 goto success;
1202 #endif /* CONFIG_64BIT */
1204 /* Cannot handle 64-bit instructions in 32-bit kernel */
1205 goto sigill;
1207 loadDW:
1208 #ifdef CONFIG_64BIT
1210 * A 32-bit kernel might be running on a 64-bit processor. But
1211 * if we're on a 32-bit processor and an i-cache incoherency
1212 * or race makes us see a 64-bit instruction here the sdl/sdr
1213 * would blow up, so for now we don't handle unaligned 64-bit
1214 * instructions on 32-bit kernels.
1216 if (!access_ok(VERIFY_READ, addr, 8))
1217 goto sigbus;
1219 LoadDW(addr, value, res);
1220 if (res)
1221 goto fault;
1222 regs->regs[reg] = value;
1223 goto success;
1224 #endif /* CONFIG_64BIT */
1226 /* Cannot handle 64-bit instructions in 32-bit kernel */
1227 goto sigill;
1229 storeHW:
1230 if (!access_ok(VERIFY_WRITE, addr, 2))
1231 goto sigbus;
1233 value = regs->regs[reg];
1234 StoreHW(addr, value, res);
1235 if (res)
1236 goto fault;
1237 goto success;
1239 storeW:
1240 if (!access_ok(VERIFY_WRITE, addr, 4))
1241 goto sigbus;
1243 value = regs->regs[reg];
1244 StoreW(addr, value, res);
1245 if (res)
1246 goto fault;
1247 goto success;
1249 storeDW:
1250 #ifdef CONFIG_64BIT
1252 * A 32-bit kernel might be running on a 64-bit processor. But
1253 * if we're on a 32-bit processor and an i-cache incoherency
1254 * or race makes us see a 64-bit instruction here the sdl/sdr
1255 * would blow up, so for now we don't handle unaligned 64-bit
1256 * instructions on 32-bit kernels.
1258 if (!access_ok(VERIFY_WRITE, addr, 8))
1259 goto sigbus;
1261 value = regs->regs[reg];
1262 StoreDW(addr, value, res);
1263 if (res)
1264 goto fault;
1265 goto success;
1266 #endif /* CONFIG_64BIT */
1268 /* Cannot handle 64-bit instructions in 32-bit kernel */
1269 goto sigill;
1271 success:
1272 regs->cp0_epc = contpc; /* advance or branch */
1274 #ifdef CONFIG_DEBUG_FS
1275 unaligned_instructions++;
1276 #endif
1277 return;
1279 fault:
1280 /* roll back jump/branch */
1281 regs->cp0_epc = origpc;
1282 regs->regs[31] = orig31;
1283 /* Did we have an exception handler installed? */
1284 if (fixup_exception(regs))
1285 return;
1287 die_if_kernel("Unhandled kernel unaligned access", regs);
1288 force_sig(SIGSEGV, current);
1290 return;
1292 sigbus:
1293 die_if_kernel("Unhandled kernel unaligned access", regs);
1294 force_sig(SIGBUS, current);
1296 return;
1298 sigill:
1299 die_if_kernel
1300 ("Unhandled kernel unaligned access or invalid instruction", regs);
1301 force_sig(SIGILL, current);
/* recode table from MIPS16e register notation to GPR */
int mips16e_reg2gpr[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
/*
 * emulate_load_store_MIPS16e - software-emulate an unaligned MIPS16e access.
 * @regs: trapping task's saved register file
 * @addr: the misaligned data address (cp0_badvaddr, passed in by do_ade())
 *
 * Decodes the faulting MIPS16e instruction at EPC (with the ISA mode bit
 * masked off), emulates the halfword/word/doubleword load or store with
 * byte-safe accessors, and advances EPC past the instruction (including any
 * branch bookkeeping via MIPS16e_compute_return_epc()).
 *
 * On an unrecoverable user fault it delivers SIGSEGV/SIGBUS/SIGILL; byte
 * accesses (lb/lbu/sb) can never be misaligned, so seeing one here means
 * something is wrong and we send SIGBUS.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;			/* destination/source GPR number */
	unsigned long orig31;		/* saved $ra for fault rollback */
	u16 __user *pc16;
	unsigned long origpc;		/* saved EPC for fault rollback */
	union mips16e_instruction mips16inst, oldinst;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	/* MIPS16e code runs with the ISA mode bit set in EPC; mask it off. */
	pc16 = (unsigned short __user *)(origpc & ~MIPS_ISA_MODE);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	/* keep the unextended instruction for EPC computation below */
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/* First pass: work out which GPR the instruction names. */
	switch (mips16inst.ri.opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = mips16e_reg2gpr[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = mips16e_reg2gpr[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		/* unknown I64/RI64 function */
		goto sigbus;

	case MIPS16e_swsp_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		reg = mips16e_reg2gpr[mips16inst.ri.rx];
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = mips16e_reg2gpr[mips16inst.rri.ry];
		break;
	}

	/* Second pass: perform the access itself. */
	switch (mips16inst.ri.opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* byte accesses cannot be misaligned - must be bogus */
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		/* EPC is advanced before the store so the source register
		 * is read after any branch bookkeeping. */
		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
/*
 * do_ade - C entry point for the address error (AdEL/AdES) exception.
 * @regs: trapping task's saved register file
 *
 * Forwards the fault as SIGBUS when emulation is not wanted (instruction
 * fetch faults, user tasks without TIF_FIXADE, or the sysctl/debugfs
 * unaligned_action set to SIGNAL); otherwise dispatches to the matching
 * emulator for MIPS16e, microMIPS or classic MIPS32/64 code.
 *
 * For kernel-mode faults the address-space limit is temporarily raised to
 * KERNEL_DS around the emulator, since the emulators access @addr through
 * user-access macros; the previous limit is restored afterwards.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	unsigned int __user *pc;
	mm_segment_t seg;

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	/* user task did not opt into address-error fixups via sysmips() */
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so but ugly ...
	 */

	/*
	 * Are we running in MIPS16e/microMIPS mode?
	 */
	if (is16mode(regs)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16bit mode?
		 */
		if (regs->cp0_badvaddr == (regs->cp0_epc & ~MIPS_ISA_MODE))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
						   (void __user *)regs->
						   cp0_badvaddr);
			set_fs(seg);

			return;
		}

		if (cpu_has_mmips) {	/* micromips unaligned access */
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
						     (void __user *)regs->
						     cp0_badvaddr);
			set_fs(seg);

			return;
		}

		/* 16-bit ISA mode but neither MIPS16 nor microMIPS support */
		goto sigbus;
	}

	/* Classic 32-bit-wide instruction encoding. */
	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;

/*
 * Expose the unaligned-access statistics and policy knob under the MIPS
 * debugfs directory:
 *   unaligned_instructions - read-only count of emulated accesses
 *   unaligned_action       - read/write policy (quiet / signal / show)
 *
 * Returns 0 on success, -ENODEV if the parent directory was never created,
 * or -ENOMEM if a debugfs entry could not be allocated.
 */
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
__initcall(debugfs_unaligned);
#endif