/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>
extern char system_call_common[];
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
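/*
 * BO field guide for the tests above: bit 0x4 clear means "decrement
 * CTR first"; bit 1 of BO then selects branch-if-CTR-zero versus
 * branch-if-CTR-nonzero, which is what the XOR against (regs->ctr == 0)
 * implements.  Bit 0x10 clear means "also test CR bit BI", with bit 3
 * of BO giving the CR bit value required for the branch to be taken.
 */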
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64 */
/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}
	return truncate_if_32bit(regs->msr, ea);
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
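/*
 * For example, on a 64-bit kernel max_align(6) == 2, max_align(8) == 8
 * and max_align(0) == 8: ORing in sizeof(unsigned long) caps the result
 * at the word size before x & -x isolates the lowest set bit.
 */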
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}
#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
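/*
 * byterev_8() byte-reverses each 32-bit half with byterev_4() and swaps
 * the halves, e.g. 0x0102030405060708 becomes 0x0807060504030201.
 */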
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}
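/*
 * Each iteration reads the largest naturally aligned piece that still
 * fits: an unaligned 4-byte read at ea = 0x1002 becomes two 2-byte
 * reads, accumulated most-significant-first into x (big-endian order).
 */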
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
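/*
 * When ea is word-aligned the asm helper accesses user memory itself;
 * otherwise the value is staged through the on-stack val[] buffer with
 * read_mem_unaligned() and the helper loads from that kernel buffer.
 */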
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))
#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
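/*
 * The "1:" access, "3:" fixup and __ex_table entry in the macros above
 * form the usual kernel exception-table pattern: if the instruction at
 * label 1 faults, the handler branches to label 3, which loads -EFAULT
 * into err and resumes at label 2 instead of oopsing.
 */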
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
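/*
 * CR0 as set above: 0x80000000 = LT (result negative), 0x40000000 = GT
 * (positive), 0x20000000 = EQ (zero); the low bit of the field is SO,
 * copied from XER[SO] by the ((regs->xer >> 3) & 0x10000000) term.
 */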
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
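/*
 * Unsigned carry-out detection: val < val1 catches wraparound from the
 * addition, and (carry_in && val == val1) catches the corner case where
 * val2 is all-ones, so adding it plus carry-in lands exactly on val1.
 */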
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
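/*
 * Example: MASK32(27, 31) == 0x1f, a mask of ones from IBM bit mb
 * through bit me inclusive; the (me) >= (mb) term corrects the sum for
 * non-wrapping masks.  ROTATE() special-cases n == 0 because a C shift
 * by the full word width is undefined behaviour.
 */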
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	int err;
	unsigned long old_ra;
	long ival;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = imm;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
#endif
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			return -1;
		case 150:	/* isync */
			isync();
			goto instr_done;
		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
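/*
 * The CR-logical cases above exploit the opcode encoding: bits 6-9 of
 * the instruction hold the two-input truth table of the operation, so
 * indexing them with (ra * 2 + rb) reads off the result bit directly.
 */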
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}
	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
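/*
 * E.g. "rlwinm ra,rd,16,16,31" extracts the upper halfword of rd:
 * DATA32() replicates the low word on 64-bit so the 64-bit ROTATE()
 * behaves like a 32-bit rotate, then MASK32(16, 31) keeps bits 16-31.
 */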
	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;
#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
		break;
#endif
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;

		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;
		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;
		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;
		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;
/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;
/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */
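/*
 * For the arithmetic right shifts above, XER[CA] is set only when the
 * source is negative and 1-bits were shifted out, i.e. exactly when the
 * result differs from the quotient truncated toward zero; shift counts
 * past the register width are clamped so the result is all sign bits.
 */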
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;

		case 278:	/* dcbt */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;
		}
		break;
	}
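/*
 * dcbt and dcbtst are pure hints that never fault, so they are emulated
 * with kernel prefetch()/prefetchw() on the computed effective address
 * and need no error path, unlike dcbst/dcbf above.
 */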
	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;
		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;
#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;
		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;
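/*
 * lwarx/stwcx. and ldarx/stdcx. must really execute via the asm
 * helpers: emulating them with plain loads and stores would not set a
 * reservation and would break the atomic sequence being stepped.  The
 * mfcr in __put_user_asmx captures CR0 so the store-conditional's
 * success bit can be folded back into the saved CCR image above.
 */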
		case 21:	/* ldx */
		case 53:	/* ldux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif
		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;
1317 case 359: /* lvxl */
1318 if (!(regs
->msr
& MSR_VEC
))
1320 ea
= xform_ea(instr
, regs
, 0);
1321 err
= do_vec_load(rd
, do_lvx
, ea
, regs
);
1324 case 231: /* stvx */
1325 case 487: /* stvxl */
1326 if (!(regs
->msr
& MSR_VEC
))
1328 ea
= xform_ea(instr
, regs
, 0);
1329 err
= do_vec_store(rd
, do_stvx
, ea
, regs
);
1331 #endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;
		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;
		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;
#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;
#endif

		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;
#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;
#endif /* CONFIG_VSX */
		}
		break;
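/*
 * For the VSX forms above, the low instruction bit extends the 5-bit
 * RT/RS field (rd |= (instr & 1) << 5) so all 64 VSX registers are
 * addressable.
 */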
	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 36:	/* stw */
	case 37:	/* stwu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;
	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;
#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */
	}
	return 0;
 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}