/*---------------------------------------------------------------*/
/*--- begin                                guest_ppc_helpers.c ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_ir.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_ppc_defs.h"
/* This file contains helper functions for ppc32 and ppc64 guest code.
   Calls to these functions are generated by the back end.  These
   calls are of course in the host machine code and this file will be
   compiled to host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest_ppc_toIR.c.
*/
/*---------------------------------------------------------------*/
/*--- Misc integer helpers.                                   ---*/
/*---------------------------------------------------------------*/
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-ppc platforms, return 1. */
/* Reads a complete, consistent 64-bit TB value. */
ULong ppcg_dirtyhelper_MFTB ( void )
{
#  if defined(__powerpc__)
   ULong res;
   UInt  lo, hi1, hi2;
   while (1) {
      /* Read TBU, then TBL, then TBU again; retry if TBU changed
         in between, so that the two halves are consistent. */
      __asm__
      __volatile__ ("\n"
         "\tmftbu %0\n"
         "\tmftb %1\n"
         "\tmftbu %2\n"
         : "=r" (hi1), "=r" (lo), "=r" (hi2)
      );
      if (hi1 == hi2) break;
   }
   res = ((ULong)hi1) << 32;
   res |= (ULong)lo;
   return res;
#  else
   return 1ULL;
#  endif
}
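/* For example, if the time base is 0x00000001FFFFFFFF at the first mftbu
   and ticks over to 0x0000000200000000 before the mftb, naively pairing
   hi = 0x00000001 with lo = 0x00000000 would produce a value roughly four
   billion ticks in the past.  The second mftbu then reads 0x00000002,
   which differs from the first read, so the loop retries. */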
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially transparent) */
UInt ppc32g_dirtyhelper_MFSPR_268_269 ( UInt r269 )
{
#  if defined(__powerpc__)
   UInt spr;
   if (r269) {
      __asm__ __volatile__("mfspr %0,269" : "=b"(spr));
   } else {
      __asm__ __volatile__("mfspr %0,268" : "=b"(spr));
   }
   return spr;
#  else
   return 0;
#  endif
}
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (I'm not really sure what the side effects are) */
UInt ppc32g_dirtyhelper_MFSPR_287 ( void )
{
#  if defined(__powerpc__)
   UInt spr;
   __asm__ __volatile__("mfspr %0,287" : "=b"(spr));
   return spr;
#  else
   return 0;
#  endif
}
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (reads guest state, writes guest mem) */
void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
                              UInt vD_off, UInt sh, UInt shift_right )
{
  UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                    0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                    0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };
  U128* pU128_src;
  U128* pU128_dst;

  vassert( vD_off       <= sizeof(VexGuestPPC32State)-8 );
  vassert( sh           <= 15 );
  vassert( shift_right  <= 1 );

  if (shift_right)
     sh = 16-sh;
  /* else shift left */

  pU128_src = (U128*)&ref[sh];
  pU128_dst = (U128*)( ((UChar*)gst) + vD_off );

  (*pU128_dst)[0] = (*pU128_src)[0];
  (*pU128_dst)[1] = (*pU128_src)[1];
  (*pU128_dst)[2] = (*pU128_src)[2];
  (*pU128_dst)[3] = (*pU128_src)[3];
}
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (reads guest state, writes guest mem) */
void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
                              UInt vD_off, UInt sh, UInt shift_right,
                              UInt endness )
{
  UChar ref[32];
  ULong i;
  U128* pU128_src;
  U128* pU128_dst;

  /* ref[] used to be a static const array, but this doesn't work on
     ppc64 because VEX doesn't load the TOC pointer for the call here,
     and so we wind up picking up some totally random other data.
     (It's a wonder we don't segfault.)  So, just to be clear, this
     "fix" (vex r2073) is really a kludgearound for the fact that
     VEX's 64-bit ppc code generation doesn't provide a valid TOC
     pointer for helper function calls.  Ick.  (Bug 250038) */
  for (i = 0; i < 32; i++) ref[i] = i;

  vassert( vD_off       <= sizeof(VexGuestPPC64State)-8 );
  vassert( sh           <= 15 );
  vassert( shift_right  <= 1 );

  if (shift_right)
     sh = 16-sh;
  /* else shift left */

  pU128_src = (U128*)&ref[sh];
  pU128_dst = (U128*)( ((UChar*)gst) + vD_off );

  if ((0x1 & endness) == 0x0) {
     /* Little-endian target register: copy the bytes reversed. */
     unsigned char *srcp, *dstp;
     Int k;
     srcp = (unsigned char *)pU128_src;
     dstp = (unsigned char *)pU128_dst;
     for (k = 15; k >= 0; k--, srcp++)
        dstp[k] = *srcp;
     return;
  }

  (*pU128_dst)[0] = (*pU128_src)[0];
  (*pU128_dst)[1] = (*pU128_src)[1];
  (*pU128_dst)[2] = (*pU128_src)[2];
  (*pU128_dst)[3] = (*pU128_src)[3];
}
/* Helper-function specialiser. */

IRExpr* guest_ppc32_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts )
{
   return NULL;
}

IRExpr* guest_ppc64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts )
{
   return NULL;
}
/* A 16-bit floating point number is stored in the lower 16 bits of a
   32-bit value. */
#define I16_EXP_MASK        0x7C00
#define I16_FRACTION_MASK   0x03FF
#define I32_EXP_MASK        0x7F800000
#define I32_FRACTION_MASK   0x007FFFFF
#define I64_EXP_MASK        0x7FF0000000000000ULL
#define I64_FRACTION_MASK   0x000FFFFFFFFFFFFFULL
#define V128_EXP_MASK       0x7FFF000000000000ULL
#define V128_FRACTION_MASK  0x0000FFFFFFFFFFFFULL  /* upper 64-bit fractional mask */
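/* For example, the half-precision value 0x7C00 (+infinity) has all bits of
   I16_EXP_MASK set and a zero fraction, while 0x7E00 (a quiet NaN) has the
   exponent all ones and a non-zero fraction (0x0200 under
   I16_FRACTION_MASK).  The wider masks follow the same pattern for 32-bit,
   64-bit and quad-precision values. */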
ULong generate_C_FPCC_helper( ULong irType, ULong src_hi, ULong src )
{
   UInt NaN, inf, zero, norm, dnorm, pos;
   UInt bit0, bit1, bit2, bit3;
   UInt sign_bit = 0;
   ULong exp_mask = 0, exp_part = 0, frac_part = 0;
   ULong fpcc, c;

   if ( irType == Ity_I16 ) {
      frac_part = I16_FRACTION_MASK & src;
      exp_mask  = I16_EXP_MASK;
      exp_part  = exp_mask & src;
      sign_bit  = src >> 15;

   } else if ( irType == Ity_I32 ) {
      frac_part = I32_FRACTION_MASK & src;
      exp_mask  = I32_EXP_MASK;
      exp_part  = exp_mask & src;
      sign_bit  = src >> 31;

   } else if ( irType == Ity_I64 ) {
      frac_part = I64_FRACTION_MASK & src;
      exp_mask  = I64_EXP_MASK;
      exp_part  = exp_mask & src;
      sign_bit  = src >> 63;

   } else if ( irType == Ity_F128 ) {
      /* only care if the frac part is zero or non-zero */
      frac_part = (V128_FRACTION_MASK & src_hi) | src;
      exp_mask  = V128_EXP_MASK;
      exp_part  = exp_mask & src_hi;
      sign_bit  = src_hi >> 63;

   } else {
      vassert(0);  // Unknown value of irType
   }

   /* NaN: exponent is all ones, fractional part not zero */
   if ((exp_part == exp_mask) && (frac_part != 0))
      NaN = 1;
   else
      NaN = 0;

   /* inf: exponent all 1's, fraction part is zero */
   if ((exp_part == exp_mask) && (frac_part == 0))
      inf = 1;
   else
      inf = 0;

   /* zero: exponent is 0, fraction part is zero */
   if ((exp_part == 0) && (frac_part == 0))
      zero = 1;
   else
      zero = 0;

   /* norm: exponent is not 0, exponent is not all 1's */
   if ((exp_part != 0) && (exp_part != exp_mask))
      norm = 1;
   else
      norm = 0;

   /* dnorm: exponent is all 0's, fraction is not 0 */
   if ((exp_part == 0) && (frac_part != 0))
      dnorm = 1;
   else
      dnorm = 0;

   /* pos: sign bit is zero */
   if (sign_bit == 0)
      pos = 1;
   else
      pos = 0;

   /* If the result is NaN then must force bits 1, 2 and 3 to zero
    * to get correct result.
    */
   bit0 = NaN | inf;
   bit1 = (!NaN) & zero;
   bit2 = (!NaN) & ((pos & dnorm) | (pos & norm) | (pos & inf))
      & ((!zero) & (!NaN));
   bit3 = (!NaN) & (((!pos) & dnorm) |((!pos) & norm) | ((!pos) & inf))
      & ((!zero) & (!NaN));

   fpcc = (bit3 << 3) | (bit2 << 2) | (bit1 << 1) | bit0;

   /* Calculate the C bit. */
   c = NaN | ((!pos) & dnorm) | ((!pos) & zero) | (pos & dnorm);

   /* return C in the upper 32-bits and FPCC in the lower 32 bits */
   return (c <<32) | fpcc;
}
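/* For illustration: with irType == Ity_I32 and src == 0x3F800000 (+1.0),
   the exponent field is neither zero nor all ones, so norm == 1 and
   pos == 1, giving FPCC == 0b0100 (greater than zero) and C == 0, i.e. a
   return value of 0x4.  With src == 0x80000000 (-0.0), zero == 1 and
   pos == 0, giving FPCC == 0b0010 and C == 1, i.e. 0x100000002. */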
/*---------------------------------------------------------------*/
/*--- Misc BCD clean helpers.                                 ---*/
/*---------------------------------------------------------------*/

/* NOTE: the clean and dirty helpers need to be called using the
 * fnptr_to_fnentry() function wrapper to handle the Big Endian
 * pointer-to-function ABI and the Little Endian ABI.
 */
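/* As a rough sketch (the actual call sites live in guest_ppc_toIR.c), a
 * clean-helper call is constructed along the lines of:
 *
 *    IRExpr* call
 *       = mkIRExprCCall( Ity_I64, 0, "is_BCDstring128_helper",
 *                        fnptr_to_fnentry( vbi, &is_BCDstring128_helper ),
 *                        mkIRExprVec_3( mkU64(1), hi64, lo64 ) );
 *
 * where vbi, hi64 and lo64 stand for the VexAbiInfo pointer and the two
 * 64-bit halves available at that point in the front end.
 */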
/* This C-helper takes a 128-bit BCD value as two 64-bit pieces.
 * It checks the string to see if it is a valid 128-bit BCD value.
 * A valid BCD value has a sign value in bits [3:0] between 0xA
 * and 0xF inclusive.  Each of the BCD digits, represented as a 4-bit
 * hex number in bits [127:4] of the BCD value, must be between 0 and 9
 * inclusive.  Returns a non-zero 64-bit value if valid.
 */
ULong is_BCDstring128_helper( ULong Signed, ULong bcd_string_hi,
                              ULong bcd_string_low ) {
   Int   i;
   ULong valid_bcd, sign_valid = False;
   ULong digit;
   UInt  sign;

   if ( Signed == True ) {
      sign = bcd_string_low & 0xF;
      if( ( sign >= 0xA ) && ( sign <= 0xF ) )
         sign_valid = True;

      /* Change the sign digit to a zero
       * so the for loop below works the same
       * for signed and unsigned BCD strings.
       */
      bcd_string_low &= 0xFFFFFFFFFFFFFFF0ULL;

   } else {
      sign_valid = True;  /* set sign to True so result is only
                             based on the validity of the digits */
   }

   valid_bcd = True;  // Assume true to start
   for( i = 0; i < 32; i++ ) {
      /* check high and low 64-bit strings in parallel */
      digit = bcd_string_low & 0xF;
      if ( digit > 0x9 )
         valid_bcd = False;
      bcd_string_low = bcd_string_low >> 4;

      digit = bcd_string_hi & 0xF;
      if ( digit > 0x9 )
         valid_bcd = False;
      bcd_string_hi = bcd_string_hi >> 4;
   }

   return valid_bcd & sign_valid;
}
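/* For example, with Signed == True, bcd_string_hi == 0 and
   bcd_string_low == 0x123D, the sign nibble 0xD is in the valid 0xA..0xF
   range and the digits 1, 2, 3 (plus the implicit leading zeros) are all
   decimal, so the helper returns a non-zero value.  Changing one digit
   nibble to 0xA (e.g. 0x1A3D) makes it return 0. */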
/* This clean helper takes a signed 32-bit BCD value and a carry in,
 * and adds 1 to the value of the BCD value.  The BCD value is passed
 * in as a single 64-bit value.  The incremented value is returned in
 * the lower 32 bits of the result.  If the input was signed, the sign of
 * the result is the same as the input.  The carry out is returned in
 * bits [35:32] of the result.
 */
ULong increment_BCDstring32_helper( ULong Signed,
                                    ULong bcd_string, ULong carry_in ) {
   UInt  i, num_digits = 8;
   ULong bcd_value, result = 0;
   ULong carry, digit, new_digit;

   carry = carry_in;

   if ( Signed == True ) {
      bcd_value = bcd_string >> 4;   /* remove sign */
      num_digits = num_digits - 1;
   } else {
      bcd_value = bcd_string;
   }

   for( i = 0; i < num_digits; i++ ) {
      digit = bcd_value & 0xF;
      bcd_value = bcd_value >> 4;
      new_digit = digit + carry;

      if ( new_digit >= 10 ) {
         carry = 1;
         new_digit = new_digit - 10;

      } else {
         carry = 0;
      }
      result = result | ( new_digit << (i*4) );
   }

   if ( Signed == True ) {
      result = ( carry << 32) | ( result << 4 ) | ( bcd_string & 0xF );
   } else {
      result = ( carry << 32) | result;
   }

   return result;
}
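/* Worked example: with Signed == True, bcd_string == 0x199C (+199 with a
   positive sign nibble 0xC) and carry_in == 1, the digit loop produces
   9+1 -> 0 carry 1, 9+1 -> 0 carry 1, 1+1 -> 2 carry 0, so the digits
   become 200.  The sign nibble is re-attached, giving a result of 0x200C
   with a zero carry out in bits [35:32]. */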
/*---------------------------------------------------------------*/
/*--- Misc packed decimal clean helpers.                      ---*/
/*---------------------------------------------------------------*/
/* This C-helper takes a 64-bit packed decimal value stored in a
 * 64-bit value.  It converts it to the zoned decimal format.  The lower
 * byte may contain a sign value; set it to zero.  If return_upper
 * is zero, return the lower 64 bits of the result, otherwise return the
 * upper 64 bits of the result.
 */
ULong convert_to_zoned_helper( ULong src_hi, ULong src_low,
                               ULong upper_byte, ULong return_upper ) {
   UInt i, sh;
   ULong tmp = 0, new_value;

   /* Remove the sign from the source.  Put in the upper byte of result.
    * Sign inserted later.
    */
   if ( return_upper == 0 ) {  /* return lower 64-bit result */
      for( i = 0; i < 7; i++ ) {
         sh = ( 8 - i ) * 4;
         new_value = ( ( src_low >> sh ) & 0xf ) | upper_byte;
         tmp = tmp | ( new_value << ( ( 7 - i ) * 8 ) );
      }

   } else {
      /* Byte for i=0 is in upper 64-bit of the source, do it separately */
      new_value = ( src_hi & 0xf ) | upper_byte;
      tmp = tmp | new_value << 56;

      for( i = 1; i < 8; i++ ) {
         sh = ( 16 - i ) * 4;
         new_value = ( ( src_low >> sh ) & 0xf ) | upper_byte;
         tmp = tmp | ( new_value << ( ( 7 - i ) * 8 ) );
      }
   }
   return tmp;
}
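/* For illustration, with upper_byte == 0x30 each packed digit becomes an
   ASCII-style zoned byte: packed digits 1, 2, 3 turn into the bytes 0x31,
   0x32, 0x33 in the corresponding byte positions of the result. */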
/* This C-helper takes the lower 64-bits of the 128-bit packed decimal
 * src value.  It converts the src value to a 128-bit national format.
 * If return_upper is zero, the helper returns the lower 64 bits of the
 * result, otherwise it returns the upper 64 bits of the result.
 */
ULong convert_to_national_helper( ULong src, ULong return_upper ) {

   UInt i;
   UInt sh = 3, max = 4, min = 0;  /* initialize max, min for return upper */
   ULong tmp = 0, new_value;

   if ( return_upper == 0 ) {  /* return lower 64-bit result */
      min = 4;
      max = 8;
      sh  = 7;
   }

   for( i = min; i < max; i++ ) {
      new_value = ( ( src >> ( ( 7 - i ) * 4 ) ) & 0xf ) | 0x0030;
      tmp = tmp | ( new_value << ( ( sh - i ) * 16 ) );
   }
   return tmp;
}
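/* For illustration: in national format each digit occupies a 16-bit
   halfword whose value is 0x0030 plus the digit, so a packed digit 5
   becomes the halfword 0x0035 in the result. */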
/* This C-helper takes a 128-bit zoned value stored in a 128-bit
 * value.  It converts it to the packed 64-bit decimal format without a
 * sign value.  The sign is supposed to be in bits [3:0] and the packed
 * value in bits [67:4].  This helper leaves it to the caller to put the
 * result into a V128, shift the returned value over and put the sign
 * in.
 */
ULong convert_from_zoned_helper( ULong src_hi, ULong src_low ) {
   UInt i;
   ULong tmp = 0, nibble;

   /* Unroll the i = 0 iteration so the sizes of the loop for the upper
    * and lower extraction match.  Skip the sign in the least significant
    * byte.
    */
   nibble = ( src_hi >> 56 ) & 0xF;
   tmp = tmp | ( nibble << 60 );

   for( i = 1; i < 8; i++ ) {
      /* get the high nibbles, put into result */
      nibble = ( src_hi >> ( ( 7 - i ) * 8 ) ) & 0xF;
      tmp = tmp | ( nibble << ( ( 15 - i ) * 4 ) );

      /* get the low nibbles, put into result */
      nibble = ( src_low >> ( ( 8 - i ) * 8 ) ) & 0xF;
      tmp = tmp | ( nibble << ( ( 8 - i ) * 4 ) );
   }
   return tmp;
}
/* This C-helper takes a 128-bit national value stored in a 128-bit
 * value.  It converts it to a signless packed 64-bit decimal format.
 */
ULong convert_from_national_helper( ULong src_hi, ULong src_low ) {
   UInt i;
   ULong tmp = 0, hword;

   src_low = src_low & 0xFFFFFFFFFFFFFFF0ULL;  /* remove the sign */

   for( i = 0; i < 4; i++ ) {
      /* get the high half-word, put into result */
      hword = ( src_hi >> ( ( 3 - i ) * 16 ) ) & 0xF;
      tmp = tmp | ( hword << ( ( 7 - i ) * 4 ) );

      /* get the low half-word, put into result */
      hword = ( src_low >> ( ( 3 - i ) * 16 ) ) & 0xF;
      tmp = tmp | ( hword << ( ( 3 - i ) * 4 ) );
   }
   return tmp;
}
/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/
/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestPPC32_get_CR ( /*IN*/const VexGuestPPC32State* vex_state )
{
#  define FIELD(_n)                                    \
      ( ( (UInt)                                       \
           ( (vex_state->guest_CR##_n##_321 & (7<<1))  \
             | (vex_state->guest_CR##_n##_0 & 1)       \
           )                                           \
        )                                              \
        << (4 * (7-(_n)))                              \
      )

   return
      FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);

#  undef FIELD
}
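/* For example, if guest_CR0_321 == 0x2 (the EQ bit of CR field 0) and all
   other CR guest fields are zero, FIELD(0) contributes 0x2 << 28, so the
   function returns 0x20000000.  This matches the layout that the put
   functions below decode with cr_native >> (4*(7-(_n))). */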
/* VISIBLE TO LIBVEX CLIENT */
/* Note: %CR is 32 bits even for ppc64 */
UInt LibVEX_GuestPPC64_get_CR ( /*IN*/const VexGuestPPC64State* vex_state )
{
#  define FIELD(_n)                                    \
      ( ( (UInt)                                       \
           ( (vex_state->guest_CR##_n##_321 & (7<<1))  \
             | (vex_state->guest_CR##_n##_0 & 1)       \
           )                                           \
        )                                              \
        << (4 * (7-(_n)))                              \
      )

   return
      FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);

#  undef FIELD
}
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC32State* vex_state )
{
   UInt t;

#  define FIELD(_n)                                            \
      do {                                                     \
         t = cr_native >> (4*(7-(_n)));                        \
         vex_state->guest_CR##_n##_0   = toUChar(t & 1);       \
         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1));  \
      } while (0)

   FIELD(0); FIELD(1); FIELD(2); FIELD(3);
   FIELD(4); FIELD(5); FIELD(6); FIELD(7);

#  undef FIELD
}
/* VISIBLE TO LIBVEX CLIENT */
/* Note: %CR is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_CR ( UInt cr_native,
                                /*OUT*/VexGuestPPC64State* vex_state )
{
   UInt t;

#  define FIELD(_n)                                            \
      do {                                                     \
         t = cr_native >> (4*(7-(_n)));                        \
         vex_state->guest_CR##_n##_0   = toUChar(t & 1);       \
         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1));  \
      } while (0)

   FIELD(0); FIELD(1); FIELD(2); FIELD(3);
   FIELD(4); FIELD(5); FIELD(6); FIELD(7);

#  undef FIELD
}
/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestPPC32_get_XER ( /*IN*/const VexGuestPPC32State* vex_state )
{
   UInt w = 0;
   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
   w |= ( (((UInt)vex_state->guest_XER_OV32) & 0x1) << 19 );
   w |= ( (((UInt)vex_state->guest_XER_CA32) & 0x1) << 18 );
   return w;
}
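/* For example, a guest state with XER_SO == 1, XER_CA == 1 and
   XER_BC == 2 (all other fields zero) is assembled into
   0x80000000 | 0x20000000 | 0x2 == 0xA0000002, i.e. SO in bit 31,
   CA in bit 29 and the byte count in the low 8 bits. */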
/* VISIBLE TO LIBVEX CLIENT */
/* Note: %XER is 32 bits even for ppc64 */
UInt LibVEX_GuestPPC64_get_XER ( /*IN*/const VexGuestPPC64State* vex_state )
{
   UInt w = 0;
   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
   w |= ( (((UInt)vex_state->guest_XER_OV32) & 0x1) << 19 );
   w |= ( (((UInt)vex_state->guest_XER_CA32) & 0x1) << 18 );
   return w;
}
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC32State* vex_state )
{
   vex_state->guest_XER_BC   = toUChar(xer_native & 0xFF);
   vex_state->guest_XER_SO   = toUChar((xer_native >> 31) & 0x1);
   vex_state->guest_XER_OV   = toUChar((xer_native >> 30) & 0x1);
   vex_state->guest_XER_CA   = toUChar((xer_native >> 29) & 0x1);
   vex_state->guest_XER_OV32 = toUChar((xer_native >> 19) & 0x1);
   vex_state->guest_XER_CA32 = toUChar((xer_native >> 18) & 0x1);
}
/* VISIBLE TO LIBVEX CLIENT */
/* Note: %XER is 32 bits even for ppc64 */
void LibVEX_GuestPPC64_put_XER ( UInt xer_native,
                                 /*OUT*/VexGuestPPC64State* vex_state )
{
   vex_state->guest_XER_BC   = toUChar(xer_native & 0xFF);
   vex_state->guest_XER_SO   = toUChar((xer_native >> 31) & 0x1);
   vex_state->guest_XER_OV   = toUChar((xer_native >> 30) & 0x1);
   vex_state->guest_XER_CA   = toUChar((xer_native >> 29) & 0x1);
   vex_state->guest_XER_OV32 = toUChar((xer_native >> 19) & 0x1);
   vex_state->guest_XER_CA32 = toUChar((xer_native >> 18) & 0x1);
}
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state )
{
   Int i;

   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;

   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0);
   VECZERO(vex_state->guest_VSR1);
   VECZERO(vex_state->guest_VSR2);
   VECZERO(vex_state->guest_VSR3);
   VECZERO(vex_state->guest_VSR4);
   VECZERO(vex_state->guest_VSR5);
   VECZERO(vex_state->guest_VSR6);
   VECZERO(vex_state->guest_VSR7);
   VECZERO(vex_state->guest_VSR8);
   VECZERO(vex_state->guest_VSR9);
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   vex_state->guest_CIA  = 0;
   vex_state->guest_LR   = 0;
   vex_state->guest_CTR  = 0;

   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_XER_OV32 = 0;
   vex_state->guest_XER_CA32 = 0;

   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->guest_C_FPCC   = 0;

   vex_state->guest_VRSAVE = 0;

#  if defined(VGP_ppc64be_linux)
   /* By default, the HW for BE sets the VSCR[NJ] bit to 1.
      VSR is a 128-bit register, NJ bit is bit 111 (IBM numbering).
      However, VSCR is modeled as a 64-bit register. */
   vex_state->guest_VSCR = 0x1 << (127 - 111);
#  else
   /* LE API requires NJ be set to 0. */
   vex_state->guest_VSCR = 0x0;
#  endif

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR      = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC32_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   vex_state->guest_PPR  = 0x4ULL << 50;  // medium priority
   vex_state->guest_PSPB = 0x100;  // an arbitrary non-zero value to start with

   vex_state->padding1 = 0;
   /* vex_state->padding2 = 0;  currently not used */
}
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state )
{
   Int i;

   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;

   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0);
   VECZERO(vex_state->guest_VSR1);
   VECZERO(vex_state->guest_VSR2);
   VECZERO(vex_state->guest_VSR3);
   VECZERO(vex_state->guest_VSR4);
   VECZERO(vex_state->guest_VSR5);
   VECZERO(vex_state->guest_VSR6);
   VECZERO(vex_state->guest_VSR7);
   VECZERO(vex_state->guest_VSR8);
   VECZERO(vex_state->guest_VSR9);
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   vex_state->guest_CIA  = 0;
   vex_state->guest_LR   = 0;
   vex_state->guest_CTR  = 0;

   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->guest_C_FPCC   = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

#  if defined(VGP_ppc64be_linux)
   /* By default, the HW for BE sets the VSCR[NJ] bit to 1.
      VSR is a 128-bit register, NJ bit is bit 111 (IBM numbering).
      However, VSCR is modeled as a 64-bit register. */
   vex_state->guest_VSCR = 0x1 << (127 - 111);
#  else
   /* LE API requires NJ be set to 0. */
   vex_state->guest_VSCR = 0x0;
#  endif

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->padding = 0;

   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR      = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC64_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   vex_state->guest_TFHAR  = 0;
   vex_state->guest_TFIAR  = 0;
   vex_state->guest_TEXASR = 0;
   vex_state->guest_PPR  = 0x4ULL << 50;  // medium priority
   vex_state->guest_PSPB = 0x100;  // an arbitrary non-zero value to start with
   vex_state->guest_DSCR = 0;
}
/*-----------------------------------------------------------*/
/*--- Describing the ppc guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/
/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   By default we enforce precise exns for guest R1 (stack pointer),
   CIA (current insn address) and LR (link register).  These are the
   minimum needed to extract correct stack backtraces from ppc
   code.  [[NB: not sure if keeping LR up to date is actually
   necessary.]]

   Only R1 is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_ppc32_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int lr_min  = offsetof(VexGuestPPC32State, guest_LR);
   Int lr_max  = lr_min + 4 - 1;
   Int r1_min  = offsetof(VexGuestPPC32State, guest_GPR1);
   Int r1_max  = r1_min + 4 - 1;
   Int cia_min = offsetof(VexGuestPPC32State, guest_CIA);
   Int cia_max = cia_min + 4 - 1;

   if (maxoff < r1_min || minoff > r1_max) {
      /* no overlap with R1 */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  // We only need to check the stack pointer.
   } else {
      return True;
   }

   if (maxoff < lr_min || minoff > lr_max) {
      /* no overlap with LR */
   } else {
      return True;
   }

   if (maxoff < cia_min || minoff > cia_max) {
      /* no overlap with CIA */
   } else {
      return True;
   }

   return False;
}
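/* For example, a guest-state write covering only guest_GPR2 of the 32-bit
   state falls outside all three [min,max] ranges above, so the function
   returns False (no precise exceptions needed); a write that touches any
   byte of guest_GPR1 overlaps the R1 range and returns True. */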
Bool guest_ppc64_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   /* Given that R2 is a Big Deal in the ELF ppc64 ABI, it seems
      prudent to be conservative with it, even though thus far there
      is no evidence to suggest that it actually needs to be kept up
      to date wrt possible exceptions. */
   Int lr_min  = offsetof(VexGuestPPC64State, guest_LR);
   Int lr_max  = lr_min + 8 - 1;
   Int r1_min  = offsetof(VexGuestPPC64State, guest_GPR1);
   Int r1_max  = r1_min + 8 - 1;
   Int r2_min  = offsetof(VexGuestPPC64State, guest_GPR2);
   Int r2_max  = r2_min + 8 - 1;
   Int cia_min = offsetof(VexGuestPPC64State, guest_CIA);
   Int cia_max = cia_min + 8 - 1;

   if (maxoff < r1_min || minoff > r1_max) {
      /* no overlap with R1 */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  // We only need to check the stack pointer.
   } else {
      return True;
   }

   if (maxoff < lr_min || minoff > lr_max) {
      /* no overlap with LR */
   } else {
      return True;
   }

   if (maxoff < r2_min || minoff > r2_max) {
      /* no overlap with R2 */
   } else {
      return True;
   }

   if (maxoff < cia_min || minoff > cia_max) {
      /* no overlap with CIA */
   } else {
      return True;
   }

   return False;
}
#define ALWAYSDEFD32(field)                            \
    { offsetof(VexGuestPPC32State, field),             \
      (sizeof ((VexGuestPPC32State*)0)->field) }
VexGuestLayout
   ppc32guest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestPPC32State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestPPC32State,guest_GPR1),
          .sizeof_SP = 4,

          /* Describe the frame pointer. */
          .offset_FP = offsetof(VexGuestPPC32State,guest_GPR1),
          .sizeof_FP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestPPC32State,guest_CIA),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 12,

          .alwaysDefd
          = { /*  0 */ ALWAYSDEFD32(guest_CIA),
              /*  1 */ ALWAYSDEFD32(guest_EMNOTE),
              /*  2 */ ALWAYSDEFD32(guest_CMSTART),
              /*  3 */ ALWAYSDEFD32(guest_CMLEN),
              /*  4 */ ALWAYSDEFD32(guest_VSCR),
              /*  5 */ ALWAYSDEFD32(guest_FPROUND),
              /*  6 */ ALWAYSDEFD32(guest_NRADDR),
              /*  7 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
              /*  8 */ ALWAYSDEFD32(guest_REDIR_SP),
              /*  9 */ ALWAYSDEFD32(guest_REDIR_STACK),
              /* 10 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL),
              /* 11 */ ALWAYSDEFD32(guest_C_FPCC)
            }
        };
#define ALWAYSDEFD64(field)                            \
    { offsetof(VexGuestPPC64State, field),             \
      (sizeof ((VexGuestPPC64State*)0)->field) }
VexGuestLayout
   ppc64guest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestPPC64State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestPPC64State,guest_GPR1),
          .sizeof_SP = 8,

          /* Describe the frame pointer. */
          .offset_FP = offsetof(VexGuestPPC64State,guest_GPR1),
          .sizeof_FP = 8,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestPPC64State,guest_CIA),
          .sizeof_IP = 8,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 12,

          .alwaysDefd
          = { /*  0 */ ALWAYSDEFD64(guest_CIA),
              /*  1 */ ALWAYSDEFD64(guest_EMNOTE),
              /*  2 */ ALWAYSDEFD64(guest_CMSTART),
              /*  3 */ ALWAYSDEFD64(guest_CMLEN),
              /*  4 */ ALWAYSDEFD64(guest_VSCR),
              /*  5 */ ALWAYSDEFD64(guest_FPROUND),
              /*  6 */ ALWAYSDEFD64(guest_NRADDR),
              /*  7 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
              /*  8 */ ALWAYSDEFD64(guest_REDIR_SP),
              /*  9 */ ALWAYSDEFD64(guest_REDIR_STACK),
              /* 10 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL),
              /* 11 */ ALWAYSDEFD64(guest_C_FPCC)
            }
        };
/*---------------------------------------------------------------*/
/*--- end                                  guest_ppc_helpers.c ---*/
/*---------------------------------------------------------------*/