/*---------------------------------------------------------------*/
/*--- begin                               guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_arm.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_arm_defs.h"
#include "guest_arm64_defs.h"   /* for crypto helper functions */
/* This file contains helper functions for arm guest code.  Calls to
   these functions are generated by the back end.  These calls are of
   course in the host machine code and this file will be compiled to
   host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest-arm/toIR.c.
*/


/* Set to 1 to get detailed profiling info about individual N, Z, C
   and V flag evaluation. */
#define PROFILE_NZCV_FLAGS 0
#if PROFILE_NZCV_FLAGS

static UInt tab_n_eval[ARMG_CC_OP_NUMBER];
static UInt tab_z_eval[ARMG_CC_OP_NUMBER];
static UInt tab_c_eval[ARMG_CC_OP_NUMBER];
static UInt tab_v_eval[ARMG_CC_OP_NUMBER];
static UInt initted   = 0;
static UInt tot_evals = 0;

static void initCounts ( void )
{
   UInt i;
   for (i = 0; i < ARMG_CC_OP_NUMBER; i++) {
      tab_n_eval[i] = tab_z_eval[i] = tab_c_eval[i] = tab_v_eval[i] = 0;
   }
   initted = 1;
}

static void showCounts ( void )
{
   UInt i;
   vex_printf("\n                 N          Z          C          V\n");
   vex_printf(  "---------------------------------------------------\n");
   for (i = 0; i < ARMG_CC_OP_NUMBER; i++) {
      vex_printf("CC_OP=%d  %9d %9d %9d %9d\n",
                 i,
                 tab_n_eval[i], tab_z_eval[i],
                 tab_c_eval[i], tab_v_eval[i] );
   }
}

#define NOTE_N_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_n_eval)
#define NOTE_Z_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_z_eval)
#define NOTE_C_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_c_eval)
#define NOTE_V_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_v_eval)

#define NOTE_EVAL(_cc_op, _tab) \
   do { \
      if (!initted) initCounts(); \
      vassert( ((UInt)(_cc_op)) < ARMG_CC_OP_NUMBER); \
      _tab[(UInt)(_cc_op)]++; \
      tot_evals++; \
      if (0 == (tot_evals & 0xFFFFF)) \
         showCounts(); \
   } while (0)

#endif /* PROFILE_NZCV_FLAGS */
/* Calculate the N flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
static
UInt armg_calculate_flag_n ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_N_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt nf   = (cc_dep1 >> ARMG_CC_SHIFT_N) & 1;
         return nf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resHi32 = cc_dep2;
         UInt nf      = resHi32 >> 31;
         return nf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_n"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flags_n");
   }
}
/* Calculate the Z flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
static
UInt armg_calculate_flag_z ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_Z_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt zf   = (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1;
         return zf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resLo32 = cc_dep1;
         UInt resHi32 = cc_dep2;
         UInt zf      = (resHi32|resLo32) == 0;
         return zf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flags_z"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flags_z");
   }
}
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the C flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
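/* Note on the ADD/ADC and SUB/SBB cases below: the carry out of an
   addition is detected purely from 32-bit wraparound -- with oldC == 0
   the sum wraps iff res < argL, and with oldC == 1 it wraps iff
   res <= argL (for example argL = 0xFFFFFFFF, argR = 0, oldC = 1 gives
   res = 0, hence C = 1).  For SUB/SBB, C is the ARM "no borrow"
   indication, i.e. argL >=u argR, adjusted by oldC in the SBB case. */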
UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_C_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt cf   = (cc_dep1 >> ARMG_CC_SHIFT_C) & 1;
         return cf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt cf   = res < argL;
         return cf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt cf   = argL >= argR;
         return cf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt cf   = oldC ? (res <= argL) : (res < argL);
         return cf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt cf   = oldC ? (argL >= argR) : (argL > argR);
         return cf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt shco = cc_dep2;
         vassert((shco & ~1) == 0);
         UInt cf   = shco;
         return cf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt oldC = (cc_dep3 >> 1) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt cf   = oldC;
         return cf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt oldC = (cc_dep3 >> 1) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt cf   = oldC;
         return cf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_c"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_c");
   }
}
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the V flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
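/* Note on the ADD/ADC cases below: ((res ^ argL) & (res ^ argR)) >> 31
   is 1 exactly when argL and argR have the same sign but res has the
   opposite sign, which is the signed-overflow condition; for example
   0x7FFFFFFF + 1 = 0x80000000 sets V.  The SUB/SBB variant
   ((argL ^ argR) & (argL ^ res)) >> 31 is the analogous test for
   subtraction. */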
UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_V_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt vf   = (cc_dep1 >> ARMG_CC_SHIFT_V) & 1;
         return vf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt vf   = ((res ^ argL) & (res ^ argR)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt vf   = ((argL ^ argR) & (argL ^ res)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt vf   = ((res ^ argL) & (res ^ argR)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt vf   = ((argL ^ argR) & (argL ^ res)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt oldV = cc_dep3;
         vassert((oldV & ~1) == 0);
         UInt vf   = oldV;
         return vf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt oldV = (cc_dep3 >> 0) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt vf   = oldV;
         return vf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt oldV = (cc_dep3 >> 0) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt vf   = oldV;
         return vf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_v"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_v");
   }
}
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate NZCV from the supplied thunk components, in the positions
   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
   Returned bits 27:0 are zero. */
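/* For example, a SUB thunk with argL == argR produces Z=1, C=1 (no
   borrow), N=0, V=0, so this returns 0x60000000, given the
   ARMG_CC_SHIFT_* positions of 31 (N), 30 (Z), 29 (C) and 28 (V)
   stated above. */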
UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
                                 UInt cc_dep2, UInt cc_dep3 )
{
   UInt f;
   UInt res = 0;
   f = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_N);
   f = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_Z);
   f = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_C);
   f = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_V);
   return res;
}
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the QC flag from the arguments, in the lowest bit
   of the word (bit 0).  Urr, having this out of line is bizarre.
   Push back inline. */
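/* (The call sites in guest_arm_toIR.c appear to pass the saturated
   result in resL1/resL2 and the corresponding unsaturated result in
   resR1/resR2; any difference then means saturation occurred, so QC
   must be set.) */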
UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
                              UInt resR1, UInt resR2 )
{
   if (resL1 != resR1 || resL2 != resR2)
      return 1;
   else
      return 0;
}
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the specified condition from the thunk components, in the
   lowest bit of the word (bit 0).  Returned bits 31:1 are zero. */
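/* For example, cond_n_op == (ARMCondEQ << 4) | ARMG_CC_OP_SUB asks
   whether a preceding SUB/CMP produced a zero result, and the answer
   comes back in bit 0. */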
UInt armg_calculate_condition ( UInt cond_n_op /* (ARMCondcode << 4) | cc_op */,
                                UInt cc_dep1,
                                UInt cc_dep2, UInt cc_dep3 )
{
   UInt cond  = cond_n_op >> 4;
   UInt cc_op = cond_n_op & 0xF;
   UInt nf, zf, vf, cf, inv;
   //   vex_printf("XXXXXXXX %x %x %x %x\n",
   //              cond_n_op, cc_dep1, cc_dep2, cc_dep3);

   // skip flags computation in this case
   if (cond == ARMCondAL) return 1;

   inv = cond & 1;

   switch (cond) {
      case ARMCondEQ:    // Z=1         => z
      case ARMCondNE:    // Z=0
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ zf;

      case ARMCondHS:    // C=1         => c
      case ARMCondLO:    // C=0
         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ cf;

      case ARMCondMI:    // N=1         => n
      case ARMCondPL:    // N=0
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ nf;

      case ARMCondVS:    // V=1         => v
      case ARMCondVC:    // V=0
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ vf;

      case ARMCondHI:    // C=1 && Z=0   => c & ~z
      case ARMCondLS:    // C=0 || Z=1
         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & (cf & ~zf));

      case ARMCondGE:    // N=V          => ~(n^v)
      case ARMCondLT:    // N!=V
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & ~(nf ^ vf));

      case ARMCondGT:    // Z=0 && N=V   => ~z & ~(n^v)  =>  ~(z | (n^v))
      case ARMCondLE:    // Z=1 || N!=V
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & ~(zf | (nf ^ vf)));

      case ARMCondAL: // handled above
      case ARMCondNV: // should never get here: Illegal instr
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_condition(ARM)"
                    "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
                    cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_condition(ARM)");
   }
}
/*---------------------------------------------------------------*/
/*--- Crypto instruction helpers                              ---*/
/*---------------------------------------------------------------*/

/* DIRTY HELPERS for doing AES support:
   * AESE (SubBytes, then ShiftRows)
   * AESD (InvShiftRows, then InvSubBytes)
   * AESMC (MixColumns)
   * AESIMC (InvMixColumns)
   These don't actually have to be dirty helpers -- they could be
   clean, but for the fact that they return a V128 and a clean helper
   can't do that.

   These just call onwards to the implementations of the same in
   guest_arm64_helpers.c.  In all of these cases, we expect |res| to
   be at least 8 aligned.
*/
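/* In each helper below, arg32_0 holds bits 31:0 of the 128-bit input
   and arg32_3 holds bits 127:96; the pairs are repacked into the
   argHi:argLo 64-bit halves that the arm64 implementations take. */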
/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_AESE (
        /*OUT*/V128* res,
        UInt arg32_3, UInt arg32_2, UInt arg32_1, UInt arg32_0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argHi = (((ULong)arg32_3) << 32) | ((ULong)arg32_2);
   ULong argLo = (((ULong)arg32_1) << 32) | ((ULong)arg32_0);
   arm64g_dirtyhelper_AESE(res, argHi, argLo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_AESD (
        /*OUT*/V128* res,
        UInt arg32_3, UInt arg32_2, UInt arg32_1, UInt arg32_0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argHi = (((ULong)arg32_3) << 32) | ((ULong)arg32_2);
   ULong argLo = (((ULong)arg32_1) << 32) | ((ULong)arg32_0);
   arm64g_dirtyhelper_AESD(res, argHi, argLo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_AESMC (
        /*OUT*/V128* res,
        UInt arg32_3, UInt arg32_2, UInt arg32_1, UInt arg32_0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argHi = (((ULong)arg32_3) << 32) | ((ULong)arg32_2);
   ULong argLo = (((ULong)arg32_1) << 32) | ((ULong)arg32_0);
   arm64g_dirtyhelper_AESMC(res, argHi, argLo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_AESIMC (
        /*OUT*/V128* res,
        UInt arg32_3, UInt arg32_2, UInt arg32_1, UInt arg32_0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argHi = (((ULong)arg32_3) << 32) | ((ULong)arg32_2);
   ULong argLo = (((ULong)arg32_1) << 32) | ((ULong)arg32_0);
   arm64g_dirtyhelper_AESIMC(res, argHi, argLo);
}
/* DIRTY HELPERS for the SHA instruction family.  Same comments
   as for the AES group above apply.
*/
/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA1C (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA1C(res, argDhi, argDlo,
                            argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA1P (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA1P(res, argDhi, argDlo,
                            argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA1M (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA1M(res, argDhi, argDlo,
                            argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA1SU0 (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA1SU0(res, argDhi, argDlo,
                              argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA256H (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA256H(res, argDhi, argDlo,
                              argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA256H2 (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA256H2(res, argDhi, argDlo,
                               argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA256SU1 (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argN3, UInt argN2, UInt argN1, UInt argN0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argNhi = (((ULong)argN3) << 32) | ((ULong)argN2);
   ULong argNlo = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA256SU1(res, argDhi, argDlo,
                                argNhi, argNlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA1SU1 (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA1SU1(res, argDhi, argDlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA256SU0 (
        /*OUT*/V128* res,
        UInt argD3, UInt argD2, UInt argD1, UInt argD0,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argDhi = (((ULong)argD3) << 32) | ((ULong)argD2);
   ULong argDlo = (((ULong)argD1) << 32) | ((ULong)argD0);
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA256SU0(res, argDhi, argDlo, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_SHA1H (
        /*OUT*/V128* res,
        UInt argM3, UInt argM2, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argMhi = (((ULong)argM3) << 32) | ((ULong)argM2);
   ULong argMlo = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_SHA1H(res, argMhi, argMlo);
}

/* CALLED FROM GENERATED CODE */
void armg_dirtyhelper_VMULLP64 (
        /*OUT*/V128* res,
        UInt argN1, UInt argN0, UInt argM1, UInt argM0
     )
{
   vassert(0 == (((HWord)res) & (8-1)));
   ULong argN = (((ULong)argN1) << 32) | ((ULong)argN0);
   ULong argM = (((ULong)argM1) << 32) | ((ULong)argM0);
   arm64g_dirtyhelper_PMULLQ(res, argN, argM);
}
/*---------------------------------------------------------------*/
/*--- Flag-helpers translation-time function specialisers.    ---*/
/*--- These help iropt specialise calls the above run-time    ---*/
/*--- flags functions.                                        ---*/
/*---------------------------------------------------------------*/

/* Used by the optimiser to try specialisations.  Returns an
   equivalent expression, or NULL if none. */
static Bool isU32 ( IRExpr* e, UInt n )
{
   return
      toBool( e->tag == Iex_Const
              && e->Iex.Const.con->tag == Ico_U32
              && e->Iex.Const.con->Ico.U32 == n );
}
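/* isU32(e, n) is true iff |e| is the literal 32-bit constant |n|; the
   specialiser below uses it to recognise calls whose (condition, cc_op)
   selector or cc_op argument is a known constant. */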
IRExpr* guest_arm_spechelper ( const HChar* function_name,
                               IRExpr** args,
                               IRStmt** precedingStmts,
                               Int      n_precedingStmts )
{
#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))

   Int i, arity = 0;
   for (i = 0; args[i]; i++)
      arity++;
#  if 0
   vex_printf("spec request:\n");
   vex_printf("   %s  ", function_name);
   for (i = 0; i < arity; i++) {
      vex_printf("  ");
      ppIRExpr(args[i]);
   }
   vex_printf("\n");
#  endif
   /* --------- specialising "armg_calculate_condition" --------- */

   if (vex_streq(function_name, "armg_calculate_condition")) {

      /* specialise calls to the "armg_calculate_condition" function.
         Not sure whether this is strictly necessary, but: the
         replacement IR must produce only the values 0 or 1.  Bits
         31:1 are required to be zero. */
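      /* As a concrete example, the first rule below rewrites
            armg_calculate_condition((ARMCondEQ << 4) | ARMG_CC_OP_SUB,
                                     argL, argR, unused)
         into 1Uto32(CmpEQ32(argL, argR)). */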
      IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cond_n_op = args[0]; /* (ARMCondcode << 4)  |  ARMG_CC_OP_* */
      cc_dep1   = args[1];
      cc_dep2   = args[2];
      cc_ndep   = args[3];
      /*---------------- SUB ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
         /* EQ after SUB --> test argL == argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
         /* NE after SUB --> test argL != argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_SUB)) {
         /* GT after SUB --> test argL >s argR
                         --> test argR <s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep2, cc_dep1));
      }
      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
         /* LE after SUB --> test argL <=s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
         /* LT after SUB --> test argL <s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
         /* GE after SUB --> test argL >=s argR
                         --> test argR <=s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
      }

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
         /* HS after SUB --> test argL >=u argR
                         --> test argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }
      if (isU32(cond_n_op, (ARMCondLO << 4) | ARMG_CC_OP_SUB)) {
         /* LO after SUB --> test argL <u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
         /* LS after SUB --> test argL <=u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
      }
      if (isU32(cond_n_op, (ARMCondHI << 4) | ARMG_CC_OP_SUB)) {
         /* HI after SUB --> test argL >u argR
                         --> test argR <u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32U, cc_dep2, cc_dep1));
      }
      /*---------------- SBB ----------------*/

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SBB)) {
         /* This seems to happen a lot in softfloat code, eg __divdf3+140 */
         /* thunk is:  (dep1=argL, dep2=argR, ndep=oldC) */
         /* HS after SBB (same as C after SBB below)
            --> oldC ? (argL >=u argR) : (argL >u argR)
            --> oldC ? (argR <=u argL) : (argR <u argL)
         */
         return
            IRExpr_ITE(
               binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
               /* case oldC != 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
               /* case oldC == 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
            );
      }
      /*---------------- LOGIC ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
         /* EQ after LOGIC --> test res == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
         /* NE after LOGIC --> test res != 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
      }

      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_LOGIC)) {
         /* PL after LOGIC --> test (res >> 31) == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32,
                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
                           mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_LOGIC)) {
         /* MI after LOGIC --> test (res >> 31) == 1 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32,
                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
                           mkU32(1)));
      }
      /*---------------- COPY ----------------*/

      /* --- 0,1 --- */
      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_COPY)) {
         /* EQ after COPY --> (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Shr32, cc_dep1,
                                       mkU8(ARMG_CC_SHIFT_Z)),
                      mkU32(1));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_COPY)) {
         /* NE after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_Z) ^ 1) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Xor32,
                            binop(Iop_Shr32, cc_dep1,
                                             mkU8(ARMG_CC_SHIFT_Z)),
                            mkU32(1)),
                      mkU32(1));
      }

      /* --- 4,5 --- */
      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_COPY)) {
         /* MI after COPY --> (cc_dep1 >> ARMG_CC_SHIFT_N) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Shr32, cc_dep1,
                                       mkU8(ARMG_CC_SHIFT_N)),
                      mkU32(1));
      }
      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_COPY)) {
         /* PL after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_N) ^ 1) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Xor32,
                            binop(Iop_Shr32, cc_dep1,
                                             mkU8(ARMG_CC_SHIFT_N)),
                            mkU32(1)),
                      mkU32(1));
      }

      /* --- 12,13 --- */
      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_COPY)) {
         /* GT after COPY --> ((z | (n^v)) & 1) ^ 1 */
         IRExpr* n = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_N));
         IRExpr* v = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_V));
         IRExpr* z = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_Z));
         return binop(Iop_Xor32,
                      binop(Iop_And32,
                            binop(Iop_Or32, z, binop(Iop_Xor32, n, v)),
                            mkU32(1)),
                      mkU32(1));
      }
      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_COPY)) {
         /* LE after COPY --> ((z | (n^v)) & 1) ^ 0 */
         IRExpr* n = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_N));
         IRExpr* v = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_V));
         IRExpr* z = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_Z));
         return binop(Iop_Xor32,
                      binop(Iop_And32,
                            binop(Iop_Or32, z, binop(Iop_Xor32, n, v)),
                            mkU32(1)),
                      mkU32(0));
      }
      /*----------------- AL -----------------*/

      /* A critically important case for Thumb code.

         What we're trying to spot is the case where cond_n_op is an
         expression of the form Or32(..., 0xE0) since that means the
         caller is asking for CondAL and we can simply return 1
         without caring what the ... part is.  This is a potentially
         dodgy kludge in that it assumes that the ... part has zeroes
         in bits 7:4, so that the result of the Or32 is guaranteed to
         be 0xE in bits 7:4.  Given that the places where this first
         arg is constructed (in guest_arm_toIR.c) are very
         constrained, we can get away with this.  To make this
         guaranteed safe would require a new primop, Slice44
         or some such, thusly

         Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]

         and we would then look for Slice44(0xE0, ...)
         which would give the required safety property.

         It would be infeasibly expensive to scan backwards through
         the entire block looking for an assignment to the temp, so
         just look at the previous 16 statements.  That should find it
         if it is an interesting case, as a result of how the
         boilerplate guff at the start of each Thumb insn translation
         is made.
      */
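      /* Concretely, the loop below scans at most the 16 preceding
         statements for a binding of the form
            t<n> = Or32(<anything>, 0xE0:I32)
         where t<n> is the temp that cond_n_op reads, and if it finds
         one, folds the whole condition computation down to 1. */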
      if (cond_n_op->tag == Iex_RdTmp) {
         Int    j;
         IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
         Int    limit    = n_precedingStmts - 16;
         if (limit < 0) limit = 0;
         if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
         for (j = n_precedingStmts - 1; j >= limit; j--) {
            IRStmt* st = precedingStmts[j];
            if (st->tag == Ist_WrTmp
                && st->Ist.WrTmp.tmp == look_for
                && st->Ist.WrTmp.data->tag == Iex_Binop
                && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
                && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
               return mkU32(1);
         }
         /* Didn't find any useful binding to the first arg
            in the previous 16 stmts. */
      }
   }
   /* --------- specialising "armg_calculate_flag_c" --------- */

   else
   if (vex_streq(function_name, "armg_calculate_flag_c")) {

      /* specialise calls to the "armg_calculate_flag_c" function.
         Note that the returned value must be either 0 or 1; nonzero
         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
         values (from the thunk) are assumed to have bits 31:1
         clear. */
      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cc_op   = args[0]; /* ARMG_CC_OP_* */
      cc_dep1 = args[1];
      cc_dep2 = args[2];
      cc_ndep = args[3];

      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
         /* Thunk args are (result, shco, oldV) */
         /* C after LOGIC --> shco */
         return cc_dep2;
      }

      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
         /* Thunk args are (argL, argR, unused) */
         /* C after SUB --> argL >=u argR
                        --> argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }

      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
         /* This happens occasionally in softfloat code, eg __divdf3+140 */
         /* thunk is:  (dep1=argL, dep2=argR, ndep=oldC) */
         /* C after SBB (same as HS after SBB above)
            --> oldC ? (argL >=u argR) : (argL >u argR)
            --> oldC ? (argR <=u argL) : (argR <u argL)
         */
         return
            IRExpr_ITE(
               binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
               /* case oldC != 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
               /* case oldC == 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
            );
      }

   }
   /* --------- specialising "armg_calculate_flag_v" --------- */

   else
   if (vex_streq(function_name, "armg_calculate_flag_v")) {

      /* specialise calls to the "armg_calculate_flag_v" function.
         Note that the returned value must be either 0 or 1; nonzero
         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
         values (from the thunk) are assumed to have bits 31:1
         clear. */
      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cc_op   = args[0]; /* ARMG_CC_OP_* */
      cc_dep1 = args[1];
      cc_dep2 = args[2];
      cc_ndep = args[3];

      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
         /* Thunk args are (result, shco, oldV) */
         /* V after LOGIC --> oldV */
         return cc_ndep;
      }

      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
         /* Thunk args are (argL, argR, unused) */
         /* V after SUB
            --> let res = argL - argR
                in ((argL ^ argR) & (argL ^ res)) >> 31
            --> ((argL ^ argR) & (argL ^ (argL - argR))) >> 31
         */
         IRExpr* argL = cc_dep1;
         IRExpr* argR = cc_dep2;
         return
            binop(Iop_Shr32,
                  binop(Iop_And32,
                        binop(Iop_Xor32, argL, argR),
                        binop(Iop_Xor32, argL, binop(Iop_Sub32, argL, argR))
                  ),
                  mkU8(31)
            );
      }

      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
         /* This happens occasionally in softfloat code, eg __divdf3+140 */
         /* thunk is:  (dep1=argL, dep2=argR, ndep=oldC) */
         /* V after SBB
            --> let res = argL - argR - (oldC ^ 1)
                in  (argL ^ argR) & (argL ^ res) & 1
         */
         return
            binop(
               Iop_And32,
               binop(
                  Iop_And32,
                  // argL ^ argR
                  binop(Iop_Xor32, cc_dep1, cc_dep2),
                  // argL ^ (argL - argR - (oldC ^ 1))
                  binop(Iop_Xor32,
                        cc_dep1,
                        binop(Iop_Sub32,
                              binop(Iop_Sub32, cc_dep1, cc_dep2),
                              binop(Iop_Xor32, cc_ndep, mkU32(1)))
                  )
               ),
               mkU32(1)
            );
      }

   }
#  undef unop
#  undef binop
#  undef mkU32
#  undef mkU8

   return NULL;
}
/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/

/* VISIBLE TO LIBVEX CLIENT */
#if 0
void LibVEX_GuestARM_put_flags ( UInt flags_native,
                                 /*OUT*/VexGuestARMState* vex_state )
{
   vassert(0); // FIXME

   /* Mask out everything except N Z V C. */
   flags_native
      &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = flags_native;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
}
#endif
/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestARM_get_cpsr ( /*IN*/const VexGuestARMState* vex_state )
{
   UInt cpsr = 0;
   // NZCV
   cpsr |= armg_calculate_flags_nzcv(
               vex_state->guest_CC_OP,
               vex_state->guest_CC_DEP1,
               vex_state->guest_CC_DEP2,
               vex_state->guest_CC_NDEP
            );
   vassert(0 == (cpsr & 0x0FFFFFFF));
   // Q
   if (vex_state->guest_QFLAG32 > 0)
      cpsr |= (1 << 27);
   // GE
   if (vex_state->guest_GEFLAG0 > 0)
      cpsr |= (1 << 16);
   if (vex_state->guest_GEFLAG1 > 0)
      cpsr |= (1 << 17);
   if (vex_state->guest_GEFLAG2 > 0)
      cpsr |= (1 << 18);
   if (vex_state->guest_GEFLAG3 > 0)
      cpsr |= (1 << 19);
   // M
   cpsr |= (1 << 4); // 0b10000 means user-mode
   // J,T   J (bit 24) is zero by initialisation above
   // T  we copy from R15T[0]
   if (vex_state->guest_R15T & 1)
      cpsr |= (1 << 5);
   // ITSTATE we punt on for the time being.  Could compute it
   // if needed though.
   // E, endianness, 0 (littleendian) from initialisation above
   // A,I,F disable some async exceptions.  Not sure about these.
   // Leave as zero for the time being.
   return cpsr;
}
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
{
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;

   vex_state->guest_R0  = 0;
   vex_state->guest_R1  = 0;
   vex_state->guest_R2  = 0;
   vex_state->guest_R3  = 0;
   vex_state->guest_R4  = 0;
   vex_state->guest_R5  = 0;
   vex_state->guest_R6  = 0;
   vex_state->guest_R7  = 0;
   vex_state->guest_R8  = 0;
   vex_state->guest_R9  = 0;
   vex_state->guest_R10 = 0;
   vex_state->guest_R11 = 0;
   vex_state->guest_R12 = 0;
   vex_state->guest_R13 = 0;
   vex_state->guest_R14 = 0;
   vex_state->guest_R15T = 0;  /* NB: implies ARM mode */

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = 0;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
   vex_state->guest_QFLAG32 = 0;
   vex_state->guest_GEFLAG0 = 0;
   vex_state->guest_GEFLAG1 = 0;
   vex_state->guest_GEFLAG2 = 0;
   vex_state->guest_GEFLAG3 = 0;

   vex_state->guest_EMNOTE  = EmNote_NONE;
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;
   vex_state->guest_NRADDR  = 0;
   vex_state->guest_IP_AT_SYSCALL = 0;

   vex_state->guest_D0  = 0;
   vex_state->guest_D1  = 0;
   vex_state->guest_D2  = 0;
   vex_state->guest_D3  = 0;
   vex_state->guest_D4  = 0;
   vex_state->guest_D5  = 0;
   vex_state->guest_D6  = 0;
   vex_state->guest_D7  = 0;
   vex_state->guest_D8  = 0;
   vex_state->guest_D9  = 0;
   vex_state->guest_D10 = 0;
   vex_state->guest_D11 = 0;
   vex_state->guest_D12 = 0;
   vex_state->guest_D13 = 0;
   vex_state->guest_D14 = 0;
   vex_state->guest_D15 = 0;
   vex_state->guest_D16 = 0;
   vex_state->guest_D17 = 0;
   vex_state->guest_D18 = 0;
   vex_state->guest_D19 = 0;
   vex_state->guest_D20 = 0;
   vex_state->guest_D21 = 0;
   vex_state->guest_D22 = 0;
   vex_state->guest_D23 = 0;
   vex_state->guest_D24 = 0;
   vex_state->guest_D25 = 0;
   vex_state->guest_D26 = 0;
   vex_state->guest_D27 = 0;
   vex_state->guest_D28 = 0;
   vex_state->guest_D29 = 0;
   vex_state->guest_D30 = 0;
   vex_state->guest_D31 = 0;

   /* ARM encoded; zero is the default as it happens (result flags
      (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
      all exns masked, all exn sticky bits cleared). */
   vex_state->guest_FPSCR = 0;

   vex_state->guest_TPIDRURO = 0;
   vex_state->guest_TPIDRURW = 0;

   /* Not in a Thumb IT block. */
   vex_state->guest_ITSTATE = 0;
}
/*-----------------------------------------------------------*/
/*--- Describing the arm guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest R13(sp), R15T(pc), R7, R11.

   Only R13(sp) is needed in mode VexRegUpdSpAtMemAccess.
*/
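/* (minoff, maxoff) is a byte range within VexGuestARMState; the checks
   below simply test that range for overlap with the 4-byte slots of
   R13, R15T, R11 and R7. */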
Bool guest_arm_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestARMState, guest_R13);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestARMState, guest_R15T);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False; // We only need to check stack pointer.
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of R11 in order to get proper
      stacktraces from non-optimised code. */
   Int r11_min = offsetof(VexGuestARMState, guest_R11);
   Int r11_max = r11_min + 4 - 1;

   if (maxoff < r11_min || minoff > r11_max) {
      /* no overlap with r11 */
   } else {
      return True;
   }

   /* Ditto R7, particularly needed for proper stacktraces in Thumb
      code. */
   Int r7_min = offsetof(VexGuestARMState, guest_R7);
   Int r7_max = r7_min + 4 - 1;

   if (maxoff < r7_min || minoff > r7_max) {
      /* no overlap with r7 */
   } else {
      return True;
   }

   return False;
}
#define ALWAYSDEFD(field)                           \
    { offsetof(VexGuestARMState, field),            \
      (sizeof ((VexGuestARMState*)0)->field) }
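/* For instance, ALWAYSDEFD(guest_CC_OP) expands to the pair
   { offset of guest_CC_OP within VexGuestARMState, size of guest_CC_OP
   in bytes }, i.e. the byte range of that field in the guest state. */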
VexGuestLayout
   armGuest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestARMState),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestARMState,guest_R13),
          .sizeof_SP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestARMState,guest_R15T),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 10,

          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
             have to be tracked.  See detailed comment in gdefs.h on
             meaning of thunk fields. */
          .alwaysDefd
             = { /* 0 */ ALWAYSDEFD(guest_R15T),
                 /* 1 */ ALWAYSDEFD(guest_CC_OP),
                 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
                 /* 3 */ ALWAYSDEFD(guest_EMNOTE),
                 /* 4 */ ALWAYSDEFD(guest_CMSTART),
                 /* 5 */ ALWAYSDEFD(guest_CMLEN),
                 /* 6 */ ALWAYSDEFD(guest_NRADDR),
                 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
                 /* 8 */ ALWAYSDEFD(guest_TPIDRURO),
                 /* 9 */ ALWAYSDEFD(guest_ITSTATE)
               }
        };
/*---------------------------------------------------------------*/
/*--- end                                 guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/