/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "predict.h"
30 #include "tm_p.h"
31 #include "expmed.h"
32 #include "optabs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "rtx-vector-builder.h"
38 /* Include insn-config.h before expr.h so that HAVE_conditional_move
39 is properly defined. */
40 #include "stor-layout.h"
41 #include "except.h"
42 #include "dojump.h"
43 #include "explow.h"
44 #include "expr.h"
45 #include "optabs-tree.h"
46 #include "libfuncs.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
                rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
                > GET_MODE_UNIT_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
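
/* As an illustration of widened_mode: for a widening operation to SImode
   whose first operand is (const_int 100), and therefore VOIDmode, and whose
   second operand is a HImode register, the from_mode is HImode, taken from
   OP1 because OP0 carries no mode of its own.  */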
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or
     paradoxical SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
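
/* As an illustration of widen_operand: widening a QImode register to SImode
   for an AND with NO_EXTEND nonzero merely wraps the register in a
   (paradoxical) lowpart SUBREG, since the caller has promised to ignore the
   upper bits of the result.  For a right shift the upper bits do matter, so
   the caller passes NO_EXTEND zero and a real sign or zero extension is
   emitted instead.  */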
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
                                 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
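
/* As an illustration of expand_vector_broadcast: splatting a QImode value X
   into V16QImode yields a CONST_VECTOR when X is constant, uses the
   target's vec_duplicate pattern when one exists, and otherwise falls back
   on vec_init with a 16-element PARALLEL in which every element is X.  */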
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab, outof_input,
                                 gen_int_shift_amount (word_mode,
                                                       BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
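
/* As an illustration of expand_superword_shift: for a 64-bit left shift on
   a 32-bit little-endian target whose count is known to be >= 32, the high
   result word is just the low input word shifted left by SUPERWORD_OP1
   (typically the count minus BITS_PER_WORD) and the low result word is
   zero.  Only an arithmetic right shift needs more work: its out-of half
   is filled with copies of the sign bit via a shift by BITS_PER_WORD - 1.  */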
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
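
/* As an illustration of expand_subword_shift, take a 64-bit left shift by
   N (0 < N < 32) on a 32-bit little-endian target.  The function computes

     INTO_TARGET  (high word) = (INTO_INPUT << N) | (OUTOF_INPUT >> (32 - N))
     OUTOF_TARGET (low word)  = OUTOF_INPUT << N

   where the carry term is built either from the single subtraction 32 - N
   when that count is safe to use, or from a shift by 1 followed by a shift
   by 31 - N (equivalently ~N when counts are truncated to the mode size),
   so that no word shift by exactly BITS_PER_WORD is ever emitted.  */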
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label,
                           profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
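
/* A concrete case of the comparison set up above: on a target whose word
   shifts truncate the count to 5 bits (SHIFT_MASK == 31), a variable
   64-bit shift tests (OP1 & 32) == 0 to choose the subword path and can
   reuse OP1 itself as SUPERWORD_OP1, because a word shift by OP1 >= 32
   then behaves exactly like a shift by OP1 - 32.  Without that guarantee,
   the routine computes OP1 - 32 explicitly and selects between the two
   paths with conditional moves or branches on its sign.  */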
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                         _______________________
                        [__op0_high_|__op0_low__]
                         _______________________
        *               [__op1_high_|__op1_low__]
        _______________________________________________
                         _______________________
    (1)                 [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
          _______________________
    (3)  [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
                : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
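
/* A worked instance of the sign adjustment above, scaled down to 4-bit
   words for readability: let op0_low = 0b1001 and op1_low = 0b0011.  As
   unsigned words these are 9 and 3, so the desired partial product (1) is
   27 (0x1b).  A signed widening multiply instead computes -7 * 3 = -21,
   i.e. 0xeb as an 8-bit value.  The shortfall is exactly
   op1_low * 2**BITS_PER_WORD = 3 * 16 = 48, as the comment predicts:
   0xeb + 48 == 0x11b == 0x1b (mod 2**8).  The code pays that correction by
   adding op0_low >> 3 (here 1) into op0_high before forming partial
   product (2b), which enters the final sum with weight 2**BITS_PER_WORD.  */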
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
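
/* Roughly: on a target where, say, a multiply cannot encode a wide constant
   as an immediate, rtx_cost of the constant as a multiplier exceeds the
   cost of loading it into a register, so the constant is forced into a
   register here, where CSE and hoisting can at least share the load.  */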
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
        {
          machine_mode from_mode = widened_mode (mode, op0, op1);
          icode = find_widening_optab_handler (binoptab, mode, from_mode);
        }
      else
        icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
        {
          temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
                                        target, unsignedp, methods, last);
          if (temp)
            return temp;
        }
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
        newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }
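
  /* For example, on a target that only provides a rotate-right pattern, an
     SImode rotl by 5 is retried above as a rotr by 27; a rotl by a variable
     N becomes a rotr by -N when shift counts are truncated modulo 32, and a
     rotr by 32 - N otherwise.  */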
  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
                                  ? umul_widen_optab
                                  : smul_widen_optab),
                                 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
         one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
        op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab
          && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
                  > GET_MODE_BITSIZE (inner_mode)))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
        machine_mode next_mode;
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 next_mode, mode)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, int_mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, int_mode),
                                operand_subword_force (op1, i, int_mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }
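
  /* E.g. a DImode IOR on a 32-bit target is emitted here as two independent
     SImode IORs, one per subword; no carries connect the halves, which is
     why only AND, IOR and XOR qualify for this loop.  */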
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
                  ? as_a <scalar_int_mode> (GET_MODE (op1))
                  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (int_mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, int_mode);
          into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

          outof_input = operand_subword_force (op0, outof_word, int_mode);
          into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is
         not a REG, first because having a register instead may open
         optimization opportunities, and second because if target and op0
         happen to be MEMs designating the same location, we would risk
         clobbering it too early in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          HOST_WIDE_INT first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = shift_count - BITS_PER_WORD;
              second_shift_count = 2 * BITS_PER_WORD - shift_count;
            }
          else
            {
              first_shift_count = BITS_PER_WORD - shift_count;
              second_shift_count = shift_count;
            }
          rtx first_shift_count_rtx
            = gen_int_shift_amount (word_mode, first_shift_count);
          rtx second_shift_count_rtx
            = gen_int_shift_amount (word_mode, second_shift_count);

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count_rtx,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count_rtx,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1,
                                  into_temp2, into_target, unsignedp,
                                  next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count_rtx,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count_rtx,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
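
  /* For example, a 64-bit rotl by 8 on a 32-bit little-endian target is
     built above as

       high' = (high << 8) | (low  >> 24)
       low'  = (low  << 8) | (high >> 24)

     while a rotate by exactly BITS_PER_WORD degenerates into swapping the
     two words outright.  */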
1603 /* These can be done a word at a time by propagating carries. */
1604 if ((binoptab == add_optab || binoptab == sub_optab)
1605 && is_int_mode (mode, &int_mode)
1606 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1607 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1609 unsigned int i;
1610 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1611 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1612 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1613 rtx xop0, xop1, xtarget;
1615 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1616 value is one of those, use it. Otherwise, use 1 since it is the
1617 one easiest to get. */
1618 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1619 int normalizep = STORE_FLAG_VALUE;
1620 #else
1621 int normalizep = 1;
1622 #endif
1624 /* Prepare the operands. */
1625 xop0 = force_reg (int_mode, op0);
1626 xop1 = force_reg (int_mode, op1);
1628 xtarget = gen_reg_rtx (int_mode);
1630 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1631 target = xtarget;
1633 /* Indicate for flow that the entire target reg is being set. */
1634 if (REG_P (target))
1635 emit_clobber (xtarget);
1637 /* Do the actual arithmetic. */
1638 for (i = 0; i < nwords; i++)
1640 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1641 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1642 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1643 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1644 rtx x;
1646 /* Main add/subtract of the input operands. */
1647 x = expand_binop (word_mode, binoptab,
1648 op0_piece, op1_piece,
1649 target_piece, unsignedp, next_methods);
1650 if (x == 0)
1651 break;
1653 if (i + 1 < nwords)
1655 /* Store carry from main add/subtract. */
1656 carry_out = gen_reg_rtx (word_mode);
1657 carry_out = emit_store_flag_force (carry_out,
1658 (binoptab == add_optab
1659 ? LT : GT),
1660 x, op0_piece,
1661 word_mode, 1, normalizep);
1664 if (i > 0)
1666 rtx newx;
1668 /* Add/subtract previous carry to main result. */
1669 newx = expand_binop (word_mode,
1670 normalizep == 1 ? binoptab : otheroptab,
1671 x, carry_in,
1672 NULL_RTX, 1, next_methods);
1674 if (i + 1 < nwords)
1676 /* Get out carry from adding/subtracting carry in. */
1677 rtx carry_tmp = gen_reg_rtx (word_mode);
1678 carry_tmp = emit_store_flag_force (carry_tmp,
1679 (binoptab == add_optab
1680 ? LT : GT),
1681 newx, x,
1682 word_mode, 1, normalizep);
1684 /* Logical-ior the two poss. carry together. */
1685 carry_out = expand_binop (word_mode, ior_optab,
1686 carry_out, carry_tmp,
1687 carry_out, 0, next_methods);
1688 if (carry_out == 0)
1689 break;
1691 emit_move_insn (target_piece, newx);
1693 else
1695 if (x != target_piece)
1696 emit_move_insn (target_piece, x);
1699 carry_in = carry_out;
1702 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
1704 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
1705 || ! rtx_equal_p (target, xtarget))
1707 rtx_insn *temp = emit_move_insn (target, xtarget);
1709 set_dst_reg_note (temp, REG_EQUAL,
1710 gen_rtx_fmt_ee (optab_to_code (binoptab),
1711 int_mode, copy_rtx (xop0),
1712 copy_rtx (xop1)),
1713 target);
1715 else
1716 target = xtarget;
1718 return target;
1721 else
1722 delete_insns_since (last);
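/* Illustration (not part of the compiler) of the carry detection used
   above, assuming 32-bit words.  The LT comparison emitted via
   emit_store_flag_force (with unsignedp set) is exactly the unsigned
   wrap-around test below:

     void add2 (const uint32_t a[2], const uint32_t b[2], uint32_t r[2])
     {
       r[0] = a[0] + b[0];
       uint32_t carry = r[0] < a[0];   // carry out iff the word sum wrapped
       r[1] = a[1] + b[1] + carry;
     }

   For subtraction, the GT test plays the same role for the borrow.  */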
1725 /* Attempt to synthesize double word multiplies using a sequence of word
1726 mode multiplications. We first attempt to generate a sequence using a
1727 more efficient unsigned widening multiply, and if that fails we then
1728 try using a signed widening multiply. */
1730 if (binoptab == smul_optab
1731 && is_int_mode (mode, &int_mode)
1732 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1733 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1734 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1736 rtx product = NULL_RTX;
1737 if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
1738 != CODE_FOR_nothing)
1740 product = expand_doubleword_mult (int_mode, op0, op1, target,
1741 true, methods);
1742 if (!product)
1743 delete_insns_since (last);
1746 if (product == NULL_RTX
1747 && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
1748 != CODE_FOR_nothing))
1750 product = expand_doubleword_mult (int_mode, op0, op1, target,
1751 false, methods);
1752 if (!product)
1753 delete_insns_since (last);
1756 if (product != NULL_RTX)
1758 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
1760 rtx_insn *move = emit_move_insn (target ? target : product,
1761 product);
1762 set_dst_reg_note (move,
1763 REG_EQUAL,
1764 gen_rtx_fmt_ee (MULT, int_mode,
1765 copy_rtx (op0),
1766 copy_rtx (op1)),
1767 target ? target : product);
1769 return product;
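/* Illustration (hypothetical helper, not GCC code) of the identity
   behind expand_doubleword_mult, assuming 32-bit words.  Only the low
   halves of the cross products survive reduction mod 2**64:

     uint64_t mul2 (uint32_t lo0, uint32_t hi0, uint32_t lo1, uint32_t hi1)
     {
       uint64_t low = (uint64_t) lo0 * lo1;     // one widening multiply
       uint32_t cross = hi0 * lo1 + lo0 * hi1;  // word-mode multiplies
       return low + ((uint64_t) cross << 32);   // fold in the high part
     }

   The code above merely tries the unsigned widening multiply first and
   falls back to the signed one.  */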
1773 /* It can't be open-coded in this mode.
1774 Use a library call if one is available and caller says that's ok. */
1776 libfunc = optab_libfunc (binoptab, mode);
1777 if (libfunc
1778 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1780 rtx_insn *insns;
1781 rtx op1x = op1;
1782 machine_mode op1_mode = mode;
1783 rtx value;
1785 start_sequence ();
1787 if (shift_optab_p (binoptab))
1789 op1_mode = targetm.libgcc_shift_count_mode ();
1790 /* Specify unsigned here,
1791 since negative shift counts are meaningless. */
1792 op1x = convert_to_mode (op1_mode, op1, 1);
1795 if (GET_MODE (op0) != VOIDmode
1796 && GET_MODE (op0) != mode)
1797 op0 = convert_to_mode (mode, op0, unsignedp);
1799 /* Pass 1 for NO_QUEUE so we don't lose any increments
1800 if the libcall is cse'd or moved. */
1801 value = emit_library_call_value (libfunc,
1802 NULL_RTX, LCT_CONST, mode,
1803 op0, mode, op1x, op1_mode);
1805 insns = get_insns ();
1806 end_sequence ();
1808 bool trapv = trapv_binoptab_p (binoptab);
1809 target = gen_reg_rtx (mode);
1810 emit_libcall_block_1 (insns, target, value,
1811 trapv ? NULL_RTX
1812 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1813 mode, op0, op1), trapv);
1815 return target;
1818 delete_insns_since (last);
1820 /* It can't be done in this mode. Can we do it in a wider mode? */
1822 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1823 || methods == OPTAB_MUST_WIDEN))
1825 /* Caller says don't even try. */
1826 delete_insns_since (entry_last);
1827 return 0;
1830 /* Compute the value of METHODS to pass to recursive calls.
1831 Don't allow widening to be tried recursively. */
1833 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1835 /* Look for a wider mode of the same class for which it appears we can do
1836 the operation. */
1838 if (CLASS_HAS_WIDER_MODES_P (mclass))
1840 /* This code doesn't make sense for conversion optabs, since we
1841 wouldn't then want to extend the operands to be the same size
1842 as the result. */
1843 gcc_assert (!convert_optab_p (binoptab));
1844 FOR_EACH_WIDER_MODE (wider_mode, mode)
1846 if (optab_handler (binoptab, wider_mode)
1847 || (methods == OPTAB_LIB
1848 && optab_libfunc (binoptab, wider_mode)))
1850 rtx xop0 = op0, xop1 = op1;
1851 int no_extend = 0;
1853 /* For certain integer operations, we need not actually extend
1854 the narrow operands, as long as we will truncate
1855 the results to the same narrowness. */
1857 if ((binoptab == ior_optab || binoptab == and_optab
1858 || binoptab == xor_optab
1859 || binoptab == add_optab || binoptab == sub_optab
1860 || binoptab == smul_optab || binoptab == ashl_optab)
1861 && mclass == MODE_INT)
1862 no_extend = 1;
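/* Illustrative aside: for the operations listed above, the low bits of
   the result depend only on the low bits of the operands.  E.g. with
   8-bit values computed in 32-bit arithmetic,

     (uint8_t) (x + y) == (uint8_t) ((x & 0xff) + (y & 0xff))

   for any uint32_t x and y, so no real extension is needed.  The shift
   count is the exception handled just below.  */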
1864 xop0 = widen_operand (xop0, wider_mode, mode,
1865 unsignedp, no_extend);
1867 /* The second operand of a shift must always be extended. */
1868 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1869 no_extend && binoptab != ashl_optab);
1871 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1872 unsignedp, methods);
1873 if (temp)
1875 if (mclass != MODE_INT
1876 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1878 if (target == 0)
1879 target = gen_reg_rtx (mode);
1880 convert_move (target, temp, 0);
1881 return target;
1883 else
1884 return gen_lowpart (mode, temp);
1886 else
1887 delete_insns_since (last);
1892 delete_insns_since (entry_last);
1893 return 0;
1896 /* Expand a binary operator which has both signed and unsigned forms.
1897 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1898 signed operations.
1900 If we widen unsigned operands, we may use a signed wider operation instead
1901 of an unsigned wider operation, since the result would be the same. */
1904 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1905 rtx op0, rtx op1, rtx target, int unsignedp,
1906 enum optab_methods methods)
1908 rtx temp;
1909 optab direct_optab = unsignedp ? uoptab : soptab;
1910 bool save_enable;
1912 /* Do it without widening, if possible. */
1913 temp = expand_binop (mode, direct_optab, op0, op1, target,
1914 unsignedp, OPTAB_DIRECT);
1915 if (temp || methods == OPTAB_DIRECT)
1916 return temp;
1918 /* Try widening to a signed int. Disable any direct use of any
1919 signed insn in the current mode. */
1920 save_enable = swap_optab_enable (soptab, mode, false);
1922 temp = expand_binop (mode, soptab, op0, op1, target,
1923 unsignedp, OPTAB_WIDEN);
1925 /* For unsigned operands, try widening to an unsigned int. */
1926 if (!temp && unsignedp)
1927 temp = expand_binop (mode, uoptab, op0, op1, target,
1928 unsignedp, OPTAB_WIDEN);
1929 if (temp || methods == OPTAB_WIDEN)
1930 goto egress;
1932 /* Use the right width libcall if that exists. */
1933 temp = expand_binop (mode, direct_optab, op0, op1, target,
1934 unsignedp, OPTAB_LIB);
1935 if (temp || methods == OPTAB_LIB)
1936 goto egress;
1938 /* Must widen and use a libcall; use either signed or unsigned. */
1939 temp = expand_binop (mode, soptab, op0, op1, target,
1940 unsignedp, methods);
1941 if (!temp && unsignedp)
1942 temp = expand_binop (mode, uoptab, op0, op1, target,
1943 unsignedp, methods);
1945 egress:
1946 /* Undo the fiddling above. */
1947 if (save_enable)
1948 swap_optab_enable (soptab, mode, true);
1949 return temp;
1952 /* Generate code to perform an operation specified by UNOPPTAB
1953 on operand OP0, with two results to TARG0 and TARG1.
1954 We assume that the order of the operands for the instruction
1955 is TARG0, TARG1, OP0.
1957 Either TARG0 or TARG1 may be zero, but what that means is that
1958 the result is not actually wanted. We will generate it into
1959 a dummy pseudo-reg and discard it. They may not both be zero.
1961 Returns 1 if this operation can be performed; 0 if not. */
1964 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1965 int unsignedp)
1967 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1968 enum mode_class mclass;
1969 machine_mode wider_mode;
1970 rtx_insn *entry_last = get_last_insn ();
1971 rtx_insn *last;
1973 mclass = GET_MODE_CLASS (mode);
1975 if (!targ0)
1976 targ0 = gen_reg_rtx (mode);
1977 if (!targ1)
1978 targ1 = gen_reg_rtx (mode);
1980 /* Record where to go back to if we fail. */
1981 last = get_last_insn ();
1983 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1985 struct expand_operand ops[3];
1986 enum insn_code icode = optab_handler (unoptab, mode);
1988 create_fixed_operand (&ops[0], targ0);
1989 create_fixed_operand (&ops[1], targ1);
1990 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1991 if (maybe_expand_insn (icode, 3, ops))
1992 return 1;
1995 /* It can't be done in this mode. Can we do it in a wider mode? */
1997 if (CLASS_HAS_WIDER_MODES_P (mclass))
1999 FOR_EACH_WIDER_MODE (wider_mode, mode)
2001 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2003 rtx t0 = gen_reg_rtx (wider_mode);
2004 rtx t1 = gen_reg_rtx (wider_mode);
2005 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2007 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2009 convert_move (targ0, t0, unsignedp);
2010 convert_move (targ1, t1, unsignedp);
2011 return 1;
2013 else
2014 delete_insns_since (last);
2019 delete_insns_since (entry_last);
2020 return 0;
2023 /* Generate code to perform an operation specified by BINOPTAB
2024 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2025 We assume that the order of the operands for the instruction
2026 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2027 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2029 Either TARG0 or TARG1 may be zero, but what that means is that
2030 the result is not actually wanted. We will generate it into
2031 a dummy pseudo-reg and discard it. They may not both be zero.
2033 Returns 1 if this operation can be performed; 0 if not. */
2036 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2037 int unsignedp)
2039 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2040 enum mode_class mclass;
2041 machine_mode wider_mode;
2042 rtx_insn *entry_last = get_last_insn ();
2043 rtx_insn *last;
2045 mclass = GET_MODE_CLASS (mode);
2047 if (!targ0)
2048 targ0 = gen_reg_rtx (mode);
2049 if (!targ1)
2050 targ1 = gen_reg_rtx (mode);
2052 /* Record where to go back to if we fail. */
2053 last = get_last_insn ();
2055 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2057 struct expand_operand ops[4];
2058 enum insn_code icode = optab_handler (binoptab, mode);
2059 machine_mode mode0 = insn_data[icode].operand[1].mode;
2060 machine_mode mode1 = insn_data[icode].operand[2].mode;
2061 rtx xop0 = op0, xop1 = op1;
2063 /* If we are optimizing, force expensive constants into a register. */
2064 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2065 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2067 create_fixed_operand (&ops[0], targ0);
2068 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2069 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2070 create_fixed_operand (&ops[3], targ1);
2071 if (maybe_expand_insn (icode, 4, ops))
2072 return 1;
2073 delete_insns_since (last);
2076 /* It can't be done in this mode. Can we do it in a wider mode? */
2078 if (CLASS_HAS_WIDER_MODES_P (mclass))
2080 FOR_EACH_WIDER_MODE (wider_mode, mode)
2082 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2084 rtx t0 = gen_reg_rtx (wider_mode);
2085 rtx t1 = gen_reg_rtx (wider_mode);
2086 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2087 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2089 if (expand_twoval_binop (binoptab, cop0, cop1,
2090 t0, t1, unsignedp))
2092 convert_move (targ0, t0, unsignedp);
2093 convert_move (targ1, t1, unsignedp);
2094 return 1;
2096 else
2097 delete_insns_since (last);
2102 delete_insns_since (entry_last);
2103 return 0;
2106 /* Expand the two-valued library call indicated by BINOPTAB, but
2107 preserve only one of the values. If TARG0 is non-NULL, the first
2108 value is placed into TARG0; otherwise the second value is placed
2109 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2110 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2111 This routine assumes that the value returned by the library call is
2112 as if the return value was of an integral mode twice as wide as the
2113 mode of OP0. Returns 1 if the call was successful. */
2115 bool
2116 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2117 rtx targ0, rtx targ1, enum rtx_code code)
2119 machine_mode mode;
2120 machine_mode libval_mode;
2121 rtx libval;
2122 rtx_insn *insns;
2123 rtx libfunc;
2125 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2126 gcc_assert (!targ0 != !targ1);
2128 mode = GET_MODE (op0);
2129 libfunc = optab_libfunc (binoptab, mode);
2130 if (!libfunc)
2131 return false;
2133 /* The value returned by the library function will have twice as
2134 many bits as the nominal MODE. */
2135 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2136 start_sequence ();
2137 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2138 libval_mode,
2139 op0, mode,
2140 op1, mode);
2141 /* Get the part of LIBVAL containing the value that we want. */
2142 libval = simplify_gen_subreg (mode, libval, libval_mode,
2143 targ0 ? 0 : GET_MODE_SIZE (mode));
2144 insns = get_insns ();
2145 end_sequence ();
2146 /* Move the result into the desired location. */
2147 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2148 gen_rtx_fmt_ee (code, mode, op0, op1));
2150 return true;
2154 /* Wrapper around expand_unop which takes an rtx code to specify
2155 the operation to perform, not an optab pointer. All other
2156 arguments are the same. */
2158 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2159 rtx target, int unsignedp)
2161 optab unop = code_to_optab (code);
2162 gcc_assert (unop);
2164 return expand_unop (mode, unop, op0, target, unsignedp);
2167 /* Try calculating
2168 (clz:narrow x)
2170 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2172 A similar operation can be used for clrsb. UNOPTAB says which operation
2173 we are trying to expand. */
2174 static rtx
2175 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2177 opt_scalar_int_mode wider_mode_iter;
2178 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2180 scalar_int_mode wider_mode = wider_mode_iter.require ();
2181 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2183 rtx xop0, temp;
2184 rtx_insn *last;
2186 last = get_last_insn ();
2188 if (target == 0)
2189 target = gen_reg_rtx (mode);
2190 xop0 = widen_operand (op0, wider_mode, mode,
2191 unoptab != clrsb_optab, false);
2192 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2193 unoptab != clrsb_optab);
2194 if (temp != 0)
2195 temp = expand_binop
2196 (wider_mode, sub_optab, temp,
2197 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2198 - GET_MODE_PRECISION (mode),
2199 wider_mode),
2200 target, true, OPTAB_DIRECT);
2201 if (temp == 0)
2202 delete_insns_since (last);
2204 return temp;
2207 return 0;
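/* Worked example of the identity above (illustrative): an 8-bit clz via
   a 32-bit instruction is

     clz8 (x) == clz32 ((uint32_t) x) - (32 - 8)

   e.g. clz32 (0x00000010) == 27 and 27 - 24 == 3, the number of leading
   zeros of 0x10 in 8 bits.  For clrsb the operand is sign- rather than
   zero-extended, which is why widen_operand is passed
   unoptab != clrsb_optab as its unsignedp argument.  */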
2210 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2211 quantities, choosing which based on whether the high word is nonzero. */
2212 static rtx
2213 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2215 rtx xop0 = force_reg (mode, op0);
2216 rtx subhi = gen_highpart (word_mode, xop0);
2217 rtx sublo = gen_lowpart (word_mode, xop0);
2218 rtx_code_label *hi0_label = gen_label_rtx ();
2219 rtx_code_label *after_label = gen_label_rtx ();
2220 rtx_insn *seq;
2221 rtx temp, result;
2223 /* If we were not given a target, use a word_mode register, not a
2224 'mode' register. The result will fit, and nobody is expecting
2225 anything bigger (the return type of __builtin_clz* is int). */
2226 if (!target)
2227 target = gen_reg_rtx (word_mode);
2229 /* In any case, write to a word_mode scratch in both branches of the
2230 conditional, so we can ensure there is a single move insn setting
2231 'target' to tag a REG_EQUAL note on. */
2232 result = gen_reg_rtx (word_mode);
2234 start_sequence ();
2236 /* If the high word is not equal to zero,
2237 then clz of the full value is clz of the high word. */
2238 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2239 word_mode, true, hi0_label);
2241 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2242 if (!temp)
2243 goto fail;
2245 if (temp != result)
2246 convert_move (result, temp, true);
2248 emit_jump_insn (targetm.gen_jump (after_label));
2249 emit_barrier ();
2251 /* Else clz of the full value is clz of the low word plus the number
2252 of bits in the high word. */
2253 emit_label (hi0_label);
2255 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2256 if (!temp)
2257 goto fail;
2258 temp = expand_binop (word_mode, add_optab, temp,
2259 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2260 result, true, OPTAB_DIRECT);
2261 if (!temp)
2262 goto fail;
2263 if (temp != result)
2264 convert_move (result, temp, true);
2266 emit_label (after_label);
2267 convert_move (target, result, true);
2269 seq = get_insns ();
2270 end_sequence ();
2272 add_equal_note (seq, target, CLZ, xop0, 0);
2273 emit_insn (seq);
2274 return target;
2276 fail:
2277 end_sequence ();
2278 return 0;
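/* Sketch (illustrative only, assuming 32-bit words and a clz32
   primitive) of the branchy expansion built above:

     int clz64 (uint64_t x)   // x != 0; clz is undefined at zero
     {
       uint32_t hi = x >> 32;
       if (hi != 0)
         return clz32 (hi);                 // high word decides
       return clz32 ((uint32_t) x) + 32;    // all high bits are zero
     }
*/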
2281 /* Try calculating popcount of a double-word quantity as two popcount's of
2282 word-sized quantities and summing up the results. */
2283 static rtx
2284 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2286 rtx t0, t1, t;
2287 rtx_insn *seq;
2289 start_sequence ();
2291 t0 = expand_unop_direct (word_mode, popcount_optab,
2292 operand_subword_force (op0, 0, mode), NULL_RTX,
2293 true);
2294 t1 = expand_unop_direct (word_mode, popcount_optab,
2295 operand_subword_force (op0, 1, mode), NULL_RTX,
2296 true);
2297 if (!t0 || !t1)
2299 end_sequence ();
2300 return NULL_RTX;
2303 /* If we were not given a target, use a word_mode register, not a
2304 'mode' register. The result will fit, and nobody is expecting
2305 anything bigger (the return type of __builtin_popcount* is int). */
2306 if (!target)
2307 target = gen_reg_rtx (word_mode);
2309 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2311 seq = get_insns ();
2312 end_sequence ();
2314 add_equal_note (seq, t, POPCOUNT, op0, 0);
2315 emit_insn (seq);
2316 return t;
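/* The equivalent scalar identity (illustrative), assuming 32-bit words:

     popcount64 (x) == popcount32 ((uint32_t) x) + popcount32 (x >> 32)

   i.e. one word_mode popcount per word plus a single addition.  */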
2319 /* Try calculating
2320 (parity:wide x)
2322 (parity:narrow (low (x) ^ high (x))) */
2323 static rtx
2324 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2326 rtx t = expand_binop (word_mode, xor_optab,
2327 operand_subword_force (op0, 0, mode),
2328 operand_subword_force (op0, 1, mode),
2329 NULL_RTX, 0, OPTAB_DIRECT);
2330 return expand_unop (word_mode, parity_optab, t, target, true);
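/* Equivalently (illustrative), assuming 32-bit words:

     parity64 (x) == parity32 ((uint32_t) x ^ (uint32_t) (x >> 32))

   XOR preserves the overall bit parity while halving the width, so one
   narrower parity finishes the job.  */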
2333 /* Try calculating
2334 (bswap:narrow x)
2336 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2337 static rtx
2338 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2340 rtx x;
2341 rtx_insn *last;
2342 opt_scalar_int_mode wider_mode_iter;
2344 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2345 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2346 != CODE_FOR_nothing)
2347 break;
2349 if (!wider_mode_iter.exists ())
2350 return NULL_RTX;
2352 scalar_int_mode wider_mode = wider_mode_iter.require ();
2353 last = get_last_insn ();
2355 x = widen_operand (op0, wider_mode, mode, true, true);
2356 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2358 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2359 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2360 if (x != 0)
2361 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2362 GET_MODE_BITSIZE (wider_mode)
2363 - GET_MODE_BITSIZE (mode),
2364 NULL_RTX, true);
2366 if (x != 0)
2368 if (target == 0)
2369 target = gen_reg_rtx (mode);
2370 emit_move_insn (target, gen_lowpart (mode, x));
2372 else
2373 delete_insns_since (last);
2375 return target;
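/* Worked example of the shift correction above (illustrative): a 16-bit
   bswap via a 32-bit one,

     bswap32 (0x00001234) == 0x34120000
     0x34120000 >> (32 - 16) == 0x3412 == bswap16 (0x1234)

   The zero-extension done by widen_operand ensures the interesting bytes
   are the ones that land in the high part.  */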
2378 /* Try calculating bswap as two bswaps of two word-sized operands. */
2380 static rtx
2381 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2383 rtx t0, t1;
2385 t1 = expand_unop (word_mode, bswap_optab,
2386 operand_subword_force (op, 0, mode), NULL_RTX, true);
2387 t0 = expand_unop (word_mode, bswap_optab,
2388 operand_subword_force (op, 1, mode), NULL_RTX, true);
2390 if (target == 0 || !valid_multiword_target_p (target))
2391 target = gen_reg_rtx (mode);
2392 if (REG_P (target))
2393 emit_clobber (target);
2394 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2395 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2397 return target;
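/* Scalar equivalent (illustrative), assuming 32-bit words: byte-swap
   each word and exchange the two words,

     uint64_t bswap64 (uint64_t x)
     {
       uint64_t lo = bswap32 ((uint32_t) x);          // becomes high word
       uint64_t hi = bswap32 ((uint32_t) (x >> 32));  // becomes low word
       return (lo << 32) | hi;
     }

   matching the two emit_move_insn calls above, which write t0 and t1
   into opposite subwords of the target.  */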
2400 /* Try calculating (parity x) as (and (popcount x) 1), where
2401 popcount can also be done in a wider mode. */
2402 static rtx
2403 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2405 enum mode_class mclass = GET_MODE_CLASS (mode);
2406 opt_scalar_int_mode wider_mode_iter;
2407 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2409 scalar_int_mode wider_mode = wider_mode_iter.require ();
2410 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2412 rtx xop0, temp;
2413 rtx_insn *last;
2415 last = get_last_insn ();
2417 if (target == 0 || GET_MODE (target) != wider_mode)
2418 target = gen_reg_rtx (wider_mode);
2420 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2421 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2422 true);
2423 if (temp != 0)
2424 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2425 target, true, OPTAB_DIRECT);
2427 if (temp)
2429 if (mclass != MODE_INT
2430 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2431 return convert_to_mode (mode, temp, 0);
2432 else
2433 return gen_lowpart (mode, temp);
2435 else
2436 delete_insns_since (last);
2439 return 0;
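/* Worked example (illustrative): x == 0b1011 has popcount 3, and
   3 & 1 == 1, the parity of x.  The AND with const1_rtx above is
   exactly the "% 2".  */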
2442 /* Try calculating ctz(x) as K - clz(x & -x) ,
2443 where K is GET_MODE_PRECISION(mode) - 1.
2445 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2446 don't have to worry about what the hardware does in that case. (If
2447 the clz instruction produces the usual value at 0, which is K, the
2448 result of this code sequence will be -1; expand_ffs, below, relies
2449 on this. It might be nice to have it be K instead, for consistency
2450 with the (very few) processors that provide a ctz with a defined
2451 value, but that would take one more instruction, and it would be
2452 less convenient for expand_ffs anyway.) */
2454 static rtx
2455 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2457 rtx_insn *seq;
2458 rtx temp;
2460 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2461 return 0;
2463 start_sequence ();
2465 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2466 if (temp)
2467 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2468 true, OPTAB_DIRECT);
2469 if (temp)
2470 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2471 if (temp)
2472 temp = expand_binop (mode, sub_optab,
2473 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2474 temp, target,
2475 true, OPTAB_DIRECT);
2476 if (temp == 0)
2478 end_sequence ();
2479 return 0;
2482 seq = get_insns ();
2483 end_sequence ();
2485 add_equal_note (seq, temp, CTZ, op0, 0);
2486 emit_insn (seq);
2487 return temp;
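/* Worked example (illustrative) in a 32-bit mode, so K == 31: for
   x == 0x28 (binary 101000),

     x & -x == 0x8           // isolate the lowest set bit
     clz32 (0x8) == 28
     31 - 28 == 3 == ctz32 (0x28)

   A minimal C rendition of the sequence, assuming a clz32 primitive:

     int ctz32 (uint32_t x)   // x != 0
     {
       return 31 - clz32 (x & -x);
     }
*/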
2491 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2492 else with the sequence used by expand_clz.
2494 The ffs builtin promises to return zero for a zero value and ctz/clz
2495 may have an undefined value in that case. If they do not give us a
2496 convenient value, we have to generate a test and branch. */
2497 static rtx
2498 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2500 HOST_WIDE_INT val = 0;
2501 bool defined_at_zero = false;
2502 rtx temp;
2503 rtx_insn *seq;
2505 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2507 start_sequence ();
2509 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2510 if (!temp)
2511 goto fail;
2513 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2515 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2517 start_sequence ();
2518 temp = expand_ctz (mode, op0, 0);
2519 if (!temp)
2520 goto fail;
2522 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2524 defined_at_zero = true;
2525 val = (GET_MODE_PRECISION (mode) - 1) - val;
2528 else
2529 return 0;
2531 if (defined_at_zero && val == -1)
2532 /* No correction needed at zero. */;
2533 else
2535 /* We don't try to do anything clever with the situation found
2536 on some processors (e.g. Alpha) where ctz(0:mode) ==
2537 bitsize(mode). If someone can think of a way to send N to -1
2538 and leave alone all values in the range 0..N-1 (where N is a
2539 power of two), cheaper than this test-and-branch, please add it.
2541 The test-and-branch is done after the operation itself, in case
2542 the operation sets condition codes that can be recycled for this.
2543 (This is true on i386, for instance.) */
2545 rtx_code_label *nonzero_label = gen_label_rtx ();
2546 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2547 mode, true, nonzero_label);
2549 convert_move (temp, GEN_INT (-1), false);
2550 emit_label (nonzero_label);
2553 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2554 to produce a value in the range 0..bitsize. */
2555 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2556 target, false, OPTAB_DIRECT);
2557 if (!temp)
2558 goto fail;
2560 seq = get_insns ();
2561 end_sequence ();
2563 add_equal_note (seq, temp, FFS, op0, 0);
2564 emit_insn (seq);
2565 return temp;
2567 fail:
2568 end_sequence ();
2569 return 0;
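/* Net effect of the sequence above (illustrative sketch, assuming a
   ctz32 primitive):

     int ffs32 (uint32_t x)
     {
       int t = (x != 0) ? ctz32 (x) : -1;  // -1 so the final +1 yields 0
       return t + 1;
     }

   The test-and-branch that stores the -1 is skipped entirely when the
   hardware ctz/clz already yields the convenient value at zero.  */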
2572 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2573 conditions, VAL may already be a SUBREG against which we cannot generate
2574 a further SUBREG. In this case, we expect forcing the value into a
2575 register will work around the situation. */
2577 static rtx
2578 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2579 machine_mode imode)
2581 rtx ret;
2582 ret = lowpart_subreg (omode, val, imode);
2583 if (ret == NULL)
2585 val = force_reg (imode, val);
2586 ret = lowpart_subreg (omode, val, imode);
2587 gcc_assert (ret != NULL);
2589 return ret;
2592 /* Expand a floating point absolute value or negation operation via a
2593 logical operation on the sign bit. */
2595 static rtx
2596 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2597 rtx op0, rtx target)
2599 const struct real_format *fmt;
2600 int bitpos, word, nwords, i;
2601 scalar_int_mode imode;
2602 rtx temp;
2603 rtx_insn *insns;
2605 /* The format has to have a simple sign bit. */
2606 fmt = REAL_MODE_FORMAT (mode);
2607 if (fmt == NULL)
2608 return NULL_RTX;
2610 bitpos = fmt->signbit_rw;
2611 if (bitpos < 0)
2612 return NULL_RTX;
2614 /* Don't create negative zeros if the format doesn't support them. */
2615 if (code == NEG && !fmt->has_signed_zero)
2616 return NULL_RTX;
2618 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2620 if (!int_mode_for_mode (mode).exists (&imode))
2621 return NULL_RTX;
2622 word = 0;
2623 nwords = 1;
2625 else
2627 imode = word_mode;
2629 if (FLOAT_WORDS_BIG_ENDIAN)
2630 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2631 else
2632 word = bitpos / BITS_PER_WORD;
2633 bitpos = bitpos % BITS_PER_WORD;
2634 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2637 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2638 if (code == ABS)
2639 mask = ~mask;
2641 if (target == 0
2642 || target == op0
2643 || (nwords > 1 && !valid_multiword_target_p (target)))
2644 target = gen_reg_rtx (mode);
2646 if (nwords > 1)
2648 start_sequence ();
2650 for (i = 0; i < nwords; ++i)
2652 rtx targ_piece = operand_subword (target, i, 1, mode);
2653 rtx op0_piece = operand_subword_force (op0, i, mode);
2655 if (i == word)
2657 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2658 op0_piece,
2659 immed_wide_int_const (mask, imode),
2660 targ_piece, 1, OPTAB_LIB_WIDEN);
2661 if (temp != targ_piece)
2662 emit_move_insn (targ_piece, temp);
2664 else
2665 emit_move_insn (targ_piece, op0_piece);
2668 insns = get_insns ();
2669 end_sequence ();
2671 emit_insn (insns);
2673 else
2675 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2676 gen_lowpart (imode, op0),
2677 immed_wide_int_const (mask, imode),
2678 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2679 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2681 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2682 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2683 target);
2686 return target;
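/* Concrete instance (illustrative): for IEEE single precision viewed as
   a 32-bit integer, the mask built from signbit_rw is 0x80000000, so

     abs (x) == x & 0x7fffffff   // AND with ~mask
     neg (x) == x ^ 0x80000000   // XOR with mask

   provided the format keeps its sign in that one bit, which is what the
   fmt->signbit_rw check above establishes.  */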
2689 /* As expand_unop, but will fail rather than attempt the operation in a
2690 different mode or with a libcall. */
2691 static rtx
2692 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2693 int unsignedp)
2695 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2697 struct expand_operand ops[2];
2698 enum insn_code icode = optab_handler (unoptab, mode);
2699 rtx_insn *last = get_last_insn ();
2700 rtx_insn *pat;
2702 create_output_operand (&ops[0], target, mode);
2703 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2704 pat = maybe_gen_insn (icode, 2, ops);
2705 if (pat)
2707 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2708 && ! add_equal_note (pat, ops[0].value,
2709 optab_to_code (unoptab),
2710 ops[1].value, NULL_RTX))
2712 delete_insns_since (last);
2713 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2716 emit_insn (pat);
2718 return ops[0].value;
2721 return 0;
2724 /* Generate code to perform an operation specified by UNOPTAB
2725 on operand OP0, with result having machine-mode MODE.
2727 UNSIGNEDP is for the case where we have to widen the operands
2728 to perform the operation. It says to use zero-extension.
2730 If TARGET is nonzero, the value
2731 is generated there, if it is convenient to do so.
2732 In all cases an rtx is returned for the locus of the value;
2733 this may or may not be TARGET. */
2736 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2737 int unsignedp)
2739 enum mode_class mclass = GET_MODE_CLASS (mode);
2740 machine_mode wider_mode;
2741 scalar_int_mode int_mode;
2742 scalar_float_mode float_mode;
2743 rtx temp;
2744 rtx libfunc;
2746 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2747 if (temp)
2748 return temp;
2750 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2752 /* Widening (or narrowing) clz needs special treatment. */
2753 if (unoptab == clz_optab)
2755 if (is_a <scalar_int_mode> (mode, &int_mode))
2757 temp = widen_leading (int_mode, op0, target, unoptab);
2758 if (temp)
2759 return temp;
2761 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2762 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2764 temp = expand_doubleword_clz (int_mode, op0, target);
2765 if (temp)
2766 return temp;
2770 goto try_libcall;
2773 if (unoptab == clrsb_optab)
2775 if (is_a <scalar_int_mode> (mode, &int_mode))
2777 temp = widen_leading (int_mode, op0, target, unoptab);
2778 if (temp)
2779 return temp;
2781 goto try_libcall;
2784 if (unoptab == popcount_optab
2785 && is_a <scalar_int_mode> (mode, &int_mode)
2786 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2787 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2788 && optimize_insn_for_speed_p ())
2790 temp = expand_doubleword_popcount (int_mode, op0, target);
2791 if (temp)
2792 return temp;
2795 if (unoptab == parity_optab
2796 && is_a <scalar_int_mode> (mode, &int_mode)
2797 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2798 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2799 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2800 && optimize_insn_for_speed_p ())
2802 temp = expand_doubleword_parity (int_mode, op0, target);
2803 if (temp)
2804 return temp;
2807 /* Widening (or narrowing) bswap needs special treatment. */
2808 if (unoptab == bswap_optab)
2810 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2811 or ROTATERT. First try these directly; if this fails, then try the
2812 obvious pair of shifts with allowed widening, as this will probably
2813 always be more efficient than the other fallback methods. */
2814 if (mode == HImode)
2816 rtx_insn *last;
2817 rtx temp1, temp2;
2819 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2821 temp = expand_binop (mode, rotl_optab, op0,
2822 gen_int_shift_amount (mode, 8),
2823 target, unsignedp, OPTAB_DIRECT);
2824 if (temp)
2825 return temp;
2828 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2830 temp = expand_binop (mode, rotr_optab, op0,
2831 gen_int_shift_amount (mode, 8),
2832 target, unsignedp, OPTAB_DIRECT);
2833 if (temp)
2834 return temp;
2837 last = get_last_insn ();
2839 temp1 = expand_binop (mode, ashl_optab, op0,
2840 gen_int_shift_amount (mode, 8), NULL_RTX,
2841 unsignedp, OPTAB_WIDEN);
2842 temp2 = expand_binop (mode, lshr_optab, op0,
2843 gen_int_shift_amount (mode, 8), NULL_RTX,
2844 unsignedp, OPTAB_WIDEN);
2845 if (temp1 && temp2)
2847 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2848 unsignedp, OPTAB_WIDEN);
2849 if (temp)
2850 return temp;
2853 delete_insns_since (last);
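/* Worked example of the HImode equivalence (illustrative): in 16 bits,

     bswap (0x1234) == 0x3412
                    == rotl (0x1234, 8) == rotr (0x1234, 8)
                    == ((0x1234 << 8) | (0x1234 >> 8)) & 0xffff

   so a single rotate, or the shift pair tried above, reproduces it.  */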
2856 if (is_a <scalar_int_mode> (mode, &int_mode))
2858 temp = widen_bswap (int_mode, op0, target);
2859 if (temp)
2860 return temp;
2862 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2863 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2865 temp = expand_doubleword_bswap (mode, op0, target);
2866 if (temp)
2867 return temp;
2871 goto try_libcall;
2874 if (CLASS_HAS_WIDER_MODES_P (mclass))
2875 FOR_EACH_WIDER_MODE (wider_mode, mode)
2877 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2879 rtx xop0 = op0;
2880 rtx_insn *last = get_last_insn ();
2882 /* For certain operations, we need not actually extend
2883 the narrow operand, as long as we will truncate the
2884 results to the same narrowness. */
2886 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2887 (unoptab == neg_optab
2888 || unoptab == one_cmpl_optab)
2889 && mclass == MODE_INT);
2891 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2892 unsignedp);
2894 if (temp)
2896 if (mclass != MODE_INT
2897 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2899 if (target == 0)
2900 target = gen_reg_rtx (mode);
2901 convert_move (target, temp, 0);
2902 return target;
2904 else
2905 return gen_lowpart (mode, temp);
2907 else
2908 delete_insns_since (last);
2912 /* These can be done a word at a time. */
2913 if (unoptab == one_cmpl_optab
2914 && is_int_mode (mode, &int_mode)
2915 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
2916 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2918 int i;
2919 rtx_insn *insns;
2921 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2922 target = gen_reg_rtx (int_mode);
2924 start_sequence ();
2926 /* Do the actual arithmetic. */
2927 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
2929 rtx target_piece = operand_subword (target, i, 1, int_mode);
2930 rtx x = expand_unop (word_mode, unoptab,
2931 operand_subword_force (op0, i, int_mode),
2932 target_piece, unsignedp);
2934 if (target_piece != x)
2935 emit_move_insn (target_piece, x);
2938 insns = get_insns ();
2939 end_sequence ();
2941 emit_insn (insns);
2942 return target;
2945 if (optab_to_code (unoptab) == NEG)
2947 /* Try negating floating point values by flipping the sign bit. */
2948 if (is_a <scalar_float_mode> (mode, &float_mode))
2950 temp = expand_absneg_bit (NEG, float_mode, op0, target);
2951 if (temp)
2952 return temp;
2955 /* If there is no negation pattern, and we have no negative zero,
2956 try subtracting from zero. */
2957 if (!HONOR_SIGNED_ZEROS (mode))
2959 temp = expand_binop (mode, (unoptab == negv_optab
2960 ? subv_optab : sub_optab),
2961 CONST0_RTX (mode), op0, target,
2962 unsignedp, OPTAB_DIRECT);
2963 if (temp)
2964 return temp;
2968 /* Try calculating parity (x) as popcount (x) % 2. */
2969 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
2971 temp = expand_parity (int_mode, op0, target);
2972 if (temp)
2973 return temp;
2976 /* Try implementing ffs (x) in terms of clz (x). */
2977 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
2979 temp = expand_ffs (int_mode, op0, target);
2980 if (temp)
2981 return temp;
2984 /* Try implementing ctz (x) in terms of clz (x). */
2985 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
2987 temp = expand_ctz (int_mode, op0, target);
2988 if (temp)
2989 return temp;
2992 try_libcall:
2993 /* Now try a library call in this mode. */
2994 libfunc = optab_libfunc (unoptab, mode);
2995 if (libfunc)
2997 rtx_insn *insns;
2998 rtx value;
2999 rtx eq_value;
3000 machine_mode outmode = mode;
3002 /* All of these functions return small values. Thus we choose to
3003 have them return something that isn't a double-word. */
3004 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3005 || unoptab == clrsb_optab || unoptab == popcount_optab
3006 || unoptab == parity_optab)
3007 outmode
3008 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3009 optab_libfunc (unoptab, mode)));
3011 start_sequence ();
3013 /* Pass 1 for NO_QUEUE so we don't lose any increments
3014 if the libcall is cse'd or moved. */
3015 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3016 op0, mode);
3017 insns = get_insns ();
3018 end_sequence ();
3020 target = gen_reg_rtx (outmode);
3021 bool trapv = trapv_unoptab_p (unoptab);
3022 if (trapv)
3023 eq_value = NULL_RTX;
3024 else
3026 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3027 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3028 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3029 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3030 eq_value = simplify_gen_unary (ZERO_EXTEND,
3031 outmode, eq_value, mode);
3033 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3035 return target;
3038 /* It can't be done in this mode. Can we do it in a wider mode? */
3040 if (CLASS_HAS_WIDER_MODES_P (mclass))
3042 FOR_EACH_WIDER_MODE (wider_mode, mode)
3044 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3045 || optab_libfunc (unoptab, wider_mode))
3047 rtx xop0 = op0;
3048 rtx_insn *last = get_last_insn ();
3050 /* For certain operations, we need not actually extend
3051 the narrow operand, as long as we will truncate the
3052 results to the same narrowness. */
3053 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3054 (unoptab == neg_optab
3055 || unoptab == one_cmpl_optab
3056 || unoptab == bswap_optab)
3057 && mclass == MODE_INT);
3059 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3060 unsignedp);
3062 /* If we are generating clz using wider mode, adjust the
3063 result. Similarly for clrsb. */
3064 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3065 && temp != 0)
3067 scalar_int_mode wider_int_mode
3068 = as_a <scalar_int_mode> (wider_mode);
3069 int_mode = as_a <scalar_int_mode> (mode);
3070 temp = expand_binop
3071 (wider_mode, sub_optab, temp,
3072 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3073 - GET_MODE_PRECISION (int_mode),
3074 wider_int_mode),
3075 target, true, OPTAB_DIRECT);
3078 /* Likewise for bswap. */
3079 if (unoptab == bswap_optab && temp != 0)
3081 scalar_int_mode wider_int_mode
3082 = as_a <scalar_int_mode> (wider_mode);
3083 int_mode = as_a <scalar_int_mode> (mode);
3084 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3085 == GET_MODE_BITSIZE (wider_int_mode)
3086 && GET_MODE_PRECISION (int_mode)
3087 == GET_MODE_BITSIZE (int_mode));
3089 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3090 GET_MODE_BITSIZE (wider_int_mode)
3091 - GET_MODE_BITSIZE (int_mode),
3092 NULL_RTX, true);
3095 if (temp)
3097 if (mclass != MODE_INT)
3099 if (target == 0)
3100 target = gen_reg_rtx (mode);
3101 convert_move (target, temp, 0);
3102 return target;
3104 else
3105 return gen_lowpart (mode, temp);
3107 else
3108 delete_insns_since (last);
3113 /* One final attempt at implementing negation via subtraction,
3114 this time allowing widening of the operand. */
3115 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3117 rtx temp;
3118 temp = expand_binop (mode,
3119 unoptab == negv_optab ? subv_optab : sub_optab,
3120 CONST0_RTX (mode), op0,
3121 target, unsignedp, OPTAB_LIB_WIDEN);
3122 if (temp)
3123 return temp;
3126 return 0;
3129 /* Emit code to compute the absolute value of OP0, with result to
3130 TARGET if convenient. (TARGET may be 0.) The return value says
3131 where the result actually is to be found.
3133 MODE is the mode of the operand; the mode of the result is
3134 different but can be deduced from MODE.
3139 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3140 int result_unsignedp)
3142 rtx temp;
3144 if (GET_MODE_CLASS (mode) != MODE_INT
3145 || ! flag_trapv)
3146 result_unsignedp = 1;
3148 /* First try to do it with a special abs instruction. */
3149 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3150 op0, target, 0);
3151 if (temp != 0)
3152 return temp;
3154 /* For floating point modes, try clearing the sign bit. */
3155 scalar_float_mode float_mode;
3156 if (is_a <scalar_float_mode> (mode, &float_mode))
3158 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3159 if (temp)
3160 return temp;
3163 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3164 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3165 && !HONOR_SIGNED_ZEROS (mode))
3167 rtx_insn *last = get_last_insn ();
3169 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3170 op0, NULL_RTX, 0);
3171 if (temp != 0)
3172 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3173 OPTAB_WIDEN);
3175 if (temp != 0)
3176 return temp;
3178 delete_insns_since (last);
3181 /* If this machine has expensive jumps, we can do integer absolute
3182 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3183 where W is the width of MODE. */
3185 scalar_int_mode int_mode;
3186 if (is_int_mode (mode, &int_mode)
3187 && BRANCH_COST (optimize_insn_for_speed_p (),
3188 false) >= 2)
3190 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3191 GET_MODE_PRECISION (int_mode) - 1,
3192 NULL_RTX, 0);
3194 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3195 OPTAB_LIB_WIDEN);
3196 if (temp != 0)
3197 temp = expand_binop (int_mode,
3198 result_unsignedp ? sub_optab : subv_optab,
3199 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3201 if (temp != 0)
3202 return temp;
3205 return NULL_RTX;
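/* Worked example of the branchless form above (illustrative), W == 32:
   for x == -5, the arithmetic shift gives ext == (x >> 31) == -1, and

     (ext ^ x) - ext == (-1 ^ -5) - (-1) == 4 + 1 == 5 == abs (-5)

   while for x >= 0, ext == 0 and the expression collapses to x.  */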
3209 expand_abs (machine_mode mode, rtx op0, rtx target,
3210 int result_unsignedp, int safe)
3212 rtx temp;
3213 rtx_code_label *op1;
3215 if (GET_MODE_CLASS (mode) != MODE_INT
3216 || ! flag_trapv)
3217 result_unsignedp = 1;
3219 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3220 if (temp != 0)
3221 return temp;
3223 /* If that does not win, use conditional jump and negate. */
3225 /* It is safe to use the target if it is the same
3226 as the source and is also a pseudo register. */
3227 if (op0 == target && REG_P (op0)
3228 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3229 safe = 1;
3231 op1 = gen_label_rtx ();
3232 if (target == 0 || ! safe
3233 || GET_MODE (target) != mode
3234 || (MEM_P (target) && MEM_VOLATILE_P (target))
3235 || (REG_P (target)
3236 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3237 target = gen_reg_rtx (mode);
3239 emit_move_insn (target, op0);
3240 NO_DEFER_POP;
3242 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3243 NULL_RTX, NULL, op1,
3244 profile_probability::uninitialized ());
3246 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3247 target, target, 0);
3248 if (op0 != target)
3249 emit_move_insn (target, op0);
3250 emit_label (op1);
3251 OK_DEFER_POP;
3252 return target;
3255 /* Emit code to compute the one's complement absolute value of OP0
3256 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3257 (TARGET may be NULL_RTX.) The return value says where the result
3258 actually is to be found.
3260 MODE is the mode of the operand; the mode of the result is
3261 different but can be deduced from MODE. */
3264 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3266 rtx temp;
3268 /* Not applicable for floating point modes. */
3269 if (FLOAT_MODE_P (mode))
3270 return NULL_RTX;
3272 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3273 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3275 rtx_insn *last = get_last_insn ();
3277 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3278 if (temp != 0)
3279 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3280 OPTAB_WIDEN);
3282 if (temp != 0)
3283 return temp;
3285 delete_insns_since (last);
3288 /* If this machine has expensive jumps, we can do one's complement
3289 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3291 scalar_int_mode int_mode;
3292 if (is_int_mode (mode, &int_mode)
3293 && BRANCH_COST (optimize_insn_for_speed_p (),
3294 false) >= 2)
3296 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3297 GET_MODE_PRECISION (int_mode) - 1,
3298 NULL_RTX, 0);
3300 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3301 OPTAB_LIB_WIDEN);
3303 if (temp != 0)
3304 return temp;
3307 return NULL_RTX;
3310 /* A subroutine of expand_copysign, perform the copysign operation using the
3311 abs and neg primitives advertised to exist on the target. The assumption
3312 is that we have a split register file, and leaving op0 in fp registers,
3313 and not playing with subregs so much, will help the register allocator. */
3315 static rtx
3316 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3317 int bitpos, bool op0_is_abs)
3319 scalar_int_mode imode;
3320 enum insn_code icode;
3321 rtx sign;
3322 rtx_code_label *label;
3324 if (target == op1)
3325 target = NULL_RTX;
3327 /* Check if the back end provides an insn that handles signbit for the
3328 argument's mode. */
3329 icode = optab_handler (signbit_optab, mode);
3330 if (icode != CODE_FOR_nothing)
3332 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3333 sign = gen_reg_rtx (imode);
3334 emit_unop_insn (icode, sign, op1, UNKNOWN);
3336 else
3338 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3340 if (!int_mode_for_mode (mode).exists (&imode))
3341 return NULL_RTX;
3342 op1 = gen_lowpart (imode, op1);
3344 else
3346 int word;
3348 imode = word_mode;
3349 if (FLOAT_WORDS_BIG_ENDIAN)
3350 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3351 else
3352 word = bitpos / BITS_PER_WORD;
3353 bitpos = bitpos % BITS_PER_WORD;
3354 op1 = operand_subword_force (op1, word, mode);
3357 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3358 sign = expand_binop (imode, and_optab, op1,
3359 immed_wide_int_const (mask, imode),
3360 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3363 if (!op0_is_abs)
3365 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3366 if (op0 == NULL)
3367 return NULL_RTX;
3368 target = op0;
3370 else
3372 if (target == NULL_RTX)
3373 target = copy_to_reg (op0);
3374 else
3375 emit_move_insn (target, op0);
3378 label = gen_label_rtx ();
3379 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3381 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3382 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3383 else
3384 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3385 if (op0 != target)
3386 emit_move_insn (target, op0);
3388 emit_label (label);
3390 return target;
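/* Control-flow sketch (illustrative) of the strategy above:

     t = op0_is_abs ? op0 : fabs (op0);   // the abs primitive
     if (sign_of_op1 != 0)                // the extracted sign bit
       t = -t;                            // the neg primitive

   keeping op0 in floating-point registers throughout, per the comment
   at the head of this function.  */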
3394 /* A subroutine of expand_copysign, perform the entire copysign operation
3395 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3396 is true if op0 is known to have its sign bit clear. */
3398 static rtx
3399 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3400 int bitpos, bool op0_is_abs)
3402 scalar_int_mode imode;
3403 int word, nwords, i;
3404 rtx temp;
3405 rtx_insn *insns;
3407 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3409 if (!int_mode_for_mode (mode).exists (&imode))
3410 return NULL_RTX;
3411 word = 0;
3412 nwords = 1;
3414 else
3416 imode = word_mode;
3418 if (FLOAT_WORDS_BIG_ENDIAN)
3419 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3420 else
3421 word = bitpos / BITS_PER_WORD;
3422 bitpos = bitpos % BITS_PER_WORD;
3423 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3426 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3428 if (target == 0
3429 || target == op0
3430 || target == op1
3431 || (nwords > 1 && !valid_multiword_target_p (target)))
3432 target = gen_reg_rtx (mode);
3434 if (nwords > 1)
3436 start_sequence ();
3438 for (i = 0; i < nwords; ++i)
3440 rtx targ_piece = operand_subword (target, i, 1, mode);
3441 rtx op0_piece = operand_subword_force (op0, i, mode);
3443 if (i == word)
3445 if (!op0_is_abs)
3446 op0_piece
3447 = expand_binop (imode, and_optab, op0_piece,
3448 immed_wide_int_const (~mask, imode),
3449 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3450 op1 = expand_binop (imode, and_optab,
3451 operand_subword_force (op1, i, mode),
3452 immed_wide_int_const (mask, imode),
3453 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3455 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3456 targ_piece, 1, OPTAB_LIB_WIDEN);
3457 if (temp != targ_piece)
3458 emit_move_insn (targ_piece, temp);
3460 else
3461 emit_move_insn (targ_piece, op0_piece);
3464 insns = get_insns ();
3465 end_sequence ();
3467 emit_insn (insns);
3469 else
3471 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3472 immed_wide_int_const (mask, imode),
3473 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3475 op0 = gen_lowpart (imode, op0);
3476 if (!op0_is_abs)
3477 op0 = expand_binop (imode, and_optab, op0,
3478 immed_wide_int_const (~mask, imode),
3479 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3481 temp = expand_binop (imode, ior_optab, op0, op1,
3482 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3483 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3486 return target;
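/* Single-word instance (illustrative): for IEEE single precision in a
   32-bit integer view, with mask == 0x80000000,

     copysign (x, y) == (x & 0x7fffffff) | (y & 0x80000000)

   and the AND with ~mask on x is skipped when op0_is_abs says the sign
   bit is already clear.  */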
3489 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3490 scalar floating point mode. Return NULL if we do not know how to
3491 expand the operation inline. */
3494 expand_copysign (rtx op0, rtx op1, rtx target)
3496 scalar_float_mode mode;
3497 const struct real_format *fmt;
3498 bool op0_is_abs;
3499 rtx temp;
3501 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3502 gcc_assert (GET_MODE (op1) == mode);
3504 /* First try to do it with a special instruction. */
3505 temp = expand_binop (mode, copysign_optab, op0, op1,
3506 target, 0, OPTAB_DIRECT);
3507 if (temp)
3508 return temp;
3510 fmt = REAL_MODE_FORMAT (mode);
3511 if (fmt == NULL || !fmt->has_signed_zero)
3512 return NULL_RTX;
3514 op0_is_abs = false;
3515 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3517 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3518 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3519 op0_is_abs = true;
3522 if (fmt->signbit_ro >= 0
3523 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3524 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3525 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3527 temp = expand_copysign_absneg (mode, op0, op1, target,
3528 fmt->signbit_ro, op0_is_abs);
3529 if (temp)
3530 return temp;
3533 if (fmt->signbit_rw < 0)
3534 return NULL_RTX;
3535 return expand_copysign_bit (mode, op0, op1, target,
3536 fmt->signbit_rw, op0_is_abs);
3539 /* Generate an instruction whose insn-code is INSN_CODE,
3540 with two operands: an output TARGET and an input OP0.
3541 TARGET *must* be nonzero, and the output is always stored there.
3542 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3543 the value that is stored into TARGET.
3545 Return false if expansion failed. */
3547 bool
3548 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3549 enum rtx_code code)
3551 struct expand_operand ops[2];
3552 rtx_insn *pat;
3554 create_output_operand (&ops[0], target, GET_MODE (target));
3555 create_input_operand (&ops[1], op0, GET_MODE (op0));
3556 pat = maybe_gen_insn (icode, 2, ops);
3557 if (!pat)
3558 return false;
3560 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3561 && code != UNKNOWN)
3562 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3564 emit_insn (pat);
3566 if (ops[0].value != target)
3567 emit_move_insn (target, ops[0].value);
3568 return true;
3570 /* Generate an instruction whose insn-code is INSN_CODE,
3571 with two operands: an output TARGET and an input OP0.
3572 TARGET *must* be nonzero, and the output is always stored there.
3573 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3574 the value that is stored into TARGET. */
3576 void
3577 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3579 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3580 gcc_assert (ok);
3583 struct no_conflict_data
3585 rtx target;
3586 rtx_insn *first, *insn;
3587 bool must_stay;
3590 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3591 the currently examined clobber / store has to stay in the list of
3592 insns that constitute the actual libcall block. */
3593 static void
3594 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3596 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3598 /* If this insn directly contributes to setting the target, it must stay. */
3599 if (reg_overlap_mentioned_p (p->target, dest))
3600 p->must_stay = true;
3601 /* If we haven't committed to keeping any other insns in the list yet,
3602 there is nothing more to check. */
3603 else if (p->insn == p->first)
3604 return;
3605 /* If this insn sets / clobbers a register that feeds one of the insns
3606 already in the list, this insn has to stay too. */
3607 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3608 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3609 || reg_used_between_p (dest, p->first, p->insn)
3610 /* Likewise if this insn depends on a register set by a previous
3611 insn in the list, or if it sets a result (presumably a hard
3612 register) that is set or clobbered by a previous insn.
3613 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3614 SET_DEST perform the former check on the address, and the latter
3615 check on the MEM. */
3616 || (GET_CODE (set) == SET
3617 && (modified_in_p (SET_SRC (set), p->first)
3618 || modified_in_p (SET_DEST (set), p->first)
3619 || modified_between_p (SET_SRC (set), p->first, p->insn)
3620 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3621 p->must_stay = true;
3625 /* Emit code to make a call to a constant function or a library call.
3627 INSNS is a list containing all insns emitted in the call.
3628 These insns leave the result in RESULT. Our job is to copy RESULT
3629 to TARGET, which is logically equivalent to EQUIV.
3631 We first emit any insns that set a pseudo on the assumption that these are
3632 loading constants into registers; doing so allows them to be safely cse'ed
3633 between blocks. Then we emit all the other insns in the block, followed by
3634 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3635 note with an operand of EQUIV. */
3637 static void
3638 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3639 bool equiv_may_trap)
3641 rtx final_dest = target;
3642 rtx_insn *next, *last, *insn;
3644 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3645 into a MEM later. Protect the libcall block from this change. */
3646 if (! REG_P (target) || REG_USERVAR_P (target))
3647 target = gen_reg_rtx (GET_MODE (target));
3649 /* If we're using non-call exceptions, a libcall corresponding to an
3650 operation that may trap may also trap. */
3651 /* ??? See the comment in front of make_reg_eh_region_note. */
3652 if (cfun->can_throw_non_call_exceptions
3653 && (equiv_may_trap || may_trap_p (equiv)))
3655 for (insn = insns; insn; insn = NEXT_INSN (insn))
3656 if (CALL_P (insn))
3658 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3659 if (note)
3661 int lp_nr = INTVAL (XEXP (note, 0));
3662 if (lp_nr == 0 || lp_nr == INT_MIN)
3663 remove_note (insn, note);
3667 else
3669 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3670 reg note to indicate that this call cannot throw or execute a nonlocal
3671 goto (unless there is already a REG_EH_REGION note, in which case
3672 we update it). */
3673 for (insn = insns; insn; insn = NEXT_INSN (insn))
3674 if (CALL_P (insn))
3675 make_reg_eh_region_note_nothrow_nononlocal (insn);
3678 /* First emit all insns that set pseudos. Remove them from the list as
3679 we go. Avoid insns that set pseudos which were referenced in previous
3680 insns. These can be generated by move_by_pieces, for example,
3681 to update an address. Similarly, avoid insns that reference things
3682 set in previous insns. */
3684 for (insn = insns; insn; insn = next)
3686 rtx set = single_set (insn);
3688 next = NEXT_INSN (insn);
3690 if (set != 0 && REG_P (SET_DEST (set))
3691 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3693 struct no_conflict_data data;
3695 data.target = const0_rtx;
3696 data.first = insns;
3697 data.insn = insn;
3698 data.must_stay = 0;
3699 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3700 if (! data.must_stay)
3702 if (PREV_INSN (insn))
3703 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3704 else
3705 insns = next;
3707 if (next)
3708 SET_PREV_INSN (next) = PREV_INSN (insn);
3710 add_insn (insn);
3714 /* Some ports use a loop to copy large arguments onto the stack.
3715 Don't move anything outside such a loop. */
3716 if (LABEL_P (insn))
3717 break;
3720 /* Write the remaining insns followed by the final copy. */
3721 for (insn = insns; insn; insn = next)
3723 next = NEXT_INSN (insn);
3725 add_insn (insn);
3728 last = emit_move_insn (target, result);
3729 if (equiv)
3730 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3732 if (final_dest != target)
3733 emit_move_insn (final_dest, target);
3736 void
3737 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3739 emit_libcall_block_1 (insns, target, result, equiv, false);
3742 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3743 PURPOSE describes how this comparison will be used. CODE is the rtx
3744 comparison code we will be using.
3746 ??? Actually, CODE is slightly weaker than that. A target is still
3747 required to implement all of the normal bcc operations, but not
3748 required to implement all (or any) of the unordered bcc operations. */
3751 can_compare_p (enum rtx_code code, machine_mode mode,
3752 enum can_compare_purpose purpose)
3754 rtx test;
3755 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3758 enum insn_code icode;
3760 if (purpose == ccp_jump
3761 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3762 && insn_operand_matches (icode, 0, test))
3763 return 1;
3764 if (purpose == ccp_store_flag
3765 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3766 && insn_operand_matches (icode, 1, test))
3767 return 1;
3768 if (purpose == ccp_cmov
3769 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3770 return 1;
3772 mode = GET_MODE_WIDER_MODE (mode).else_void ();
3773 PUT_MODE (test, mode);
3775 while (mode != VOIDmode);
3777 return 0;
3780 /* This function is called when we are going to emit a compare instruction that
3781 compares the values found in X and Y, using the rtl operator COMPARISON.
3783 If they have mode BLKmode, then SIZE specifies the size of both operands.
3785 UNSIGNEDP nonzero says that the operands are unsigned;
3786 this matters if they need to be widened (as given by METHODS).
3788 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3789 if we failed to produce one.
3791 *PMODE is the mode of the inputs (in case they are const_int).
3793 This function performs all the setup necessary so that the caller only has
3794 to emit a single comparison insn. This setup can involve doing a BLKmode
3795 comparison or emitting a library call to perform the comparison if no insn
3796 is available to handle it.
3797 The values which are passed in through pointers can be modified; the caller
3798 should perform the comparison on the modified values. Constant
3799 comparisons must have already been folded. */
3801 static void
3802 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3803 int unsignedp, enum optab_methods methods,
3804 rtx *ptest, machine_mode *pmode)
3806 machine_mode mode = *pmode;
3807 rtx libfunc, test;
3808 machine_mode cmp_mode;
3809 enum mode_class mclass;
3811 /* The other methods are not needed. */
3812 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3813 || methods == OPTAB_LIB_WIDEN);
3815 /* If we are optimizing, force expensive constants into a register. */
3816 if (CONSTANT_P (x) && optimize
3817 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3818 > COSTS_N_INSNS (1)))
3819 x = force_reg (mode, x);
3821 if (CONSTANT_P (y) && optimize
3822 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3823 > COSTS_N_INSNS (1)))
3824 y = force_reg (mode, y);
3826 #if HAVE_cc0
3827 /* Make sure we have a canonical comparison. The RTL
3828 documentation states that canonical comparisons are required only
3829 for targets which have cc0. */
3830 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3831 #endif
3833 /* Don't let both operands fail to indicate the mode. */
3834 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3835 x = force_reg (mode, x);
3836 if (mode == VOIDmode)
3837 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3839 /* Handle all BLKmode compares. */
3841 if (mode == BLKmode)
3843 machine_mode result_mode;
3844 enum insn_code cmp_code;
3845 rtx result;
3846 rtx opalign
3847 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3849 gcc_assert (size);
3851 /* Try to use a memory block compare insn - any of cmpmem,
3852 cmpstr or cmpstrn will do. */
3853 opt_scalar_int_mode cmp_mode_iter;
3854 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
3856 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
3857 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3858 if (cmp_code == CODE_FOR_nothing)
3859 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3860 if (cmp_code == CODE_FOR_nothing)
3861 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3862 if (cmp_code == CODE_FOR_nothing)
3863 continue;
3865 /* Must make sure the size fits the insn's mode. */
3866 if (CONST_INT_P (size)
3867 ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
3868 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
3869 > GET_MODE_BITSIZE (cmp_mode)))
3870 continue;
3872 result_mode = insn_data[cmp_code].operand[0].mode;
3873 result = gen_reg_rtx (result_mode);
3874 size = convert_to_mode (cmp_mode, size, 1);
3875 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3877 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3878 *pmode = result_mode;
3879 return;
3882 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3883 goto fail;
3885 /* Otherwise call a library function. */
3886 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3888 x = result;
3889 y = const0_rtx;
3890 mode = TYPE_MODE (integer_type_node);
3891 methods = OPTAB_LIB_WIDEN;
3892 unsignedp = false;
3895 /* Don't allow operands to the compare to trap, as that can put the
3896 compare and branch in different basic blocks. */
3897 if (cfun->can_throw_non_call_exceptions)
3899 if (may_trap_p (x))
3900 x = copy_to_reg (x);
3901 if (may_trap_p (y))
3902 y = copy_to_reg (y);
3905 if (GET_MODE_CLASS (mode) == MODE_CC)
3907 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3908 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3909 gcc_assert (icode != CODE_FOR_nothing
3910 && insn_operand_matches (icode, 0, test));
3911 *ptest = test;
3912 return;
3915 mclass = GET_MODE_CLASS (mode);
3916 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3917 FOR_EACH_MODE_FROM (cmp_mode, mode)
3919 enum insn_code icode;
3920 icode = optab_handler (cbranch_optab, cmp_mode);
3921 if (icode != CODE_FOR_nothing
3922 && insn_operand_matches (icode, 0, test))
3924 rtx_insn *last = get_last_insn ();
3925 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3926 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3927 if (op0 && op1
3928 && insn_operand_matches (icode, 1, op0)
3929 && insn_operand_matches (icode, 2, op1))
3931 XEXP (test, 0) = op0;
3932 XEXP (test, 1) = op1;
3933 *ptest = test;
3934 *pmode = cmp_mode;
3935 return;
3937 delete_insns_since (last);
3940 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3941 break;
3944 if (methods != OPTAB_LIB_WIDEN)
3945 goto fail;
3947 if (SCALAR_FLOAT_MODE_P (mode))
3949 /* Small trick if UNORDERED isn't implemented by the hardware. */
3950 if (comparison == UNORDERED && rtx_equal_p (x, y))
3952 prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
3953 ptest, pmode);
3954 if (*ptest)
3955 return;
3958 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3960 else
3962 rtx result;
3963 machine_mode ret_mode;
3965 /* Handle a libcall just for the mode we are using. */
3966 libfunc = optab_libfunc (cmp_optab, mode);
3967 gcc_assert (libfunc);
3969 /* If we want unsigned, and this mode has a distinct unsigned
3970 comparison routine, use that. */
3971 if (unsignedp)
3973 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3974 if (ulibfunc)
3975 libfunc = ulibfunc;
3978 ret_mode = targetm.libgcc_cmp_return_mode ();
3979 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3980 ret_mode, x, mode, y, mode);
3982 /* There are two kinds of comparison routines. Biased routines
3983 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3984 of gcc expect that the comparison operation is equivalent
3985 to the modified comparison. For signed comparisons compare the
3986 result against 1 in the biased case, and zero in the unbiased
3987 case. For unsigned comparisons always compare against 1 after
3988 biasing the unbiased result by adding 1. This gives us a way to
3989 represent LTU.
3990 The comparisons in the fixed-point helper library are always
3991 biased. */
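/* Worked example: for signed "x < y" against a biased routine we test
   "result < 1" (true just when the routine returned 0); against an
   unbiased routine we test "result < 0".  For unsigned "x < y" with an
   unbiased routine we test "(result + 1) LTU 1", which is true exactly
   when the routine returned -1.  */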
3992 x = result;
3993 y = const1_rtx;
3995 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3997 if (unsignedp)
3998 x = plus_constant (ret_mode, result, 1);
3999 else
4000 y = const0_rtx;
4003 *pmode = ret_mode;
4004 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4005 ptest, pmode);
4008 return;
4010 fail:
4011 *ptest = NULL_RTX;
4014 /* Before emitting an insn with code ICODE, make sure that X, which is going
4015 to be used for operand OPNUM of the insn, is converted from mode MODE to
4016 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4017 that it is accepted by the operand predicate. Return the new value. */
4020 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4021 machine_mode wider_mode, int unsignedp)
4023 if (mode != wider_mode)
4024 x = convert_modes (wider_mode, mode, x, unsignedp);
4026 if (!insn_operand_matches (icode, opnum, x))
4028 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4029 if (reload_completed)
4030 return NULL_RTX;
4031 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4032 return NULL_RTX;
4033 x = copy_to_mode_reg (op_mode, x);
4036 return x;
4039 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4040 we can do the branch. */
4042 static void
4043 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4044 profile_probability prob)
4046 machine_mode optab_mode;
4047 enum mode_class mclass;
4048 enum insn_code icode;
4049 rtx_insn *insn;
4051 mclass = GET_MODE_CLASS (mode);
4052 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4053 icode = optab_handler (cbranch_optab, optab_mode);
4055 gcc_assert (icode != CODE_FOR_nothing);
4056 gcc_assert (insn_operand_matches (icode, 0, test));
4057 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4058 XEXP (test, 1), label));
4059 if (prob.initialized_p ()
4060 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4061 && insn
4062 && JUMP_P (insn)
4063 && any_condjump_p (insn)
4064 && !find_reg_note (insn, REG_BR_PROB, 0))
4065 add_reg_br_prob_note (insn, prob);
4068 /* Generate code to compare X with Y so that the condition codes are
4069 set and to jump to LABEL if the condition is true. If X is a
4070 constant and Y is not a constant, then the comparison is swapped to
4071 ensure that the comparison RTL has the canonical form.
4073 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4074 need to be widened. UNSIGNEDP is also used to select the proper
4075 branch condition code.
4077 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4079 MODE is the mode of the inputs (in case they are const_int).
4081 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4082 It will be potentially converted into an unsigned variant based on
4083 UNSIGNEDP to select a proper jump instruction.
4085 PROB is the probability of jumping to LABEL. */
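/* A typical use might look like this (sketch; X, Y and LABEL stand for
   pre-existing operands and a code label):

     emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 0, label,
			      profile_probability::uninitialized ());

   which jumps to LABEL when X < Y as signed SImode values.  */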
4087 void
4088 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4089 machine_mode mode, int unsignedp, rtx label,
4090 profile_probability prob)
4092 rtx op0 = x, op1 = y;
4093 rtx test;
4095 /* Swap operands and condition to ensure canonical RTL. */
4096 if (swap_commutative_operands_p (x, y)
4097 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4099 op0 = y, op1 = x;
4100 comparison = swap_condition (comparison);
4103 /* If OP0 is still a constant, then both X and Y must be constants
4104 or the opposite comparison is not supported. Force OP0 into a register
4105 to create canonical RTL. */
4106 if (CONSTANT_P (op0))
4107 op0 = force_reg (mode, op0);
4109 if (unsignedp)
4110 comparison = unsigned_condition (comparison);
4112 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4113 &test, &mode);
4114 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4118 /* Emit a library call comparison between floating point X and Y.
4119 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4121 static void
4122 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4123 rtx *ptest, machine_mode *pmode)
4125 enum rtx_code swapped = swap_condition (comparison);
4126 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4127 machine_mode orig_mode = GET_MODE (x);
4128 machine_mode mode;
4129 rtx true_rtx, false_rtx;
4130 rtx value, target, equiv;
4131 rtx_insn *insns;
4132 rtx libfunc = 0;
4133 bool reversed_p = false;
4134 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4136 FOR_EACH_MODE_FROM (mode, orig_mode)
4138 if (code_to_optab (comparison)
4139 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4140 break;
4142 if (code_to_optab (swapped)
4143 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4145 std::swap (x, y);
4146 comparison = swapped;
4147 break;
4150 if (code_to_optab (reversed)
4151 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4153 comparison = reversed;
4154 reversed_p = true;
4155 break;
4159 gcc_assert (mode != VOIDmode);
4161 if (mode != orig_mode)
4163 x = convert_to_mode (mode, x, 0);
4164 y = convert_to_mode (mode, y, 0);
4167 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4168 the RTL. This allows the RTL optimizers to delete the libcall if the
4169 condition can be determined at compile-time. */
4170 if (comparison == UNORDERED
4171 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4173 true_rtx = const_true_rtx;
4174 false_rtx = const0_rtx;
4176 else
4178 switch (comparison)
4180 case EQ:
4181 true_rtx = const0_rtx;
4182 false_rtx = const_true_rtx;
4183 break;
4185 case NE:
4186 true_rtx = const_true_rtx;
4187 false_rtx = const0_rtx;
4188 break;
4190 case GT:
4191 true_rtx = const1_rtx;
4192 false_rtx = const0_rtx;
4193 break;
4195 case GE:
4196 true_rtx = const0_rtx;
4197 false_rtx = constm1_rtx;
4198 break;
4200 case LT:
4201 true_rtx = constm1_rtx;
4202 false_rtx = const0_rtx;
4203 break;
4205 case LE:
4206 true_rtx = const0_rtx;
4207 false_rtx = const1_rtx;
4208 break;
4210 default:
4211 gcc_unreachable ();
4215 if (comparison == UNORDERED)
4217 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4218 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4219 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4220 temp, const_true_rtx, equiv);
4222 else
4224 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4225 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4226 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4227 equiv, true_rtx, false_rtx);
4230 start_sequence ();
4231 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4232 cmp_mode, x, mode, y, mode);
4233 insns = get_insns ();
4234 end_sequence ();
4236 target = gen_reg_rtx (cmp_mode);
4237 emit_libcall_block (insns, target, value, equiv);
4239 if (comparison == UNORDERED
4240 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4241 || reversed_p)
4242 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4243 else
4244 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4246 *pmode = cmp_mode;
4249 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4251 void
4252 emit_indirect_jump (rtx loc)
4254 if (!targetm.have_indirect_jump ())
4255 sorry ("indirect jumps are not available on this target");
4256 else
4258 struct expand_operand ops[1];
4259 create_address_operand (&ops[0], loc);
4260 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4261 emit_barrier ();
4266 /* Emit a conditional move instruction if the machine supports one for that
4267 condition and machine mode.
4269 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4270 the mode to use should they be constants. If it is VOIDmode, they cannot
4271 both be constants.
4273 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4274 should be stored there. MODE is the mode to use should they be constants.
4275 If it is VOIDmode, they cannot both be constants.
4277 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4278 is not supported. */
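/* For instance, "target = (a < b) ? c : d" on signed SImode values
   could be expanded as (sketch; A, B, C and D stand for existing rtxes):

     rtx res = emit_conditional_move (target, LT, a, b, SImode,
				      c, d, SImode, 0);

   with a branchy fallback emitted by the caller when RES is NULL_RTX.  */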
4281 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4282 machine_mode cmode, rtx op2, rtx op3,
4283 machine_mode mode, int unsignedp)
4285 rtx comparison;
4286 rtx_insn *last;
4287 enum insn_code icode;
4288 enum rtx_code reversed;
4290 /* If the two source operands are identical, that's just a move. */
4292 if (rtx_equal_p (op2, op3))
4294 if (!target)
4295 target = gen_reg_rtx (mode);
4297 emit_move_insn (target, op3);
4298 return target;
4301 /* If one operand is constant, make it the second one. Only do this
4302 if the other operand is not constant as well. */
4304 if (swap_commutative_operands_p (op0, op1))
4306 std::swap (op0, op1);
4307 code = swap_condition (code);
4310 /* get_condition will prefer to generate LT and GT even if the old
4311 comparison was against zero, so undo that canonicalization here since
4312 comparisons against zero are cheaper. */
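/* I.e. rewrite "x < 1" as "x <= 0" and "x > -1" as "x >= 0". */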
4313 if (code == LT && op1 == const1_rtx)
4314 code = LE, op1 = const0_rtx;
4315 else if (code == GT && op1 == constm1_rtx)
4316 code = GE, op1 = const0_rtx;
4318 if (cmode == VOIDmode)
4319 cmode = GET_MODE (op0);
4321 enum rtx_code orig_code = code;
4322 bool swapped = false;
4323 if (swap_commutative_operands_p (op2, op3)
4324 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4325 != UNKNOWN))
4327 std::swap (op2, op3);
4328 code = reversed;
4329 swapped = true;
4332 if (mode == VOIDmode)
4333 mode = GET_MODE (op2);
4335 icode = direct_optab_handler (movcc_optab, mode);
4337 if (icode == CODE_FOR_nothing)
4338 return NULL_RTX;
4340 if (!target)
4341 target = gen_reg_rtx (mode);
4343 for (int pass = 0; ; pass++)
4345 code = unsignedp ? unsigned_condition (code) : code;
4346 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4348 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4349 punt and let the caller figure out how best to deal with this
4350 situation. */
4351 if (COMPARISON_P (comparison))
4353 saved_pending_stack_adjust save;
4354 save_pending_stack_adjust (&save);
4355 last = get_last_insn ();
4356 do_pending_stack_adjust ();
4357 machine_mode cmpmode = cmode;
4358 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4359 GET_CODE (comparison), NULL_RTX, unsignedp,
4360 OPTAB_WIDEN, &comparison, &cmpmode);
4361 if (comparison)
4363 struct expand_operand ops[4];
4365 create_output_operand (&ops[0], target, mode);
4366 create_fixed_operand (&ops[1], comparison);
4367 create_input_operand (&ops[2], op2, mode);
4368 create_input_operand (&ops[3], op3, mode);
4369 if (maybe_expand_insn (icode, 4, ops))
4371 if (ops[0].value != target)
4372 convert_move (target, ops[0].value, false);
4373 return target;
4376 delete_insns_since (last);
4377 restore_pending_stack_adjust (&save);
4380 if (pass == 1)
4381 return NULL_RTX;
4383 /* If the preferred op2/op3 order is not usable, retry with the other
4384 operand order; perhaps it will expand successfully. */
4385 if (swapped)
4386 code = orig_code;
4387 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4388 NULL))
4389 != UNKNOWN)
4390 code = reversed;
4391 else
4392 return NULL_RTX;
4393 std::swap (op2, op3);
4398 /* Emit a conditional negate or bitwise complement using the
4399 negcc or notcc optabs if available. Return NULL_RTX if such operations
4400 are not available. Otherwise return the RTX holding the result.
4401 TARGET is the desired destination of the result. COND is the comparison
4402 on which to negate. If COND is true move into TARGET the negation
4403 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4404 CODE is either NEG or NOT. MODE is the machine mode in which the
4405 operation is performed. */
4408 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4409 machine_mode mode, rtx cond, rtx op1,
4410 rtx op2)
4412 optab op = unknown_optab;
4413 if (code == NEG)
4414 op = negcc_optab;
4415 else if (code == NOT)
4416 op = notcc_optab;
4417 else
4418 gcc_unreachable ();
4420 insn_code icode = direct_optab_handler (op, mode);
4422 if (icode == CODE_FOR_nothing)
4423 return NULL_RTX;
4425 if (!target)
4426 target = gen_reg_rtx (mode);
4428 rtx_insn *last = get_last_insn ();
4429 struct expand_operand ops[4];
4431 create_output_operand (&ops[0], target, mode);
4432 create_fixed_operand (&ops[1], cond);
4433 create_input_operand (&ops[2], op1, mode);
4434 create_input_operand (&ops[3], op2, mode);
4436 if (maybe_expand_insn (icode, 4, ops))
4438 if (ops[0].value != target)
4439 convert_move (target, ops[0].value, false);
4441 return target;
4443 delete_insns_since (last);
4444 return NULL_RTX;
4447 /* Emit a conditional addition instruction if the machine supports one for that
4448 condition and machine mode.
4450 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4451 the mode to use should they be constants. If it is VOIDmode, they cannot
4452 both be constants.
4454 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4455 should be stored there. MODE is the mode to use should they be constants.
4456 If it is VOIDmode, they cannot both be constants.
4458 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4459 is not supported. */
4462 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4463 machine_mode cmode, rtx op2, rtx op3,
4464 machine_mode mode, int unsignedp)
4466 rtx comparison;
4467 rtx_insn *last;
4468 enum insn_code icode;
4470 /* If one operand is constant, make it the second one. Only do this
4471 if the other operand is not constant as well. */
4473 if (swap_commutative_operands_p (op0, op1))
4475 std::swap (op0, op1);
4476 code = swap_condition (code);
4479 /* get_condition will prefer to generate LT and GT even if the old
4480 comparison was against zero, so undo that canonicalization here since
4481 comparisons against zero are cheaper. */
4482 if (code == LT && op1 == const1_rtx)
4483 code = LE, op1 = const0_rtx;
4484 else if (code == GT && op1 == constm1_rtx)
4485 code = GE, op1 = const0_rtx;
4487 if (cmode == VOIDmode)
4488 cmode = GET_MODE (op0);
4490 if (mode == VOIDmode)
4491 mode = GET_MODE (op2);
4493 icode = optab_handler (addcc_optab, mode);
4495 if (icode == CODE_FOR_nothing)
4496 return 0;
4498 if (!target)
4499 target = gen_reg_rtx (mode);
4501 code = unsignedp ? unsigned_condition (code) : code;
4502 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4504 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4505 return NULL and let the caller figure out how best to deal with this
4506 situation. */
4507 if (!COMPARISON_P (comparison))
4508 return NULL_RTX;
4510 do_pending_stack_adjust ();
4511 last = get_last_insn ();
4512 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4513 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4514 &comparison, &cmode);
4515 if (comparison)
4517 struct expand_operand ops[4];
4519 create_output_operand (&ops[0], target, mode);
4520 create_fixed_operand (&ops[1], comparison);
4521 create_input_operand (&ops[2], op2, mode);
4522 create_input_operand (&ops[3], op3, mode);
4523 if (maybe_expand_insn (icode, 4, ops))
4525 if (ops[0].value != target)
4526 convert_move (target, ops[0].value, false);
4527 return target;
4530 delete_insns_since (last);
4531 return NULL_RTX;
4534 /* These functions attempt to generate an insn body, rather than
4535 emitting the insn, but if the gen function already emits them, we
4536 make no attempt to turn them back into naked patterns. */
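/* A typical use (sketch):

     rtx_insn *seq = gen_add3_insn (r0, r1, c);
     if (seq)
       emit_insn (seq);

   gen_add3_insn returns NULL when the pattern cannot accept the
   operands, so callers must be prepared to fall back.  */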
4538 /* Generate and return an insn body to add Y to X. */
4540 rtx_insn *
4541 gen_add2_insn (rtx x, rtx y)
4543 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4545 gcc_assert (insn_operand_matches (icode, 0, x));
4546 gcc_assert (insn_operand_matches (icode, 1, x));
4547 gcc_assert (insn_operand_matches (icode, 2, y));
4549 return GEN_FCN (icode) (x, x, y);
4552 /* Generate and return an insn body to add r1 and c,
4553 storing the result in r0. */
4555 rtx_insn *
4556 gen_add3_insn (rtx r0, rtx r1, rtx c)
4558 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4560 if (icode == CODE_FOR_nothing
4561 || !insn_operand_matches (icode, 0, r0)
4562 || !insn_operand_matches (icode, 1, r1)
4563 || !insn_operand_matches (icode, 2, c))
4564 return NULL;
4566 return GEN_FCN (icode) (r0, r1, c);
4570 have_add2_insn (rtx x, rtx y)
4572 enum insn_code icode;
4574 gcc_assert (GET_MODE (x) != VOIDmode);
4576 icode = optab_handler (add_optab, GET_MODE (x));
4578 if (icode == CODE_FOR_nothing)
4579 return 0;
4581 if (!insn_operand_matches (icode, 0, x)
4582 || !insn_operand_matches (icode, 1, x)
4583 || !insn_operand_matches (icode, 2, y))
4584 return 0;
4586 return 1;
4589 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4591 rtx_insn *
4592 gen_addptr3_insn (rtx x, rtx y, rtx z)
4594 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4596 gcc_assert (insn_operand_matches (icode, 0, x));
4597 gcc_assert (insn_operand_matches (icode, 1, y));
4598 gcc_assert (insn_operand_matches (icode, 2, z));
4600 return GEN_FCN (icode) (x, y, z);
4603 /* Return true if the target implements an addptr pattern and X, Y,
4604 and Z are valid for the pattern predicates. */
4607 have_addptr3_insn (rtx x, rtx y, rtx z)
4609 enum insn_code icode;
4611 gcc_assert (GET_MODE (x) != VOIDmode);
4613 icode = optab_handler (addptr3_optab, GET_MODE (x));
4615 if (icode == CODE_FOR_nothing)
4616 return 0;
4618 if (!insn_operand_matches (icode, 0, x)
4619 || !insn_operand_matches (icode, 1, y)
4620 || !insn_operand_matches (icode, 2, z))
4621 return 0;
4623 return 1;
4626 /* Generate and return an insn body to subtract Y from X. */
4628 rtx_insn *
4629 gen_sub2_insn (rtx x, rtx y)
4631 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4633 gcc_assert (insn_operand_matches (icode, 0, x));
4634 gcc_assert (insn_operand_matches (icode, 1, x));
4635 gcc_assert (insn_operand_matches (icode, 2, y));
4637 return GEN_FCN (icode) (x, x, y);
4640 /* Generate and return an insn body to subtract c from r1,
4641 storing the result in r0. */
4643 rtx_insn *
4644 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4646 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4648 if (icode == CODE_FOR_nothing
4649 || !insn_operand_matches (icode, 0, r0)
4650 || !insn_operand_matches (icode, 1, r1)
4651 || !insn_operand_matches (icode, 2, c))
4652 return NULL;
4654 return GEN_FCN (icode) (r0, r1, c);
4658 have_sub2_insn (rtx x, rtx y)
4660 enum insn_code icode;
4662 gcc_assert (GET_MODE (x) != VOIDmode);
4664 icode = optab_handler (sub_optab, GET_MODE (x));
4666 if (icode == CODE_FOR_nothing)
4667 return 0;
4669 if (!insn_operand_matches (icode, 0, x)
4670 || !insn_operand_matches (icode, 1, x)
4671 || !insn_operand_matches (icode, 2, y))
4672 return 0;
4674 return 1;
4677 /* Generate the body of an insn to extend Y (with mode MFROM)
4678 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4680 rtx_insn *
4681 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4682 machine_mode mfrom, int unsignedp)
4684 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4685 return GEN_FCN (icode) (x, y);
4688 /* Generate code to convert FROM to floating point
4689 and store in TO. FROM must be fixed point and not VOIDmode.
4690 UNSIGNEDP nonzero means regard FROM as unsigned.
4691 Normally this is done by correcting the final value
4692 if it is negative. */
4694 void
4695 expand_float (rtx to, rtx from, int unsignedp)
4697 enum insn_code icode;
4698 rtx target = to;
4699 scalar_mode from_mode, to_mode;
4700 machine_mode fmode, imode;
4701 bool can_do_signed = false;
4703 /* Crash now, because we won't be able to decide which mode to use. */
4704 gcc_assert (GET_MODE (from) != VOIDmode);
4706 /* Look for an insn to do the conversion. Do it in the specified
4707 modes if possible; otherwise convert either input, output or both to
4708 wider mode. If the integer mode is wider than the mode of FROM,
4709 we can do the conversion signed even if the input is unsigned. */
4711 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4712 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4714 int doing_unsigned = unsignedp;
4716 if (fmode != GET_MODE (to)
4717 && (significand_size (fmode)
4718 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
4719 continue;
4721 icode = can_float_p (fmode, imode, unsignedp);
4722 if (icode == CODE_FOR_nothing && unsignedp)
4724 enum insn_code scode = can_float_p (fmode, imode, 0);
4725 if (scode != CODE_FOR_nothing)
4726 can_do_signed = true;
4727 if (imode != GET_MODE (from))
4728 icode = scode, doing_unsigned = 0;
4731 if (icode != CODE_FOR_nothing)
4733 if (imode != GET_MODE (from))
4734 from = convert_to_mode (imode, from, unsignedp);
4736 if (fmode != GET_MODE (to))
4737 target = gen_reg_rtx (fmode);
4739 emit_unop_insn (icode, target, from,
4740 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4742 if (target != to)
4743 convert_move (to, target, 0);
4744 return;
4748 /* Unsigned integer, and no way to convert directly. Convert as signed,
4749 then unconditionally adjust the result. */
4750 if (unsignedp
4751 && can_do_signed
4752 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
4753 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
4755 opt_scalar_mode fmode_iter;
4756 rtx_code_label *label = gen_label_rtx ();
4757 rtx temp;
4758 REAL_VALUE_TYPE offset;
4760 /* Look for a usable floating mode FMODE wider than the source and at
4761 least as wide as the target. Using FMODE will avoid rounding woes
4762 with unsigned values greater than the signed maximum value. */
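/* For example, for an unsigned SImode source and an SFmode target,
   DFmode qualifies (where available): every 32-bit value is exactly
   representable in DFmode, so the 2**32 correction below is exact and
   only the final DFmode-to-SFmode move rounds.  */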
4764 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
4766 scalar_mode fmode = fmode_iter.require ();
4767 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
4768 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
4769 break;
4772 if (!fmode_iter.exists (&fmode))
4774 /* There is no such mode. Pretend the target is wide enough. */
4775 fmode = to_mode;
4777 /* Avoid double-rounding when TO is narrower than FROM. */
4778 if ((significand_size (fmode) + 1)
4779 < GET_MODE_PRECISION (from_mode))
4781 rtx temp1;
4782 rtx_code_label *neglabel = gen_label_rtx ();
4784 /* Don't use TARGET if it isn't a register, is a hard register,
4785 or is the wrong mode. */
4786 if (!REG_P (target)
4787 || REGNO (target) < FIRST_PSEUDO_REGISTER
4788 || GET_MODE (target) != fmode)
4789 target = gen_reg_rtx (fmode);
4791 imode = from_mode;
4792 do_pending_stack_adjust ();
4794 /* Test whether the sign bit is set. */
4795 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4796 0, neglabel);
4798 /* The sign bit is not set. Convert as signed. */
4799 expand_float (target, from, 0);
4800 emit_jump_insn (targetm.gen_jump (label));
4801 emit_barrier ();
4803 /* The sign bit is set.
4804 Convert to a usable (positive signed) value by shifting right
4805 one bit, while remembering if a nonzero bit was shifted
4806 out; i.e., compute (from & 1) | (from >> 1). */
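/* The ORed-in low bit acts as a "sticky" bit: once FROM needs more
   bits than the significand can hold, it only influences the result
   through rounding, and keeping it makes FROM >> 1 convert with the
   same rounding FROM itself would have received, so the doubling
   below produces the correctly rounded value.  */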
4808 emit_label (neglabel);
4809 temp = expand_binop (imode, and_optab, from, const1_rtx,
4810 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4811 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4812 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4813 OPTAB_LIB_WIDEN);
4814 expand_float (target, temp, 0);
4816 /* Multiply by 2 to undo the shift above. */
4817 temp = expand_binop (fmode, add_optab, target, target,
4818 target, 0, OPTAB_LIB_WIDEN);
4819 if (temp != target)
4820 emit_move_insn (target, temp);
4822 do_pending_stack_adjust ();
4823 emit_label (label);
4824 goto done;
4828 /* If we are about to do some arithmetic to correct for an
4829 unsigned operand, do it in a pseudo-register. */
4831 if (to_mode != fmode
4832 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4833 target = gen_reg_rtx (fmode);
4835 /* Convert as signed integer to floating. */
4836 expand_float (target, from, 0);
4838 /* If FROM is negative (and therefore TO is negative),
4839 correct its value by 2**bitwidth. */
4841 do_pending_stack_adjust ();
4842 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
4843 0, label);
4846 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
4847 temp = expand_binop (fmode, add_optab, target,
4848 const_double_from_real_value (offset, fmode),
4849 target, 0, OPTAB_LIB_WIDEN);
4850 if (temp != target)
4851 emit_move_insn (target, temp);
4853 do_pending_stack_adjust ();
4854 emit_label (label);
4855 goto done;
4858 /* No hardware instruction available; call a library routine. */
4860 rtx libfunc;
4861 rtx_insn *insns;
4862 rtx value;
4863 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4865 if (is_narrower_int_mode (GET_MODE (from), SImode))
4866 from = convert_to_mode (SImode, from, unsignedp);
4868 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4869 gcc_assert (libfunc);
4871 start_sequence ();
4873 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4874 GET_MODE (to), from, GET_MODE (from));
4875 insns = get_insns ();
4876 end_sequence ();
4878 emit_libcall_block (insns, target, value,
4879 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4880 GET_MODE (to), from));
4883 done:
4885 /* Copy result to requested destination
4886 if we have been computing in a temp location. */
4888 if (target != to)
4890 if (GET_MODE (target) == GET_MODE (to))
4891 emit_move_insn (to, target);
4892 else
4893 convert_move (to, target, 0);
4897 /* Generate code to convert FROM to fixed point and store in TO. FROM
4898 must be floating point. */
4900 void
4901 expand_fix (rtx to, rtx from, int unsignedp)
4903 enum insn_code icode;
4904 rtx target = to;
4905 machine_mode fmode, imode;
4906 opt_scalar_mode fmode_iter;
4907 bool must_trunc = false;
4909 /* We first try to find a pair of modes, one real and one integer, at
4910 least as wide as FROM and TO, respectively, in which we can open-code
4911 this conversion. If the integer mode is wider than the mode of TO,
4912 we can do the conversion either signed or unsigned. */
4914 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4915 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
4917 int doing_unsigned = unsignedp;
4919 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4920 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4921 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4923 if (icode != CODE_FOR_nothing)
4925 rtx_insn *last = get_last_insn ();
4926 if (fmode != GET_MODE (from))
4927 from = convert_to_mode (fmode, from, 0);
4929 if (must_trunc)
4931 rtx temp = gen_reg_rtx (GET_MODE (from));
4932 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4933 temp, 0);
4936 if (imode != GET_MODE (to))
4937 target = gen_reg_rtx (imode);
4939 if (maybe_emit_unop_insn (icode, target, from,
4940 doing_unsigned ? UNSIGNED_FIX : FIX))
4942 if (target != to)
4943 convert_move (to, target, unsignedp);
4944 return;
4946 delete_insns_since (last);
4950 /* For an unsigned conversion, there is one more way to do it.
4951 If we have a signed conversion, we generate code that compares
4952 the real value to the largest representable positive number. If it
4953 is smaller, the conversion is done normally. Otherwise, subtract
4954 one plus the highest signed number, convert, and add it back.
4956 We only need to check all real modes, since we know we didn't find
4957 anything with a wider integer mode.
4959 This code used to extend the FP value into a mode wider than the destination.
4960 This is needed for decimal float modes, which cannot accurately
4961 represent one plus the highest signed number of the same size, but
4962 not for binary modes. Consider, for instance, conversion from SFmode
4963 into DImode.
4965 The hot path through the code is dealing with inputs smaller than 2^63
4966 and doing just the conversion, so there are no bits to lose.
4968 In the other path we know the value is positive in the range 2^63..2^64-1
4969 inclusive (for any other input, overflow happens and the result is undefined).
4970 So we know that the most significant bit set in the mantissa corresponds to
4971 2^63. The subtraction of 2^63 should not generate any rounding as it
4972 simply clears out that bit. The rest is trivial. */
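/* Worked example for a DImode target: for an input of 2**63 + 10.0,
   LIMIT is 2**63, so we compute (2**63 + 10.0) - 2**63 = 10.0 exactly,
   fix that to the signed integer 10, and XOR in HOST_WIDE_INT_1 << 63
   to set the sign bit, yielding the unsigned result 2**63 + 10.  */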
4974 scalar_int_mode to_mode;
4975 if (unsignedp
4976 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
4977 && HWI_COMPUTABLE_MODE_P (to_mode))
4978 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
4980 scalar_mode fmode = fmode_iter.require ();
4981 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
4982 0, &must_trunc)
4983 && (!DECIMAL_FLOAT_MODE_P (fmode)
4984 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
4986 int bitsize;
4987 REAL_VALUE_TYPE offset;
4988 rtx limit;
4989 rtx_code_label *lab1, *lab2;
4990 rtx_insn *insn;
4992 bitsize = GET_MODE_PRECISION (to_mode);
4993 real_2expN (&offset, bitsize - 1, fmode);
4994 limit = const_double_from_real_value (offset, fmode);
4995 lab1 = gen_label_rtx ();
4996 lab2 = gen_label_rtx ();
4998 if (fmode != GET_MODE (from))
4999 from = convert_to_mode (fmode, from, 0);
5001 /* See if we need to do the subtraction. */
5002 do_pending_stack_adjust ();
5003 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
5004 GET_MODE (from), 0, lab1);
5006 /* If not, do the signed "fix" and branch around fixup code. */
5007 expand_fix (to, from, 0);
5008 emit_jump_insn (targetm.gen_jump (lab2));
5009 emit_barrier ();
5011 /* Otherwise, subtract 2**(N-1), convert to signed number,
5012 then add 2**(N-1). Do the addition using XOR since this
5013 will often generate better code. */
5014 emit_label (lab1);
5015 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5016 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5017 expand_fix (to, target, 0);
5018 target = expand_binop (to_mode, xor_optab, to,
5019 gen_int_mode
5020 (HOST_WIDE_INT_1 << (bitsize - 1),
5021 to_mode),
5022 to, 1, OPTAB_LIB_WIDEN);
5024 if (target != to)
5025 emit_move_insn (to, target);
5027 emit_label (lab2);
5029 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5031 /* Make a place for a REG_NOTE and add it. */
5032 insn = emit_move_insn (to, to);
5033 set_dst_reg_note (insn, REG_EQUAL,
5034 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5035 copy_rtx (from)),
5036 to);
5039 return;
5043 /* We can't do it with an insn, so use a library call. But first ensure
5044 that the mode of TO is at least as wide as SImode, since those are the
5045 only library calls we know about. */
5047 if (is_narrower_int_mode (GET_MODE (to), SImode))
5049 target = gen_reg_rtx (SImode);
5051 expand_fix (target, from, unsignedp);
5053 else
5055 rtx_insn *insns;
5056 rtx value;
5057 rtx libfunc;
5059 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5060 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5061 gcc_assert (libfunc);
5063 start_sequence ();
5065 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5066 GET_MODE (to), from, GET_MODE (from));
5067 insns = get_insns ();
5068 end_sequence ();
5070 emit_libcall_block (insns, target, value,
5071 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5072 GET_MODE (to), from));
5075 if (target != to)
5077 if (GET_MODE (to) == GET_MODE (target))
5078 emit_move_insn (to, target);
5079 else
5080 convert_move (to, target, 0);
5085 /* Promote integer arguments for a libcall if necessary.
5086 emit_library_call_value cannot do the promotion because it does not
5087 know if it should do a signed or unsigned promotion. This is because
5088 there are no tree types defined for libcalls. */
5090 static rtx
5091 prepare_libcall_arg (rtx arg, int uintp)
5093 scalar_int_mode mode;
5094 machine_mode arg_mode;
5095 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5097 /* If we need to promote the integer function argument we need to do
5098 it here instead of inside emit_library_call_value because in
5099 emit_library_call_value we don't know if we should do a signed or
5100 unsigned promotion. */
5102 int unsigned_p = 0;
5103 arg_mode = promote_function_mode (NULL_TREE, mode,
5104 &unsigned_p, NULL_TREE, 0);
5105 if (arg_mode != mode)
5106 return convert_to_mode (arg_mode, arg, uintp);
5108 return arg;
5111 /* Generate code to convert FROM to TO, where one or both are fixed-point.
5112 If UINTP is true, either TO or FROM is an unsigned integer.
5113 If SATP is true, we need to saturate the result. */
5115 void
5116 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5118 machine_mode to_mode = GET_MODE (to);
5119 machine_mode from_mode = GET_MODE (from);
5120 convert_optab tab;
5121 enum rtx_code this_code;
5122 enum insn_code code;
5123 rtx_insn *insns;
5124 rtx value;
5125 rtx libfunc;
5127 if (to_mode == from_mode)
5129 emit_move_insn (to, from);
5130 return;
5133 if (uintp)
5135 tab = satp ? satfractuns_optab : fractuns_optab;
5136 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5138 else
5140 tab = satp ? satfract_optab : fract_optab;
5141 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5143 code = convert_optab_handler (tab, to_mode, from_mode);
5144 if (code != CODE_FOR_nothing)
5146 emit_unop_insn (code, to, from, this_code);
5147 return;
5150 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5151 gcc_assert (libfunc);
5153 from = prepare_libcall_arg (from, uintp);
5154 from_mode = GET_MODE (from);
5156 start_sequence ();
5157 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5158 from, from_mode);
5159 insns = get_insns ();
5160 end_sequence ();
5162 emit_libcall_block (insns, to, value,
5163 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5166 /* Generate code to convert FROM to fixed point and store in TO. FROM
5167 must be floating point, TO must be signed. Use the conversion optab
5168 TAB to do the conversion. */
5170 bool
5171 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5173 enum insn_code icode;
5174 rtx target = to;
5175 machine_mode fmode, imode;
5177 /* We first try to find a pair of modes, one real and one integer, at
5178 least as wide as FROM and TO, respectively, in which we can open-code
5179 this conversion. If the integer mode is wider than the mode of TO,
5180 we can do the conversion either signed or unsigned. */
5182 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5183 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5185 icode = convert_optab_handler (tab, imode, fmode);
5186 if (icode != CODE_FOR_nothing)
5188 rtx_insn *last = get_last_insn ();
5189 if (fmode != GET_MODE (from))
5190 from = convert_to_mode (fmode, from, 0);
5192 if (imode != GET_MODE (to))
5193 target = gen_reg_rtx (imode);
5195 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5197 delete_insns_since (last);
5198 continue;
5200 if (target != to)
5201 convert_move (to, target, 0);
5202 return true;
5206 return false;
5209 /* Report whether we have an instruction to perform the operation
5210 specified by CODE on operands of mode MODE. */
5212 have_insn_for (enum rtx_code code, machine_mode mode)
5214 return (code_to_optab (code)
5215 && (optab_handler (code_to_optab (code), mode)
5216 != CODE_FOR_nothing));
5219 /* Print information about the current contents of the optabs on
5220 STDERR. */
5222 DEBUG_FUNCTION void
5223 debug_optab_libfuncs (void)
5225 int i, j, k;
5227 /* Dump the arithmetic optabs. */
5228 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5229 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5231 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5232 if (l)
5234 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5235 fprintf (stderr, "%s\t%s:\t%s\n",
5236 GET_RTX_NAME (optab_to_code ((optab) i)),
5237 GET_MODE_NAME (j),
5238 XSTR (l, 0));
5242 /* Dump the conversion optabs. */
5243 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5244 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5245 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5247 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5248 (machine_mode) k);
5249 if (l)
5251 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5252 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5253 GET_RTX_NAME (optab_to_code ((optab) i)),
5254 GET_MODE_NAME (j),
5255 GET_MODE_NAME (k),
5256 XSTR (l, 0));
5261 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5262 CODE. Return 0 on failure. */
5264 rtx_insn *
5265 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5267 machine_mode mode = GET_MODE (op1);
5268 enum insn_code icode;
5269 rtx_insn *insn;
5270 rtx trap_rtx;
5272 if (mode == VOIDmode)
5273 return 0;
5275 icode = optab_handler (ctrap_optab, mode);
5276 if (icode == CODE_FOR_nothing)
5277 return 0;
5279 /* Some targets only accept a zero trap code. */
5280 if (!insn_operand_matches (icode, 3, tcode))
5281 return 0;
5283 do_pending_stack_adjust ();
5284 start_sequence ();
5285 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5286 &trap_rtx, &mode);
5287 if (!trap_rtx)
5288 insn = NULL;
5289 else
5290 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5291 tcode);
5293 /* If that failed, then give up. */
5294 if (insn == 0)
5296 end_sequence ();
5297 return 0;
5300 emit_insn (insn);
5301 insn = get_insns ();
5302 end_sequence ();
5303 return insn;
5306 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5307 or unsigned operation code. */
5309 enum rtx_code
5310 get_rtx_code (enum tree_code tcode, bool unsignedp)
5312 enum rtx_code code;
5313 switch (tcode)
5315 case EQ_EXPR:
5316 code = EQ;
5317 break;
5318 case NE_EXPR:
5319 code = NE;
5320 break;
5321 case LT_EXPR:
5322 code = unsignedp ? LTU : LT;
5323 break;
5324 case LE_EXPR:
5325 code = unsignedp ? LEU : LE;
5326 break;
5327 case GT_EXPR:
5328 code = unsignedp ? GTU : GT;
5329 break;
5330 case GE_EXPR:
5331 code = unsignedp ? GEU : GE;
5332 break;
5334 case UNORDERED_EXPR:
5335 code = UNORDERED;
5336 break;
5337 case ORDERED_EXPR:
5338 code = ORDERED;
5339 break;
5340 case UNLT_EXPR:
5341 code = UNLT;
5342 break;
5343 case UNLE_EXPR:
5344 code = UNLE;
5345 break;
5346 case UNGT_EXPR:
5347 code = UNGT;
5348 break;
5349 case UNGE_EXPR:
5350 code = UNGE;
5351 break;
5352 case UNEQ_EXPR:
5353 code = UNEQ;
5354 break;
5355 case LTGT_EXPR:
5356 code = LTGT;
5357 break;
5359 case BIT_AND_EXPR:
5360 code = AND;
5361 break;
5363 case BIT_IOR_EXPR:
5364 code = IOR;
5365 break;
5367 default:
5368 gcc_unreachable ();
5370 return code;
5373 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5374 select signed or unsigned operators. OPNO holds the index of the
5375 first comparison operand for insn ICODE. Do not generate the
5376 compare instruction itself. */
5378 static rtx
5379 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5380 tree t_op0, tree t_op1, bool unsignedp,
5381 enum insn_code icode, unsigned int opno)
5383 struct expand_operand ops[2];
5384 rtx rtx_op0, rtx_op1;
5385 machine_mode m0, m1;
5386 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5388 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5390 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5391 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5392 cases, use the original mode. */
5393 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5394 EXPAND_STACK_PARM);
5395 m0 = GET_MODE (rtx_op0);
5396 if (m0 == VOIDmode)
5397 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5399 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5400 EXPAND_STACK_PARM);
5401 m1 = GET_MODE (rtx_op1);
5402 if (m1 == VOIDmode)
5403 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5405 create_input_operand (&ops[0], rtx_op0, m0);
5406 create_input_operand (&ops[1], rtx_op1, m1);
5407 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5408 gcc_unreachable ();
5409 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5412 /* Check if vec_perm mask SEL is a constant equivalent to a shift of
5413 the first vec_perm operand, assuming the second operand is a constant
5414 vector of zeros. Return the shift distance in bits if so, or NULL_RTX
5415 if the vec_perm is not a shift. MODE is the mode of the value being
5416 shifted. */
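/* For example, with MODE == V4SImode the selector {1, 2, 3, 4} (with a
   zero second operand) shifts the first operand by one 32-bit element,
   so we return a shift amount of 32 bits.  */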
5417 static rtx
5418 shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel)
5420 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
5421 poly_int64 first = sel[0];
5422 if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
5423 return NULL_RTX;
5425 if (!sel.series_p (0, 1, first, 1))
5427 unsigned int nelt;
5428 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5429 return NULL_RTX;
5430 for (unsigned int i = 1; i < nelt; i++)
5432 poly_int64 expected = i + first;
5433 /* Indices into the second vector are all equivalent. */
5434 if (maybe_lt (sel[i], nelt)
5435 ? maybe_ne (sel[i], expected)
5436 : maybe_lt (expected, nelt))
5437 return NULL_RTX;
5441 return gen_int_shift_amount (mode, first * bitsize);
5444 /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn. */
5446 static rtx
5447 expand_vec_perm_1 (enum insn_code icode, rtx target,
5448 rtx v0, rtx v1, rtx sel)
5450 machine_mode tmode = GET_MODE (target);
5451 machine_mode smode = GET_MODE (sel);
5452 struct expand_operand ops[4];
5454 gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
5455 || mode_for_int_vector (tmode).require () == smode);
5456 create_output_operand (&ops[0], target, tmode);
5457 create_input_operand (&ops[3], sel, smode);
5459 /* Make an effort to preserve v0 == v1. The target expander is able to
5460 rely on this to determine if we're permuting a single input operand. */
5461 if (rtx_equal_p (v0, v1))
5463 if (!insn_operand_matches (icode, 1, v0))
5464 v0 = force_reg (tmode, v0);
5465 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5466 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5468 create_fixed_operand (&ops[1], v0);
5469 create_fixed_operand (&ops[2], v0);
5471 else
5473 create_input_operand (&ops[1], v0, tmode);
5474 create_input_operand (&ops[2], v1, tmode);
5477 if (maybe_expand_insn (icode, 4, ops))
5478 return ops[0].value;
5479 return NULL_RTX;
5482 /* Implement a permutation of vectors v0 and v1 using the permutation
5483 vector in SEL and return the result. Use TARGET to hold the result
5484 if nonnull and convenient.
5486 MODE is the mode of the vectors being permuted (V0 and V1). SEL_MODE
5487 is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
5488 to have a particular mode. */
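/* One way to call this (sketch; NUNITS stands for a known constant
   element count) is to build the selector explicitly, e.g. an element
   reversal:

     vec_perm_builder sel (nunits, nunits, 1);
     for (unsigned int i = 0; i < nunits; ++i)
       sel.quick_push (nunits - 1 - i);
     rtx res = expand_vec_perm_const (mode, v0, v0, sel, BLKmode, NULL_RTX);

   RES is NULL_RTX if no expansion was possible.  */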
5491 expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
5492 const vec_perm_builder &sel, machine_mode sel_mode,
5493 rtx target)
5495 if (!target || !register_operand (target, mode))
5496 target = gen_reg_rtx (mode);
5498 /* Set QIMODE to a different vector mode with byte elements.
5499 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5500 machine_mode qimode;
5501 if (!qimode_for_vec_perm (mode).exists (&qimode))
5502 qimode = VOIDmode;
5504 rtx_insn *last = get_last_insn ();
5506 bool single_arg_p = rtx_equal_p (v0, v1);
5507 /* Always specify two input vectors here and leave the target to handle
5508 cases in which the inputs are equal. Not all backends can cope with
5509 the single-input representation when testing for a double-input
5510 target instruction. */
5511 vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));
5513 /* See if this can be handled with a vec_shr. We only do this if the
5514 second vector is all zeroes. */
5515 insn_code shift_code = optab_handler (vec_shr_optab, mode);
5516 insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5517 ? optab_handler (vec_shr_optab, qimode)
5518 : CODE_FOR_nothing);
5520 if (v1 == CONST0_RTX (GET_MODE (v1))
5521 && (shift_code != CODE_FOR_nothing
5522 || shift_code_qi != CODE_FOR_nothing))
5524 rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices);
5525 if (shift_amt)
5527 struct expand_operand ops[3];
5528 if (shift_code != CODE_FOR_nothing)
5530 create_output_operand (&ops[0], target, mode);
5531 create_input_operand (&ops[1], v0, mode);
5532 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
5533 if (maybe_expand_insn (shift_code, 3, ops))
5534 return ops[0].value;
5536 if (shift_code_qi != CODE_FOR_nothing)
5538 rtx tmp = gen_reg_rtx (qimode);
5539 create_output_operand (&ops[0], tmp, qimode);
5540 create_input_operand (&ops[1], gen_lowpart (qimode, v0), qimode);
5541 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
5542 if (maybe_expand_insn (shift_code_qi, 3, ops))
5543 return gen_lowpart (mode, ops[0].value);
5548 if (targetm.vectorize.vec_perm_const != NULL)
5550 v0 = force_reg (mode, v0);
5551 if (single_arg_p)
5552 v1 = v0;
5553 else
5554 v1 = force_reg (mode, v1);
5556 if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
5557 return target;
5560 /* Fall back to a constant byte-based permutation. */
5561 vec_perm_indices qimode_indices;
5562 rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
5563 if (qimode != VOIDmode)
5565 qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
5566 target_qi = gen_reg_rtx (qimode);
5567 v0_qi = gen_lowpart (qimode, v0);
5568 v1_qi = gen_lowpart (qimode, v1);
5569 if (targetm.vectorize.vec_perm_const != NULL
5570 && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
5571 v1_qi, qimode_indices))
5572 return gen_lowpart (mode, target_qi);
5575 /* Otherwise expand as a fully variable permutation. */
5577 /* The optabs are only defined for selectors with the same width
5578 as the values being permuted. */
5579 machine_mode required_sel_mode;
5580 if (!mode_for_int_vector (mode).exists (&required_sel_mode)
5581 || !VECTOR_MODE_P (required_sel_mode))
5583 delete_insns_since (last);
5584 return NULL_RTX;
5587 /* We know that it is semantically valid to treat SEL as having SEL_MODE.
5588 If that isn't the mode we want then we need to prove that using
5589 REQUIRED_SEL_MODE is OK. */
5590 if (sel_mode != required_sel_mode)
5592 if (!selector_fits_mode_p (required_sel_mode, indices))
5594 delete_insns_since (last);
5595 return NULL_RTX;
5597 sel_mode = required_sel_mode;
5600 insn_code icode = direct_optab_handler (vec_perm_optab, mode);
5601 if (icode != CODE_FOR_nothing)
5603 rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
5604 rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
5605 if (tmp)
5606 return tmp;
5609 if (qimode != VOIDmode
5610 && selector_fits_mode_p (qimode, qimode_indices))
5612 icode = direct_optab_handler (vec_perm_optab, qimode);
5613 if (icode != CODE_FOR_nothing)
5615 rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
5616 rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
5617 if (tmp)
5618 return gen_lowpart (mode, tmp);
5622 delete_insns_since (last);
5623 return NULL_RTX;
5626 /* Implement a permutation of vectors v0 and v1 using the permutation
5627 vector in SEL and return the result. Use TARGET to hold the result
5628 if nonnull and convenient.
5630 MODE is the mode of the vectors being permuted (V0 and V1).
5631 SEL must have the integer equivalent of MODE and is known to be
5632 unsuitable for permutes with a constant permutation vector. */
5634 rtx
5635 expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5637 enum insn_code icode;
5638 unsigned int i, u;
5639 rtx tmp, sel_qi;
5641 u = GET_MODE_UNIT_SIZE (mode);
5643 if (!target || GET_MODE (target) != mode)
5644 target = gen_reg_rtx (mode);
5646 icode = direct_optab_handler (vec_perm_optab, mode);
5647 if (icode != CODE_FOR_nothing)
5649 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5650 if (tmp)
5651 return tmp;
5654 /* As a special case to aid several targets, lower the element-based
5655 permutation to a byte-based permutation and try again. */
5656 machine_mode qimode;
5657 if (!qimode_for_vec_perm (mode).exists (&qimode)
5658 || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
5659 return NULL_RTX;
5660 icode = direct_optab_handler (vec_perm_optab, qimode);
5661 if (icode == CODE_FOR_nothing)
5662 return NULL_RTX;
5664 /* Multiply each element by its byte size. */
5665 machine_mode selmode = GET_MODE (sel);
5666 if (u == 2)
5667 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5668 NULL, 0, OPTAB_DIRECT);
5669 else
5670 sel = expand_simple_binop (selmode, ASHIFT, sel,
5671 gen_int_shift_amount (selmode, exact_log2 (u)),
5672 NULL, 0, OPTAB_DIRECT);
5673 gcc_assert (sel != NULL);
5675 /* Broadcast the low byte of each element into each of its bytes.
5676 The encoding has U interleaved stepped patterns, one for each
5677 byte of an element. */
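/* Worked example (illustrative): for V4SI, U == 4, so a selector
   element with value 2 becomes 8 after the multiplication above.
   Broadcasting the low byte then turns that lane into { 8, 8, 8, 8 },
   and adding the byte offsets { 0, 1, 2, 3 } below gives
   { 8, 9, 10, 11 }, exactly the byte indices of element 2.  */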
5678 vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
5679 unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
5680 for (i = 0; i < 3; ++i)
5681 for (unsigned int j = 0; j < u; ++j)
5682 const_sel.quick_push (i * u + low_byte_in_u);
5683 sel = gen_lowpart (qimode, sel);
5684 sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
5685 gcc_assert (sel != NULL);
5687 /* Add the byte offset to each byte element. */
5688 /* Note that the definition of the indices here is memory ordering,
5689 so there should be no difference between big and little endian. */
5690 rtx_vector_builder byte_indices (qimode, u, 1);
5691 for (i = 0; i < u; ++i)
5692 byte_indices.quick_push (GEN_INT (i));
5693 tmp = byte_indices.build ();
5694 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5695 sel, 0, OPTAB_DIRECT);
5696 gcc_assert (sel_qi != NULL);
5698 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5699 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5700 gen_lowpart (qimode, v1), sel_qi);
5701 if (tmp)
5702 tmp = gen_lowpart (mode, tmp);
5703 return tmp;
5706 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5707 three operands. */
5709 rtx
5710 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5711 rtx target)
5713 struct expand_operand ops[4];
5714 machine_mode mode = TYPE_MODE (vec_cond_type);
5715 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5716 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5717 rtx mask, rtx_op1, rtx_op2;
5719 if (icode == CODE_FOR_nothing)
5720 return 0;
5722 mask = expand_normal (op0);
5723 rtx_op1 = expand_normal (op1);
5724 rtx_op2 = expand_normal (op2);
5726 mask = force_reg (mask_mode, mask);
5727 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5729 create_output_operand (&ops[0], target, mode);
5730 create_input_operand (&ops[1], rtx_op1, mode);
5731 create_input_operand (&ops[2], rtx_op2, mode);
5732 create_input_operand (&ops[3], mask, mask_mode);
5733 expand_insn (icode, 4, ops);
5735 return ops[0].value;
5738 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5739 three operands. */
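/* Illustrative example: this expander services GIMPLE statements such as

     x = VEC_COND_EXPR <a < b, c, d>;

   which select C[i] where A[i] < B[i] and D[i] elsewhere. If OP0 is
   already a vector-boolean mask, the mask variant above is preferred
   when the target provides a vcond_mask pattern.  */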
5741 rtx
5742 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5743 rtx target)
5745 struct expand_operand ops[6];
5746 enum insn_code icode;
5747 rtx comparison, rtx_op1, rtx_op2;
5748 machine_mode mode = TYPE_MODE (vec_cond_type);
5749 machine_mode cmp_op_mode;
5750 bool unsignedp;
5751 tree op0a, op0b;
5752 enum tree_code tcode;
5754 if (COMPARISON_CLASS_P (op0))
5756 op0a = TREE_OPERAND (op0, 0);
5757 op0b = TREE_OPERAND (op0, 1);
5758 tcode = TREE_CODE (op0);
5760 else
5762 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5763 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5764 != CODE_FOR_nothing)
5765 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5766 op2, target);
5767 /* Fake op0 < 0. */
5768 else
5770 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5771 == MODE_VECTOR_INT);
5772 op0a = op0;
5773 op0b = build_zero_cst (TREE_TYPE (op0));
5774 tcode = LT_EXPR;
5777 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5778 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5781 gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
5782 && known_eq (GET_MODE_NUNITS (mode),
5783 GET_MODE_NUNITS (cmp_op_mode)));
5785 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5786 if (icode == CODE_FOR_nothing)
5788 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5789 icode = get_vcond_eq_icode (mode, cmp_op_mode);
5790 if (icode == CODE_FOR_nothing)
5791 return 0;
5794 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5795 icode, 4);
5796 rtx_op1 = expand_normal (op1);
5797 rtx_op2 = expand_normal (op2);
5799 create_output_operand (&ops[0], target, mode);
5800 create_input_operand (&ops[1], rtx_op1, mode);
5801 create_input_operand (&ops[2], rtx_op2, mode);
5802 create_fixed_operand (&ops[3], comparison);
5803 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5804 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5805 expand_insn (icode, 6, ops);
5806 return ops[0].value;
5809 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
5810 Use TARGET for the result if nonnull and convenient. */
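/* Illustrative example: VEC_SERIES_EXPR <3, 2> on a 4-element vector
   yields { 3, 5, 7, 9 }; in general element I of the result is
   OP0 + I * OP1.  */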
5812 rtx
5813 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
5815 struct expand_operand ops[3];
5816 enum insn_code icode;
5817 machine_mode emode = GET_MODE_INNER (vmode);
5819 icode = direct_optab_handler (vec_series_optab, vmode);
5820 gcc_assert (icode != CODE_FOR_nothing);
5822 create_output_operand (&ops[0], target, vmode);
5823 create_input_operand (&ops[1], op0, emode);
5824 create_input_operand (&ops[2], op1, emode);
5826 expand_insn (icode, 3, ops);
5827 return ops[0].value;
5830 /* Generate insns for a vector comparison into a mask. */
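/* Illustrative example: this expander handles comparisons that produce
   a vector-boolean mask directly, e.g. the GIMPLE statement

     mask = a < b;

   as opposed to a VEC_COND_EXPR that consumes such a comparison.  */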
5832 rtx
5833 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5835 struct expand_operand ops[4];
5836 enum insn_code icode;
5837 rtx comparison;
5838 machine_mode mask_mode = TYPE_MODE (type);
5839 machine_mode vmode;
5840 bool unsignedp;
5841 tree op0a, op0b;
5842 enum tree_code tcode;
5844 op0a = TREE_OPERAND (exp, 0);
5845 op0b = TREE_OPERAND (exp, 1);
5846 tcode = TREE_CODE (exp);
5848 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5849 vmode = TYPE_MODE (TREE_TYPE (op0a));
5851 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5852 if (icode == CODE_FOR_nothing)
5854 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5855 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5856 if (icode == CODE_FOR_nothing)
5857 return 0;
5860 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5861 unsignedp, icode, 2);
5862 create_output_operand (&ops[0], target, mask_mode);
5863 create_fixed_operand (&ops[1], comparison);
5864 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5865 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5866 expand_insn (icode, 4, ops);
5867 return ops[0].value;
5870 /* Expand a highpart multiply. */
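/* Worked example (illustrative) of method 2 on a little-endian V4SI
   target: the even/odd widening multiplies produce the V2DI vectors
   M1 = { A0*B0, A2*B2 } and M2 = { A1*B1, A3*B3 }. Viewed as V4SI,
   the high half of each product sits at the odd positions, so the
   selector { 1, 5, 3, 7 } built below gathers
   { hi(A0*B0), hi(A1*B1), hi(A2*B2), hi(A3*B3) }.  */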
5872 rtx
5873 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5874 rtx target, bool uns_p)
5876 struct expand_operand eops[3];
5877 enum insn_code icode;
5878 int method, i;
5879 machine_mode wmode;
5880 rtx m1, m2;
5881 optab tab1, tab2;
5883 method = can_mult_highpart_p (mode, uns_p);
5884 switch (method)
5886 case 0:
5887 return NULL_RTX;
5888 case 1:
5889 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5890 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5891 OPTAB_LIB_WIDEN);
5892 case 2:
5893 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5894 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5895 break;
5896 case 3:
5897 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5898 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5899 if (BYTES_BIG_ENDIAN)
5900 std::swap (tab1, tab2);
5901 break;
5902 default:
5903 gcc_unreachable ();
5906 icode = optab_handler (tab1, mode);
5907 wmode = insn_data[icode].operand[0].mode;
5908 gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
5909 GET_MODE_NUNITS (mode)));
5910 gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));
5912 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5913 create_input_operand (&eops[1], op0, mode);
5914 create_input_operand (&eops[2], op1, mode);
5915 expand_insn (icode, 3, eops);
5916 m1 = gen_lowpart (mode, eops[0].value);
5918 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5919 create_input_operand (&eops[1], op0, mode);
5920 create_input_operand (&eops[2], op1, mode);
5921 expand_insn (optab_handler (tab2, mode), 3, eops);
5922 m2 = gen_lowpart (mode, eops[0].value);
5924 vec_perm_builder sel;
5925 if (method == 2)
5927 /* The encoding has 2 interleaved stepped patterns. */
5928 sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
5929 for (i = 0; i < 6; ++i)
5930 sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
5931 + ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
5933 else
5935 /* The encoding has a single interleaved stepped pattern. */
5936 sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
5937 for (i = 0; i < 3; ++i)
5938 sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5941 return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
5944 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5945 pattern. */
5947 static void
5948 find_cc_set (rtx x, const_rtx pat, void *data)
5950 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5951 && GET_CODE (pat) == SET)
5953 rtx *p_cc_reg = (rtx *) data;
5954 gcc_assert (!*p_cc_reg);
5955 *p_cc_reg = x;
5959 /* This is a helper function for the other atomic operations. This function
5960 emits a loop that contains SEQ that iterates until a compare-and-swap
5961 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5962 a set of instructions that takes a value from OLD_REG as an input and
5963 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5964 set to the current contents of MEM. After SEQ, a compare-and-swap will
5965 attempt to update MEM with NEW_REG. The function returns true when the
5966 loop was generated successfully. */
5968 static bool
5969 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5971 machine_mode mode = GET_MODE (mem);
5972 rtx_code_label *label;
5973 rtx cmp_reg, success, oldval;
5975 /* The loop we want to generate looks like
5977 cmp_reg = mem;
5978 label:
5979 old_reg = cmp_reg;
5980 seq;
5981 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5982 if (!success)
5983 goto label;
5985 Note that we only do the plain load from memory once. Subsequent
5986 iterations use the value loaded by the compare-and-swap pattern. */
5988 label = gen_label_rtx ();
5989 cmp_reg = gen_reg_rtx (mode);
5991 emit_move_insn (cmp_reg, mem);
5992 emit_label (label);
5993 emit_move_insn (old_reg, cmp_reg);
5994 if (seq)
5995 emit_insn (seq);
5997 success = NULL_RTX;
5998 oldval = cmp_reg;
5999 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
6000 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
6001 MEMMODEL_RELAXED))
6002 return false;
6004 if (oldval != cmp_reg)
6005 emit_move_insn (cmp_reg, oldval);
6007 /* Mark this jump predicted not taken. */
6008 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
6009 GET_MODE (success), 1, label,
6010 profile_probability::guessed_never ());
6011 return true;
6015 /* This function tries to emit an atomic_exchange instruction. VAL is written
6016 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
6017 using TARGET if possible. */
6019 static rtx
6020 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6022 machine_mode mode = GET_MODE (mem);
6023 enum insn_code icode;
6025 /* If the target supports the exchange directly, great. */
6026 icode = direct_optab_handler (atomic_exchange_optab, mode);
6027 if (icode != CODE_FOR_nothing)
6029 struct expand_operand ops[4];
6031 create_output_operand (&ops[0], target, mode);
6032 create_fixed_operand (&ops[1], mem);
6033 create_input_operand (&ops[2], val, mode);
6034 create_integer_operand (&ops[3], model);
6035 if (maybe_expand_insn (icode, 4, ops))
6036 return ops[0].value;
6039 return NULL_RTX;
6042 /* This function tries to implement an atomic exchange operation using
6043 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
6044 The previous contents of *MEM are returned, using TARGET if possible.
6045 Since this instruction is an acquire barrier only, stronger memory
6046 models may require additional barriers to be emitted. */
6048 static rtx
6049 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
6050 enum memmodel model)
6052 machine_mode mode = GET_MODE (mem);
6053 enum insn_code icode;
6054 rtx_insn *last_insn = get_last_insn ();
6056 icode = optab_handler (sync_lock_test_and_set_optab, mode);
6058 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
6059 exists, and the memory model is stronger than acquire, add a release
6060 barrier before the instruction. */
6062 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
6063 expand_mem_thread_fence (model);
6065 if (icode != CODE_FOR_nothing)
6067 struct expand_operand ops[3];
6068 create_output_operand (&ops[0], target, mode);
6069 create_fixed_operand (&ops[1], mem);
6070 create_input_operand (&ops[2], val, mode);
6071 if (maybe_expand_insn (icode, 3, ops))
6072 return ops[0].value;
6075 /* If an external test-and-set libcall is provided, use that instead of
6076 any external compare-and-swap that we might get from the compare-and-
6077 swap-loop expansion later. */
6078 if (!can_compare_and_swap_p (mode, false))
6080 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
6081 if (libfunc != NULL)
6083 rtx addr;
6085 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6086 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6087 mode, addr, ptr_mode,
6088 val, mode);
6092 /* If the test_and_set can't be emitted, eliminate any barrier that might
6093 have been emitted. */
6094 delete_insns_since (last_insn);
6095 return NULL_RTX;
6098 /* This function tries to implement an atomic exchange operation using a
6099 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6100 *MEM are returned, using TARGET if possible. No memory model is required
6101 since a compare_and_swap loop is seq-cst. */
6103 static rtx
6104 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6106 machine_mode mode = GET_MODE (mem);
6108 if (can_compare_and_swap_p (mode, true))
6110 if (!target || !register_operand (target, mode))
6111 target = gen_reg_rtx (mode);
6112 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6113 return target;
6116 return NULL_RTX;
6119 /* This function tries to implement an atomic test-and-set operation
6120 using the atomic_test_and_set instruction pattern. A boolean value
6121 is returned from the operation, using TARGET if possible. */
6123 static rtx
6124 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6126 machine_mode pat_bool_mode;
6127 struct expand_operand ops[3];
6129 if (!targetm.have_atomic_test_and_set ())
6130 return NULL_RTX;
6132 /* While we always get QImode from __atomic_test_and_set, we get
6133 other memory modes from __sync_lock_test_and_set. Note that we
6134 use no endian adjustment here. This matches the 4.6 behavior
6135 in the Sparc backend. */
6136 enum insn_code icode = targetm.code_for_atomic_test_and_set;
6137 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6138 if (GET_MODE (mem) != QImode)
6139 mem = adjust_address_nv (mem, QImode, 0);
6141 pat_bool_mode = insn_data[icode].operand[0].mode;
6142 create_output_operand (&ops[0], target, pat_bool_mode);
6143 create_fixed_operand (&ops[1], mem);
6144 create_integer_operand (&ops[2], model);
6146 if (maybe_expand_insn (icode, 3, ops))
6147 return ops[0].value;
6148 return NULL_RTX;
6151 /* This function expands the legacy __sync_lock_test_and_set operation, which is
6152 generally an atomic exchange. Some limited targets only allow the
6153 constant 1 to be stored. This is an ACQUIRE operation.
6155 TARGET is an optional place to stick the return value.
6156 MEM is where VAL is stored. */
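/* Illustrative example: a call such as

     old = __sync_lock_test_and_set (&lock, 1);

   is typically expanded through this routine; the matching release is
   __sync_lock_release (&lock).  */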
6158 rtx
6159 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6161 rtx ret;
6163 /* Try an atomic_exchange first. */
6164 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6165 if (ret)
6166 return ret;
6168 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6169 MEMMODEL_SYNC_ACQUIRE);
6170 if (ret)
6171 return ret;
6173 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6174 if (ret)
6175 return ret;
6177 /* If there are no other options, try atomic_test_and_set if the value
6178 being stored is 1. */
6179 if (val == const1_rtx)
6180 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6182 return ret;
6185 /* This function expands the atomic test_and_set operation:
6186 atomically store a boolean TRUE into MEM and return the previous value.
6188 MEMMODEL is the memory model variant to use.
6189 TARGET is an optional place to stick the return value. */
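/* Illustrative example: a minimal spin lock built on this operation,
   assuming the usual lowering of the builtin:

     while (__atomic_test_and_set (&flag, __ATOMIC_ACQUIRE))
       continue;
     ... critical section ...
     __atomic_clear (&flag, __ATOMIC_RELEASE);  */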
6191 rtx
6192 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6194 machine_mode mode = GET_MODE (mem);
6195 rtx ret, trueval, subtarget;
6197 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6198 if (ret)
6199 return ret;
6201 /* Be binary compatible with non-default settings of trueval, and different
6202 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6203 another only has atomic-exchange. */
6204 if (targetm.atomic_test_and_set_trueval == 1)
6206 trueval = const1_rtx;
6207 subtarget = target ? target : gen_reg_rtx (mode);
6209 else
6211 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6212 subtarget = gen_reg_rtx (mode);
6215 /* Try the atomic-exchange optab... */
6216 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6218 /* ... then an atomic-compare-and-swap loop ... */
6219 if (!ret)
6220 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6222 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6223 if (!ret)
6224 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6226 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6227 things with the value 1. Thus we try again without trueval. */
6228 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6229 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6231 /* Failing all else, assume a single threaded environment and simply
6232 perform the operation. */
6233 if (!ret)
6235 /* If the result is ignored skip the move to target. */
6236 if (subtarget != const0_rtx)
6237 emit_move_insn (subtarget, mem);
6239 emit_move_insn (mem, trueval);
6240 ret = subtarget;
6243 /* Recall that we have to return a boolean value; rectify if trueval
6244 is not exactly one. */
6245 if (targetm.atomic_test_and_set_trueval != 1)
6246 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6248 return ret;
6251 /* This function expands the atomic exchange operation:
6252 atomically store VAL in MEM and return the previous value in MEM.
6254 MEMMODEL is the memory model variant to use.
6255 TARGET is an optional place to stick the return value. */
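/* Illustrative example: a call such as

     old = __atomic_exchange_n (&x, v, __ATOMIC_SEQ_CST);

   is typically expanded through this routine.  */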
6257 rtx
6258 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6260 machine_mode mode = GET_MODE (mem);
6261 rtx ret;
6263 /* If loads are not atomic for the required size and we are not called to
6264 provide a __sync builtin, do not do anything so that we stay consistent
6265 with atomic loads of the same size. */
6266 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6267 return NULL_RTX;
6269 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6271 /* Next try a compare-and-swap loop for the exchange. */
6272 if (!ret)
6273 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6275 return ret;
6278 /* This function expands the atomic compare exchange operation:
6280 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6281 *PTARGET_OVAL is an optional place to store the old value from memory.
6282 Both target parameters may be NULL or const0_rtx to indicate that we do
6283 not care about that return value. Both target parameters are updated on
6284 success to the actual location of the corresponding result.
6286 MEMMODEL is the memory model variant to use.
6288 The return value of the function is true for success. */
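/* Illustrative example: for a call such as

     ok = __atomic_compare_exchange_n (&x, &expected, desired, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);

   *PTARGET_BOOL typically receives OK, and *PTARGET_OVAL receives the
   old value that the builtin writes back into EXPECTED on failure.  */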
6290 bool
6291 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6292 rtx mem, rtx expected, rtx desired,
6293 bool is_weak, enum memmodel succ_model,
6294 enum memmodel fail_model)
6296 machine_mode mode = GET_MODE (mem);
6297 struct expand_operand ops[8];
6298 enum insn_code icode;
6299 rtx target_oval, target_bool = NULL_RTX;
6300 rtx libfunc;
6302 /* If loads are not atomic for the required size and we are not called to
6303 provide a __sync builtin, do not do anything so that we stay consistent
6304 with atomic loads of the same size. */
6305 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6306 return false;
6308 /* Load expected into a register for the compare and swap. */
6309 if (MEM_P (expected))
6310 expected = copy_to_reg (expected);
6312 /* Make sure we always have some place to put the return oldval.
6313 Further, make sure that place is distinct from the input expected,
6314 just in case we need that path down below. */
6315 if (ptarget_oval && *ptarget_oval == const0_rtx)
6316 ptarget_oval = NULL;
6318 if (ptarget_oval == NULL
6319 || (target_oval = *ptarget_oval) == NULL
6320 || reg_overlap_mentioned_p (expected, target_oval))
6321 target_oval = gen_reg_rtx (mode);
6323 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6324 if (icode != CODE_FOR_nothing)
6326 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6328 if (ptarget_bool && *ptarget_bool == const0_rtx)
6329 ptarget_bool = NULL;
6331 /* Make sure we always have a place for the bool operand. */
6332 if (ptarget_bool == NULL
6333 || (target_bool = *ptarget_bool) == NULL
6334 || GET_MODE (target_bool) != bool_mode)
6335 target_bool = gen_reg_rtx (bool_mode);
6337 /* Emit the compare_and_swap. */
6338 create_output_operand (&ops[0], target_bool, bool_mode);
6339 create_output_operand (&ops[1], target_oval, mode);
6340 create_fixed_operand (&ops[2], mem);
6341 create_input_operand (&ops[3], expected, mode);
6342 create_input_operand (&ops[4], desired, mode);
6343 create_integer_operand (&ops[5], is_weak);
6344 create_integer_operand (&ops[6], succ_model);
6345 create_integer_operand (&ops[7], fail_model);
6346 if (maybe_expand_insn (icode, 8, ops))
6348 /* Return success/failure. */
6349 target_bool = ops[0].value;
6350 target_oval = ops[1].value;
6351 goto success;
6355 /* Otherwise fall back to the original __sync_val_compare_and_swap
6356 which is always seq-cst. */
6357 icode = optab_handler (sync_compare_and_swap_optab, mode);
6358 if (icode != CODE_FOR_nothing)
6360 rtx cc_reg;
6362 create_output_operand (&ops[0], target_oval, mode);
6363 create_fixed_operand (&ops[1], mem);
6364 create_input_operand (&ops[2], expected, mode);
6365 create_input_operand (&ops[3], desired, mode);
6366 if (!maybe_expand_insn (icode, 4, ops))
6367 return false;
6369 target_oval = ops[0].value;
6371 /* If the caller isn't interested in the boolean return value,
6372 skip the computation of it. */
6373 if (ptarget_bool == NULL)
6374 goto success;
6376 /* Otherwise, work out if the compare-and-swap succeeded. */
6377 cc_reg = NULL_RTX;
6378 if (have_insn_for (COMPARE, CCmode))
6379 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6380 if (cc_reg)
6382 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6383 const0_rtx, VOIDmode, 0, 1);
6384 goto success;
6386 goto success_bool_from_val;
6389 /* Also check for library support for __sync_val_compare_and_swap. */
6390 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6391 if (libfunc != NULL)
6393 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6394 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6395 mode, addr, ptr_mode,
6396 expected, mode, desired, mode);
6397 emit_move_insn (target_oval, target);
6399 /* Compute the boolean return value only if requested. */
6400 if (ptarget_bool)
6401 goto success_bool_from_val;
6402 else
6403 goto success;
6406 /* Failure. */
6407 return false;
6409 success_bool_from_val:
6410 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6411 expected, VOIDmode, 1, 1);
6412 success:
6413 /* Make sure that the oval output winds up where the caller asked. */
6414 if (ptarget_oval)
6415 *ptarget_oval = target_oval;
6416 if (ptarget_bool)
6417 *ptarget_bool = target_bool;
6418 return true;
6421 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6423 static void
6424 expand_asm_memory_blockage (void)
6426 rtx asm_op, clob;
6428 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6429 rtvec_alloc (0), rtvec_alloc (0),
6430 rtvec_alloc (0), UNKNOWN_LOCATION);
6431 MEM_VOLATILE_P (asm_op) = 1;
6433 clob = gen_rtx_SCRATCH (VOIDmode);
6434 clob = gen_rtx_MEM (BLKmode, clob);
6435 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6437 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6440 /* Do not propagate memory accesses across this point. */
6442 static void
6443 expand_memory_blockage (void)
6445 if (targetm.have_memory_blockage ())
6446 emit_insn (targetm.gen_memory_blockage ());
6447 else
6448 expand_asm_memory_blockage ();
6451 /* This routine will either emit the mem_thread_fence pattern or issue a
6452 sync_synchronize to generate a fence for memory model MEMMODEL. */
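/* Illustrative example: __atomic_thread_fence (__ATOMIC_SEQ_CST)
   typically reaches this routine; for a relaxed model no code is
   emitted at all.  */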
6454 void
6455 expand_mem_thread_fence (enum memmodel model)
6457 if (is_mm_relaxed (model))
6458 return;
6459 if (targetm.have_mem_thread_fence ())
6461 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6462 expand_memory_blockage ();
6464 else if (targetm.have_memory_barrier ())
6465 emit_insn (targetm.gen_memory_barrier ());
6466 else if (synchronize_libfunc != NULL_RTX)
6467 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
6468 else
6469 expand_memory_blockage ();
6472 /* Emit a signal fence with given memory model. */
6474 void
6475 expand_mem_signal_fence (enum memmodel model)
6477 /* No machine barrier is required to implement a signal fence, but
6478 a compiler memory barrier must be issued, except for relaxed MM. */
6479 if (!is_mm_relaxed (model))
6480 expand_memory_blockage ();
6483 /* This function expands the atomic load operation:
6484 return the atomically loaded value in MEM.
6486 MEMMODEL is the memory model variant to use.
6487 TARGET is an optional place to stick the return value. */
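/* Illustrative example: a call such as

     v = __atomic_load_n (&x, __ATOMIC_ACQUIRE);

   is typically expanded through this routine; note below that loads
   wider than a word are deliberately left to the runtime library.  */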
6489 rtx
6490 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6492 machine_mode mode = GET_MODE (mem);
6493 enum insn_code icode;
6495 /* If the target supports the load directly, great. */
6496 icode = direct_optab_handler (atomic_load_optab, mode);
6497 if (icode != CODE_FOR_nothing)
6499 struct expand_operand ops[3];
6500 rtx_insn *last = get_last_insn ();
6501 if (is_mm_seq_cst (model))
6502 expand_memory_blockage ();
6504 create_output_operand (&ops[0], target, mode);
6505 create_fixed_operand (&ops[1], mem);
6506 create_integer_operand (&ops[2], model);
6507 if (maybe_expand_insn (icode, 3, ops))
6509 if (!is_mm_relaxed (model))
6510 expand_memory_blockage ();
6511 return ops[0].value;
6513 delete_insns_since (last);
6516 /* If the size of the object is greater than word size on this target,
6517 then we assume that a load will not be atomic. We could try to
6518 emulate a load with a compare-and-swap operation, but the store that
6519 such an emulation would perform would be incorrect if this is a
6520 volatile atomic load or if it targets read-only-mapped memory. */
6521 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
6522 /* If there is no atomic load, leave the library call. */
6523 return NULL_RTX;
6525 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6526 if (!target || target == const0_rtx)
6527 target = gen_reg_rtx (mode);
6529 /* For SEQ_CST, emit a barrier before the load. */
6530 if (is_mm_seq_cst (model))
6531 expand_mem_thread_fence (model);
6533 emit_move_insn (target, mem);
6535 /* Emit the appropriate barrier after the load. */
6536 expand_mem_thread_fence (model);
6538 return target;
6541 /* This function expands the atomic store operation:
6542 Atomically store VAL in MEM.
6543 MEMMODEL is the memory model variant to use.
6544 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6545 The function returns const0_rtx if a pattern was emitted. */
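/* Illustrative example: a call such as

     __atomic_store_n (&x, 0, __ATOMIC_RELEASE);

   is typically expanded through this routine; a seq-cst store gets an
   additional fence after the move in the fallback path below.  */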
6547 rtx
6548 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6550 machine_mode mode = GET_MODE (mem);
6551 enum insn_code icode;
6552 struct expand_operand ops[3];
6554 /* If the target supports the store directly, great. */
6555 icode = direct_optab_handler (atomic_store_optab, mode);
6556 if (icode != CODE_FOR_nothing)
6558 rtx_insn *last = get_last_insn ();
6559 if (!is_mm_relaxed (model))
6560 expand_memory_blockage ();
6561 create_fixed_operand (&ops[0], mem);
6562 create_input_operand (&ops[1], val, mode);
6563 create_integer_operand (&ops[2], model);
6564 if (maybe_expand_insn (icode, 3, ops))
6566 if (is_mm_seq_cst (model))
6567 expand_memory_blockage ();
6568 return const0_rtx;
6570 delete_insns_since (last);
6573 /* If using __sync_lock_release is a viable alternative, try it.
6574 Note that this will not be set to true if we are expanding a generic
6575 __atomic_store_n. */
6576 if (use_release)
6578 icode = direct_optab_handler (sync_lock_release_optab, mode);
6579 if (icode != CODE_FOR_nothing)
6581 create_fixed_operand (&ops[0], mem);
6582 create_input_operand (&ops[1], const0_rtx, mode);
6583 if (maybe_expand_insn (icode, 2, ops))
6585 /* lock_release is only a release barrier. */
6586 if (is_mm_seq_cst (model))
6587 expand_mem_thread_fence (model);
6588 return const0_rtx;
6593 /* If the size of the object is greater than word size on this target,
6594 a default store will not be atomic. */
6595 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
6597 /* If loads are atomic or we are called to provide a __sync builtin,
6598 we can try an atomic_exchange and throw away the result. Otherwise,
6599 don't do anything so that we do not create an inconsistency between
6600 loads and stores. */
6601 if (can_atomic_load_p (mode) || is_mm_sync (model))
6603 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6604 if (!target)
6605 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6606 val);
6607 if (target)
6608 return const0_rtx;
6610 return NULL_RTX;
6613 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6614 expand_mem_thread_fence (model);
6616 emit_move_insn (mem, val);
6618 /* For SEQ_CST, also emit a barrier after the store. */
6619 if (is_mm_seq_cst (model))
6620 expand_mem_thread_fence (model);
6622 return const0_rtx;
6626 /* Structure containing the pointers and values required to process the
6627 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6629 struct atomic_op_functions
6631 direct_optab mem_fetch_before;
6632 direct_optab mem_fetch_after;
6633 direct_optab mem_no_result;
6634 optab fetch_before;
6635 optab fetch_after;
6636 direct_optab no_result;
6637 enum rtx_code reverse_code;
6641 /* Fill in structure pointed to by OP with the various optab entries for an
6642 operation of type CODE. */
6644 static void
6645 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6647 gcc_assert (op != NULL);
6649 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6650 in the source code during compilation, and the optab entries are not
6651 computable until runtime. Fill in the values at runtime. */
6652 switch (code)
6654 case PLUS:
6655 op->mem_fetch_before = atomic_fetch_add_optab;
6656 op->mem_fetch_after = atomic_add_fetch_optab;
6657 op->mem_no_result = atomic_add_optab;
6658 op->fetch_before = sync_old_add_optab;
6659 op->fetch_after = sync_new_add_optab;
6660 op->no_result = sync_add_optab;
6661 op->reverse_code = MINUS;
6662 break;
6663 case MINUS:
6664 op->mem_fetch_before = atomic_fetch_sub_optab;
6665 op->mem_fetch_after = atomic_sub_fetch_optab;
6666 op->mem_no_result = atomic_sub_optab;
6667 op->fetch_before = sync_old_sub_optab;
6668 op->fetch_after = sync_new_sub_optab;
6669 op->no_result = sync_sub_optab;
6670 op->reverse_code = PLUS;
6671 break;
6672 case XOR:
6673 op->mem_fetch_before = atomic_fetch_xor_optab;
6674 op->mem_fetch_after = atomic_xor_fetch_optab;
6675 op->mem_no_result = atomic_xor_optab;
6676 op->fetch_before = sync_old_xor_optab;
6677 op->fetch_after = sync_new_xor_optab;
6678 op->no_result = sync_xor_optab;
6679 op->reverse_code = XOR;
6680 break;
6681 case AND:
6682 op->mem_fetch_before = atomic_fetch_and_optab;
6683 op->mem_fetch_after = atomic_and_fetch_optab;
6684 op->mem_no_result = atomic_and_optab;
6685 op->fetch_before = sync_old_and_optab;
6686 op->fetch_after = sync_new_and_optab;
6687 op->no_result = sync_and_optab;
6688 op->reverse_code = UNKNOWN;
6689 break;
6690 case IOR:
6691 op->mem_fetch_before = atomic_fetch_or_optab;
6692 op->mem_fetch_after = atomic_or_fetch_optab;
6693 op->mem_no_result = atomic_or_optab;
6694 op->fetch_before = sync_old_ior_optab;
6695 op->fetch_after = sync_new_ior_optab;
6696 op->no_result = sync_ior_optab;
6697 op->reverse_code = UNKNOWN;
6698 break;
6699 case NOT:
6700 op->mem_fetch_before = atomic_fetch_nand_optab;
6701 op->mem_fetch_after = atomic_nand_fetch_optab;
6702 op->mem_no_result = atomic_nand_optab;
6703 op->fetch_before = sync_old_nand_optab;
6704 op->fetch_after = sync_new_nand_optab;
6705 op->no_result = sync_nand_optab;
6706 op->reverse_code = UNKNOWN;
6707 break;
6708 default:
6709 gcc_unreachable ();
6713 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
6714 using memory order MODEL. If AFTER is true the operation needs to return
6715 the value of *MEM after the operation, otherwise the previous value.
6716 TARGET is an optional place to store the result. The result is unused if
6717 it is const0_rtx.
6718 Return the result if there is a better sequence, otherwise NULL_RTX. */
6720 static rtx
6721 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6722 enum memmodel model, bool after)
6724 /* If the value is prefetched, or not used, it may be possible to replace
6725 the sequence with a native exchange operation. */
6726 if (!after || target == const0_rtx)
6728 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6729 if (code == AND && val == const0_rtx)
6731 if (target == const0_rtx)
6732 target = gen_reg_rtx (GET_MODE (mem));
6733 return maybe_emit_atomic_exchange (target, mem, val, model);
6736 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6737 if (code == IOR && val == constm1_rtx)
6739 if (target == const0_rtx)
6740 target = gen_reg_rtx (GET_MODE (mem));
6741 return maybe_emit_atomic_exchange (target, mem, val, model);
6745 return NULL_RTX;
6748 /* Try to emit an instruction for a specific operation variation.
6749 OPTAB contains the OP functions.
6750 TARGET is an optional place to return the result. const0_rtx means unused.
6751 MEM is the memory location to operate on.
6752 VAL is the value to use in the operation.
6753 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6754 MODEL is the memory model, if used.
6755 AFTER is true if the returned result is the value after the operation. */
6757 static rtx
6758 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6759 rtx val, bool use_memmodel, enum memmodel model, bool after)
6761 machine_mode mode = GET_MODE (mem);
6762 struct expand_operand ops[4];
6763 enum insn_code icode;
6764 int op_counter = 0;
6765 int num_ops;
6767 /* Check to see if there is a result returned. */
6768 if (target == const0_rtx)
6770 if (use_memmodel)
6772 icode = direct_optab_handler (optab->mem_no_result, mode);
6773 create_integer_operand (&ops[2], model);
6774 num_ops = 3;
6776 else
6778 icode = direct_optab_handler (optab->no_result, mode);
6779 num_ops = 2;
6782 /* Otherwise, we need to generate a result. */
6783 else
6785 if (use_memmodel)
6787 icode = direct_optab_handler (after ? optab->mem_fetch_after
6788 : optab->mem_fetch_before, mode);
6789 create_integer_operand (&ops[3], model);
6790 num_ops = 4;
6792 else
6794 icode = optab_handler (after ? optab->fetch_after
6795 : optab->fetch_before, mode);
6796 num_ops = 3;
6798 create_output_operand (&ops[op_counter++], target, mode);
6800 if (icode == CODE_FOR_nothing)
6801 return NULL_RTX;
6803 create_fixed_operand (&ops[op_counter++], mem);
6804 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6805 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6807 if (maybe_expand_insn (icode, num_ops, ops))
6808 return (target == const0_rtx ? const0_rtx : ops[0].value);
6810 return NULL_RTX;
6814 /* This function expands an atomic fetch_OP or OP_fetch operation:
6815 TARGET is an optional place to stick the return value. const0_rtx indicates
6816 the result is unused.
6817 atomically fetch MEM, perform the operation with VAL and return it to MEM.
6818 CODE is the operation being performed (OP)
6819 MEMMODEL is the memory model variant to use.
6820 AFTER is true to return the result of the operation (OP_fetch).
6821 AFTER is false to return the value before the operation (fetch_OP).
6823 This function will *only* generate instructions if there is a direct
6824 optab. No compare and swap loops or libcalls will be generated. */
6826 static rtx
6827 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6828 enum rtx_code code, enum memmodel model,
6829 bool after)
6831 machine_mode mode = GET_MODE (mem);
6832 struct atomic_op_functions optab;
6833 rtx result;
6834 bool unused_result = (target == const0_rtx);
6836 get_atomic_op_for_code (&optab, code);
6838 /* Check to see if there are any better instructions. */
6839 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6840 if (result)
6841 return result;
6843 /* Check for the case where the result isn't used and try those patterns. */
6844 if (unused_result)
6846 /* Try the memory model variant first. */
6847 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6848 if (result)
6849 return result;
6851 /* Next try the old style without a memory model. */
6852 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6853 if (result)
6854 return result;
6856 /* There is no no-result pattern, so try patterns with a result. */
6857 target = NULL_RTX;
6860 /* Try the __atomic version. */
6861 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6862 if (result)
6863 return result;
6865 /* Try the older __sync version. */
6866 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6867 if (result)
6868 return result;
6870 /* If the fetch value can be calculated from the other variation of fetch,
6871 try that operation. */
6872 if (after || unused_result || optab.reverse_code != UNKNOWN)
6874 /* Try the __atomic version, then the older __sync version. */
6875 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6876 if (!result)
6877 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6879 if (result)
6881 /* If the result isn't used, no need to do compensation code. */
6882 if (unused_result)
6883 return result;
6885 /* Issue compensation code. Fetch_after == fetch_before OP val.
6886 Fetch_before == after REVERSE_OP val. */
6887 if (!after)
6888 code = optab.reverse_code;
6889 if (code == NOT)
6891 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6892 true, OPTAB_LIB_WIDEN);
6893 result = expand_simple_unop (mode, NOT, result, target, true);
6895 else
6896 result = expand_simple_binop (mode, code, result, val, target,
6897 true, OPTAB_LIB_WIDEN);
6898 return result;
6902 /* No direct opcode can be generated. */
6903 return NULL_RTX;
6908 /* This function expands an atomic fetch_OP or OP_fetch operation:
6909 TARGET is an optional place to stick the return value. const0_rtx indicates
6910 the result is unused.
6911 atomically fetch MEM, perform the operation with VAL and return it to MEM.
6912 CODE is the operation being performed (OP)
6913 MEMMODEL is the memory model variant to use.
6914 AFTER is true to return the result of the operation (OP_fetch).
6915 AFTER is false to return the value before the operation (fetch_OP). */
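/* Illustrative examples, assuming the usual builtin lowering:

     old = __atomic_fetch_add (&x, n, __ATOMIC_SEQ_CST);  (fetch_OP)
     upd = __atomic_add_fetch (&x, n, __ATOMIC_SEQ_CST);  (OP_fetch)

   The fallback paths below exploit two identities: compensation, e.g.
   add_fetch (mem, val) == fetch_add (mem, val) + val, and reversal by
   negation, e.g. fetch_sub (mem, val) == fetch_add (mem, -val).  */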
6916 rtx
6917 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6918 enum memmodel model, bool after)
6920 machine_mode mode = GET_MODE (mem);
6921 rtx result;
6922 bool unused_result = (target == const0_rtx);
6924 /* If loads are not atomic for the required size and we are not called to
6925 provide a __sync builtin, do not do anything so that we stay consistent
6926 with atomic loads of the same size. */
6927 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6928 return NULL_RTX;
6930 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6931 after);
6933 if (result)
6934 return result;
6936 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6937 if (code == PLUS || code == MINUS)
6939 rtx tmp;
6940 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6942 start_sequence ();
6943 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6944 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6945 model, after);
6946 if (result)
6948 /* The reverse operation worked, so emit the insns and return. */
6949 tmp = get_insns ();
6950 end_sequence ();
6951 emit_insn (tmp);
6952 return result;
6955 /* The reversed operation failed, so throw away the negation code and continue. */
6956 end_sequence ();
6959 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6960 if (!can_compare_and_swap_p (mode, false))
6962 rtx libfunc;
6963 bool fixup = false;
6964 enum rtx_code orig_code = code;
6965 struct atomic_op_functions optab;
6967 get_atomic_op_for_code (&optab, code);
6968 libfunc = optab_libfunc (after ? optab.fetch_after
6969 : optab.fetch_before, mode);
6970 if (libfunc == NULL
6971 && (after || unused_result || optab.reverse_code != UNKNOWN))
6973 fixup = true;
6974 if (!after)
6975 code = optab.reverse_code;
6976 libfunc = optab_libfunc (after ? optab.fetch_before
6977 : optab.fetch_after, mode);
6979 if (libfunc != NULL)
6981 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6982 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6983 addr, ptr_mode, val, mode);
6985 if (!unused_result && fixup)
6986 result = expand_simple_binop (mode, code, result, val, target,
6987 true, OPTAB_LIB_WIDEN);
6988 return result;
6991 /* We need the original code for any further attempts. */
6992 code = orig_code;
6995 /* If nothing else has succeeded, default to a compare and swap loop. */
6996 if (can_compare_and_swap_p (mode, true))
6998 rtx_insn *insn;
6999 rtx t0 = gen_reg_rtx (mode), t1;
7001 start_sequence ();
7003 /* If the result is used, get a register for it. */
7004 if (!unused_result)
7006 if (!target || !register_operand (target, mode))
7007 target = gen_reg_rtx (mode);
7008 /* If fetch_before, copy the value now. */
7009 if (!after)
7010 emit_move_insn (target, t0);
7012 else
7013 target = const0_rtx;
7015 t1 = t0;
7016 if (code == NOT)
7018 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7019 true, OPTAB_LIB_WIDEN);
7020 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7022 else
7023 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
7024 OPTAB_LIB_WIDEN);
7026 /* For after, copy the value now. */
7027 if (!unused_result && after)
7028 emit_move_insn (target, t1);
7029 insn = get_insns ();
7030 end_sequence ();
7032 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7033 return target;
7036 return NULL_RTX;
7039 /* Return true if OPERAND is suitable for operand number OPNO of
7040 instruction ICODE. */
7042 bool
7043 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7045 return (!insn_data[(int) icode].operand[opno].predicate
7046 || (insn_data[(int) icode].operand[opno].predicate
7047 (operand, insn_data[(int) icode].operand[opno].mode)));
7050 /* TARGET is a target of a multiword operation that we are going to
7051 implement as a series of word-mode operations. Return true if
7052 TARGET is suitable for this purpose. */
7054 bool
7055 valid_multiword_target_p (rtx target)
7057 machine_mode mode;
7058 int i, size;
7060 mode = GET_MODE (target);
7061 if (!GET_MODE_SIZE (mode).is_constant (&size))
7062 return false;
7063 for (i = 0; i < size; i += UNITS_PER_WORD)
7064 if (!validate_subreg (word_mode, mode, target, i))
7065 return false;
7066 return true;
7069 /* Make OP describe an input operand that has value INTVAL and that has
7070 no inherent mode. This function should only be used for operands that
7071 are always expand-time constants. The backend may request that INTVAL
7072 be copied into a different kind of rtx, but it must specify the mode
7073 of that rtx if so. */
7075 void
7076 create_integer_operand (struct expand_operand *op, poly_int64 intval)
7078 create_expand_operand (op, EXPAND_INTEGER,
7079 gen_int_mode (intval, MAX_MODE_INT),
7080 VOIDmode, false, intval);
7083 /* Like maybe_legitimize_operand, but do not change the code of the
7084 current rtx value. */
7086 static bool
7087 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
7088 struct expand_operand *op)
7090 /* See if the operand matches in its current form. */
7091 if (insn_operand_matches (icode, opno, op->value))
7092 return true;
7094 /* If the operand is a memory whose address has no side effects,
7095 try forcing the address into a non-virtual pseudo register.
7096 The check for side effects is important because copy_to_mode_reg
7097 cannot handle things like auto-modified addresses. */
7098 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
7100 rtx addr, mem;
7102 mem = op->value;
7103 addr = XEXP (mem, 0);
7104 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
7105 && !side_effects_p (addr))
7107 rtx_insn *last;
7108 machine_mode mode;
7110 last = get_last_insn ();
7111 mode = get_address_mode (mem);
7112 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7113 if (insn_operand_matches (icode, opno, mem))
7115 op->value = mem;
7116 return true;
7118 delete_insns_since (last);
7122 return false;
7125 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7126 on success, storing the new operand value back in OP. */
7128 static bool
7129 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7130 struct expand_operand *op)
7132 machine_mode mode, imode;
7133 bool old_volatile_ok, result;
7135 mode = op->mode;
7136 switch (op->type)
7138 case EXPAND_FIXED:
7139 old_volatile_ok = volatile_ok;
7140 volatile_ok = true;
7141 result = maybe_legitimize_operand_same_code (icode, opno, op);
7142 volatile_ok = old_volatile_ok;
7143 return result;
7145 case EXPAND_OUTPUT:
7146 gcc_assert (mode != VOIDmode);
7147 if (op->value
7148 && op->value != const0_rtx
7149 && GET_MODE (op->value) == mode
7150 && maybe_legitimize_operand_same_code (icode, opno, op))
7151 return true;
7153 op->value = gen_reg_rtx (mode);
7154 op->target = 0;
7155 break;
7157 case EXPAND_INPUT:
7158 input:
7159 gcc_assert (mode != VOIDmode);
7160 gcc_assert (GET_MODE (op->value) == VOIDmode
7161 || GET_MODE (op->value) == mode);
7162 if (maybe_legitimize_operand_same_code (icode, opno, op))
7163 return true;
7165 op->value = copy_to_mode_reg (mode, op->value);
7166 break;
7168 case EXPAND_CONVERT_TO:
7169 gcc_assert (mode != VOIDmode);
7170 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7171 goto input;
7173 case EXPAND_CONVERT_FROM:
7174 if (GET_MODE (op->value) != VOIDmode)
7175 mode = GET_MODE (op->value);
7176 else
7177 /* The caller must tell us what mode this value has. */
7178 gcc_assert (mode != VOIDmode);
7180 imode = insn_data[(int) icode].operand[opno].mode;
7181 if (imode != VOIDmode && imode != mode)
7183 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7184 mode = imode;
7186 goto input;
7188 case EXPAND_ADDRESS:
7189 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7190 op->value);
7191 goto input;
7193 case EXPAND_INTEGER:
7194 mode = insn_data[(int) icode].operand[opno].mode;
7195 if (mode != VOIDmode
7196 && known_eq (trunc_int_for_mode (op->int_value, mode),
7197 op->int_value))
7199 op->value = gen_int_mode (op->int_value, mode);
7200 goto input;
7202 break;
7204 return insn_operand_matches (icode, opno, op->value);
7207 /* Make OP describe an input operand that should have the same value
7208 as VALUE, after any mode conversion that the target might request.
7209 TYPE is the type of VALUE. */
7211 void
7212 create_convert_operand_from_type (struct expand_operand *op,
7213 rtx value, tree type)
7215 create_convert_operand_from (op, value, TYPE_MODE (type),
7216 TYPE_UNSIGNED (type));
7219 /* Return true if the requirements on operands OP1 and OP2 of instruction
7220 ICODE are similar enough for the result of legitimizing OP1 to be
7221 reusable for OP2. OPNO1 and OPNO2 are the operand numbers associated
7222 with OP1 and OP2 respectively. */
7224 static inline bool
7225 can_reuse_operands_p (enum insn_code icode,
7226 unsigned int opno1, unsigned int opno2,
7227 const struct expand_operand *op1,
7228 const struct expand_operand *op2)
7230 /* Check requirements that are common to all types. */
7231 if (op1->type != op2->type
7232 || op1->mode != op2->mode
7233 || (insn_data[(int) icode].operand[opno1].mode
7234 != insn_data[(int) icode].operand[opno2].mode))
7235 return false;
7237 /* Check the requirements for specific types. */
7238 switch (op1->type)
7240 case EXPAND_OUTPUT:
7241 /* Outputs must remain distinct. */
7242 return false;
7244 case EXPAND_FIXED:
7245 case EXPAND_INPUT:
7246 case EXPAND_ADDRESS:
7247 case EXPAND_INTEGER:
7248 return true;
7250 case EXPAND_CONVERT_TO:
7251 case EXPAND_CONVERT_FROM:
7252 return op1->unsigned_p == op2->unsigned_p;
7254 gcc_unreachable ();
7257 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7258 of instruction ICODE. Return true on success, leaving the new operand
7259 values in the OPS themselves. Emit no code on failure. */
7261 bool
7262 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7263 unsigned int nops, struct expand_operand *ops)
7265 rtx_insn *last = get_last_insn ();
7266 rtx *orig_values = XALLOCAVEC (rtx, nops);
7267 for (unsigned int i = 0; i < nops; i++)
7269 orig_values[i] = ops[i].value;
7271 /* First try reusing the result of an earlier legitimization.
7272 This avoids duplicate rtl and ensures that tied operands
7273 remain tied.
7275 This search is linear, but NOPS is bounded at compile time
7276 to a small number (currently a single digit). */
7277 unsigned int j = 0;
7278 for (; j < i; ++j)
7279 if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
7280 && rtx_equal_p (orig_values[j], orig_values[i])
7281 && ops[j].value
7282 && insn_operand_matches (icode, opno + i, ops[j].value))
7284 ops[i].value = copy_rtx (ops[j].value);
7285 break;
7288 /* Otherwise try legitimizing the operand on its own. */
7289 if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
7291 delete_insns_since (last);
7292 return false;
7295 return true;
7298 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7299 as its operands. Return the instruction pattern on success,
7300 and emit any necessary set-up code. Return null and emit no
7301 code on failure. */
7303 rtx_insn *
7304 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7305 struct expand_operand *ops)
7307 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7308 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7309 return NULL;
7311 switch (nops)
7313 case 1:
7314 return GEN_FCN (icode) (ops[0].value);
7315 case 2:
7316 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7317 case 3:
7318 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7319 case 4:
7320 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7321 ops[3].value);
7322 case 5:
7323 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7324 ops[3].value, ops[4].value);
7325 case 6:
7326 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7327 ops[3].value, ops[4].value, ops[5].value);
7328 case 7:
7329 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7330 ops[3].value, ops[4].value, ops[5].value,
7331 ops[6].value);
7332 case 8:
7333 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7334 ops[3].value, ops[4].value, ops[5].value,
7335 ops[6].value, ops[7].value);
7336 case 9:
7337 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7338 ops[3].value, ops[4].value, ops[5].value,
7339 ops[6].value, ops[7].value, ops[8].value);
7341 gcc_unreachable ();
7344 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7345 as its operands. Return true on success and emit no code on failure. */
7347 bool
7348 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7349 struct expand_operand *ops)
7351 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7352 if (pat)
7354 emit_insn (pat);
7355 return true;
7357 return false;
7360 /* Like maybe_expand_insn, but for jumps. */
7362 bool
7363 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7364 struct expand_operand *ops)
7366 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7367 if (pat)
7369 emit_jump_insn (pat);
7370 return true;
7372 return false;
7375 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7376 as its operands. */
7378 void
7379 expand_insn (enum insn_code icode, unsigned int nops,
7380 struct expand_operand *ops)
7382 if (!maybe_expand_insn (icode, nops, ops))
7383 gcc_unreachable ();
7386 /* Like expand_insn, but for jumps. */
7388 void
7389 expand_jump_insn (enum insn_code icode, unsigned int nops,
7390 struct expand_operand *ops)
7392 if (!maybe_expand_jump_insn (icode, nops, ops))
7393 gcc_unreachable ();