/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "predict.h"
30 #include "tm_p.h"
31 #include "optabs.h"
32 #include "expmed.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "rtx-vector-builder.h"
38 /* Include insn-config.h before expr.h so that HAVE_conditional_move
39 is properly defined. */
40 #include "stor-layout.h"
41 #include "except.h"
42 #include "dojump.h"
43 #include "explow.h"
44 #include "expr.h"
45 #include "optabs-tree.h"
46 #include "libfuncs.h"
47 #include "internal-fn.h"
48 #include "langhooks.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).  OP0_MODE is OP0's mode.

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
		rtx op1, machine_mode op0_mode)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
	  {
	    note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
	    if (GET_MODE_UNIT_SIZE (op0_mode)
		> GET_MODE_UNIT_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, op0_mode);
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, op0_mode);
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
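/* Illustrative example, not from the code above: for a multi-insn
   expansion of t = a << b in SImode, a successful call would leave the
   last insn that sets t carrying a note of the form

       (expr_list:REG_EQUAL (ashift:SI (reg:SI a) (reg:SI b)) ...)

   which later passes such as CSE can use as a compact summary of the
   whole sequence.  The exact register numbers are hypothetical.  */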
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  class expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;
  bool sbool = false;

  oprnd0 = ops->op0;
  if (nops >= 2)
    oprnd1 = ops->op1;
  if (nops >= 3)
    oprnd2 = ops->op2;

  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
      || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
    /* The sign is from the result type rather than operand's type
       for these ops.  */
    widen_pattern_optab
      = optab_for_tree_code (ops->code, ops->type, optab_default);
  else if ((ops->code == VEC_UNPACK_HI_EXPR
	    || ops->code == VEC_UNPACK_LO_EXPR)
	   && VECTOR_BOOLEAN_TYPE_P (ops->type)
	   && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
	   && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
	   && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
    {
      /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
	 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
	 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
	 the pattern number of elements in the wider vector.  */
      widen_pattern_optab
	= (ops->code == VEC_UNPACK_HI_EXPR
	   ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
      sbool = true;
    }
  else if (ops->code == DOT_PROD_EXPR)
    {
      enum optab_subtype subtype = optab_default;
      signop sign1 = TYPE_SIGN (TREE_TYPE (oprnd0));
      signop sign2 = TYPE_SIGN (TREE_TYPE (oprnd1));
      if (sign1 == sign2)
	;
      else if (sign1 == SIGNED && sign2 == UNSIGNED)
	{
	  subtype = optab_vector_mixed_sign;
	  /* Same as optab_vector_mixed_sign but flip the operands.  */
	  std::swap (op0, op1);
	}
      else if (sign1 == UNSIGNED && sign2 == SIGNED)
	subtype = optab_vector_mixed_sign;
      else
	gcc_unreachable ();

      widen_pattern_optab
	= optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), subtype);
    }
  else
    widen_pattern_optab
      = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
  else if (sbool)
    {
      nops = 2;
      op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
      tmode1 = tmode0;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
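/* Illustrative sketch, not from the code above: in scalar terms, a
   class-2 pattern such as widening-dot-product behaves roughly like

       wide res = wide_op;                    // oprnd2, already wide
       for (i = 0; i < n; i++)
	 res += (wide) op0[i] * (wide) op1[i];

   i.e. only the final operand and the result use the wider mode, while
   all other operands stay narrow.  The loop form is a simplification of
   the actual vector semantics.  */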
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  class expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab, outof_input,
				 gen_int_shift_amount (word_mode,
						       BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
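/* Illustrative sketch, not part of GCC: the superword case above for a
   64-bit arithmetic right shift built from 32-bit words, where the
   effective count N is known to be in [32, 64) and SUPERWORD_OP1 is
   N - 32.  Assumes <stdint.h> types and arithmetic behavior of >> on
   signed values.  */
#if 0
static void
sketch_superword_ashr (uint32_t hi, unsigned n,
		       uint32_t *out_hi, uint32_t *out_lo)
{
  /* INTO_TARGET <- OUTOF_INPUT shifted by SUPERWORD_OP1.  */
  *out_lo = (uint32_t) ((int32_t) hi >> (n - 32));
  /* OUTOF_TARGET is filled with copies of the sign bit
     (it would simply be zero for a logical shift).  */
  *out_hi = (uint32_t) ((int32_t) hi >> 31);
}
#endif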
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
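/* Illustrative sketch, not part of GCC: the subword case above for a
   64-bit logical left shift with 0 < N < 32, including the ~OP1 trick
   used when word-mode shift counts are truncated to the mode size.
   Assumes <stdint.h> types.  */
#if 0
static void
sketch_subword_shl (uint32_t hi, uint32_t lo, unsigned n,
		    uint32_t *out_hi, uint32_t *out_lo)
{
  /* CARRIES = the high bits of LO that move into HI.  Shifting by
     32 - n directly is unsafe (it could be a full-word shift), so
     shift by 1 first and then by (~n & 31), which equals 31 - n.  */
  uint32_t carries = (lo >> 1) >> (~n & 31);
  *out_hi = (hi << n) | carries;	/* INTO half: shift, then OR.  */
  *out_lo = lo << n;			/* OUTOF half: plain shift.  */
}
#endif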
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label,
			   profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
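/* Illustrative sketch, not part of GCC: the branch-based selection above,
   for a complete 64-bit logical left shift by a variable N in [0, 64) on
   a machine whose 32-bit shifts truncate their count to 5 bits
   (shift_mask == 31).  Assumes <stdint.h> types.  */
#if 0
static uint64_t
sketch_doubleword_shl (uint32_t hi, uint32_t lo, unsigned n)
{
  uint32_t out_hi, out_lo;
  if ((n & 32) == 0)	/* CMP1 = OP1 & BITS_PER_WORD, CMP_CODE = EQ.  */
    {
      /* Subword case, as in sketch_subword_shl above.  */
      out_hi = (hi << n) | ((lo >> 1) >> (~n & 31));
      out_lo = lo << n;
    }
  else
    {
      /* Superword case.  SUPERWORD_OP1 is simply OP1 here, because the
	 machine masks the count itself: n & 31 == n - 32.  */
      out_hi = lo << (n & 31);
      out_lo = 0;
    }
  return ((uint64_t) out_hi << 32) | out_lo;
}
#endif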
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			       _______________________
			      [__op0_high_|__op0_low__]
			       _______________________
	 *		      [__op1_high_|__op1_low__]
	 _______________________________________________
			       _______________________
    (1)			      [__op0_low__*__op1_low__]
		   _______________________
    (2a)	  [__op0_low__*__op1_high_]
		   _______________________
    (2b)	  [__op0_high_*__op1_low__]
       _______________________
    (3)	      [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
			bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
		: gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
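/* Illustrative sketch, not part of GCC: the three-multiplication scheme
   above for a 64x64->64 multiply built from 32-bit words, using an
   unsigned widening multiply for partial product (1).  Assumes
   <stdint.h> types.  */
#if 0
static uint64_t
sketch_doubleword_mult (uint64_t a, uint64_t b)
{
  uint32_t a_lo = (uint32_t) a, a_hi = (uint32_t) (a >> 32);
  uint32_t b_lo = (uint32_t) b, b_hi = (uint32_t) (b >> 32);

  /* (2a) and (2b): only their low words matter, so non-widening
     multiplies suffice; partial product (3) is dropped entirely.  */
  uint32_t adjust = a_hi * b_lo + a_lo * b_hi;

  /* (1): unsigned widening multiply of the low words.  */
  uint64_t product = (uint64_t) a_lo * b_lo;

  /* Fold the adjustment into the high word of the product.  */
  return product + ((uint64_t) adjust << 32);
}
#endif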
/* Subroutine of expand_binop.  Optimize unsigned double-word OP0 % OP1 for
   constant OP1.  If for some bit in the [BITS_PER_WORD / 2, BITS_PER_WORD]
   range (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
   computed in word-mode as ((OP0 & ((1w << bit) - 1))
   + ((OP0 >> bit) & ((1w << bit) - 1)) + (OP0 >> (2 * bit))) % OP1.  Whether
   we need to sum 2, 3 or 4 values depends on the bit value; if only 2, then
   the carry from the addition needs to be added too, i.e. like:
     sum += __builtin_add_overflow (low, high, &sum);

   Optimize signed double-word OP0 % OP1 similarly, just apply some correction
   factor to the sum before doing the unsigned remainder, in the form of
     sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
   then perform the unsigned
     remainder = sum % OP1;
   and finally
     remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1);  */

static rtx
expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp)
{
  if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0)
    return NULL_RTX;

  rtx_insn *last = get_last_insn ();
  for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--)
    {
      wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
      if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
	continue;
      rtx sum = NULL_RTX, mask = NULL_RTX;
      if (bit == BITS_PER_WORD)
	{
	  /* For signed modulo we need to add correction to the sum
	     and that might again overflow.  */
	  if (!unsignedp)
	    continue;
	  if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
	    continue;
	  tree wtype = lang_hooks.types.type_for_mode (word_mode, 1);
	  if (wtype == NULL_TREE)
	    continue;
	  tree ctype = build_complex_type (wtype);
	  if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode))
	    continue;
	  machine_mode cmode = TYPE_MODE (ctype);
	  rtx op00 = operand_subword_force (op0, 0, mode);
	  rtx op01 = operand_subword_force (op0, 1, mode);
	  rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode),
				     gen_reg_rtx (word_mode));
	  tree lhs = make_tree (ctype, cres);
	  tree arg0 = make_tree (wtype, op00);
	  tree arg1 = make_tree (wtype, op01);
	  expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0,
				  arg1, true, true, true, false, NULL);
	  sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0),
				     XEXP (cres, 1), NULL_RTX, 1,
				     OPTAB_DIRECT);
	  if (sum == NULL_RTX)
	    return NULL_RTX;
	}
      else
	{
	  /* Code below uses GEN_INT, so we need the masks to be representable
	     in HOST_WIDE_INTs.  */
	  if (bit >= HOST_BITS_PER_WIDE_INT)
	    continue;
	  /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
	     overflow.  Consider 64-bit -1ULL for word size 32, if we add
	     0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1.  */
	  if (bit == BITS_PER_WORD - 1)
	    continue;

	  int count = (2 * BITS_PER_WORD + bit - 1) / bit;
	  rtx sum_corr = NULL_RTX;

	  if (!unsignedp)
	    {
	      /* For signed modulo, compute it as unsigned modulo of
		 sum with a correction added to it if OP0 is negative,
		 such that the result can be computed as unsigned
		 remainder + (((signed) OP0 >> (2 * BITS_PER_WORD - 1))
			      & (1 - OP1)).  */
	      w = wi::min_value (2 * BITS_PER_WORD, SIGNED);
	      wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
	      wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
	      /* wmod2 == -wmod1.  */
	      wmod2 = wmod2 + (INTVAL (op1) - 1);
	      if (wi::ne_p (wmod1, wmod2))
		{
		  wide_int wcorr = wmod2 - wmod1;
		  if (wi::neg_p (w))
		    wcorr = wcorr + INTVAL (op1);
		  /* Now verify if the count sums can't overflow, and punt
		     if they could.  */
		  w = wi::mask (bit, false, 2 * BITS_PER_WORD);
		  w = w * (count - 1);
		  w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
				    false, 2 * BITS_PER_WORD);
		  w = w + wcorr;
		  w = wi::lrshift (w, BITS_PER_WORD);
		  if (wi::ne_p (w, 0))
		    continue;

		  mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
						mode);
		  mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
					      GEN_INT (BITS_PER_WORD - 1),
					      NULL_RTX, 0, OPTAB_DIRECT);
		  if (mask == NULL_RTX)
		    return NULL_RTX;
		  sum_corr = immed_wide_int_const (wcorr, word_mode);
		  sum_corr = expand_simple_binop (word_mode, AND, mask,
						  sum_corr, NULL_RTX, 1,
						  OPTAB_DIRECT);
		  if (sum_corr == NULL_RTX)
		    return NULL_RTX;
		}
	    }

	  for (int i = 0; i < count; i++)
	    {
	      rtx v = op0;
	      if (i)
		v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit),
					 NULL_RTX, 1, OPTAB_DIRECT);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      v = lowpart_subreg (word_mode, v, mode);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      if (i != count - 1)
		v = expand_simple_binop (word_mode, AND, v,
					 GEN_INT ((HOST_WIDE_INT_1U << bit)
						  - 1), NULL_RTX, 1,
					 OPTAB_DIRECT);
	      if (v == NULL_RTX)
		return NULL_RTX;
	      if (sum == NULL_RTX)
		sum = v;
	      else
		sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX,
					   1, OPTAB_DIRECT);
	      if (sum == NULL_RTX)
		return NULL_RTX;
	    }
	  if (sum_corr)
	    {
	      sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      if (sum == NULL_RTX)
		return NULL_RTX;
	    }
	}
      rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, sum,
				     gen_int_mode (INTVAL (op1), word_mode),
				     NULL_RTX, 1, OPTAB_DIRECT);
      if (remainder == NULL_RTX)
	return NULL_RTX;

      if (!unsignedp)
	{
	  if (mask == NULL_RTX)
	    {
	      mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
					    mode);
	      mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
					  GEN_INT (BITS_PER_WORD - 1),
					  NULL_RTX, 0, OPTAB_DIRECT);
	      if (mask == NULL_RTX)
		return NULL_RTX;
	    }
	  mask = expand_simple_binop (word_mode, AND, mask,
				      gen_int_mode (1 - INTVAL (op1),
						    word_mode),
				      NULL_RTX, 1, OPTAB_DIRECT);
	  if (mask == NULL_RTX)
	    return NULL_RTX;
	  remainder = expand_simple_binop (word_mode, PLUS, remainder,
					   mask, NULL_RTX, 1, OPTAB_DIRECT);
	  if (remainder == NULL_RTX)
	    return NULL_RTX;
	}

      remainder = convert_modes (mode, word_mode, remainder, unsignedp);
      /* Punt if we need any library calls.  */
      for (; last; last = NEXT_INSN (last))
	if (CALL_P (last))
	  return NULL_RTX;
      return remainder;
    }
  return NULL_RTX;
}
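/* Illustrative sketch, not part of GCC: the chunk-summing trick above
   for 64-bit x % 7 using only 32-bit arithmetic.  2**30 % 7 == 1, so
   the three base-2**30 digits of x are congruent to x modulo 7, and
   their sum (at most 2 * (2**30 - 1) + 15) cannot overflow a word.
   Assumes <stdint.h> types.  */
#if 0
static uint32_t
sketch_umod7 (uint64_t x)
{
  uint32_t sum = (uint32_t) (x & 0x3fffffff)		/* bits 0..29  */
		 + (uint32_t) ((x >> 30) & 0x3fffffff)	/* bits 30..59 */
		 + (uint32_t) (x >> 60);		/* bits 60..63 */
  return sum % 7;	/* a single word-mode modulo */
}
#endif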
/* Similarly to the above function, but compute both quotient and remainder.
   Quotient can be computed from the remainder as:
     rem = op0 % op1;   // Handled using expand_doubleword_mod
     quot = (op0 - rem) * inv;  // inv is the multiplicative inverse of op1
				// modulo 2**(2 * BITS_PER_WORD)

   We can also handle cases where op1 is a power of two multiplied by an
   odd constant that expand_doubleword_mod can handle:
     op11 = 1 << __builtin_ctz (op1);
     op12 = op1 / op11;
     rem1 = op0 % op12;  // Handled using expand_doubleword_mod
     quot1 = (op0 - rem1) * inv;  // inv is the multiplicative inverse of op12
				  // modulo 2**(2 * BITS_PER_WORD)
     rem = (quot1 % op11) * op12 + rem1;
     quot = quot1 / op11;  */

rtx
expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem,
			  bool unsignedp)
{
  *rem = NULL_RTX;

  /* Negative dividend should have been optimized into positive,
     similarly modulo by 1 and modulo by power of two is optimized
     differently too.  */
  if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1)))
    return NULL_RTX;

  rtx op11 = const1_rtx;
  rtx op12 = op1;
  if ((INTVAL (op1) & 1) == 0)
    {
      int bit = ctz_hwi (INTVAL (op1));
      op11 = GEN_INT (HOST_WIDE_INT_1 << bit);
      op12 = GEN_INT (INTVAL (op1) >> bit);
    }

  rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
  if (rem1 == NULL_RTX)
    return NULL_RTX;

  int prec = 2 * BITS_PER_WORD;
  wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
  wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
  wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
  rtx inv = immed_wide_int_const (m, mode);

  rtx_insn *last = get_last_insn ();
  rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1,
				   NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  quot1 = expand_simple_binop (mode, MULT, quot1, inv,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);
  if (quot1 == NULL_RTX)
    return NULL_RTX;

  if (op11 != const1_rtx)
    {
      rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, quot1, op11,
				NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX,
				  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX,
				  unsignedp, OPTAB_DIRECT);
      if (rem2 == NULL_RTX)
	return NULL_RTX;

      rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, quot1, op11,
				 NULL_RTX, unsignedp, OPTAB_DIRECT);
      if (quot2 == NULL_RTX)
	return NULL_RTX;

      rem1 = rem2;
      quot1 = quot2;
    }

  /* Punt if we need any library calls.  */
  for (; last; last = NEXT_INSN (last))
    if (CALL_P (last))
      return NULL_RTX;

  *rem = rem1;
  return quot1;
}
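/* Illustrative sketch, not part of GCC: recovering the quotient from the
   remainder for an odd divisor d, as in the comment above.  Because d is
   odd it has a multiplicative inverse modulo 2**64, computable by
   Newton-Raphson iteration, and (x - x % d) is an exact multiple of d.
   Assumes <stdint.h> types.  */
#if 0
static uint64_t
sketch_quot_from_rem (uint64_t x, uint64_t rem, uint64_t d)
{
  uint64_t inv = d;		/* correct to 3 low bits for odd d */
  for (int i = 0; i < 5; i++)
    inv *= 2 - d * inv;		/* correct bits double each round */
  return (x - rem) * inv;	/* exact: equals (x - rem) / d */
}
#endif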
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  class expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab
      || binoptab == vec_packu_float_optab
      || binoptab == vec_packs_float_optab)
    {
      /* The mode of the result is different from the mode of the
	 arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
	  && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
	{
	  delete_insns_since (last);
	  return NULL_RTX;
	}
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
	 operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	  && ! add_equal_note (pat, ops[0].value,
			       optab_to_code (binoptab),
			       ops[1].value, ops[2].value, mode0))
	{
	  delete_insns_since (last);
	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
			       unsignedp, methods);
	}

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
	   && shift_optab_p (binoptab)
	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
	{
	  machine_mode from_mode = widened_mode (mode, op0, op1);
	  icode = find_widening_optab_handler (binoptab, mode, from_mode);
	}
      else
	icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
					target, unsignedp, methods, last);
	  if (temp)
	    return temp;
	}
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
	newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
	newop1 = negate_rtx (GET_MODE (op1), op1);
      else
	newop1 = expand_binop (GET_MODE (op1), sub_optab,
			       gen_int_mode (bits, GET_MODE (op1)), op1,
			       NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
				    target, unsignedp, methods, last);
      if (temp)
	return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
				  ? umul_widen_optab
				  : smul_widen_optab),
				 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
	 one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
	op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
			   unsignedp ? umul_widen_optab : smul_widen_optab,
			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
	{
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
	    return gen_lowpart (mode, temp);
	  else
	    return convert_to_mode (mode, temp, unsignedp);
	}
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
	otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
	otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
	otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
	otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
	otheroptab = vrotr_optab;

      if (otheroptab
	  && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
	{
	  /* The scalar may have been extended to be too wide.  Truncate
	     it back to the proper size to fit in the broadcast vector.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  if (!CONST_INT_P (op1)
	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
		  > GET_MODE_BITSIZE (inner_mode)))
	    op1 = force_reg (inner_mode,
			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
						 GET_MODE (op1)));
	  rtx vop1 = expand_vector_broadcast (mode, op1);
	  if (vop1)
	    {
	      temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
					    target, unsignedp, methods, last);
	      if (temp)
		return temp;
	    }
	}
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
	machine_mode next_mode;
	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
	    || (binoptab == smul_optab
		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
		&& (find_widening_optab_handler ((unsignedp
						  ? umul_widen_optab
						  : smul_widen_optab),
						 next_mode, mode)
		    != CODE_FOR_nothing)))
	  {
	    rtx xop0 = op0, xop1 = op1;
	    int no_extend = 0;

	    /* For certain integer operations, we need not actually extend
	       the narrow operands, as long as we will truncate
	       the results to the same narrowness.  */

	    if ((binoptab == ior_optab || binoptab == and_optab
		 || binoptab == xor_optab
		 || binoptab == add_optab || binoptab == sub_optab
		 || binoptab == smul_optab || binoptab == ashl_optab)
		&& mclass == MODE_INT)
	      {
		no_extend = 1;
		xop0 = avoid_expensive_constant (mode, binoptab, 0,
						 xop0, unsignedp);
		if (binoptab != ashl_optab)
		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
						   xop1, unsignedp);
	      }

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	    /* The second operand of a shift must always be extended.  */
	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				  no_extend && binoptab != ashl_optab);

	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				 unsignedp, OPTAB_DIRECT);
	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0
	  || target == op0
	  || target == op1
	  || reg_overlap_mentioned_p (target, op0)
	  || reg_overlap_mentioned_p (target, op1)
	  || !valid_multiword_target_p (target))
	target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      machine_mode op0_mode = GET_MODE (op0);
      machine_mode op1_mode = GET_MODE (op1);
      if (op0_mode == VOIDmode)
	op0_mode = int_mode;
      if (op1_mode == VOIDmode)
	op1_mode = int_mode;
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, int_mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, op0_mode),
				operand_subword_force (op1, i, op1_mode),
				target_piece, unsignedp, next_methods);

	  if (x == 0)
	    break;

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
	{
	  emit_insn (insns);
	  return target;
	}
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
		  ? as_a <scalar_int_mode> (GET_MODE (op1))
		  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
	op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);

      if (op1 == CONST0_RTX (op1_mode))
	return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	{
	  rtx_insn *insns;
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0
	      || target == op0
	      || target == op1
	      || reg_overlap_mentioned_p (target, op0)
	      || reg_overlap_mentioned_p (target, op1)
	      || !valid_multiword_target_p (target))
	    target = gen_reg_rtx (int_mode);

	  start_sequence ();

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
1799 WORDS_BIG_ENDIAN. */
1801 left_shift = binoptab == ashl_optab;
1802 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1804 outof_target = operand_subword (target, outof_word, 1, int_mode);
1805 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1807 outof_input = operand_subword_force (op0, outof_word, int_mode);
1808 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1810 if (expand_doubleword_shift (op1_mode, binoptab,
1811 outof_input, into_input, op1,
1812 outof_target, into_target,
1813 unsignedp, next_methods, shift_mask))
1815 insns = get_insns ();
1816 end_sequence ();
1818 emit_insn (insns);
1819 return target;
1821 end_sequence ();
1825 /* Synthesize double word rotates from single word shifts. */
1826 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1827 && is_int_mode (mode, &int_mode)
1828 && CONST_INT_P (op1)
1829 && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
1830 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1831 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1833 rtx_insn *insns;
1834 rtx into_target, outof_target;
1835 rtx into_input, outof_input;
1836 rtx inter;
1837 int shift_count, left_shift, outof_word;
1839 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1840 won't be accurate, so use a new target. Do this also if target is not
1841 a REG, first because having a register instead may open optimization
1842 opportunities, and second because if target and op0 happen to be MEMs
1843 designating the same location, we would risk clobbering it too early
1844 in the code sequence we generate below. */
1845 if (target == 0
1846 || target == op0
1847 || target == op1
1848 || !REG_P (target)
1849 || reg_overlap_mentioned_p (target, op0)
1850 || reg_overlap_mentioned_p (target, op1)
1851 || !valid_multiword_target_p (target))
1852 target = gen_reg_rtx (int_mode);
1854 start_sequence ();
1856 shift_count = INTVAL (op1);
1858 /* OUTOF_* is the word we are shifting bits away from, and
1859 INTO_* is the word that we are shifting bits towards, thus
1860 they differ depending on the direction of the shift and
1861 WORDS_BIG_ENDIAN. */
1863 left_shift = (binoptab == rotl_optab);
1864 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1866 outof_target = operand_subword (target, outof_word, 1, int_mode);
1867 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1869 outof_input = operand_subword_force (op0, outof_word, int_mode);
1870 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1872 if (shift_count == BITS_PER_WORD)
1874 /* This is just a word swap. */
1875 emit_move_insn (outof_target, into_input);
1876 emit_move_insn (into_target, outof_input);
1877 inter = const0_rtx;
1879 else
1881 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1882 HOST_WIDE_INT first_shift_count, second_shift_count;
1883 optab reverse_unsigned_shift, unsigned_shift;
1885 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1886 ? lshr_optab : ashl_optab);
1888 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1889 ? ashl_optab : lshr_optab);
1891 if (shift_count > BITS_PER_WORD)
1893 first_shift_count = shift_count - BITS_PER_WORD;
1894 second_shift_count = 2 * BITS_PER_WORD - shift_count;
1896 else
1898 first_shift_count = BITS_PER_WORD - shift_count;
1899 second_shift_count = shift_count;
1901 rtx first_shift_count_rtx
1902 = gen_int_shift_amount (word_mode, first_shift_count);
1903 rtx second_shift_count_rtx
1904 = gen_int_shift_amount (word_mode, second_shift_count);
1906 into_temp1 = expand_binop (word_mode, unsigned_shift,
1907 outof_input, first_shift_count_rtx,
1908 NULL_RTX, unsignedp, next_methods);
1909 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1910 into_input, second_shift_count_rtx,
1911 NULL_RTX, unsignedp, next_methods);
1913 if (into_temp1 != 0 && into_temp2 != 0)
1914 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1915 into_target, unsignedp, next_methods);
1916 else
1917 inter = 0;
1919 if (inter != 0 && inter != into_target)
1920 emit_move_insn (into_target, inter);
1922 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1923 into_input, first_shift_count_rtx,
1924 NULL_RTX, unsignedp, next_methods);
1925 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1926 outof_input, second_shift_count_rtx,
1927 NULL_RTX, unsignedp, next_methods);
1929 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1930 inter = expand_binop (word_mode, ior_optab,
1931 outof_temp1, outof_temp2,
1932 outof_target, unsignedp, next_methods);
1934 if (inter != 0 && inter != outof_target)
1935 emit_move_insn (outof_target, inter);
1938 insns = get_insns ();
1939 end_sequence ();
1941 if (inter != 0)
1943 emit_insn (insns);
1944 return target;
1948 /* These can be done a word at a time by propagating carries. */
1949 if ((binoptab == add_optab || binoptab == sub_optab)
1950 && is_int_mode (mode, &int_mode)
1951 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1952 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1954 unsigned int i;
1955 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1956 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1957 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1958 rtx xop0, xop1, xtarget;
1960 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1961 is one of those, use it. Otherwise, use 1 since it is the
1962 one easiest to get. */
1963 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1964 int normalizep = STORE_FLAG_VALUE;
1965 #else
1966 int normalizep = 1;
1967 #endif
1969 /* Prepare the operands. */
1970 xop0 = force_reg (int_mode, op0);
1971 xop1 = force_reg (int_mode, op1);
1973 xtarget = gen_reg_rtx (int_mode);
1975 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1976 target = xtarget;
1978 /* Indicate for flow that the entire target reg is being set. */
1979 if (REG_P (target))
1980 emit_clobber (xtarget);
1982 /* Do the actual arithmetic. */
1983 for (i = 0; i < nwords; i++)
1985 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1986 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1987 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1988 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1989 rtx x;
1991 /* Main add/subtract of the input operands. */
1992 x = expand_binop (word_mode, binoptab,
1993 op0_piece, op1_piece,
1994 target_piece, unsignedp, next_methods);
1995 if (x == 0)
1996 break;
1998 if (i + 1 < nwords)
2000 /* Store carry from main add/subtract. */
2001 carry_out = gen_reg_rtx (word_mode);
2002 carry_out = emit_store_flag_force (carry_out,
2003 (binoptab == add_optab
2004 ? LT : GT),
2005 x, op0_piece,
2006 word_mode, 1, normalizep);
2009 if (i > 0)
2011 rtx newx;
2013 /* Add/subtract previous carry to main result. */
2014 newx = expand_binop (word_mode,
2015 normalizep == 1 ? binoptab : otheroptab,
2016 x, carry_in,
2017 NULL_RTX, 1, next_methods);
2019 if (i + 1 < nwords)
2021 /* Get out carry from adding/subtracting carry in. */
2022 rtx carry_tmp = gen_reg_rtx (word_mode);
2023 carry_tmp = emit_store_flag_force (carry_tmp,
2024 (binoptab == add_optab
2025 ? LT : GT),
2026 newx, x,
2027 word_mode, 1, normalizep);
2029 /* Logical-ior the two possible carries together. */
2030 carry_out = expand_binop (word_mode, ior_optab,
2031 carry_out, carry_tmp,
2032 carry_out, 0, next_methods);
2033 if (carry_out == 0)
2034 break;
2036 emit_move_insn (target_piece, newx);
2038 else
2040 if (x != target_piece)
2041 emit_move_insn (target_piece, x);
2044 carry_in = carry_out;
2047 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
2049 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
2050 || ! rtx_equal_p (target, xtarget))
2052 rtx_insn *temp = emit_move_insn (target, xtarget);
2054 set_dst_reg_note (temp, REG_EQUAL,
2055 gen_rtx_fmt_ee (optab_to_code (binoptab),
2056 int_mode, copy_rtx (xop0),
2057 copy_rtx (xop1)),
2058 target);
2060 else
2061 target = xtarget;
2063 return target;
2066 else
2067 delete_insns_since (last);
2070 /* Attempt to synthesize double word multiplies using a sequence of word
2071 mode multiplications. We first attempt to generate a sequence using a
2072 more efficient unsigned widening multiply, and if that fails we then
2073 try using a signed widening multiply. */
2075 if (binoptab == smul_optab
2076 && is_int_mode (mode, &int_mode)
2077 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2078 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2079 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2081 rtx product = NULL_RTX;
2082 if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
2083 != CODE_FOR_nothing)
2085 product = expand_doubleword_mult (int_mode, op0, op1, target,
2086 true, methods);
2087 if (!product)
2088 delete_insns_since (last);
2091 if (product == NULL_RTX
2092 && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
2093 != CODE_FOR_nothing))
2095 product = expand_doubleword_mult (int_mode, op0, op1, target,
2096 false, methods);
2097 if (!product)
2098 delete_insns_since (last);
2101 if (product != NULL_RTX)
2103 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2105 rtx_insn *move = emit_move_insn (target ? target : product,
2106 product);
2107 set_dst_reg_note (move,
2108 REG_EQUAL,
2109 gen_rtx_fmt_ee (MULT, int_mode,
2110 copy_rtx (op0),
2111 copy_rtx (op1)),
2112 target ? target : product);
2114 return product;
2118 /* Attempt to synthesize double word modulo by constant divisor. */
2119 if ((binoptab == umod_optab
2120 || binoptab == smod_optab
2121 || binoptab == udiv_optab
2122 || binoptab == sdiv_optab)
2123 && optimize
2124 && CONST_INT_P (op1)
2125 && is_int_mode (mode, &int_mode)
2126 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2127 && optab_handler ((binoptab == umod_optab || binoptab == udiv_optab)
2128 ? udivmod_optab : sdivmod_optab,
2129 int_mode) == CODE_FOR_nothing
2130 && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
2131 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
2132 && optimize_insn_for_speed_p ())
2134 rtx res = NULL_RTX;
2135 if ((binoptab == umod_optab || binoptab == smod_optab)
2136 && (INTVAL (op1) & 1) == 0)
2137 res = expand_doubleword_mod (int_mode, op0, op1,
2138 binoptab == umod_optab);
2139 else
2141 rtx quot = expand_doubleword_divmod (int_mode, op0, op1, &res,
2142 binoptab == umod_optab
2143 || binoptab == udiv_optab);
2144 if (quot == NULL_RTX)
2145 res = NULL_RTX;
2146 else if (binoptab == udiv_optab || binoptab == sdiv_optab)
2147 res = quot;
2149 if (res != NULL_RTX)
2151 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2153 rtx_insn *move = emit_move_insn (target ? target : res,
2154 res);
2155 set_dst_reg_note (move, REG_EQUAL,
2156 gen_rtx_fmt_ee (optab_to_code (binoptab),
2157 int_mode, copy_rtx (op0), op1),
2158 target ? target : res);
2160 return res;
2162 else
2163 delete_insns_since (last);
2166 /* It can't be open-coded in this mode.
2167 Use a library call if one is available and caller says that's ok. */
2169 libfunc = optab_libfunc (binoptab, mode);
2170 if (libfunc
2171 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2173 rtx_insn *insns;
2174 rtx op1x = op1;
2175 machine_mode op1_mode = mode;
2176 rtx value;
2178 start_sequence ();
2180 if (shift_optab_p (binoptab))
2182 op1_mode = targetm.libgcc_shift_count_mode ();
2183 /* Specify unsigned here,
2184 since negative shift counts are meaningless. */
2185 op1x = convert_to_mode (op1_mode, op1, 1);
2188 if (GET_MODE (op0) != VOIDmode
2189 && GET_MODE (op0) != mode)
2190 op0 = convert_to_mode (mode, op0, unsignedp);
2192 /* Pass 1 for NO_QUEUE so we don't lose any increments
2193 if the libcall is cse'd or moved. */
2194 value = emit_library_call_value (libfunc,
2195 NULL_RTX, LCT_CONST, mode,
2196 op0, mode, op1x, op1_mode);
2198 insns = get_insns ();
2199 end_sequence ();
2201 bool trapv = trapv_binoptab_p (binoptab);
2202 target = gen_reg_rtx (mode);
2203 emit_libcall_block_1 (insns, target, value,
2204 trapv ? NULL_RTX
2205 : gen_rtx_fmt_ee (optab_to_code (binoptab),
2206 mode, op0, op1), trapv);
2208 return target;
2211 delete_insns_since (last);
2213 /* It can't be done in this mode. Can we do it in a wider mode? */
2215 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2216 || methods == OPTAB_MUST_WIDEN))
2218 /* Caller says, don't even try. */
2219 delete_insns_since (entry_last);
2220 return 0;
2223 /* Compute the value of METHODS to pass to recursive calls.
2224 Don't allow widening to be tried recursively. */
2226 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2228 /* Look for a wider mode of the same class for which it appears we can do
2229 the operation. */
2231 if (CLASS_HAS_WIDER_MODES_P (mclass))
2233 /* This code doesn't make sense for conversion optabs, since we
2234 wouldn't then want to extend the operands to be the same size
2235 as the result. */
2236 gcc_assert (!convert_optab_p (binoptab));
2237 FOR_EACH_WIDER_MODE (wider_mode, mode)
2239 if (optab_handler (binoptab, wider_mode)
2240 || (methods == OPTAB_LIB
2241 && optab_libfunc (binoptab, wider_mode)))
2243 rtx xop0 = op0, xop1 = op1;
2244 int no_extend = 0;
2246 /* For certain integer operations, we need not actually extend
2247 the narrow operands, as long as we will truncate
2248 the results to the same narrowness. */
2250 if ((binoptab == ior_optab || binoptab == and_optab
2251 || binoptab == xor_optab
2252 || binoptab == add_optab || binoptab == sub_optab
2253 || binoptab == smul_optab || binoptab == ashl_optab)
2254 && mclass == MODE_INT)
2255 no_extend = 1;
2257 xop0 = widen_operand (xop0, wider_mode, mode,
2258 unsignedp, no_extend);
2260 /* The second operand of a shift must always be extended. */
2261 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2262 no_extend && binoptab != ashl_optab);
2264 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2265 unsignedp, methods);
2266 if (temp)
2268 if (mclass != MODE_INT
2269 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2271 if (target == 0)
2272 target = gen_reg_rtx (mode);
2273 convert_move (target, temp, 0);
2274 return target;
2276 else
2277 return gen_lowpart (mode, temp);
2279 else
2280 delete_insns_since (last);
2285 delete_insns_since (entry_last);
2286 return 0;
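/* Illustrative sketch (not part of optabs.c): the word-at-a-time add
   with carry propagation that the add/sub block above open-codes at
   the RTL level, written in plain C for a hypothetical value split
   into 32-bit halves.  The carry out of the low-word add is detected
   exactly as emit_store_flag_force does above: for addition, the sum
   is less than either operand iff a carry occurred.  */
static void
example_doubleword_add (unsigned int a_lo, unsigned int a_hi,
                        unsigned int b_lo, unsigned int b_hi,
                        unsigned int *r_lo, unsigned int *r_hi)
{
  unsigned int lo = a_lo + b_lo;
  unsigned int carry = lo < a_lo;   /* The LT store-flag test above.  */
  *r_lo = lo;
  *r_hi = a_hi + b_hi + carry;
}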
2289 /* Expand a binary operator which has both signed and unsigned forms.
2290 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2291 signed operations.
2293 If we widen unsigned operands, we may use a signed wider operation instead
2294 of an unsigned wider operation, since the result would be the same. */
2296 rtx
2297 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
2298 rtx op0, rtx op1, rtx target, int unsignedp,
2299 enum optab_methods methods)
2301 rtx temp;
2302 optab direct_optab = unsignedp ? uoptab : soptab;
2303 bool save_enable;
2305 /* Do it without widening, if possible. */
2306 temp = expand_binop (mode, direct_optab, op0, op1, target,
2307 unsignedp, OPTAB_DIRECT);
2308 if (temp || methods == OPTAB_DIRECT)
2309 return temp;
2311 /* Try widening to a signed int. Disable any direct use of any
2312 signed insn in the current mode. */
2313 save_enable = swap_optab_enable (soptab, mode, false);
2315 temp = expand_binop (mode, soptab, op0, op1, target,
2316 unsignedp, OPTAB_WIDEN);
2318 /* For unsigned operands, try widening to an unsigned int. */
2319 if (!temp && unsignedp)
2320 temp = expand_binop (mode, uoptab, op0, op1, target,
2321 unsignedp, OPTAB_WIDEN);
2322 if (temp || methods == OPTAB_WIDEN)
2323 goto egress;
2325 /* Use the right width libcall if that exists. */
2326 temp = expand_binop (mode, direct_optab, op0, op1, target,
2327 unsignedp, OPTAB_LIB);
2328 if (temp || methods == OPTAB_LIB)
2329 goto egress;
2331 /* Must widen and use a libcall, use either signed or unsigned. */
2332 temp = expand_binop (mode, soptab, op0, op1, target,
2333 unsignedp, methods);
2334 if (!temp && unsignedp)
2335 temp = expand_binop (mode, uoptab, op0, op1, target,
2336 unsignedp, methods);
2338 egress:
2339 /* Undo the fiddling above. */
2340 if (save_enable)
2341 swap_optab_enable (soptab, mode, true);
2342 return temp;
2345 /* Generate code to perform an operation specified by UNOPTAB
2346 on operand OP0, with two results to TARG0 and TARG1.
2347 We assume that the order of the operands for the instruction
2348 is TARG0, TARG1, OP0.
2350 Either TARG0 or TARG1 may be zero, but what that means is that
2351 the result is not actually wanted. We will generate it into
2352 a dummy pseudo-reg and discard it. They may not both be zero.
2354 Returns 1 if this operation can be performed; 0 if not. */
2356 int
2357 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2358 int unsignedp)
2360 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2361 enum mode_class mclass;
2362 machine_mode wider_mode;
2363 rtx_insn *entry_last = get_last_insn ();
2364 rtx_insn *last;
2366 mclass = GET_MODE_CLASS (mode);
2368 if (!targ0)
2369 targ0 = gen_reg_rtx (mode);
2370 if (!targ1)
2371 targ1 = gen_reg_rtx (mode);
2373 /* Record where to go back to if we fail. */
2374 last = get_last_insn ();
2376 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2378 class expand_operand ops[3];
2379 enum insn_code icode = optab_handler (unoptab, mode);
2381 create_fixed_operand (&ops[0], targ0);
2382 create_fixed_operand (&ops[1], targ1);
2383 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2384 if (maybe_expand_insn (icode, 3, ops))
2385 return 1;
2388 /* It can't be done in this mode. Can we do it in a wider mode? */
2390 if (CLASS_HAS_WIDER_MODES_P (mclass))
2392 FOR_EACH_WIDER_MODE (wider_mode, mode)
2394 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2396 rtx t0 = gen_reg_rtx (wider_mode);
2397 rtx t1 = gen_reg_rtx (wider_mode);
2398 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2400 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2402 convert_move (targ0, t0, unsignedp);
2403 convert_move (targ1, t1, unsignedp);
2404 return 1;
2406 else
2407 delete_insns_since (last);
2412 delete_insns_since (entry_last);
2413 return 0;
2416 /* Generate code to perform an operation specified by BINOPTAB
2417 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2418 We assume that the order of the operands for the instruction
2419 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2420 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2422 Either TARG0 or TARG1 may be zero, but what that means is that
2423 the result is not actually wanted. We will generate it into
2424 a dummy pseudo-reg and discard it. They may not both be zero.
2426 Returns 1 if this operation can be performed; 0 if not. */
2428 int
2429 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2430 int unsignedp)
2432 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2433 enum mode_class mclass;
2434 machine_mode wider_mode;
2435 rtx_insn *entry_last = get_last_insn ();
2436 rtx_insn *last;
2438 mclass = GET_MODE_CLASS (mode);
2440 if (!targ0)
2441 targ0 = gen_reg_rtx (mode);
2442 if (!targ1)
2443 targ1 = gen_reg_rtx (mode);
2445 /* Record where to go back to if we fail. */
2446 last = get_last_insn ();
2448 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2450 class expand_operand ops[4];
2451 enum insn_code icode = optab_handler (binoptab, mode);
2452 machine_mode mode0 = insn_data[icode].operand[1].mode;
2453 machine_mode mode1 = insn_data[icode].operand[2].mode;
2454 rtx xop0 = op0, xop1 = op1;
2456 /* If we are optimizing, force expensive constants into a register. */
2457 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2458 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2460 create_fixed_operand (&ops[0], targ0);
2461 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2462 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2463 create_fixed_operand (&ops[3], targ1);
2464 if (maybe_expand_insn (icode, 4, ops))
2465 return 1;
2466 delete_insns_since (last);
2469 /* It can't be done in this mode. Can we do it in a wider mode? */
2471 if (CLASS_HAS_WIDER_MODES_P (mclass))
2473 FOR_EACH_WIDER_MODE (wider_mode, mode)
2475 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2477 rtx t0 = gen_reg_rtx (wider_mode);
2478 rtx t1 = gen_reg_rtx (wider_mode);
2479 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2480 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2482 if (expand_twoval_binop (binoptab, cop0, cop1,
2483 t0, t1, unsignedp))
2485 convert_move (targ0, t0, unsignedp);
2486 convert_move (targ1, t1, unsignedp);
2487 return 1;
2489 else
2490 delete_insns_since (last);
2495 delete_insns_since (entry_last);
2496 return 0;
2499 /* Expand the two-valued library call indicated by BINOPTAB, but
2500 preserve only one of the values. If TARG0 is non-NULL, the first
2501 value is placed into TARG0; otherwise the second value is placed
2502 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2503 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2504 This routine assumes that the value returned by the library call is
2505 as if the return value were of an integral mode twice as wide as the
2506 mode of OP0. Returns 1 if the call was successful. */
2508 bool
2509 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2510 rtx targ0, rtx targ1, enum rtx_code code)
2512 machine_mode mode;
2513 machine_mode libval_mode;
2514 rtx libval;
2515 rtx_insn *insns;
2516 rtx libfunc;
2518 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2519 gcc_assert (!targ0 != !targ1);
2521 mode = GET_MODE (op0);
2522 libfunc = optab_libfunc (binoptab, mode);
2523 if (!libfunc)
2524 return false;
2526 /* The value returned by the library function will have twice as
2527 many bits as the nominal MODE. */
2528 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2529 start_sequence ();
2530 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2531 libval_mode,
2532 op0, mode,
2533 op1, mode);
2534 /* Get the part of VAL containing the value that we want. */
2535 libval = simplify_gen_subreg (mode, libval, libval_mode,
2536 targ0 ? 0 : GET_MODE_SIZE (mode));
2537 insns = get_insns ();
2538 end_sequence ();
2539 /* Move the result into the desired location. */
2540 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2541 gen_rtx_fmt_ee (code, mode, op0, op1));
2543 return true;
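/* Illustrative sketch (not part of optabs.c) of the convention this
   routine assumes: a two-valued libcall (a divmod routine, say)
   returns a value of an integral mode twice as wide as MODE, and each
   result is a mode-sized "subreg" of it.  Hypothetical 32-bit example,
   assuming the first value (the TARG0 analogue, byte offset 0) sits in
   the low half.  */
static unsigned int
example_extract_twoval (unsigned long long libval, int want_first)
{
  return want_first ? (unsigned int) libval
                    : (unsigned int) (libval >> 32);
}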
2547 /* Wrapper around expand_unop which takes an rtx code to specify
2548 the operation to perform, not an optab pointer. All other
2549 arguments are the same. */
2550 rtx
2551 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2552 rtx target, int unsignedp)
2554 optab unop = code_to_optab (code);
2555 gcc_assert (unop);
2557 return expand_unop (mode, unop, op0, target, unsignedp);
2560 /* Try calculating
2561 (clz:narrow x)
2562 as
2563 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2565 A similar operation can be used for clrsb. UNOPTAB says which operation
2566 we are trying to expand. */
2567 static rtx
2568 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2570 opt_scalar_int_mode wider_mode_iter;
2571 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2573 scalar_int_mode wider_mode = wider_mode_iter.require ();
2574 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2576 rtx xop0, temp;
2577 rtx_insn *last;
2579 last = get_last_insn ();
2581 if (target == 0)
2582 target = gen_reg_rtx (mode);
2583 xop0 = widen_operand (op0, wider_mode, mode,
2584 unoptab != clrsb_optab, false);
2585 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2586 unoptab != clrsb_optab);
2587 if (temp != 0)
2588 temp = expand_binop
2589 (wider_mode, sub_optab, temp,
2590 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2591 - GET_MODE_PRECISION (mode),
2592 wider_mode),
2593 target, true, OPTAB_DIRECT);
2594 if (temp == 0)
2595 delete_insns_since (last);
2597 return temp;
2600 return 0;
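/* Illustrative sketch (not part of optabs.c) of the identity
   widen_leading uses: count the leading zeros of a narrow value by
   zero-extending it, applying the wide clz, and subtracting the extra
   width.  (For clrsb the operand would be sign-extended instead; note
   the unoptab != clrsb_optab flags above.)  Assumes 64-bit 'unsigned
   long long' and a nonzero input, since clz is undefined at zero.  */
static int
example_clz32_via_clz64 (unsigned int x)
{
  return __builtin_clzll ((unsigned long long) x) - (64 - 32);
}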
2603 /* Attempt to emit (clrsb:mode op0) as
2604 (plus:mode (clz:mode (xor:mode op0 (ashr:mode op0 (const_int prec-1))))
2605 (const_int -1))
2606 if CLZ_DEFINED_VALUE_AT_ZERO (mode, val) is 2 and val is prec,
2607 or as
2608 (clz:mode (ior:mode (xor:mode (ashl:mode op0 (const_int 1))
2609 (ashr:mode op0 (const_int prec-1)))
2610 (const_int 1)))
2611 otherwise. */
2613 static rtx
2614 expand_clrsb_using_clz (scalar_int_mode mode, rtx op0, rtx target)
2616 if (optimize_insn_for_size_p ()
2617 || optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2618 return NULL_RTX;
2620 start_sequence ();
2621 HOST_WIDE_INT val = 0;
2622 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) != 2
2623 || val != GET_MODE_PRECISION (mode))
2624 val = 0;
2625 else
2626 val = 1;
2628 rtx temp2 = op0;
2629 if (!val)
2631 temp2 = expand_binop (mode, ashl_optab, op0, const1_rtx,
2632 NULL_RTX, 0, OPTAB_DIRECT);
2633 if (!temp2)
2635 fail:
2636 end_sequence ();
2637 return NULL_RTX;
2641 rtx temp = expand_binop (mode, ashr_optab, op0,
2642 GEN_INT (GET_MODE_PRECISION (mode) - 1),
2643 NULL_RTX, 0, OPTAB_DIRECT);
2644 if (!temp)
2645 goto fail;
2647 temp = expand_binop (mode, xor_optab, temp2, temp, NULL_RTX, 0,
2648 OPTAB_DIRECT);
2649 if (!temp)
2650 goto fail;
2652 if (!val)
2654 temp = expand_binop (mode, ior_optab, temp, const1_rtx,
2655 NULL_RTX, 0, OPTAB_DIRECT);
2656 if (!temp)
2657 goto fail;
2659 temp = expand_unop_direct (mode, clz_optab, temp, val ? NULL_RTX : target,
2660 true);
2661 if (!temp)
2662 goto fail;
2663 if (val)
2665 temp = expand_binop (mode, add_optab, temp, constm1_rtx,
2666 target, 0, OPTAB_DIRECT);
2667 if (!temp)
2668 goto fail;
2671 rtx_insn *seq = get_insns ();
2672 end_sequence ();
2674 add_equal_note (seq, temp, CLRSB, op0, NULL_RTX, mode);
2675 emit_insn (seq);
2676 return temp;
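/* Illustrative sketch (not part of optabs.c) of the fallback above for
   the case where clz is not known to be the precision at zero:
   clrsb (x) == clz (((x << 1) ^ (x >> prec-1)) | 1).  The or-with-1
   keeps the clz operand nonzero.  Assumes a 32-bit int with arithmetic
   right shift, as GCC provides.  */
static int
example_clrsb32 (int x)
{
  unsigned int t = ((unsigned int) x << 1) ^ (unsigned int) (x >> 31);
  return __builtin_clz (t | 1);
}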
2679 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2680 quantities, choosing which based on whether the high word is nonzero. */
2681 static rtx
2682 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2684 rtx xop0 = force_reg (mode, op0);
2685 rtx subhi = gen_highpart (word_mode, xop0);
2686 rtx sublo = gen_lowpart (word_mode, xop0);
2687 rtx_code_label *hi0_label = gen_label_rtx ();
2688 rtx_code_label *after_label = gen_label_rtx ();
2689 rtx_insn *seq;
2690 rtx temp, result;
2692 /* If we were not given a target, use a word_mode register, not a
2693 'mode' register. The result will fit, and nobody is expecting
2694 anything bigger (the return type of __builtin_clz* is int). */
2695 if (!target)
2696 target = gen_reg_rtx (word_mode);
2698 /* In any case, write to a word_mode scratch in both branches of the
2699 conditional, so we can ensure there is a single move insn setting
2700 'target' on which to tag a REG_EQUAL note. */
2701 result = gen_reg_rtx (word_mode);
2703 start_sequence ();
2705 /* If the high word is not equal to zero,
2706 then clz of the full value is clz of the high word. */
2707 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2708 word_mode, true, hi0_label);
2710 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2711 if (!temp)
2712 goto fail;
2714 if (temp != result)
2715 convert_move (result, temp, true);
2717 emit_jump_insn (targetm.gen_jump (after_label));
2718 emit_barrier ();
2720 /* Else clz of the full value is clz of the low word plus the number
2721 of bits in the high word. */
2722 emit_label (hi0_label);
2724 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2725 if (!temp)
2726 goto fail;
2727 temp = expand_binop (word_mode, add_optab, temp,
2728 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2729 result, true, OPTAB_DIRECT);
2730 if (!temp)
2731 goto fail;
2732 if (temp != result)
2733 convert_move (result, temp, true);
2735 emit_label (after_label);
2736 convert_move (target, result, true);
2738 seq = get_insns ();
2739 end_sequence ();
2741 add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
2742 emit_insn (seq);
2743 return target;
2745 fail:
2746 end_sequence ();
2747 return 0;
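/* Illustrative sketch (not part of optabs.c) of the branch above: clz
   of a double-word value is clz of the high word if that is nonzero,
   else clz of the low word plus the word width.  Assumes 32-bit words
   and a nonzero input.  */
static int
example_clz64 (unsigned int hi, unsigned int lo)
{
  return hi != 0 ? __builtin_clz (hi) : __builtin_clz (lo) + 32;
}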
2750 /* Try calculating popcount of a double-word quantity as two popcount's of
2751 word-sized quantities and summing up the results. */
2752 static rtx
2753 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2755 rtx t0, t1, t;
2756 rtx_insn *seq;
2758 start_sequence ();
2760 t0 = expand_unop_direct (word_mode, popcount_optab,
2761 operand_subword_force (op0, 0, mode), NULL_RTX,
2762 true);
2763 t1 = expand_unop_direct (word_mode, popcount_optab,
2764 operand_subword_force (op0, 1, mode), NULL_RTX,
2765 true);
2766 if (!t0 || !t1)
2768 end_sequence ();
2769 return NULL_RTX;
2772 /* If we were not given a target, use a word_mode register, not a
2773 'mode' register. The result will fit, and nobody is expecting
2774 anything bigger (the return type of __builtin_popcount* is int). */
2775 if (!target)
2776 target = gen_reg_rtx (word_mode);
2778 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2780 seq = get_insns ();
2781 end_sequence ();
2783 add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
2784 emit_insn (seq);
2785 return t;
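/* Illustrative sketch (not part of optabs.c): popcount of a
   double-word value is simply the sum of the popcounts of its two
   words, which is all the sequence above computes.  Assumes 32-bit
   words.  */
static int
example_popcount64 (unsigned int hi, unsigned int lo)
{
  return __builtin_popcount (hi) + __builtin_popcount (lo);
}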
2788 /* Try calculating
2789 (parity:wide x)
2790 as
2791 (parity:narrow (low (x) ^ high (x))) */
2792 static rtx
2793 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2795 rtx t = expand_binop (word_mode, xor_optab,
2796 operand_subword_force (op0, 0, mode),
2797 operand_subword_force (op0, 1, mode),
2798 NULL_RTX, 0, OPTAB_DIRECT);
2799 return expand_unop (word_mode, parity_optab, t, target, true);
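/* Illustrative sketch (not part of optabs.c): parity is invariant
   under xor-folding, so the double-word parity reduces to one
   word-mode parity of LO ^ HI.  Assumes 32-bit words.  */
static int
example_parity64 (unsigned int hi, unsigned int lo)
{
  return __builtin_parity (hi ^ lo);
}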
2802 /* Try calculating
2803 (bswap:narrow x)
2804 as
2805 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2806 static rtx
2807 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2809 rtx x;
2810 rtx_insn *last;
2811 opt_scalar_int_mode wider_mode_iter;
2813 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2814 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2815 != CODE_FOR_nothing)
2816 break;
2818 if (!wider_mode_iter.exists ())
2819 return NULL_RTX;
2821 scalar_int_mode wider_mode = wider_mode_iter.require ();
2822 last = get_last_insn ();
2824 x = widen_operand (op0, wider_mode, mode, true, true);
2825 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2827 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2828 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2829 if (x != 0)
2830 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2831 GET_MODE_BITSIZE (wider_mode)
2832 - GET_MODE_BITSIZE (mode),
2833 NULL_RTX, true);
2835 if (x != 0)
2837 if (target == 0)
2838 target = gen_reg_rtx (mode);
2839 emit_move_insn (target, gen_lowpart (mode, x));
2841 else
2842 delete_insns_since (last);
2844 return target;
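/* Illustrative sketch (not part of optabs.c) of the shift above:
   byte-swapping a narrow value via a wider bswap leaves the result in
   the high bytes, so it is shifted back down by the width difference.
   Hypothetical 16-bit case done in 32 bits.  */
static unsigned short
example_bswap16_via_bswap32 (unsigned short x)
{
  return (unsigned short) (__builtin_bswap32 (x) >> (32 - 16));
}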
2847 /* Try calculating bswap as two bswaps of two word-sized operands. */
2849 static rtx
2850 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2852 rtx t0, t1;
2854 t1 = expand_unop (word_mode, bswap_optab,
2855 operand_subword_force (op, 0, mode), NULL_RTX, true);
2856 t0 = expand_unop (word_mode, bswap_optab,
2857 operand_subword_force (op, 1, mode), NULL_RTX, true);
2859 if (target == 0 || !valid_multiword_target_p (target))
2860 target = gen_reg_rtx (mode);
2861 if (REG_P (target))
2862 emit_clobber (target);
2863 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2864 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2866 return target;
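/* Illustrative sketch (not part of optabs.c): a double-word bswap is a
   bswap of each word combined with a swap of the two words, which is
   what the two emit_move_insn calls above arrange.  Assumes 32-bit
   words.  */
static void
example_bswap64 (unsigned int hi, unsigned int lo,
                 unsigned int *r_hi, unsigned int *r_lo)
{
  *r_hi = __builtin_bswap32 (lo);   /* Old low word becomes new high.  */
  *r_lo = __builtin_bswap32 (hi);
}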
2869 /* Try calculating (parity x) as (and (popcount x) 1), where
2870 popcount can also be done in a wider mode. */
2871 static rtx
2872 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2874 enum mode_class mclass = GET_MODE_CLASS (mode);
2875 opt_scalar_int_mode wider_mode_iter;
2876 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2878 scalar_int_mode wider_mode = wider_mode_iter.require ();
2879 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2881 rtx xop0, temp;
2882 rtx_insn *last;
2884 last = get_last_insn ();
2886 if (target == 0 || GET_MODE (target) != wider_mode)
2887 target = gen_reg_rtx (wider_mode);
2889 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2890 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2891 true);
2892 if (temp != 0)
2893 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2894 target, true, OPTAB_DIRECT);
2896 if (temp)
2898 if (mclass != MODE_INT
2899 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2900 return convert_to_mode (mode, temp, 0);
2901 else
2902 return gen_lowpart (mode, temp);
2904 else
2905 delete_insns_since (last);
2908 return 0;
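/* Illustrative sketch (not part of optabs.c): the (and (popcount x) 1)
   form used above.  Widening first with zero-extension is fine, since
   zero-extension cannot change the popcount.  */
static int
example_parity_via_popcount (unsigned int x)
{
  return __builtin_popcount (x) & 1;
}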
2911 /* Try calculating ctz(x) as K - clz(x & -x) ,
2912 where K is GET_MODE_PRECISION(mode) - 1.
2914 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2915 don't have to worry about what the hardware does in that case. (If
2916 the clz instruction produces the usual value at 0, which is K, the
2917 result of this code sequence will be -1; expand_ffs, below, relies
2918 on this. It might be nice to have it be K instead, for consistency
2919 with the (very few) processors that provide a ctz with a defined
2920 value, but that would take one more instruction, and it would be
2921 less convenient for expand_ffs anyway.) */
2923 static rtx
2924 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2926 rtx_insn *seq;
2927 rtx temp;
2929 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2930 return 0;
2932 start_sequence ();
2934 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2935 if (temp)
2936 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2937 true, OPTAB_DIRECT);
2938 if (temp)
2939 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2940 if (temp)
2941 temp = expand_binop (mode, sub_optab,
2942 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2943 temp, target,
2944 true, OPTAB_DIRECT);
2945 if (temp == 0)
2947 end_sequence ();
2948 return 0;
2951 seq = get_insns ();
2952 end_sequence ();
2954 add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
2955 emit_insn (seq);
2956 return temp;
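/* Illustrative sketch (not part of optabs.c) of the identity above:
   x & -x isolates the lowest set bit, so for nonzero x,
   ctz (x) == (prec - 1) - clz (x & -x).  Assumes a 32-bit int and a
   nonzero input.  */
static int
example_ctz32_via_clz (unsigned int x)
{
  return 31 - __builtin_clz (x & -x);
}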
2960 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2961 else with the sequence used by expand_ctz.
2963 The ffs builtin promises to return zero for a zero value and ctz/clz
2964 may have an undefined value in that case. If they do not give us a
2965 convenient value, we have to generate a test and branch. */
2966 static rtx
2967 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2969 HOST_WIDE_INT val = 0;
2970 bool defined_at_zero = false;
2971 rtx temp;
2972 rtx_insn *seq;
2974 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2976 start_sequence ();
2978 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2979 if (!temp)
2980 goto fail;
2982 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2984 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2986 start_sequence ();
2987 temp = expand_ctz (mode, op0, 0);
2988 if (!temp)
2989 goto fail;
2991 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2993 defined_at_zero = true;
2994 val = (GET_MODE_PRECISION (mode) - 1) - val;
2997 else
2998 return 0;
3000 if (defined_at_zero && val == -1)
3001 /* No correction needed at zero. */;
3002 else
3004 /* We don't try to do anything clever with the situation found
3005 on some processors (e.g. Alpha) where ctz(0:mode) ==
3006 bitsize(mode). If someone can think of a way to send N to -1
3007 and leave alone all values in the range 0..N-1 (where N is a
3008 power of two), cheaper than this test-and-branch, please add it.
3010 The test-and-branch is done after the operation itself, in case
3011 the operation sets condition codes that can be recycled for this.
3012 (This is true on i386, for instance.) */
3014 rtx_code_label *nonzero_label = gen_label_rtx ();
3015 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
3016 mode, true, nonzero_label);
3018 convert_move (temp, GEN_INT (-1), false);
3019 emit_label (nonzero_label);
3022 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
3023 to produce a value in the range 0..bitsize. */
3024 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
3025 target, false, OPTAB_DIRECT);
3026 if (!temp)
3027 goto fail;
3029 seq = get_insns ();
3030 end_sequence ();
3032 add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
3033 emit_insn (seq);
3034 return temp;
3036 fail:
3037 end_sequence ();
3038 return 0;
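/* Illustrative sketch (not part of optabs.c): ffs must return 0 for a
   zero argument, so after computing ctz (whose value at zero is either
   arranged to be -1 or fixed up with the test and branch above) the
   result is simply incremented.  Branching variant in plain C,
   assuming a 32-bit int.  */
static int
example_ffs32 (unsigned int x)
{
  return x == 0 ? 0 : __builtin_ctz (x) + 1;
}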
3041 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
3042 conditions, VAL may already be a SUBREG against which we cannot generate
3043 a further SUBREG. In this case, we expect forcing the value into a
3044 register will work around the situation. */
3046 static rtx
3047 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
3048 machine_mode imode)
3050 rtx ret;
3051 ret = lowpart_subreg (omode, val, imode);
3052 if (ret == NULL)
3054 val = force_reg (imode, val);
3055 ret = lowpart_subreg (omode, val, imode);
3056 gcc_assert (ret != NULL);
3058 return ret;
3061 /* Expand a floating point absolute value or negation operation via a
3062 logical operation on the sign bit. */
3064 static rtx
3065 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
3066 rtx op0, rtx target)
3068 const struct real_format *fmt;
3069 int bitpos, word, nwords, i;
3070 scalar_int_mode imode;
3071 rtx temp;
3072 rtx_insn *insns;
3074 /* The format has to have a simple sign bit. */
3075 fmt = REAL_MODE_FORMAT (mode);
3076 if (fmt == NULL)
3077 return NULL_RTX;
3079 bitpos = fmt->signbit_rw;
3080 if (bitpos < 0)
3081 return NULL_RTX;
3083 /* Don't create negative zeros if the format doesn't support them. */
3084 if (code == NEG && !fmt->has_signed_zero)
3085 return NULL_RTX;
3087 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3089 if (!int_mode_for_mode (mode).exists (&imode))
3090 return NULL_RTX;
3091 word = 0;
3092 nwords = 1;
3094 else
3096 imode = word_mode;
3098 if (FLOAT_WORDS_BIG_ENDIAN)
3099 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3100 else
3101 word = bitpos / BITS_PER_WORD;
3102 bitpos = bitpos % BITS_PER_WORD;
3103 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3106 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3107 if (code == ABS)
3108 mask = ~mask;
3110 if (target == 0
3111 || target == op0
3112 || reg_overlap_mentioned_p (target, op0)
3113 || (nwords > 1 && !valid_multiword_target_p (target)))
3114 target = gen_reg_rtx (mode);
3116 if (nwords > 1)
3118 start_sequence ();
3120 for (i = 0; i < nwords; ++i)
3122 rtx targ_piece = operand_subword (target, i, 1, mode);
3123 rtx op0_piece = operand_subword_force (op0, i, mode);
3125 if (i == word)
3127 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3128 op0_piece,
3129 immed_wide_int_const (mask, imode),
3130 targ_piece, 1, OPTAB_LIB_WIDEN);
3131 if (temp != targ_piece)
3132 emit_move_insn (targ_piece, temp);
3134 else
3135 emit_move_insn (targ_piece, op0_piece);
3138 insns = get_insns ();
3139 end_sequence ();
3141 emit_insn (insns);
3143 else
3145 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3146 gen_lowpart (imode, op0),
3147 immed_wide_int_const (mask, imode),
3148 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3149 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3151 set_dst_reg_note (get_last_insn (), REG_EQUAL,
3152 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
3153 target);
3156 return target;
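/* Illustrative sketch (not part of optabs.c) of the sign-bit trick
   above for IEEE single precision: negation xors the sign bit and
   absolute value masks it off, operating on the bit image of the
   float.  Assumes 32-bit float and int; signbit_rw would be 31 here.  */
static float
example_negate_float (float f)
{
  unsigned int bits;
  __builtin_memcpy (&bits, &f, sizeof bits);
  bits ^= 1u << 31;               /* XOR for NEG; AND ~mask for ABS.  */
  __builtin_memcpy (&f, &bits, sizeof bits);
  return f;
}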
3159 /* As expand_unop, but will fail rather than attempt the operation in a
3160 different mode or with a libcall. */
3161 static rtx
3162 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
3163 int unsignedp)
3165 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
3167 class expand_operand ops[2];
3168 enum insn_code icode = optab_handler (unoptab, mode);
3169 rtx_insn *last = get_last_insn ();
3170 rtx_insn *pat;
3172 create_output_operand (&ops[0], target, mode);
3173 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
3174 pat = maybe_gen_insn (icode, 2, ops);
3175 if (pat)
3177 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3178 && ! add_equal_note (pat, ops[0].value,
3179 optab_to_code (unoptab),
3180 ops[1].value, NULL_RTX, mode))
3182 delete_insns_since (last);
3183 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3186 emit_insn (pat);
3188 return ops[0].value;
3191 return 0;
3194 /* Generate code to perform an operation specified by UNOPTAB
3195 on operand OP0, with result having machine-mode MODE.
3197 UNSIGNEDP is for the case where we have to widen the operands
3198 to perform the operation. It says to use zero-extension.
3200 If TARGET is nonzero, the value
3201 is generated there, if it is convenient to do so.
3202 In all cases an rtx is returned for the locus of the value;
3203 this may or may not be TARGET. */
3205 rtx
3206 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
3207 int unsignedp)
3209 enum mode_class mclass = GET_MODE_CLASS (mode);
3210 machine_mode wider_mode;
3211 scalar_int_mode int_mode;
3212 scalar_float_mode float_mode;
3213 rtx temp;
3214 rtx libfunc;
3216 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3217 if (temp)
3218 return temp;
3220 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3222 /* Widening (or narrowing) clz needs special treatment. */
3223 if (unoptab == clz_optab)
3225 if (is_a <scalar_int_mode> (mode, &int_mode))
3227 temp = widen_leading (int_mode, op0, target, unoptab);
3228 if (temp)
3229 return temp;
3231 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3232 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3234 temp = expand_doubleword_clz (int_mode, op0, target);
3235 if (temp)
3236 return temp;
3240 goto try_libcall;
3243 if (unoptab == clrsb_optab)
3245 if (is_a <scalar_int_mode> (mode, &int_mode))
3247 temp = widen_leading (int_mode, op0, target, unoptab);
3248 if (temp)
3249 return temp;
3250 temp = expand_clrsb_using_clz (int_mode, op0, target);
3251 if (temp)
3252 return temp;
3254 goto try_libcall;
3257 if (unoptab == popcount_optab
3258 && is_a <scalar_int_mode> (mode, &int_mode)
3259 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3260 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
3261 && optimize_insn_for_speed_p ())
3263 temp = expand_doubleword_popcount (int_mode, op0, target);
3264 if (temp)
3265 return temp;
3268 if (unoptab == parity_optab
3269 && is_a <scalar_int_mode> (mode, &int_mode)
3270 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3271 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
3272 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
3273 && optimize_insn_for_speed_p ())
3275 temp = expand_doubleword_parity (int_mode, op0, target);
3276 if (temp)
3277 return temp;
3280 /* Widening (or narrowing) bswap needs special treatment. */
3281 if (unoptab == bswap_optab)
3283 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3284 or ROTATERT. First try these directly; if this fails, then try the
3285 obvious pair of shifts with allowed widening, as this will probably
3286 always be more efficient than the other fallback methods.
3287 if (mode == HImode)
3289 rtx_insn *last;
3290 rtx temp1, temp2;
3292 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
3294 temp = expand_binop (mode, rotl_optab, op0,
3295 gen_int_shift_amount (mode, 8),
3296 target, unsignedp, OPTAB_DIRECT);
3297 if (temp)
3298 return temp;
3301 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
3303 temp = expand_binop (mode, rotr_optab, op0,
3304 gen_int_shift_amount (mode, 8),
3305 target, unsignedp, OPTAB_DIRECT);
3306 if (temp)
3307 return temp;
3310 last = get_last_insn ();
3312 temp1 = expand_binop (mode, ashl_optab, op0,
3313 gen_int_shift_amount (mode, 8), NULL_RTX,
3314 unsignedp, OPTAB_WIDEN);
3315 temp2 = expand_binop (mode, lshr_optab, op0,
3316 gen_int_shift_amount (mode, 8), NULL_RTX,
3317 unsignedp, OPTAB_WIDEN);
3318 if (temp1 && temp2)
3320 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
3321 unsignedp, OPTAB_WIDEN);
3322 if (temp)
3323 return temp;
3326 delete_insns_since (last);
3329 if (is_a <scalar_int_mode> (mode, &int_mode))
3331 temp = widen_bswap (int_mode, op0, target);
3332 if (temp)
3333 return temp;
3335 /* We do not provide a 128-bit bswap in libgcc so force the use of
3336 a double bswap for 64-bit targets. */
3337 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3338 && (UNITS_PER_WORD == 8
3339 || optab_handler (unoptab, word_mode) != CODE_FOR_nothing))
3341 temp = expand_doubleword_bswap (mode, op0, target);
3342 if (temp)
3343 return temp;
3347 goto try_libcall;
3350 if (CLASS_HAS_WIDER_MODES_P (mclass))
3351 FOR_EACH_WIDER_MODE (wider_mode, mode)
3353 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3355 rtx xop0 = op0;
3356 rtx_insn *last = get_last_insn ();
3358 /* For certain operations, we need not actually extend
3359 the narrow operand, as long as we will truncate the
3360 results to the same narrowness. */
3362 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3363 (unoptab == neg_optab
3364 || unoptab == one_cmpl_optab)
3365 && mclass == MODE_INT);
3367 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3368 unsignedp);
3370 if (temp)
3372 if (mclass != MODE_INT
3373 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3375 if (target == 0)
3376 target = gen_reg_rtx (mode);
3377 convert_move (target, temp, 0);
3378 return target;
3380 else
3381 return gen_lowpart (mode, temp);
3383 else
3384 delete_insns_since (last);
3388 /* These can be done a word at a time. */
3389 if (unoptab == one_cmpl_optab
3390 && is_int_mode (mode, &int_mode)
3391 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
3392 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3394 int i;
3395 rtx_insn *insns;
3397 if (target == 0
3398 || target == op0
3399 || reg_overlap_mentioned_p (target, op0)
3400 || !valid_multiword_target_p (target))
3401 target = gen_reg_rtx (int_mode);
3403 start_sequence ();
3405 /* Do the actual arithmetic. */
3406 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
3408 rtx target_piece = operand_subword (target, i, 1, int_mode);
3409 rtx x = expand_unop (word_mode, unoptab,
3410 operand_subword_force (op0, i, int_mode),
3411 target_piece, unsignedp);
3413 if (target_piece != x)
3414 emit_move_insn (target_piece, x);
3417 insns = get_insns ();
3418 end_sequence ();
3420 emit_insn (insns);
3421 return target;
3424 /* Emit ~op0 as op0 ^ -1. */
3425 if (unoptab == one_cmpl_optab
3426 && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3427 && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
3429 temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
3430 target, unsignedp, OPTAB_DIRECT);
3431 if (temp)
3432 return temp;
3435 if (optab_to_code (unoptab) == NEG)
3437 /* Try negating floating point values by flipping the sign bit. */
3438 if (is_a <scalar_float_mode> (mode, &float_mode))
3440 temp = expand_absneg_bit (NEG, float_mode, op0, target);
3441 if (temp)
3442 return temp;
3445 /* If there is no negation pattern, and we have no negative zero,
3446 try subtracting from zero. */
3447 if (!HONOR_SIGNED_ZEROS (mode))
3449 temp = expand_binop (mode, (unoptab == negv_optab
3450 ? subv_optab : sub_optab),
3451 CONST0_RTX (mode), op0, target,
3452 unsignedp, OPTAB_DIRECT);
3453 if (temp)
3454 return temp;
3458 /* Try calculating parity (x) as popcount (x) % 2. */
3459 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
3461 temp = expand_parity (int_mode, op0, target);
3462 if (temp)
3463 return temp;
3466 /* Try implementing ffs (x) in terms of clz (x). */
3467 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
3469 temp = expand_ffs (int_mode, op0, target);
3470 if (temp)
3471 return temp;
3474 /* Try implementing ctz (x) in terms of clz (x). */
3475 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
3477 temp = expand_ctz (int_mode, op0, target);
3478 if (temp)
3479 return temp;
3482 try_libcall:
3483 /* Now try a library call in this mode. */
3484 libfunc = optab_libfunc (unoptab, mode);
3485 if (libfunc)
3487 rtx_insn *insns;
3488 rtx value;
3489 rtx eq_value;
3490 machine_mode outmode = mode;
3492 /* All of these functions return small values. Thus we choose to
3493 have them return something that isn't a double-word. */
3494 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3495 || unoptab == clrsb_optab || unoptab == popcount_optab
3496 || unoptab == parity_optab)
3497 outmode
3498 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3499 optab_libfunc (unoptab, mode)));
3501 start_sequence ();
3503 /* Pass 1 for NO_QUEUE so we don't lose any increments
3504 if the libcall is cse'd or moved. */
3505 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3506 op0, mode);
3507 insns = get_insns ();
3508 end_sequence ();
3510 target = gen_reg_rtx (outmode);
3511 bool trapv = trapv_unoptab_p (unoptab);
3512 if (trapv)
3513 eq_value = NULL_RTX;
3514 else
3516 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3517 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3518 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3519 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3520 eq_value = simplify_gen_unary (ZERO_EXTEND,
3521 outmode, eq_value, mode);
3523 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3525 return target;
3528 /* It can't be done in this mode. Can we do it in a wider mode? */
3530 if (CLASS_HAS_WIDER_MODES_P (mclass))
3532 FOR_EACH_WIDER_MODE (wider_mode, mode)
3534 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3535 || optab_libfunc (unoptab, wider_mode))
3537 rtx xop0 = op0;
3538 rtx_insn *last = get_last_insn ();
3540 /* For certain operations, we need not actually extend
3541 the narrow operand, as long as we will truncate the
3542 results to the same narrowness. */
3543 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3544 (unoptab == neg_optab
3545 || unoptab == one_cmpl_optab
3546 || unoptab == bswap_optab)
3547 && mclass == MODE_INT);
3549 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3550 unsignedp);
3552 /* If we are generating clz using wider mode, adjust the
3553 result. Similarly for clrsb. */
3554 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3555 && temp != 0)
3557 scalar_int_mode wider_int_mode
3558 = as_a <scalar_int_mode> (wider_mode);
3559 int_mode = as_a <scalar_int_mode> (mode);
3560 temp = expand_binop
3561 (wider_mode, sub_optab, temp,
3562 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3563 - GET_MODE_PRECISION (int_mode),
3564 wider_int_mode),
3565 target, true, OPTAB_DIRECT);
3568 /* Likewise for bswap. */
3569 if (unoptab == bswap_optab && temp != 0)
3571 scalar_int_mode wider_int_mode
3572 = as_a <scalar_int_mode> (wider_mode);
3573 int_mode = as_a <scalar_int_mode> (mode);
3574 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3575 == GET_MODE_BITSIZE (wider_int_mode)
3576 && GET_MODE_PRECISION (int_mode)
3577 == GET_MODE_BITSIZE (int_mode));
3579 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3580 GET_MODE_BITSIZE (wider_int_mode)
3581 - GET_MODE_BITSIZE (int_mode),
3582 NULL_RTX, true);
3585 if (temp)
3587 if (mclass != MODE_INT)
3589 if (target == 0)
3590 target = gen_reg_rtx (mode);
3591 convert_move (target, temp, 0);
3592 return target;
3594 else
3595 return gen_lowpart (mode, temp);
3597 else
3598 delete_insns_since (last);
3603 /* One final attempt at implementing negation via subtraction,
3604 this time allowing widening of the operand. */
3605 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3607 rtx temp;
3608 temp = expand_binop (mode,
3609 unoptab == negv_optab ? subv_optab : sub_optab,
3610 CONST0_RTX (mode), op0,
3611 target, unsignedp, OPTAB_LIB_WIDEN);
3612 if (temp)
3613 return temp;
3616 return 0;
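/* Illustrative sketch (not part of optabs.c) of the HImode special
   case handled earlier in this function: a 16-bit byte swap is exactly
   a rotate by 8, or equivalently the pair of shifts or'd together.  */
static unsigned short
example_bswap16 (unsigned short x)
{
  return (unsigned short) ((x << 8) | (x >> 8));
}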
3619 /* Emit code to compute the absolute value of OP0, with result to
3620 TARGET if convenient. (TARGET may be 0.) The return value says
3621 where the result actually is to be found.
3623 MODE is the mode of the operand; the mode of the result is
3624 different but can be deduced from MODE. */
3628 rtx
3629 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3630 int result_unsignedp)
3632 rtx temp;
3634 if (GET_MODE_CLASS (mode) != MODE_INT
3635 || ! flag_trapv)
3636 result_unsignedp = 1;
3638 /* First try to do it with a special abs instruction. */
3639 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3640 op0, target, 0);
3641 if (temp != 0)
3642 return temp;
3644 /* For floating point modes, try clearing the sign bit. */
3645 scalar_float_mode float_mode;
3646 if (is_a <scalar_float_mode> (mode, &float_mode))
3648 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3649 if (temp)
3650 return temp;
3653 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3654 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3655 && !HONOR_SIGNED_ZEROS (mode))
3657 rtx_insn *last = get_last_insn ();
3659 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3660 op0, NULL_RTX, 0);
3661 if (temp != 0)
3662 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3663 OPTAB_WIDEN);
3665 if (temp != 0)
3666 return temp;
3668 delete_insns_since (last);
3671 /* If this machine has expensive jumps, we can do integer absolute
3672 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3673 where W is the width of MODE. */
3675 scalar_int_mode int_mode;
3676 if (is_int_mode (mode, &int_mode)
3677 && BRANCH_COST (optimize_insn_for_speed_p (),
3678 false) >= 2)
3680 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3681 GET_MODE_PRECISION (int_mode) - 1,
3682 NULL_RTX, 0);
3684 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3685 OPTAB_LIB_WIDEN);
3686 if (temp != 0)
3687 temp = expand_binop (int_mode,
3688 result_unsignedp ? sub_optab : subv_optab,
3689 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3691 if (temp != 0)
3692 return temp;
3695 return NULL_RTX;
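/* The shift-and-xor identity used above, as a self-contained sketch
   (assumes a 32-bit int with arithmetic right shift; like abs, the
   result is undefined for INT_MIN):  */
static inline int
abs_sketch (int x)
{
  int m = x >> 31;      /* 0 when x >= 0, -1 when x < 0.  */
  return (x ^ m) - m;   /* i.e. (((signed) x >> 31) ^ x) - ((signed) x >> 31).  */
}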
3699 expand_abs (machine_mode mode, rtx op0, rtx target,
3700 int result_unsignedp, int safe)
3702 rtx temp;
3703 rtx_code_label *op1;
3705 if (GET_MODE_CLASS (mode) != MODE_INT
3706 || ! flag_trapv)
3707 result_unsignedp = 1;
3709 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3710 if (temp != 0)
3711 return temp;
3713 /* If that does not win, use conditional jump and negate. */
3715 /* It is safe to use the target if it is the same
3716 as the source, provided it is also a pseudo register. */
3717 if (op0 == target && REG_P (op0)
3718 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3719 safe = 1;
3721 op1 = gen_label_rtx ();
3722 if (target == 0 || ! safe
3723 || GET_MODE (target) != mode
3724 || (MEM_P (target) && MEM_VOLATILE_P (target))
3725 || (REG_P (target)
3726 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3727 target = gen_reg_rtx (mode);
3729 emit_move_insn (target, op0);
3730 NO_DEFER_POP;
3732 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3733 NULL_RTX, NULL, op1,
3734 profile_probability::uninitialized ());
3736 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3737 target, target, 0);
3738 if (op0 != target)
3739 emit_move_insn (target, op0);
3740 emit_label (op1);
3741 OK_DEFER_POP;
3742 return target;
3745 /* Emit code to compute the one's complement absolute value of OP0
3746 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3747 (TARGET may be NULL_RTX.) The return value says where the result
3748 actually is to be found.
3750 MODE is the mode of the operand; it is also the mode of the
3751 result. */
3754 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3756 rtx temp;
3758 /* Not applicable for floating point modes. */
3759 if (FLOAT_MODE_P (mode))
3760 return NULL_RTX;
3762 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3763 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3765 rtx_insn *last = get_last_insn ();
3767 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3768 if (temp != 0)
3769 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3770 OPTAB_WIDEN);
3772 if (temp != 0)
3773 return temp;
3775 delete_insns_since (last);
3778 /* If this machine has expensive jumps, we can do one's complement
3779 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3781 scalar_int_mode int_mode;
3782 if (is_int_mode (mode, &int_mode)
3783 && BRANCH_COST (optimize_insn_for_speed_p (),
3784 false) >= 2)
3786 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3787 GET_MODE_PRECISION (int_mode) - 1,
3788 NULL_RTX, 0);
3790 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3791 OPTAB_LIB_WIDEN);
3793 if (temp != 0)
3794 return temp;
3797 return NULL_RTX;
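/* The one's-complement variant as a sketch, under the same
   assumptions: only the xor, with no correcting subtraction, so a
   negative X maps to ~X rather than -X.  */
static inline int
one_cmpl_abs_sketch (int x)
{
  return (x >> 31) ^ x;   /* x when x >= 0, ~x when x < 0.  */
}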
3800 /* A subroutine of expand_copysign, perform the copysign operation using the
3801 abs and neg primitives advertised to exist on the target. The assumption
3802 is that we have a split register file, and leaving op0 in fp registers,
3803 and not playing with subregs so much, will help the register allocator. */
3805 static rtx
3806 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3807 int bitpos, bool op0_is_abs)
3809 scalar_int_mode imode;
3810 enum insn_code icode;
3811 rtx sign;
3812 rtx_code_label *label;
3814 if (target == op1)
3815 target = NULL_RTX;
3817 /* Check if the back end provides an insn that handles signbit for the
3818 argument's mode. */
3819 icode = optab_handler (signbit_optab, mode);
3820 if (icode != CODE_FOR_nothing)
3822 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3823 sign = gen_reg_rtx (imode);
3824 emit_unop_insn (icode, sign, op1, UNKNOWN);
3826 else
3828 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3830 if (!int_mode_for_mode (mode).exists (&imode))
3831 return NULL_RTX;
3832 op1 = gen_lowpart (imode, op1);
3834 else
3836 int word;
3838 imode = word_mode;
3839 if (FLOAT_WORDS_BIG_ENDIAN)
3840 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3841 else
3842 word = bitpos / BITS_PER_WORD;
3843 bitpos = bitpos % BITS_PER_WORD;
3844 op1 = operand_subword_force (op1, word, mode);
3847 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3848 sign = expand_binop (imode, and_optab, op1,
3849 immed_wide_int_const (mask, imode),
3850 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3853 if (!op0_is_abs)
3855 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3856 if (op0 == NULL)
3857 return NULL_RTX;
3858 target = op0;
3860 else
3862 if (target == NULL_RTX)
3863 target = copy_to_reg (op0);
3864 else
3865 emit_move_insn (target, op0);
3868 label = gen_label_rtx ();
3869 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3871 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3872 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3873 else
3874 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3875 if (op0 != target)
3876 emit_move_insn (target, op0);
3878 emit_label (label);
3880 return target;
3884 /* A subroutine of expand_copysign, perform the entire copysign operation
3885 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3886 is true if op0 is known to have its sign bit clear. */
3888 static rtx
3889 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3890 int bitpos, bool op0_is_abs)
3892 scalar_int_mode imode;
3893 int word, nwords, i;
3894 rtx temp;
3895 rtx_insn *insns;
3897 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3899 if (!int_mode_for_mode (mode).exists (&imode))
3900 return NULL_RTX;
3901 word = 0;
3902 nwords = 1;
3904 else
3906 imode = word_mode;
3908 if (FLOAT_WORDS_BIG_ENDIAN)
3909 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3910 else
3911 word = bitpos / BITS_PER_WORD;
3912 bitpos = bitpos % BITS_PER_WORD;
3913 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3916 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3918 if (target == 0
3919 || target == op0
3920 || target == op1
3921 || reg_overlap_mentioned_p (target, op0)
3922 || reg_overlap_mentioned_p (target, op1)
3923 || (nwords > 1 && !valid_multiword_target_p (target)))
3924 target = gen_reg_rtx (mode);
3926 if (nwords > 1)
3928 start_sequence ();
3930 for (i = 0; i < nwords; ++i)
3932 rtx targ_piece = operand_subword (target, i, 1, mode);
3933 rtx op0_piece = operand_subword_force (op0, i, mode);
3935 if (i == word)
3937 if (!op0_is_abs)
3938 op0_piece
3939 = expand_binop (imode, and_optab, op0_piece,
3940 immed_wide_int_const (~mask, imode),
3941 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3942 op1 = expand_binop (imode, and_optab,
3943 operand_subword_force (op1, i, mode),
3944 immed_wide_int_const (mask, imode),
3945 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3947 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3948 targ_piece, 1, OPTAB_LIB_WIDEN);
3949 if (temp != targ_piece)
3950 emit_move_insn (targ_piece, temp);
3952 else
3953 emit_move_insn (targ_piece, op0_piece);
3956 insns = get_insns ();
3957 end_sequence ();
3959 emit_insn (insns);
3961 else
3963 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3964 immed_wide_int_const (mask, imode),
3965 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3967 op0 = gen_lowpart (imode, op0);
3968 if (!op0_is_abs)
3969 op0 = expand_binop (imode, and_optab, op0,
3970 immed_wide_int_const (~mask, imode),
3971 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3973 temp = expand_binop (imode, ior_optab, op0, op1,
3974 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3975 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3978 return target;
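/* The single-word case above, written out as a sketch on IEEE binary32
   (assumes float is 32 bits with unsigned int as its integer mode;
   memcpy is the portable way to move the bits):  */
#include <string.h>
static float
copysignf_sketch (float x, float y)
{
  unsigned int ix, iy;
  memcpy (&ix, &x, sizeof ix);
  memcpy (&iy, &y, sizeof iy);
  ix = (ix & ~0x80000000u)      /* clear the sign bit of x ...  */
       | (iy & 0x80000000u);    /* ... and OR in the sign bit of y.  */
  memcpy (&x, &ix, sizeof x);
  return x;
}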
3981 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3982 scalar floating point mode. Return NULL if we do not know how to
3983 expand the operation inline. */
3986 expand_copysign (rtx op0, rtx op1, rtx target)
3988 scalar_float_mode mode;
3989 const struct real_format *fmt;
3990 bool op0_is_abs;
3991 rtx temp;
3993 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3994 gcc_assert (GET_MODE (op1) == mode);
3996 /* First try to do it with a special instruction. */
3997 temp = expand_binop (mode, copysign_optab, op0, op1,
3998 target, 0, OPTAB_DIRECT);
3999 if (temp)
4000 return temp;
4002 fmt = REAL_MODE_FORMAT (mode);
4003 if (fmt == NULL || !fmt->has_signed_zero)
4004 return NULL_RTX;
4006 op0_is_abs = false;
4007 if (CONST_DOUBLE_AS_FLOAT_P (op0))
4009 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
4010 op0 = simplify_unary_operation (ABS, mode, op0, mode);
4011 op0_is_abs = true;
4014 if (fmt->signbit_ro >= 0
4015 && (CONST_DOUBLE_AS_FLOAT_P (op0)
4016 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
4017 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
4019 temp = expand_copysign_absneg (mode, op0, op1, target,
4020 fmt->signbit_ro, op0_is_abs);
4021 if (temp)
4022 return temp;
4025 if (fmt->signbit_rw < 0)
4026 return NULL_RTX;
4027 return expand_copysign_bit (mode, op0, op1, target,
4028 fmt->signbit_rw, op0_is_abs);
4031 /* Generate an instruction whose insn-code is INSN_CODE,
4032 with two operands: an output TARGET and an input OP0.
4033 TARGET *must* be nonzero, and the output is always stored there.
4034 CODE is an rtx code such that (CODE OP0) is an rtx that describes
4035 the value that is stored into TARGET.
4037 Return false if expansion failed. */
4039 bool
4040 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
4041 enum rtx_code code)
4043 class expand_operand ops[2];
4044 rtx_insn *pat;
4046 create_output_operand (&ops[0], target, GET_MODE (target));
4047 create_input_operand (&ops[1], op0, GET_MODE (op0));
4048 pat = maybe_gen_insn (icode, 2, ops);
4049 if (!pat)
4050 return false;
4052 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
4053 && code != UNKNOWN)
4054 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
4055 GET_MODE (op0));
4057 emit_insn (pat);
4059 if (ops[0].value != target)
4060 emit_move_insn (target, ops[0].value);
4061 return true;
4063 /* Generate an instruction whose insn-code is INSN_CODE,
4064 with two operands: an output TARGET and an input OP0.
4065 TARGET *must* be nonzero, and the output is always stored there.
4066 CODE is an rtx code such that (CODE OP0) is an rtx that describes
4067 the value that is stored into TARGET. */
4069 void
4070 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
4072 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
4073 gcc_assert (ok);
4076 struct no_conflict_data
4078 rtx target;
4079 rtx_insn *first, *insn;
4080 bool must_stay;
4083 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
4084 the currently examined clobber / store has to stay in the list of
4085 insns that constitute the actual libcall block. */
4086 static void
4087 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
4089 struct no_conflict_data *p = (struct no_conflict_data *) p0;
4091 /* If this insn directly contributes to setting the target, it must stay. */
4092 if (reg_overlap_mentioned_p (p->target, dest))
4093 p->must_stay = true;
4094 /* If we haven't committed to keeping any other insns in the list yet,
4095 there is nothing more to check. */
4096 else if (p->insn == p->first)
4097 return;
4098 /* If this insn sets / clobbers a register that feeds one of the insns
4099 already in the list, this insn has to stay too. */
4100 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
4101 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
4102 || reg_used_between_p (dest, p->first, p->insn)
4103 /* Likewise if this insn depends on a register set by a previous
4104 insn in the list, or if it sets a result (presumably a hard
4105 register) that is set or clobbered by a previous insn.
4106 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
4107 SET_DEST perform the former check on the address, and the latter
4108 check on the MEM. */
4109 || (GET_CODE (set) == SET
4110 && (modified_in_p (SET_SRC (set), p->first)
4111 || modified_in_p (SET_DEST (set), p->first)
4112 || modified_between_p (SET_SRC (set), p->first, p->insn)
4113 || modified_between_p (SET_DEST (set), p->first, p->insn))))
4114 p->must_stay = true;
4118 /* Emit code to make a call to a constant function or a library call.
4120 INSNS is a list containing all insns emitted in the call.
4121 These insns leave the result in RESULT. Our job is to copy RESULT
4122 to TARGET, which is logically equivalent to EQUIV.
4124 We first emit any insns that set a pseudo on the assumption that these are
4125 loading constants into registers; doing so allows them to be safely cse'ed
4126 between blocks. Then we emit all the other insns in the block, followed by
4127 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
4128 note with an operand of EQUIV. */
4130 static void
4131 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
4132 bool equiv_may_trap)
4134 rtx final_dest = target;
4135 rtx_insn *next, *last, *insn;
4137 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
4138 into a MEM later. Protect the libcall block from this change. */
4139 if (! REG_P (target) || REG_USERVAR_P (target))
4140 target = gen_reg_rtx (GET_MODE (target));
4142 /* If we're using non-call exceptions, a libcall corresponding to an
4143 operation that may trap may also trap. */
4144 /* ??? See the comment in front of make_reg_eh_region_note. */
4145 if (cfun->can_throw_non_call_exceptions
4146 && (equiv_may_trap || may_trap_p (equiv)))
4148 for (insn = insns; insn; insn = NEXT_INSN (insn))
4149 if (CALL_P (insn))
4151 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
4152 if (note)
4154 int lp_nr = INTVAL (XEXP (note, 0));
4155 if (lp_nr == 0 || lp_nr == INT_MIN)
4156 remove_note (insn, note);
4160 else
4162 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4163 reg note to indicate that this call cannot throw or execute a nonlocal
4164 goto (unless there is already a REG_EH_REGION note, in which case
4165 we update it). */
4166 for (insn = insns; insn; insn = NEXT_INSN (insn))
4167 if (CALL_P (insn))
4168 make_reg_eh_region_note_nothrow_nononlocal (insn);
4171 /* First emit all insns that set pseudos. Remove them from the list as
4172 we go. Avoid insns that set pseudos which were referenced in previous
4173 insns. These can be generated by move_by_pieces, for example,
4174 to update an address. Similarly, avoid insns that reference things
4175 set in previous insns. */
4177 for (insn = insns; insn; insn = next)
4179 rtx set = single_set (insn);
4181 next = NEXT_INSN (insn);
4183 if (set != 0 && REG_P (SET_DEST (set))
4184 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4186 struct no_conflict_data data;
4188 data.target = const0_rtx;
4189 data.first = insns;
4190 data.insn = insn;
4191 data.must_stay = 0;
4192 note_stores (insn, no_conflict_move_test, &data);
4193 if (! data.must_stay)
4195 if (PREV_INSN (insn))
4196 SET_NEXT_INSN (PREV_INSN (insn)) = next;
4197 else
4198 insns = next;
4200 if (next)
4201 SET_PREV_INSN (next) = PREV_INSN (insn);
4203 add_insn (insn);
4207 /* Some ports use a loop to copy large arguments onto the stack.
4208 Don't move anything outside such a loop. */
4209 if (LABEL_P (insn))
4210 break;
4213 /* Write the remaining insns followed by the final copy. */
4214 for (insn = insns; insn; insn = next)
4216 next = NEXT_INSN (insn);
4218 add_insn (insn);
4221 last = emit_move_insn (target, result);
4222 if (equiv)
4223 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
4225 if (final_dest != target)
4226 emit_move_insn (final_dest, target);
4229 void
4230 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
4232 emit_libcall_block_1 (insns, target, result, equiv, false);
4235 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4236 PURPOSE describes how this comparison will be used. CODE is the rtx
4237 comparison code we will be using.
4239 ??? Actually, CODE is slightly weaker than that. A target is still
4240 required to implement all of the normal bcc operations, but not
4241 required to implement all (or any) of the unordered bcc operations. */
4244 can_compare_p (enum rtx_code code, machine_mode mode,
4245 enum can_compare_purpose purpose)
4247 rtx test;
4248 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
4251 enum insn_code icode;
4253 if (purpose == ccp_jump
4254 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
4255 && insn_operand_matches (icode, 0, test))
4256 return 1;
4257 if (purpose == ccp_store_flag
4258 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
4259 && insn_operand_matches (icode, 1, test))
4260 return 1;
4261 if (purpose == ccp_cmov
4262 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
4263 return 1;
4265 mode = GET_MODE_WIDER_MODE (mode).else_void ();
4266 PUT_MODE (test, mode);
4268 while (mode != VOIDmode);
4270 return 0;
4273 /* Return whether RTL code CODE corresponds to an unsigned optab. */
4275 static bool
4276 unsigned_optab_p (enum rtx_code code)
4278 return code == LTU || code == LEU || code == GTU || code == GEU;
4281 /* Return whether the backend-emitted comparison for code CODE, comparing
4282 operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
4283 operand OPNO of pattern ICODE. */
4285 static bool
4286 insn_predicate_matches_p (enum insn_code icode, unsigned int opno,
4287 enum rtx_code code, machine_mode mask_mode,
4288 machine_mode value_mode)
4290 rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1);
4291 rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2);
4292 rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2);
4293 return insn_operand_matches (icode, opno, test);
4296 /* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
4297 for code CODE, comparing operands of mode VALUE_MODE and producing a result
4298 with MASK_MODE. */
4300 bool
4301 can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode,
4302 machine_mode mask_mode)
4304 enum insn_code icode
4305 = get_vec_cmp_icode (value_mode, mask_mode, unsigned_optab_p (code));
4306 if (icode == CODE_FOR_nothing)
4307 return false;
4309 return insn_predicate_matches_p (icode, 1, code, mask_mode, value_mode);
4312 /* Return whether the backend can emit a vector comparison (vcond/vcondu) for
4313 code CODE, comparing operands of mode CMP_OP_MODE and producing a result
4314 with VALUE_MODE. */
4316 bool
4317 can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
4318 machine_mode cmp_op_mode)
4320 enum insn_code icode
4321 = get_vcond_icode (value_mode, cmp_op_mode, unsigned_optab_p (code));
4322 if (icode == CODE_FOR_nothing)
4323 return false;
4325 return insn_predicate_matches_p (icode, 3, code, value_mode, cmp_op_mode);
4328 /* Return whether the backend can emit vector set instructions for inserting
4329 element into vector at variable index position. */
4331 bool
4332 can_vec_set_var_idx_p (machine_mode vec_mode)
4334 if (!VECTOR_MODE_P (vec_mode))
4335 return false;
4337 machine_mode inner_mode = GET_MODE_INNER (vec_mode);
4338 rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1);
4339 rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2);
4340 rtx reg3 = alloca_raw_REG (VOIDmode, LAST_VIRTUAL_REGISTER + 3);
4342 enum insn_code icode = optab_handler (vec_set_optab, vec_mode);
4344 return icode != CODE_FOR_nothing && insn_operand_matches (icode, 0, reg1)
4345 && insn_operand_matches (icode, 1, reg2)
4346 && insn_operand_matches (icode, 2, reg3);
4349 /* This function is called when we are going to emit a compare instruction that
4350 compares the values found in X and Y, using the rtl operator COMPARISON.
4352 If they have mode BLKmode, then SIZE specifies the size of both operands.
4354 UNSIGNEDP nonzero says that the operands are unsigned;
4355 this matters if they need to be widened (as given by METHODS).
4357 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
4358 if we failed to produce one.
4360 *PMODE is the mode of the inputs (in case they are const_int).
4362 This function performs all the setup necessary so that the caller only has
4363 to emit a single comparison insn. This setup can involve doing a BLKmode
4364 comparison or emitting a library call to perform the comparison if no insn
4365 is available to handle it.
4366 The values which are passed in through pointers can be modified; the caller
4367 should perform the comparison on the modified values. Constant
4368 comparisons must have already been folded. */
4370 static void
4371 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4372 int unsignedp, enum optab_methods methods,
4373 rtx *ptest, machine_mode *pmode)
4375 machine_mode mode = *pmode;
4376 rtx libfunc, test;
4377 machine_mode cmp_mode;
4378 enum mode_class mclass;
4380 /* The other methods are not needed. */
4381 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
4382 || methods == OPTAB_LIB_WIDEN);
4384 if (CONST_SCALAR_INT_P (y))
4385 canonicalize_comparison (mode, &comparison, &y);
4387 /* If we are optimizing, force expensive constants into a register. */
4388 if (CONSTANT_P (x) && optimize
4389 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
4390 > COSTS_N_INSNS (1)))
4391 x = force_reg (mode, x);
4393 if (CONSTANT_P (y) && optimize
4394 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
4395 > COSTS_N_INSNS (1)))
4396 y = force_reg (mode, y);
4398 /* Don't let both operands fail to indicate the mode. */
4399 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4400 x = force_reg (mode, x);
4401 if (mode == VOIDmode)
4402 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
4404 /* Handle all BLKmode compares. */
4406 if (mode == BLKmode)
4408 machine_mode result_mode;
4409 enum insn_code cmp_code;
4410 rtx result;
4411 rtx opalign
4412 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4414 gcc_assert (size);
4416 /* Try to use a memory block compare insn - either cmpstr
4417 or cmpmem will do. */
4418 opt_scalar_int_mode cmp_mode_iter;
4419 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
4421 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
4422 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
4423 if (cmp_code == CODE_FOR_nothing)
4424 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
4425 if (cmp_code == CODE_FOR_nothing)
4426 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
4427 if (cmp_code == CODE_FOR_nothing)
4428 continue;
4430 /* Must make sure the size fits the insn's mode. */
4431 if (CONST_INT_P (size)
4432 ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
4433 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
4434 > GET_MODE_BITSIZE (cmp_mode)))
4435 continue;
4437 result_mode = insn_data[cmp_code].operand[0].mode;
4438 result = gen_reg_rtx (result_mode);
4439 size = convert_to_mode (cmp_mode, size, 1);
4440 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4442 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4443 *pmode = result_mode;
4444 return;
4447 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4448 goto fail;
4450 /* Otherwise call a library function. */
4451 result = emit_block_comp_via_libcall (x, y, size);
4453 x = result;
4454 y = const0_rtx;
4455 mode = TYPE_MODE (integer_type_node);
4456 methods = OPTAB_LIB_WIDEN;
4457 unsignedp = false;
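/* The library fallback is morally a memcmp whose result is then
   compared against zero; e.g. a BLKmode equality test reduces to this
   sketch:  */
#include <string.h>
static inline int
blk_eq_sketch (const void *x, const void *y, size_t n)
{
  /* memcmp returns <0, 0 or >0; the code above feeds that result into
     the original comparison against const0_rtx.  */
  return memcmp (x, y, n) == 0;
}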
4460 /* Don't allow operands to the compare to trap, as that can put the
4461 compare and branch in different basic blocks. */
4462 if (cfun->can_throw_non_call_exceptions)
4464 if (may_trap_p (x))
4465 x = copy_to_reg (x);
4466 if (may_trap_p (y))
4467 y = copy_to_reg (y);
4470 if (GET_MODE_CLASS (mode) == MODE_CC)
4472 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
4473 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4474 gcc_assert (icode != CODE_FOR_nothing
4475 && insn_operand_matches (icode, 0, test));
4476 *ptest = test;
4477 return;
4480 mclass = GET_MODE_CLASS (mode);
4481 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4482 FOR_EACH_MODE_FROM (cmp_mode, mode)
4484 enum insn_code icode;
4485 icode = optab_handler (cbranch_optab, cmp_mode);
4486 if (icode != CODE_FOR_nothing
4487 && insn_operand_matches (icode, 0, test))
4489 rtx_insn *last = get_last_insn ();
4490 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4491 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4492 if (op0 && op1
4493 && insn_operand_matches (icode, 1, op0)
4494 && insn_operand_matches (icode, 2, op1))
4496 XEXP (test, 0) = op0;
4497 XEXP (test, 1) = op1;
4498 *ptest = test;
4499 *pmode = cmp_mode;
4500 return;
4502 delete_insns_since (last);
4505 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4506 break;
4509 if (methods != OPTAB_LIB_WIDEN)
4510 goto fail;
4512 if (SCALAR_FLOAT_MODE_P (mode))
4514 /* Small trick if UNORDERED isn't implemented by the hardware. */
4515 if (comparison == UNORDERED && rtx_equal_p (x, y))
4517 prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
4518 ptest, pmode);
4519 if (*ptest)
4520 return;
4523 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4525 else
4527 rtx result;
4528 machine_mode ret_mode;
4530 /* Handle a libcall just for the mode we are using. */
4531 libfunc = optab_libfunc (cmp_optab, mode);
4532 gcc_assert (libfunc);
4534 /* If we want unsigned, and this mode has a distinct unsigned
4535 comparison routine, use that. */
4536 if (unsignedp)
4538 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4539 if (ulibfunc)
4540 libfunc = ulibfunc;
4543 ret_mode = targetm.libgcc_cmp_return_mode ();
4544 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4545 ret_mode, x, mode, y, mode);
4547 /* There are two kinds of comparison routines. Biased routines
4548 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4549 of gcc expect that the comparison operation is equivalent
4550 to the modified comparison. For signed comparisons compare the
4551 result against 1 in the biased case, and zero in the unbiased
4552 case. For unsigned comparisons always compare against 1 after
4553 biasing the unbiased result by adding 1. This gives us a way to
4554 represent LTU.
4555 The comparisons in the fixed-point helper library are always
4556 biased. */
4557 x = result;
4558 y = const1_rtx;
4560 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4562 if (unsignedp)
4563 x = plus_constant (ret_mode, result, 1);
4564 else
4565 y = const0_rtx;
4568 *pmode = ret_mode;
4569 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4570 ptest, pmode);
4573 return;
4575 fail:
4576 *ptest = NULL_RTX;
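/* The biased convention in concrete terms (a sketch; libgcc's __cmpdi2
   returns 0, 1 or 2 for <, == and > respectively), showing why a
   signed comparison is checked against 1 in the biased case:  */
extern int __cmpdi2 (long long, long long);
static inline int
lt_via_libcall_sketch (long long a, long long b)
{
  return __cmpdi2 (a, b) < 1;   /* biased result is 0 iff a < b.  */
}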
4579 /* Before emitting an insn with code ICODE, make sure that X, which is going
4580 to be used for operand OPNUM of the insn, is converted from mode MODE to
4581 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4582 that it is accepted by the operand predicate. Return the new value. */
4585 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4586 machine_mode wider_mode, int unsignedp)
4588 if (mode != wider_mode)
4589 x = convert_modes (wider_mode, mode, x, unsignedp);
4591 if (!insn_operand_matches (icode, opnum, x))
4593 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4594 if (reload_completed)
4595 return NULL_RTX;
4596 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4597 return NULL_RTX;
4598 x = copy_to_mode_reg (op_mode, x);
4601 return x;
4604 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4605 we can do the branch. */
4607 static void
4608 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4609 profile_probability prob)
4611 machine_mode optab_mode;
4612 enum mode_class mclass;
4613 enum insn_code icode;
4614 rtx_insn *insn;
4616 mclass = GET_MODE_CLASS (mode);
4617 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4618 icode = optab_handler (cbranch_optab, optab_mode);
4620 gcc_assert (icode != CODE_FOR_nothing);
4621 gcc_assert (insn_operand_matches (icode, 0, test));
4622 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4623 XEXP (test, 1), label));
4624 if (prob.initialized_p ()
4625 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4626 && insn
4627 && JUMP_P (insn)
4628 && any_condjump_p (insn)
4629 && !find_reg_note (insn, REG_BR_PROB, 0))
4630 add_reg_br_prob_note (insn, prob);
4633 /* Generate code to compare X with Y so that the condition codes are
4634 set and to jump to LABEL if the condition is true. If X is a
4635 constant and Y is not a constant, then the comparison is swapped to
4636 ensure that the comparison RTL has the canonical form.
4638 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4639 need to be widened. UNSIGNEDP is also used to select the proper
4640 branch condition code.
4642 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4644 MODE is the mode of the inputs (in case they are const_int).
4646 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4647 It will be potentially converted into an unsigned variant based on
4648 UNSIGNEDP to select a proper jump instruction.
4650 PROB is the probability of jumping to LABEL. */
4652 void
4653 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4654 machine_mode mode, int unsignedp, rtx label,
4655 profile_probability prob)
4657 rtx op0 = x, op1 = y;
4658 rtx test;
4660 /* Swap operands and condition to ensure canonical RTL. */
4661 if (swap_commutative_operands_p (x, y)
4662 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4664 op0 = y, op1 = x;
4665 comparison = swap_condition (comparison);
4668 /* If OP0 is still a constant, then both X and Y must be constants
4669 or the opposite comparison is not supported. Force X into a register
4670 to create canonical RTL. */
4671 if (CONSTANT_P (op0))
4672 op0 = force_reg (mode, op0);
4674 if (unsignedp)
4675 comparison = unsigned_condition (comparison);
4677 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4678 &test, &mode);
4679 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4683 /* Emit a library call comparison between floating point X and Y.
4684 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4686 static void
4687 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4688 rtx *ptest, machine_mode *pmode)
4690 enum rtx_code swapped = swap_condition (comparison);
4691 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4692 machine_mode orig_mode = GET_MODE (x);
4693 machine_mode mode;
4694 rtx true_rtx, false_rtx;
4695 rtx value, target, equiv;
4696 rtx_insn *insns;
4697 rtx libfunc = 0;
4698 bool reversed_p = false;
4699 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4701 FOR_EACH_MODE_FROM (mode, orig_mode)
4703 if (code_to_optab (comparison)
4704 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4705 break;
4707 if (code_to_optab (swapped)
4708 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4710 std::swap (x, y);
4711 comparison = swapped;
4712 break;
4715 if (code_to_optab (reversed)
4716 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4718 comparison = reversed;
4719 reversed_p = true;
4720 break;
4724 gcc_assert (mode != VOIDmode);
4726 if (mode != orig_mode)
4728 x = convert_to_mode (mode, x, 0);
4729 y = convert_to_mode (mode, y, 0);
4732 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4733 the RTL. This allows the RTL optimizers to delete the libcall if the
4734 condition can be determined at compile-time. */
4735 if (comparison == UNORDERED
4736 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4738 true_rtx = const_true_rtx;
4739 false_rtx = const0_rtx;
4741 else
4743 switch (comparison)
4745 case EQ:
4746 true_rtx = const0_rtx;
4747 false_rtx = const_true_rtx;
4748 break;
4750 case NE:
4751 true_rtx = const_true_rtx;
4752 false_rtx = const0_rtx;
4753 break;
4755 case GT:
4756 true_rtx = const1_rtx;
4757 false_rtx = const0_rtx;
4758 break;
4760 case GE:
4761 true_rtx = const0_rtx;
4762 false_rtx = constm1_rtx;
4763 break;
4765 case LT:
4766 true_rtx = constm1_rtx;
4767 false_rtx = const0_rtx;
4768 break;
4770 case LE:
4771 true_rtx = const0_rtx;
4772 false_rtx = const1_rtx;
4773 break;
4775 default:
4776 gcc_unreachable ();
4780 if (comparison == UNORDERED)
4782 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4783 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4784 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4785 temp, const_true_rtx, equiv);
4787 else
4789 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4790 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4791 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4792 equiv, true_rtx, false_rtx);
4795 start_sequence ();
4796 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4797 cmp_mode, x, mode, y, mode);
4798 insns = get_insns ();
4799 end_sequence ();
4801 target = gen_reg_rtx (cmp_mode);
4802 emit_libcall_block (insns, target, value, equiv);
4804 if (comparison == UNORDERED
4805 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4806 || reversed_p)
4807 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4808 else
4809 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4811 *pmode = cmp_mode;
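/* A caller-side view of those return conventions (a sketch): libgcc's
   __ledf2 returns a value <= 0 iff neither argument is a NaN and
   a <= b, so the soft-float test becomes a signed comparison of the
   libcall result with zero.  */
extern int __ledf2 (double, double);
static inline int
le_via_float_libcall_sketch (double a, double b)
{
  return __ledf2 (a, b) <= 0;
}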
4814 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4816 void
4817 emit_indirect_jump (rtx loc)
4819 if (!targetm.have_indirect_jump ())
4820 sorry ("indirect jumps are not available on this target");
4821 else
4823 class expand_operand ops[1];
4824 create_address_operand (&ops[0], loc);
4825 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4826 emit_barrier ();
4831 /* Emit a conditional move instruction if the machine supports one for that
4832 condition and machine mode.
4834 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4835 the mode to use should they be constants. If it is VOIDmode, they cannot
4836 both be constants.
4838 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4839 should be stored there. MODE is the mode to use should they be constants.
4840 If it is VOIDmode, they cannot both be constants.
4842 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4843 is not supported. */
4846 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4847 machine_mode cmode, rtx op2, rtx op3,
4848 machine_mode mode, int unsignedp)
4850 rtx comparison;
4851 rtx_insn *last;
4852 enum insn_code icode;
4853 enum rtx_code reversed;
4855 /* If the two source operands are identical, that's just a move. */
4857 if (rtx_equal_p (op2, op3))
4859 if (!target)
4860 target = gen_reg_rtx (mode);
4862 emit_move_insn (target, op3);
4863 return target;
4866 /* If one operand is constant, make it the second one. Only do this
4867 if the other operand is not constant as well. */
4869 if (swap_commutative_operands_p (op0, op1))
4871 std::swap (op0, op1);
4872 code = swap_condition (code);
4875 /* get_condition will prefer to generate LT and GT even if the old
4876 comparison was against zero, so undo that canonicalization here since
4877 comparisons against zero are cheaper. */
4878 if (code == LT && op1 == const1_rtx)
4879 code = LE, op1 = const0_rtx;
4880 else if (code == GT && op1 == constm1_rtx)
4881 code = GE, op1 = const0_rtx;
4883 if (cmode == VOIDmode)
4884 cmode = GET_MODE (op0);
4886 enum rtx_code orig_code = code;
4887 bool swapped = false;
4888 if (swap_commutative_operands_p (op2, op3)
4889 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4890 != UNKNOWN))
4892 std::swap (op2, op3);
4893 code = reversed;
4894 swapped = true;
4897 if (mode == VOIDmode)
4898 mode = GET_MODE (op2);
4900 icode = direct_optab_handler (movcc_optab, mode);
4902 if (icode == CODE_FOR_nothing)
4903 return NULL_RTX;
4905 if (!target)
4906 target = gen_reg_rtx (mode);
4908 for (int pass = 0; ; pass++)
4910 code = unsignedp ? unsigned_condition (code) : code;
4911 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4913 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4914 punt and let the caller figure out how best to deal with this
4915 situation. */
4916 if (COMPARISON_P (comparison))
4918 saved_pending_stack_adjust save;
4919 save_pending_stack_adjust (&save);
4920 last = get_last_insn ();
4921 do_pending_stack_adjust ();
4922 machine_mode cmpmode = cmode;
4923 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4924 GET_CODE (comparison), NULL_RTX, unsignedp,
4925 OPTAB_WIDEN, &comparison, &cmpmode);
4926 if (comparison)
4928 class expand_operand ops[4];
4930 create_output_operand (&ops[0], target, mode);
4931 create_fixed_operand (&ops[1], comparison);
4932 create_input_operand (&ops[2], op2, mode);
4933 create_input_operand (&ops[3], op3, mode);
4934 if (maybe_expand_insn (icode, 4, ops))
4936 if (ops[0].value != target)
4937 convert_move (target, ops[0].value, false);
4938 return target;
4941 delete_insns_since (last);
4942 restore_pending_stack_adjust (&save);
4945 if (pass == 1)
4946 return NULL_RTX;
4948 /* If the preferred op2/op3 order is not usable, retry with the other
4949 operand order; perhaps it will expand successfully. */
4950 if (swapped)
4951 code = orig_code;
4952 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4953 NULL))
4954 != UNKNOWN)
4955 code = reversed;
4956 else
4957 return NULL_RTX;
4958 std::swap (op2, op3);
4963 /* Emit a conditional negate or bitwise complement using the
4964 negcc or notcc optabs if available. Return NULL_RTX if such operations
4965 are not available. Otherwise return the RTX holding the result.
4966 TARGET is the desired destination of the result. COMP is the comparison
4967 on which to negate. If COND is true move into TARGET the negation
4968 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4969 CODE is either NEG or NOT. MODE is the machine mode in which the
4970 operation is performed. */
4973 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4974 machine_mode mode, rtx cond, rtx op1,
4975 rtx op2)
4977 optab op = unknown_optab;
4978 if (code == NEG)
4979 op = negcc_optab;
4980 else if (code == NOT)
4981 op = notcc_optab;
4982 else
4983 gcc_unreachable ();
4985 insn_code icode = direct_optab_handler (op, mode);
4987 if (icode == CODE_FOR_nothing)
4988 return NULL_RTX;
4990 if (!target)
4991 target = gen_reg_rtx (mode);
4993 rtx_insn *last = get_last_insn ();
4994 class expand_operand ops[4];
4996 create_output_operand (&ops[0], target, mode);
4997 create_fixed_operand (&ops[1], cond);
4998 create_input_operand (&ops[2], op1, mode);
4999 create_input_operand (&ops[3], op2, mode);
5001 if (maybe_expand_insn (icode, 4, ops))
5003 if (ops[0].value != target)
5004 convert_move (target, ops[0].value, false);
5006 return target;
5008 delete_insns_since (last);
5009 return NULL_RTX;
5012 /* Emit a conditional addition instruction if the machine supports one for that
5013 condition and machine mode.
5015 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
5016 the mode to use should they be constants. If it is VOIDmode, they cannot
5017 both be constants.
5019 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
5020 should be stored there. MODE is the mode to use should they be constants.
5021 If it is VOIDmode, they cannot both be constants.
5023 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
5024 is not supported. */
5027 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
5028 machine_mode cmode, rtx op2, rtx op3,
5029 machine_mode mode, int unsignedp)
5031 rtx comparison;
5032 rtx_insn *last;
5033 enum insn_code icode;
5035 /* If one operand is constant, make it the second one. Only do this
5036 if the other operand is not constant as well. */
5038 if (swap_commutative_operands_p (op0, op1))
5040 std::swap (op0, op1);
5041 code = swap_condition (code);
5044 /* get_condition will prefer to generate LT and GT even if the old
5045 comparison was against zero, so undo that canonicalization here since
5046 comparisons against zero are cheaper. */
5047 if (code == LT && op1 == const1_rtx)
5048 code = LE, op1 = const0_rtx;
5049 else if (code == GT && op1 == constm1_rtx)
5050 code = GE, op1 = const0_rtx;
5052 if (cmode == VOIDmode)
5053 cmode = GET_MODE (op0);
5055 if (mode == VOIDmode)
5056 mode = GET_MODE (op2);
5058 icode = optab_handler (addcc_optab, mode);
5060 if (icode == CODE_FOR_nothing)
5061 return 0;
5063 if (!target)
5064 target = gen_reg_rtx (mode);
5066 code = unsignedp ? unsigned_condition (code) : code;
5067 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
5069 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
5070 return NULL and let the caller figure out how best to deal with this
5071 situation. */
5072 if (!COMPARISON_P (comparison))
5073 return NULL_RTX;
5075 do_pending_stack_adjust ();
5076 last = get_last_insn ();
5077 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
5078 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
5079 &comparison, &cmode);
5080 if (comparison)
5082 class expand_operand ops[4];
5084 create_output_operand (&ops[0], target, mode);
5085 create_fixed_operand (&ops[1], comparison);
5086 create_input_operand (&ops[2], op2, mode);
5087 create_input_operand (&ops[3], op3, mode);
5088 if (maybe_expand_insn (icode, 4, ops))
5090 if (ops[0].value != target)
5091 convert_move (target, ops[0].value, false);
5092 return target;
5095 delete_insns_since (last);
5096 return NULL_RTX;
5099 /* These functions attempt to generate an insn body, rather than
5100 emitting the insn, but if the gen function already emits them, we
5101 make no attempt to turn them back into naked patterns. */
5103 /* Generate and return an insn body to add Y to X. */
5105 rtx_insn *
5106 gen_add2_insn (rtx x, rtx y)
5108 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
5110 gcc_assert (insn_operand_matches (icode, 0, x));
5111 gcc_assert (insn_operand_matches (icode, 1, x));
5112 gcc_assert (insn_operand_matches (icode, 2, y));
5114 return GEN_FCN (icode) (x, x, y);
5117 /* Generate and return an insn body to add r1 and c,
5118 storing the result in r0. */
5120 rtx_insn *
5121 gen_add3_insn (rtx r0, rtx r1, rtx c)
5123 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
5125 if (icode == CODE_FOR_nothing
5126 || !insn_operand_matches (icode, 0, r0)
5127 || !insn_operand_matches (icode, 1, r1)
5128 || !insn_operand_matches (icode, 2, c))
5129 return NULL;
5131 return GEN_FCN (icode) (r0, r1, c);
5135 have_add2_insn (rtx x, rtx y)
5137 enum insn_code icode;
5139 gcc_assert (GET_MODE (x) != VOIDmode);
5141 icode = optab_handler (add_optab, GET_MODE (x));
5143 if (icode == CODE_FOR_nothing)
5144 return 0;
5146 if (!insn_operand_matches (icode, 0, x)
5147 || !insn_operand_matches (icode, 1, x)
5148 || !insn_operand_matches (icode, 2, y))
5149 return 0;
5151 return 1;
5154 /* Generate and return an insn body to add Y to X. */
5156 rtx_insn *
5157 gen_addptr3_insn (rtx x, rtx y, rtx z)
5159 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
5161 gcc_assert (insn_operand_matches (icode, 0, x));
5162 gcc_assert (insn_operand_matches (icode, 1, y));
5163 gcc_assert (insn_operand_matches (icode, 2, z));
5165 return GEN_FCN (icode) (x, y, z);
5168 /* Return true if the target implements an addptr pattern and X, Y,
5169 and Z are valid for the pattern predicates. */
5172 have_addptr3_insn (rtx x, rtx y, rtx z)
5174 enum insn_code icode;
5176 gcc_assert (GET_MODE (x) != VOIDmode);
5178 icode = optab_handler (addptr3_optab, GET_MODE (x));
5180 if (icode == CODE_FOR_nothing)
5181 return 0;
5183 if (!insn_operand_matches (icode, 0, x)
5184 || !insn_operand_matches (icode, 1, y)
5185 || !insn_operand_matches (icode, 2, z))
5186 return 0;
5188 return 1;
5191 /* Generate and return an insn body to subtract Y from X. */
5193 rtx_insn *
5194 gen_sub2_insn (rtx x, rtx y)
5196 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
5198 gcc_assert (insn_operand_matches (icode, 0, x));
5199 gcc_assert (insn_operand_matches (icode, 1, x));
5200 gcc_assert (insn_operand_matches (icode, 2, y));
5202 return GEN_FCN (icode) (x, x, y);
5205 /* Generate and return an insn body to subtract r1 and c,
5206 storing the result in r0. */
5208 rtx_insn *
5209 gen_sub3_insn (rtx r0, rtx r1, rtx c)
5211 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
5213 if (icode == CODE_FOR_nothing
5214 || !insn_operand_matches (icode, 0, r0)
5215 || !insn_operand_matches (icode, 1, r1)
5216 || !insn_operand_matches (icode, 2, c))
5217 return NULL;
5219 return GEN_FCN (icode) (r0, r1, c);
5223 have_sub2_insn (rtx x, rtx y)
5225 enum insn_code icode;
5227 gcc_assert (GET_MODE (x) != VOIDmode);
5229 icode = optab_handler (sub_optab, GET_MODE (x));
5231 if (icode == CODE_FOR_nothing)
5232 return 0;
5234 if (!insn_operand_matches (icode, 0, x)
5235 || !insn_operand_matches (icode, 1, x)
5236 || !insn_operand_matches (icode, 2, y))
5237 return 0;
5239 return 1;
5242 /* Generate the body of an insn to extend Y (with mode MFROM)
5243 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
5245 rtx_insn *
5246 gen_extend_insn (rtx x, rtx y, machine_mode mto,
5247 machine_mode mfrom, int unsignedp)
5249 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
5250 return GEN_FCN (icode) (x, y);
5253 /* Generate code to convert FROM to floating point
5254 and store in TO. FROM must be fixed point and not VOIDmode.
5255 UNSIGNEDP nonzero means regard FROM as unsigned.
5256 Normally this is done by correcting the final value
5257 if it is negative. */
5259 void
5260 expand_float (rtx to, rtx from, int unsignedp)
5262 enum insn_code icode;
5263 rtx target = to;
5264 scalar_mode from_mode, to_mode;
5265 machine_mode fmode, imode;
5266 bool can_do_signed = false;
5268 /* Crash now, because we won't be able to decide which mode to use. */
5269 gcc_assert (GET_MODE (from) != VOIDmode);
5271 /* Look for an insn to do the conversion. Do it in the specified
5272 modes if possible; otherwise convert either input, output or both to
5273 wider mode. If the integer mode is wider than the mode of FROM,
5274 we can do the conversion signed even if the input is unsigned. */
5276 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
5277 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
5279 int doing_unsigned = unsignedp;
5281 if (fmode != GET_MODE (to)
5282 && (significand_size (fmode)
5283 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
5284 continue;
5286 icode = can_float_p (fmode, imode, unsignedp);
5287 if (icode == CODE_FOR_nothing && unsignedp)
5289 enum insn_code scode = can_float_p (fmode, imode, 0);
5290 if (scode != CODE_FOR_nothing)
5291 can_do_signed = true;
5292 if (imode != GET_MODE (from))
5293 icode = scode, doing_unsigned = 0;
5296 if (icode != CODE_FOR_nothing)
5298 if (imode != GET_MODE (from))
5299 from = convert_to_mode (imode, from, unsignedp);
5301 if (fmode != GET_MODE (to))
5302 target = gen_reg_rtx (fmode);
5304 emit_unop_insn (icode, target, from,
5305 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5307 if (target != to)
5308 convert_move (to, target, 0);
5309 return;
5313 /* Unsigned integer, and no way to convert directly. Convert as signed,
5314 then conditionally adjust the result. */
5315 if (unsignedp
5316 && can_do_signed
5317 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
5318 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
5320 opt_scalar_mode fmode_iter;
5321 rtx_code_label *label = gen_label_rtx ();
5322 rtx temp;
5323 REAL_VALUE_TYPE offset;
5325 /* Look for a usable floating mode FMODE wider than the source and at
5326 least as wide as the target. Using FMODE will avoid rounding woes
5327 with unsigned values greater than the signed maximum value. */
5329 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
5331 scalar_mode fmode = fmode_iter.require ();
5332 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
5333 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
5334 break;
5337 if (!fmode_iter.exists (&fmode))
5339 /* There is no such mode. Pretend the target is wide enough. */
5340 fmode = to_mode;
5342 /* Avoid double-rounding when TO is narrower than FROM. */
5343 if ((significand_size (fmode) + 1)
5344 < GET_MODE_PRECISION (from_mode))
5346 rtx temp1;
5347 rtx_code_label *neglabel = gen_label_rtx ();
5349 /* Don't use TARGET if it isn't a register, is a hard register,
5350 or is the wrong mode. */
5351 if (!REG_P (target)
5352 || REGNO (target) < FIRST_PSEUDO_REGISTER
5353 || GET_MODE (target) != fmode)
5354 target = gen_reg_rtx (fmode);
5356 imode = from_mode;
5357 do_pending_stack_adjust ();
5359 /* Test whether the sign bit is set. */
5360 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5361 0, neglabel);
5363 /* The sign bit is not set. Convert as signed. */
5364 expand_float (target, from, 0);
5365 emit_jump_insn (targetm.gen_jump (label));
5366 emit_barrier ();
5368 /* The sign bit is set.
5369 Convert to a usable (positive signed) value by shifting right
5370 one bit, while remembering if a nonzero bit was shifted
5371 out; i.e., compute (from & 1) | (from >> 1). */
5373 emit_label (neglabel);
5374 temp = expand_binop (imode, and_optab, from, const1_rtx,
5375 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5376 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
5377 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5378 OPTAB_LIB_WIDEN);
5379 expand_float (target, temp, 0);
5381 /* Multiply by 2 to undo the shift above. */
5382 temp = expand_binop (fmode, add_optab, target, target,
5383 target, 0, OPTAB_LIB_WIDEN);
5384 if (temp != target)
5385 emit_move_insn (target, temp);
5387 do_pending_stack_adjust ();
5388 emit_label (label);
5389 goto done;
5393 /* If we are about to do some arithmetic to correct for an
5394 unsigned operand, do it in a pseudo-register. */
5396 if (to_mode != fmode
5397 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5398 target = gen_reg_rtx (fmode);
5400 /* Convert as signed integer to floating. */
5401 expand_float (target, from, 0);
5403 /* If FROM is negative (and therefore TO is negative),
5404 correct its value by 2**bitwidth. */
5406 do_pending_stack_adjust ();
5407 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
5408 0, label);
5411 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
5412 temp = expand_binop (fmode, add_optab, target,
5413 const_double_from_real_value (offset, fmode),
5414 target, 0, OPTAB_LIB_WIDEN);
5415 if (temp != target)
5416 emit_move_insn (target, temp);
5418 do_pending_stack_adjust ();
5419 emit_label (label);
5420 goto done;
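/* The sign-bit-set path above, as a standalone sketch for a 64-bit
   input (hypothetical helper; assumes long long is 64 bits):  */
static double
u64_to_double_sketch (unsigned long long u)
{
  if ((u >> 63) == 0)
    /* Sign bit clear: the signed conversion is already exact.  */
    return (double) (long long) u;
  /* Sign bit set: shift right one bit, folding the shifted-out bit
     back in so the single rounding still sees it, then double.  */
  unsigned long long half = (u >> 1) | (u & 1);
  return 2.0 * (double) (long long) half;
}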
5423 /* No hardware instruction available; call a library routine. */
5425 rtx libfunc;
5426 rtx_insn *insns;
5427 rtx value;
5428 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5430 if (is_narrower_int_mode (GET_MODE (from), SImode))
5431 from = convert_to_mode (SImode, from, unsignedp);
5433 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5434 gcc_assert (libfunc);
5436 start_sequence ();
5438 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5439 GET_MODE (to), from, GET_MODE (from));
5440 insns = get_insns ();
5441 end_sequence ();
5443 emit_libcall_block (insns, target, value,
5444 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5445 GET_MODE (to), from));
5448 done:
5450 /* Copy result to requested destination
5451 if we have been computing in a temp location. */
5453 if (target != to)
5455 if (GET_MODE (target) == GET_MODE (to))
5456 emit_move_insn (to, target);
5457 else
5458 convert_move (to, target, 0);
5462 /* Generate code to convert FROM to fixed point and store in TO. FROM
5463 must be floating point. */
5465 void
5466 expand_fix (rtx to, rtx from, int unsignedp)
5468 enum insn_code icode;
5469 rtx target = to;
5470 machine_mode fmode, imode;
5471 opt_scalar_mode fmode_iter;
5472 bool must_trunc = false;
5474 /* We first try to find a pair of modes, one real and one integer, at
5475 least as wide as FROM and TO, respectively, in which we can open-code
5476 this conversion. If the integer mode is wider than the mode of TO,
5477 we can do the conversion either signed or unsigned. */
5479 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5480 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5482 int doing_unsigned = unsignedp;
5484 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5485 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5486 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5488 if (icode != CODE_FOR_nothing)
5490 rtx_insn *last = get_last_insn ();
5491 rtx from1 = from;
5492 if (fmode != GET_MODE (from))
5493 from1 = convert_to_mode (fmode, from, 0);
5495 if (must_trunc)
5497 rtx temp = gen_reg_rtx (GET_MODE (from1));
5498 from1 = expand_unop (GET_MODE (from1), ftrunc_optab, from1,
5499 temp, 0);
5502 if (imode != GET_MODE (to))
5503 target = gen_reg_rtx (imode);
5505 if (maybe_emit_unop_insn (icode, target, from1,
5506 doing_unsigned ? UNSIGNED_FIX : FIX))
5508 if (target != to)
5509 convert_move (to, target, unsignedp);
5510 return;
5512 delete_insns_since (last);
5516 /* For an unsigned conversion, there is one more way to do it.
5517 If we have a signed conversion, we generate code that compares
5518 the real value to the largest representable positive number. If it
5519 is smaller, the conversion is done normally. Otherwise, subtract
5520 one plus the highest signed number, convert, and add it back.
5522 We only need to check all real modes, since we know we didn't find
5523 anything with a wider integer mode.
5525 This code used to extend the FP value into a mode wider than the
5526 destination. This is needed for decimal float modes, which cannot
5527 accurately represent one plus the highest signed number of the same
5528 size, but not for binary modes. Consider, for instance, conversion
5529 from SFmode into DImode.
5531 The hot path through the code deals with inputs smaller than 2^63,
5532 doing just the conversion, so there are no bits to lose.
5534 On the other path we know the value is positive in the range 2^63..2^64-1
5535 inclusive (for any other input, overflow happens and the result is
5536 undefined). So we know that the most significant bit set in the mantissa
5537 corresponds to 2^63. The subtraction of 2^63 should not generate any
5538 rounding, as it simply clears out that bit. The rest is trivial. */
5540 scalar_int_mode to_mode;
5541 if (unsignedp
5542 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
5543 && HWI_COMPUTABLE_MODE_P (to_mode))
5544 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
5546 scalar_mode fmode = fmode_iter.require ();
5547 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
5548 0, &must_trunc)
5549 && (!DECIMAL_FLOAT_MODE_P (fmode)
5550 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
5552 int bitsize;
5553 REAL_VALUE_TYPE offset;
5554 rtx limit;
5555 rtx_code_label *lab1, *lab2;
5556 rtx_insn *insn;
5558 bitsize = GET_MODE_PRECISION (to_mode);
5559 real_2expN (&offset, bitsize - 1, fmode);
5560 limit = const_double_from_real_value (offset, fmode);
5561 lab1 = gen_label_rtx ();
5562 lab2 = gen_label_rtx ();
5564 if (fmode != GET_MODE (from))
5565 from = convert_to_mode (fmode, from, 0);
5567 /* See if we need to do the subtraction. */
5568 do_pending_stack_adjust ();
5569 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
5570 GET_MODE (from), 0, lab1);
5572 /* If not, do the signed "fix" and branch around fixup code. */
5573 expand_fix (to, from, 0);
5574 emit_jump_insn (targetm.gen_jump (lab2));
5575 emit_barrier ();
5577 /* Otherwise, subtract 2**(N-1), convert to signed number,
5578 then add 2**(N-1). Do the addition using XOR since this
5579 will often generate better code. */
5580 emit_label (lab1);
5581 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5582 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5583 expand_fix (to, target, 0);
5584 target = expand_binop (to_mode, xor_optab, to,
5585 gen_int_mode
5586 (HOST_WIDE_INT_1 << (bitsize - 1),
5587 to_mode),
5588 to, 1, OPTAB_LIB_WIDEN);
5590 if (target != to)
5591 emit_move_insn (to, target);
5593 emit_label (lab2);
5595 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5597 /* Make a place for a REG_NOTE and add it. */
5598 insn = emit_move_insn (to, to);
5599 set_dst_reg_note (insn, REG_EQUAL,
5600 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5601 copy_rtx (from)),
5602 to);
5605 return;
5609 /* We can't do it with an insn, so use a library call. But first ensure
5610 that the mode of TO is at least as wide as SImode, since those are the
5611 only library calls we know about. */
5613 if (is_narrower_int_mode (GET_MODE (to), SImode))
5615 target = gen_reg_rtx (SImode);
5617 expand_fix (target, from, unsignedp);
5619 else
5621 rtx_insn *insns;
5622 rtx value;
5623 rtx libfunc;
5625 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5626 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5627 gcc_assert (libfunc);
5629 start_sequence ();
5631 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5632 GET_MODE (to), from, GET_MODE (from));
5633 insns = get_insns ();
5634 end_sequence ();
5636 emit_libcall_block (insns, target, value,
5637 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5638 GET_MODE (to), from));
5641 if (target != to)
5643 if (GET_MODE (to) == GET_MODE (target))
5644 emit_move_insn (to, target);
5645 else
5646 convert_move (to, target, 0);
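/* A minimal usage sketch (hypothetical caller): to truncate a DFmode value
   FLT to an unsigned DImode integer,

     rtx res = gen_reg_rtx (DImode);
     expand_fix (res, flt, 1);	// unsignedp == 1

   which tries a direct pattern, then the signed-conversion fixup above,
   then a library call, in that order.  */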
5651 /* Promote integer arguments for a libcall if necessary.
5652 emit_library_call_value cannot do the promotion because it does not
5653 know if it should do a signed or unsigned promotion. This is because
5654 there are no tree types defined for libcalls. */
5656 static rtx
5657 prepare_libcall_arg (rtx arg, int uintp)
5659 scalar_int_mode mode;
5660 machine_mode arg_mode;
5661 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5663 /* If we need to promote the integer function argument we need to do
5664 it here instead of inside emit_library_call_value because in
5665 emit_library_call_value we don't know if we should do a signed or
5666 unsigned promotion. */
5668 int unsigned_p = 0;
5669 arg_mode = promote_function_mode (NULL_TREE, mode,
5670 &unsigned_p, NULL_TREE, 0);
5671 if (arg_mode != mode)
5672 return convert_to_mode (arg_mode, arg, uintp);
5674 return arg;
5677 /* Generate code to convert FROM to TO, where at least one of them is fixed-point.
5678 If UINTP is true, either TO or FROM is an unsigned integer.
5679 If SATP is true, we need to saturate the result. */
5681 void
5682 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5684 machine_mode to_mode = GET_MODE (to);
5685 machine_mode from_mode = GET_MODE (from);
5686 convert_optab tab;
5687 enum rtx_code this_code;
5688 enum insn_code code;
5689 rtx_insn *insns;
5690 rtx value;
5691 rtx libfunc;
5693 if (to_mode == from_mode)
5695 emit_move_insn (to, from);
5696 return;
5699 if (uintp)
5701 tab = satp ? satfractuns_optab : fractuns_optab;
5702 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5704 else
5706 tab = satp ? satfract_optab : fract_optab;
5707 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5709 code = convert_optab_handler (tab, to_mode, from_mode);
5710 if (code != CODE_FOR_nothing)
5712 emit_unop_insn (code, to, from, this_code);
5713 return;
5716 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5717 gcc_assert (libfunc);
5719 from = prepare_libcall_arg (from, uintp);
5720 from_mode = GET_MODE (from);
5722 start_sequence ();
5723 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5724 from, from_mode);
5725 insns = get_insns ();
5726 end_sequence ();
5728 emit_libcall_block (insns, to, value,
5729 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5732 /* Generate code to convert FROM to fixed point and store in TO. FROM
5733 must be floating point, TO must be signed. Use the conversion optab
5734 TAB to do the conversion. */
5736 bool
5737 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5739 enum insn_code icode;
5740 rtx target = to;
5741 machine_mode fmode, imode;
5743 /* We first try to find a pair of modes, one real and one integer, at
5744 least as wide as FROM and TO, respectively, in which we can open-code
5745 this conversion. If the integer mode is wider than the mode of TO,
5746 we can do the conversion either signed or unsigned. */
5748 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5749 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5751 icode = convert_optab_handler (tab, imode, fmode);
5752 if (icode != CODE_FOR_nothing)
5754 rtx_insn *last = get_last_insn ();
5755 if (fmode != GET_MODE (from))
5756 from = convert_to_mode (fmode, from, 0);
5758 if (imode != GET_MODE (to))
5759 target = gen_reg_rtx (imode);
5761 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5763 delete_insns_since (last);
5764 continue;
5766 if (target != to)
5767 convert_move (to, target, 0);
5768 return true;
5772 return false;
5775 /* Report whether we have an instruction to perform the operation
5776 specified by CODE on operands of mode MODE. */
5778 have_insn_for (enum rtx_code code, machine_mode mode)
5780 return (code_to_optab (code)
5781 && (optab_handler (code_to_optab (code), mode)
5782 != CODE_FOR_nothing));
5785 /* Print information about the current contents of the optabs on
5786 STDERR. */
5788 DEBUG_FUNCTION void
5789 debug_optab_libfuncs (void)
5791 int i, j, k;
5793 /* Dump the arithmetic optabs. */
5794 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5795 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5797 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5798 if (l)
5800 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5801 fprintf (stderr, "%s\t%s:\t%s\n",
5802 GET_RTX_NAME (optab_to_code ((optab) i)),
5803 GET_MODE_NAME (j),
5804 XSTR (l, 0));
5808 /* Dump the conversion optabs. */
5809 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5810 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5811 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5813 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5814 (machine_mode) k);
5815 if (l)
5817 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5818 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5819 GET_RTX_NAME (optab_to_code ((optab) i)),
5820 GET_MODE_NAME (j),
5821 GET_MODE_NAME (k),
5822 XSTR (l, 0));
5827 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5828 CODE. Return 0 on failure. */
5830 rtx_insn *
5831 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5833 machine_mode mode = GET_MODE (op1);
5834 enum insn_code icode;
5835 rtx_insn *insn;
5836 rtx trap_rtx;
5838 if (mode == VOIDmode)
5839 return 0;
5841 icode = optab_handler (ctrap_optab, mode);
5842 if (icode == CODE_FOR_nothing)
5843 return 0;
5845 /* Some targets only accept a zero trap code. */
5846 if (!insn_operand_matches (icode, 3, tcode))
5847 return 0;
5849 do_pending_stack_adjust ();
5850 start_sequence ();
5851 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5852 &trap_rtx, &mode);
5853 if (!trap_rtx)
5854 insn = NULL;
5855 else
5856 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5857 tcode);
5859 /* If that failed, then give up. */
5860 if (insn == 0)
5862 end_sequence ();
5863 return 0;
5866 emit_insn (insn);
5867 insn = get_insns ();
5868 end_sequence ();
5869 return insn;
5872 /* Return rtx code for TCODE or UNKNOWN. Use UNSIGNEDP to select signed
5873 or unsigned operation code. */
5875 enum rtx_code
5876 get_rtx_code_1 (enum tree_code tcode, bool unsignedp)
5878 enum rtx_code code;
5879 switch (tcode)
5881 case EQ_EXPR:
5882 code = EQ;
5883 break;
5884 case NE_EXPR:
5885 code = NE;
5886 break;
5887 case LT_EXPR:
5888 code = unsignedp ? LTU : LT;
5889 break;
5890 case LE_EXPR:
5891 code = unsignedp ? LEU : LE;
5892 break;
5893 case GT_EXPR:
5894 code = unsignedp ? GTU : GT;
5895 break;
5896 case GE_EXPR:
5897 code = unsignedp ? GEU : GE;
5898 break;
5900 case UNORDERED_EXPR:
5901 code = UNORDERED;
5902 break;
5903 case ORDERED_EXPR:
5904 code = ORDERED;
5905 break;
5906 case UNLT_EXPR:
5907 code = UNLT;
5908 break;
5909 case UNLE_EXPR:
5910 code = UNLE;
5911 break;
5912 case UNGT_EXPR:
5913 code = UNGT;
5914 break;
5915 case UNGE_EXPR:
5916 code = UNGE;
5917 break;
5918 case UNEQ_EXPR:
5919 code = UNEQ;
5920 break;
5921 case LTGT_EXPR:
5922 code = LTGT;
5923 break;
5925 case BIT_AND_EXPR:
5926 code = AND;
5927 break;
5929 case BIT_IOR_EXPR:
5930 code = IOR;
5931 break;
5933 default:
5934 code = UNKNOWN;
5935 break;
5937 return code;
5940 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5941 or unsigned operation code. */
5943 enum rtx_code
5944 get_rtx_code (enum tree_code tcode, bool unsignedp)
5946 enum rtx_code code = get_rtx_code_1 (tcode, unsignedp);
5947 gcc_assert (code != UNKNOWN);
5948 return code;
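/* For example, get_rtx_code (LT_EXPR, true) returns the unsigned code LTU,
   while get_rtx_code (LT_EXPR, false) returns LT.  */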
5951 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5952 select signed or unsigned operators. OPNO holds the index of the
5953 first comparison operand for insn ICODE. Do not generate the
5954 compare instruction itself. */
5956 static rtx
5957 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5958 tree t_op0, tree t_op1, bool unsignedp,
5959 enum insn_code icode, unsigned int opno)
5961 class expand_operand ops[2];
5962 rtx rtx_op0, rtx_op1;
5963 machine_mode m0, m1;
5964 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5966 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5968 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5969 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5970 cases, use the original mode. */
5971 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5972 EXPAND_STACK_PARM);
5973 m0 = GET_MODE (rtx_op0);
5974 if (m0 == VOIDmode)
5975 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5977 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5978 EXPAND_STACK_PARM);
5979 m1 = GET_MODE (rtx_op1);
5980 if (m1 == VOIDmode)
5981 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5983 create_input_operand (&ops[0], rtx_op0, m0);
5984 create_input_operand (&ops[1], rtx_op1, m1);
5985 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5986 gcc_unreachable ();
5987 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5990 /* Check if vec_perm mask SEL is a constant equivalent to a shift of
5991 the first vec_perm operand, assuming the second operand (for a left shift,
5992 the first operand) is a constant vector of zeros. Return the shift distance
5993 in bits if so, or NULL_RTX if the vec_perm is not a shift. MODE is the
5994 mode of the value being shifted. SHIFT_OPTAB is vec_shr_optab for right
5995 shift or vec_shl_optab for left shift. */
5996 static rtx
5997 shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
5998 optab shift_optab)
6000 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
6001 poly_int64 first = sel[0];
6002 if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
6003 return NULL_RTX;
6005 if (shift_optab == vec_shl_optab)
6007 unsigned int nelt;
6008 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6009 return NULL_RTX;
6010 unsigned firstidx = 0;
6011 for (unsigned int i = 0; i < nelt; i++)
6013 if (known_eq (sel[i], nelt))
6015 if (i == 0 || firstidx)
6016 return NULL_RTX;
6017 firstidx = i;
6019 else if (firstidx
6020 ? maybe_ne (sel[i], nelt + i - firstidx)
6021 : maybe_ge (sel[i], nelt))
6022 return NULL_RTX;
6025 if (firstidx == 0)
6026 return NULL_RTX;
6027 first = firstidx;
6029 else if (!sel.series_p (0, 1, first, 1))
6031 unsigned int nelt;
6032 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6033 return NULL_RTX;
6034 for (unsigned int i = 1; i < nelt; i++)
6036 poly_int64 expected = i + first;
6037 /* Indices into the second vector are all equivalent. */
6038 if (maybe_lt (sel[i], nelt)
6039 ? maybe_ne (sel[i], expected)
6040 : maybe_lt (expected, nelt))
6041 return NULL_RTX;
6045 return gen_int_shift_amount (mode, first * bitsize);
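/* Worked illustration: with MODE == V4SImode, a zero second operand and
   SHIFT_OPTAB == vec_shr_optab, the selector { 1, 2, 3, 4 } is a series
   starting at 1 with step 1, so the function above returns a shift
   distance of 1 * 32 bits; a selector that is not such a series yields
   NULL_RTX.  */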
6048 /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn. */
6050 static rtx
6051 expand_vec_perm_1 (enum insn_code icode, rtx target,
6052 rtx v0, rtx v1, rtx sel)
6054 machine_mode tmode = GET_MODE (target);
6055 machine_mode smode = GET_MODE (sel);
6056 class expand_operand ops[4];
6058 gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
6059 || related_int_vector_mode (tmode).require () == smode);
6060 create_output_operand (&ops[0], target, tmode);
6061 create_input_operand (&ops[3], sel, smode);
6063 /* Make an effort to preserve v0 == v1. The target expander is able to
6064 rely on this to determine if we're permuting a single input operand. */
6065 if (rtx_equal_p (v0, v1))
6067 if (!insn_operand_matches (icode, 1, v0))
6068 v0 = force_reg (tmode, v0);
6069 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
6070 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
6072 create_fixed_operand (&ops[1], v0);
6073 create_fixed_operand (&ops[2], v0);
6075 else
6077 create_input_operand (&ops[1], v0, tmode);
6078 create_input_operand (&ops[2], v1, tmode);
6081 if (maybe_expand_insn (icode, 4, ops))
6082 return ops[0].value;
6083 return NULL_RTX;
6086 /* Implement a permutation of vectors v0 and v1 using the permutation
6087 vector in SEL and return the result. Use TARGET to hold the result
6088 if nonnull and convenient.
6090 MODE is the mode of the vectors being permuted (V0 and V1). SEL_MODE
6091 is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
6092 to have a particular mode. */
6094 rtx
6095 expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
6096 const vec_perm_builder &sel, machine_mode sel_mode,
6097 rtx target)
6099 if (!target || !register_operand (target, mode))
6100 target = gen_reg_rtx (mode);
6102 /* Set QIMODE to a different vector mode with byte elements.
6103 If no such mode, or if MODE already has byte elements, use VOIDmode. */
6104 machine_mode qimode;
6105 if (!qimode_for_vec_perm (mode).exists (&qimode))
6106 qimode = VOIDmode;
6108 rtx_insn *last = get_last_insn ();
6110 bool single_arg_p = rtx_equal_p (v0, v1);
6111 /* Always specify two input vectors here and leave the target to handle
6112 cases in which the inputs are equal. Not all backends can cope with
6113 the single-input representation when testing for a double-input
6114 target instruction. */
6115 vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));
6117 /* See if this can be handled with a vec_shr or vec_shl. We only do this
6118 if the second (for vec_shr) or first (for vec_shl) vector is all
6119 zeroes. */
6120 insn_code shift_code = CODE_FOR_nothing;
6121 insn_code shift_code_qi = CODE_FOR_nothing;
6122 optab shift_optab = unknown_optab;
6123 rtx v2 = v0;
6124 if (v1 == CONST0_RTX (GET_MODE (v1)))
6125 shift_optab = vec_shr_optab;
6126 else if (v0 == CONST0_RTX (GET_MODE (v0)))
6128 shift_optab = vec_shl_optab;
6129 v2 = v1;
6131 if (shift_optab != unknown_optab)
6133 shift_code = optab_handler (shift_optab, mode);
6134 shift_code_qi = ((qimode != VOIDmode && qimode != mode)
6135 ? optab_handler (shift_optab, qimode)
6136 : CODE_FOR_nothing);
6138 if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
6140 rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
6141 if (shift_amt)
6143 class expand_operand ops[3];
6144 if (shift_amt == const0_rtx)
6145 return v2;
6146 if (shift_code != CODE_FOR_nothing)
6148 create_output_operand (&ops[0], target, mode);
6149 create_input_operand (&ops[1], v2, mode);
6150 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
6151 if (maybe_expand_insn (shift_code, 3, ops))
6152 return ops[0].value;
6154 if (shift_code_qi != CODE_FOR_nothing)
6156 rtx tmp = gen_reg_rtx (qimode);
6157 create_output_operand (&ops[0], tmp, qimode);
6158 create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
6159 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
6160 if (maybe_expand_insn (shift_code_qi, 3, ops))
6161 return gen_lowpart (mode, ops[0].value);
6166 if (targetm.vectorize.vec_perm_const != NULL)
6168 if (single_arg_p)
6169 v1 = v0;
6171 if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
6172 return target;
6175 /* Fall back to a constant byte-based permutation. */
6176 vec_perm_indices qimode_indices;
6177 rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
6178 if (qimode != VOIDmode)
6180 qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
6181 target_qi = gen_reg_rtx (qimode);
6182 v0_qi = gen_lowpart (qimode, v0);
6183 v1_qi = gen_lowpart (qimode, v1);
6184 if (targetm.vectorize.vec_perm_const != NULL
6185 && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
6186 v1_qi, qimode_indices))
6187 return gen_lowpart (mode, target_qi);
6190 v0 = force_reg (mode, v0);
6191 if (single_arg_p)
6192 v1 = v0;
6193 v1 = force_reg (mode, v1);
6195 /* Otherwise expand as a fully variable permutation. */
6197 /* The optabs are only defined for selectors with the same width
6198 as the values being permuted. */
6199 machine_mode required_sel_mode;
6200 if (!related_int_vector_mode (mode).exists (&required_sel_mode))
6202 delete_insns_since (last);
6203 return NULL_RTX;
6206 /* We know that it is semantically valid to treat SEL as having SEL_MODE.
6207 If that isn't the mode we want then we need to prove that using
6208 REQUIRED_SEL_MODE is OK. */
6209 if (sel_mode != required_sel_mode)
6211 if (!selector_fits_mode_p (required_sel_mode, indices))
6213 delete_insns_since (last);
6214 return NULL_RTX;
6216 sel_mode = required_sel_mode;
6219 insn_code icode = direct_optab_handler (vec_perm_optab, mode);
6220 if (icode != CODE_FOR_nothing)
6222 rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
6223 rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
6224 if (tmp)
6225 return tmp;
6228 if (qimode != VOIDmode
6229 && selector_fits_mode_p (qimode, qimode_indices))
6231 icode = direct_optab_handler (vec_perm_optab, qimode);
6232 if (icode != CODE_FOR_nothing)
6234 rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
6235 rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
6236 if (tmp)
6237 return gen_lowpart (mode, tmp);
6241 delete_insns_since (last);
6242 return NULL_RTX;
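/* A minimal usage sketch (hypothetical caller): to reverse the lanes of a
   V4SImode value V0, build the constant selector { 3, 2, 1, 0 } and let
   the routine above choose the best expansion:

     vec_perm_builder sel (4, 4, 1);
     for (int i = 3; i >= 0; --i)
       sel.quick_push (i);
     rtx res = expand_vec_perm_const (V4SImode, v0, v0, sel, BLKmode,
				      NULL_RTX);

   A NULL_RTX result means no usable permutation sequence was found.  */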
6245 /* Implement a permutation of vectors v0 and v1 using the permutation
6246 vector in SEL and return the result. Use TARGET to hold the result
6247 if nonnull and convenient.
6249 MODE is the mode of the vectors being permuted (V0 and V1).
6250 SEL must have the integer equivalent of MODE and is known to be
6251 unsuitable for permutes with a constant permutation vector. */
6253 rtx
6254 expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
6256 enum insn_code icode;
6257 unsigned int i, u;
6258 rtx tmp, sel_qi;
6260 u = GET_MODE_UNIT_SIZE (mode);
6262 if (!target || GET_MODE (target) != mode)
6263 target = gen_reg_rtx (mode);
6265 icode = direct_optab_handler (vec_perm_optab, mode);
6266 if (icode != CODE_FOR_nothing)
6268 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6269 if (tmp)
6270 return tmp;
6273 /* As a special case to aid several targets, lower the element-based
6274 permutation to a byte-based permutation and try again. */
6275 machine_mode qimode;
6276 if (!qimode_for_vec_perm (mode).exists (&qimode)
6277 || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
6278 return NULL_RTX;
6279 icode = direct_optab_handler (vec_perm_optab, qimode);
6280 if (icode == CODE_FOR_nothing)
6281 return NULL_RTX;
6283 /* Multiply each element by its byte size. */
6284 machine_mode selmode = GET_MODE (sel);
6285 if (u == 2)
6286 sel = expand_simple_binop (selmode, PLUS, sel, sel,
6287 NULL, 0, OPTAB_DIRECT);
6288 else
6289 sel = expand_simple_binop (selmode, ASHIFT, sel,
6290 gen_int_shift_amount (selmode, exact_log2 (u)),
6291 NULL, 0, OPTAB_DIRECT);
6292 gcc_assert (sel != NULL);
6294 /* Broadcast the low byte of each element into each of its bytes.
6295 The encoding has U interleaved stepped patterns, one for each
6296 byte of an element. */
6297 vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
6298 unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
6299 for (i = 0; i < 3; ++i)
6300 for (unsigned int j = 0; j < u; ++j)
6301 const_sel.quick_push (i * u + low_byte_in_u);
6302 sel = gen_lowpart (qimode, sel);
6303 sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
6304 gcc_assert (sel != NULL);
6306 /* Add the byte offset to each byte element. */
6307 /* Note that the definition of the indices here is memory ordering,
6308 so there should be no difference between big and little endian. */
6309 rtx_vector_builder byte_indices (qimode, u, 1);
6310 for (i = 0; i < u; ++i)
6311 byte_indices.quick_push (GEN_INT (i));
6312 tmp = byte_indices.build ();
6313 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
6314 sel, 0, OPTAB_DIRECT);
6315 gcc_assert (sel_qi != NULL);
6317 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
6318 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
6319 gen_lowpart (qimode, v1), sel_qi);
6320 if (tmp)
6321 tmp = gen_lowpart (mode, tmp);
6322 return tmp;
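/* Worked illustration of the byte lowering above for U == 4 (e.g. a
   V4SImode permute retried as V16QImode): each selector element is first
   multiplied by 4 (shifted left by 2), its low byte is broadcast to all
   four bytes of the element, and the per-byte offsets 0, 1, 2, 3 are
   added; a selector element of 2 thus becomes byte indices 8, 9, 10, 11.  */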
6325 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
6326 Use TARGET for the result if nonnull and convenient. */
6328 rtx
6329 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
6331 class expand_operand ops[3];
6332 enum insn_code icode;
6333 machine_mode emode = GET_MODE_INNER (vmode);
6335 icode = direct_optab_handler (vec_series_optab, vmode);
6336 gcc_assert (icode != CODE_FOR_nothing);
6338 create_output_operand (&ops[0], target, vmode);
6339 create_input_operand (&ops[1], op0, emode);
6340 create_input_operand (&ops[2], op1, emode);
6342 expand_insn (icode, 3, ops);
6343 return ops[0].value;
6346 /* Generate insns for a vector comparison into a mask. */
6348 rtx
6349 expand_vec_cmp_expr (tree type, tree exp, rtx target)
6351 class expand_operand ops[4];
6352 enum insn_code icode;
6353 rtx comparison;
6354 machine_mode mask_mode = TYPE_MODE (type);
6355 machine_mode vmode;
6356 bool unsignedp;
6357 tree op0a, op0b;
6358 enum tree_code tcode;
6360 op0a = TREE_OPERAND (exp, 0);
6361 op0b = TREE_OPERAND (exp, 1);
6362 tcode = TREE_CODE (exp);
6364 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
6365 vmode = TYPE_MODE (TREE_TYPE (op0a));
6367 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
6368 if (icode == CODE_FOR_nothing)
6370 if (tcode == EQ_EXPR || tcode == NE_EXPR)
6371 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
6372 if (icode == CODE_FOR_nothing)
6373 return 0;
6376 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
6377 unsignedp, icode, 2);
6378 create_output_operand (&ops[0], target, mask_mode);
6379 create_fixed_operand (&ops[1], comparison);
6380 create_fixed_operand (&ops[2], XEXP (comparison, 0));
6381 create_fixed_operand (&ops[3], XEXP (comparison, 1));
6382 expand_insn (icode, 4, ops);
6383 return ops[0].value;
6386 /* Expand a highpart multiply. */
6388 rtx
6389 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
6390 rtx target, bool uns_p)
6392 class expand_operand eops[3];
6393 enum insn_code icode;
6394 int method, i;
6395 machine_mode wmode;
6396 rtx m1, m2;
6397 optab tab1, tab2;
6399 method = can_mult_highpart_p (mode, uns_p);
6400 switch (method)
6402 case 0:
6403 return NULL_RTX;
6404 case 1:
6405 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
6406 return expand_binop (mode, tab1, op0, op1, target, uns_p,
6407 OPTAB_LIB_WIDEN);
6408 case 2:
6409 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
6410 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
6411 break;
6412 case 3:
6413 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
6414 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
6415 if (BYTES_BIG_ENDIAN)
6416 std::swap (tab1, tab2);
6417 break;
6418 default:
6419 gcc_unreachable ();
6422 icode = optab_handler (tab1, mode);
6423 wmode = insn_data[icode].operand[0].mode;
6424 gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
6425 GET_MODE_NUNITS (mode)));
6426 gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));
6428 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
6429 create_input_operand (&eops[1], op0, mode);
6430 create_input_operand (&eops[2], op1, mode);
6431 expand_insn (icode, 3, eops);
6432 m1 = gen_lowpart (mode, eops[0].value);
6434 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
6435 create_input_operand (&eops[1], op0, mode);
6436 create_input_operand (&eops[2], op1, mode);
6437 expand_insn (optab_handler (tab2, mode), 3, eops);
6438 m2 = gen_lowpart (mode, eops[0].value);
6440 vec_perm_builder sel;
6441 if (method == 2)
6443 /* The encoding has 2 interleaved stepped patterns. */
6444 sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
6445 for (i = 0; i < 6; ++i)
6446 sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
6447 + ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
6449 else
6451 /* The encoding has a single interleaved stepped pattern. */
6452 sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
6453 for (i = 0; i < 3; ++i)
6454 sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
6457 return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
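/* Worked illustration (an assumption-laden sketch: V4SImode, little
   endian, method 2): M1 holds the widened products of the even lanes and
   M2 those of the odd lanes, so each high half sits in an odd SImode lane.
   The selector built above then encodes { 1, 5, 3, 7 }, picking
   high(a0*b0) from M1, high(a1*b1) from M2, and so on, interleaving the
   high halves back into lane order.  */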
6460 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6461 pattern. */
6463 static void
6464 find_cc_set (rtx x, const_rtx pat, void *data)
6466 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
6467 && GET_CODE (pat) == SET)
6469 rtx *p_cc_reg = (rtx *) data;
6470 gcc_assert (!*p_cc_reg);
6471 *p_cc_reg = x;
6475 /* This is a helper function for the other atomic operations. This function
6476 emits a loop that contains SEQ that iterates until a compare-and-swap
6477 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6478 a set of instructions that takes a value from OLD_REG as an input and
6479 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6480 set to the current contents of MEM. After SEQ, a compare-and-swap will
6481 attempt to update MEM with NEW_REG. The function returns true when the
6482 loop was generated successfully. */
6484 static bool
6485 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6487 machine_mode mode = GET_MODE (mem);
6488 rtx_code_label *label;
6489 rtx cmp_reg, success, oldval;
6491 /* The loop we want to generate looks like
6493 cmp_reg = mem;
6494 label:
6495 old_reg = cmp_reg;
6496 seq;
6497 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
6498 if (!success)
6499 goto label;
6501 Note that we only do the plain load from memory once. Subsequent
6502 iterations use the value loaded by the compare-and-swap pattern. */
6504 label = gen_label_rtx ();
6505 cmp_reg = gen_reg_rtx (mode);
6507 emit_move_insn (cmp_reg, mem);
6508 emit_label (label);
6509 emit_move_insn (old_reg, cmp_reg);
6510 if (seq)
6511 emit_insn (seq);
6513 success = NULL_RTX;
6514 oldval = cmp_reg;
6515 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
6516 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
6517 MEMMODEL_RELAXED))
6518 return false;
6520 if (oldval != cmp_reg)
6521 emit_move_insn (cmp_reg, oldval);
6523 /* Mark this jump predicted not taken. */
6524 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
6525 GET_MODE (success), 1, label,
6526 profile_probability::guessed_never ());
6527 return true;
6531 /* This function tries to emit an atomic_exchange instruction. VAL is written
6532 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
6533 using TARGET if possible. */
6535 static rtx
6536 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6538 machine_mode mode = GET_MODE (mem);
6539 enum insn_code icode;
6541 /* If the target supports the exchange directly, great. */
6542 icode = direct_optab_handler (atomic_exchange_optab, mode);
6543 if (icode != CODE_FOR_nothing)
6545 class expand_operand ops[4];
6547 create_output_operand (&ops[0], target, mode);
6548 create_fixed_operand (&ops[1], mem);
6549 create_input_operand (&ops[2], val, mode);
6550 create_integer_operand (&ops[3], model);
6551 if (maybe_expand_insn (icode, 4, ops))
6552 return ops[0].value;
6555 return NULL_RTX;
6558 /* This function tries to implement an atomic exchange operation using
6559 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
6560 The previous contents of *MEM are returned, using TARGET if possible.
6561 Since this instruction is an acquire barrier only, stronger memory
6562 models may require additional barriers to be emitted. */
6564 static rtx
6565 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
6566 enum memmodel model)
6568 machine_mode mode = GET_MODE (mem);
6569 enum insn_code icode;
6570 rtx_insn *last_insn = get_last_insn ();
6572 icode = optab_handler (sync_lock_test_and_set_optab, mode);
6574 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
6575 exists, and the memory model is stronger than acquire, add a release
6576 barrier before the instruction. */
6578 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
6579 expand_mem_thread_fence (model);
6581 if (icode != CODE_FOR_nothing)
6583 class expand_operand ops[3];
6584 create_output_operand (&ops[0], target, mode);
6585 create_fixed_operand (&ops[1], mem);
6586 create_input_operand (&ops[2], val, mode);
6587 if (maybe_expand_insn (icode, 3, ops))
6588 return ops[0].value;
6591 /* If an external test-and-set libcall is provided, use that instead of
6592 any external compare-and-swap that we might get from the compare-and-
6593 swap-loop expansion later. */
6594 if (!can_compare_and_swap_p (mode, false))
6596 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
6597 if (libfunc != NULL)
6599 rtx addr;
6601 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6602 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6603 mode, addr, ptr_mode,
6604 val, mode);
6608 /* If the test_and_set can't be emitted, eliminate any barrier that might
6609 have been emitted. */
6610 delete_insns_since (last_insn);
6611 return NULL_RTX;
6614 /* This function tries to implement an atomic exchange operation using a
6615 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6616 *MEM are returned, using TARGET if possible. No memory model is required
6617 since a compare_and_swap loop is seq-cst. */
6619 static rtx
6620 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6622 machine_mode mode = GET_MODE (mem);
6624 if (can_compare_and_swap_p (mode, true))
6626 if (!target || !register_operand (target, mode))
6627 target = gen_reg_rtx (mode);
6628 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6629 return target;
6632 return NULL_RTX;
6635 /* This function tries to implement an atomic test-and-set operation
6636 using the atomic_test_and_set instruction pattern. A boolean value
6637 is returned from the operation, using TARGET if possible. */
6639 static rtx
6640 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6642 machine_mode pat_bool_mode;
6643 class expand_operand ops[3];
6645 if (!targetm.have_atomic_test_and_set ())
6646 return NULL_RTX;
6648 /* While we always get QImode from __atomic_test_and_set, we get
6649 other memory modes from __sync_lock_test_and_set. Note that we
6650 use no endian adjustment here. This matches the 4.6 behavior
6651 in the Sparc backend. */
6652 enum insn_code icode = targetm.code_for_atomic_test_and_set;
6653 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6654 if (GET_MODE (mem) != QImode)
6655 mem = adjust_address_nv (mem, QImode, 0);
6657 pat_bool_mode = insn_data[icode].operand[0].mode;
6658 create_output_operand (&ops[0], target, pat_bool_mode);
6659 create_fixed_operand (&ops[1], mem);
6660 create_integer_operand (&ops[2], model);
6662 if (maybe_expand_insn (icode, 3, ops))
6663 return ops[0].value;
6664 return NULL_RTX;
6667 /* This function expands the legacy __sync_lock_test_and_set operation, which is
6668 generally an atomic exchange. Some limited targets only allow the
6669 constant 1 to be stored. This is an ACQUIRE operation.
6671 TARGET is an optional place to stick the return value.
6672 MEM is where VAL is stored. */
6674 rtx
6675 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6677 rtx ret;
6679 /* Try an atomic_exchange first. */
6680 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6681 if (ret)
6682 return ret;
6684 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6685 MEMMODEL_SYNC_ACQUIRE);
6686 if (ret)
6687 return ret;
6689 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6690 if (ret)
6691 return ret;
6693 /* If there are no other options, try atomic_test_and_set if the value
6694 being stored is 1. */
6695 if (val == const1_rtx)
6696 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6698 return ret;
6701 /* This function expands the atomic test_and_set operation:
6702 atomically store a boolean TRUE into MEM and return the previous value.
6704 MEMMODEL is the memory model variant to use.
6705 TARGET is an optional place to stick the return value. */
6707 rtx
6708 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6710 machine_mode mode = GET_MODE (mem);
6711 rtx ret, trueval, subtarget;
6713 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6714 if (ret)
6715 return ret;
6717 /* Be binary compatible with non-default settings of trueval, and different
6718 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6719 another only has atomic-exchange. */
6720 if (targetm.atomic_test_and_set_trueval == 1)
6722 trueval = const1_rtx;
6723 subtarget = target ? target : gen_reg_rtx (mode);
6725 else
6727 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6728 subtarget = gen_reg_rtx (mode);
6731 /* Try the atomic-exchange optab... */
6732 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6734 /* ... then an atomic-compare-and-swap loop ... */
6735 if (!ret)
6736 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6738 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6739 if (!ret)
6740 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6742 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6743 things with the value 1. Thus we try again without trueval. */
6744 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6745 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6747 /* Failing all else, assume a single threaded environment and simply
6748 perform the operation. */
6749 if (!ret)
6751 /* If the result is ignored skip the move to target. */
6752 if (subtarget != const0_rtx)
6753 emit_move_insn (subtarget, mem);
6755 emit_move_insn (mem, trueval);
6756 ret = subtarget;
6759 /* Recall that we have to return a boolean value; rectify if trueval
6760 is not exactly one. */
6761 if (targetm.atomic_test_and_set_trueval != 1)
6762 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6764 return ret;
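/* For instance, on a target whose atomic_test_and_set_trueval is 0xff,
   the exchange above stores 0xff into MEM, and the emit_store_flag_force
   call then reduces the previous contents to the required 0/1 boolean by
   comparing against zero.  */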
6767 /* This function expands the atomic exchange operation:
6768 atomically store VAL in MEM and return the previous value in MEM.
6770 MEMMODEL is the memory model variant to use.
6771 TARGET is an optional place to stick the return value. */
6773 rtx
6774 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6776 machine_mode mode = GET_MODE (mem);
6777 rtx ret;
6779 /* If loads are not atomic for the required size and we are not called to
6780 provide a __sync builtin, do not do anything so that we stay consistent
6781 with atomic loads of the same size. */
6782 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6783 return NULL_RTX;
6785 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6787 /* Next try a compare-and-swap loop for the exchange. */
6788 if (!ret)
6789 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6791 return ret;
6794 /* This function expands the atomic compare exchange operation:
6796 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6797 *PTARGET_OVAL is an optional place to store the old value from memory.
6798 Both target parameters may be NULL or const0_rtx to indicate that we do
6799 not care about that return value. Both target parameters are updated on
6800 success to the actual location of the corresponding result.
6802 MEMMODEL is the memory model variant to use.
6804 The return value of the function is true for success. */
6806 bool
6807 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6808 rtx mem, rtx expected, rtx desired,
6809 bool is_weak, enum memmodel succ_model,
6810 enum memmodel fail_model)
6812 machine_mode mode = GET_MODE (mem);
6813 class expand_operand ops[8];
6814 enum insn_code icode;
6815 rtx target_oval, target_bool = NULL_RTX;
6816 rtx libfunc;
6818 /* If loads are not atomic for the required size and we are not called to
6819 provide a __sync builtin, do not do anything so that we stay consistent
6820 with atomic loads of the same size. */
6821 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6822 return false;
6824 /* Load expected into a register for the compare and swap. */
6825 if (MEM_P (expected))
6826 expected = copy_to_reg (expected);
6828 /* Make sure we always have some place to put the return oldval.
6829 Further, make sure that place is distinct from the input expected,
6830 just in case we need that path down below. */
6831 if (ptarget_oval && *ptarget_oval == const0_rtx)
6832 ptarget_oval = NULL;
6834 if (ptarget_oval == NULL
6835 || (target_oval = *ptarget_oval) == NULL
6836 || reg_overlap_mentioned_p (expected, target_oval))
6837 target_oval = gen_reg_rtx (mode);
6839 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6840 if (icode != CODE_FOR_nothing)
6842 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6844 if (ptarget_bool && *ptarget_bool == const0_rtx)
6845 ptarget_bool = NULL;
6847 /* Make sure we always have a place for the bool operand. */
6848 if (ptarget_bool == NULL
6849 || (target_bool = *ptarget_bool) == NULL
6850 || GET_MODE (target_bool) != bool_mode)
6851 target_bool = gen_reg_rtx (bool_mode);
6853 /* Emit the compare_and_swap. */
6854 create_output_operand (&ops[0], target_bool, bool_mode);
6855 create_output_operand (&ops[1], target_oval, mode);
6856 create_fixed_operand (&ops[2], mem);
6857 create_input_operand (&ops[3], expected, mode);
6858 create_input_operand (&ops[4], desired, mode);
6859 create_integer_operand (&ops[5], is_weak);
6860 create_integer_operand (&ops[6], succ_model);
6861 create_integer_operand (&ops[7], fail_model);
6862 if (maybe_expand_insn (icode, 8, ops))
6864 /* Return success/failure. */
6865 target_bool = ops[0].value;
6866 target_oval = ops[1].value;
6867 goto success;
6871 /* Otherwise fall back to the original __sync_val_compare_and_swap
6872 which is always seq-cst. */
6873 icode = optab_handler (sync_compare_and_swap_optab, mode);
6874 if (icode != CODE_FOR_nothing)
6876 rtx cc_reg;
6878 create_output_operand (&ops[0], target_oval, mode);
6879 create_fixed_operand (&ops[1], mem);
6880 create_input_operand (&ops[2], expected, mode);
6881 create_input_operand (&ops[3], desired, mode);
6882 if (!maybe_expand_insn (icode, 4, ops))
6883 return false;
6885 target_oval = ops[0].value;
6887 /* If the caller isn't interested in the boolean return value,
6888 skip the computation of it. */
6889 if (ptarget_bool == NULL)
6890 goto success;
6892 /* Otherwise, work out if the compare-and-swap succeeded. */
6893 cc_reg = NULL_RTX;
6894 if (have_insn_for (COMPARE, CCmode))
6895 note_stores (get_last_insn (), find_cc_set, &cc_reg);
6896 if (cc_reg)
6898 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6899 const0_rtx, VOIDmode, 0, 1);
6900 goto success;
6902 goto success_bool_from_val;
6905 /* Also check for library support for __sync_val_compare_and_swap. */
6906 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6907 if (libfunc != NULL)
6909 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6910 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6911 mode, addr, ptr_mode,
6912 expected, mode, desired, mode);
6913 emit_move_insn (target_oval, target);
6915 /* Compute the boolean return value only if requested. */
6916 if (ptarget_bool)
6917 goto success_bool_from_val;
6918 else
6919 goto success;
6922 /* Failure. */
6923 return false;
6925 success_bool_from_val:
6926 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6927 expected, VOIDmode, 1, 1);
6928 success:
6929 /* Make sure that the oval output winds up where the caller asked. */
6930 if (ptarget_oval)
6931 *ptarget_oval = target_oval;
6932 if (ptarget_bool)
6933 *ptarget_bool = target_bool;
6934 return true;
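/* A minimal usage sketch (hypothetical caller, corresponding to a strong
   __atomic_compare_exchange):

     rtx bool_ret = gen_reg_rtx (word_mode);
     rtx old_val = gen_reg_rtx (GET_MODE (mem));
     bool ok = expand_atomic_compare_and_swap (&bool_ret, &old_val, mem,
					       expected, desired,
					       false,  // is_weak
					       MEMMODEL_SEQ_CST,
					       MEMMODEL_SEQ_CST);
     // On success, bool_ret and old_val have been updated in place.

   Either pointer may instead be NULL if that result is not needed.  */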
6937 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6939 static void
6940 expand_asm_memory_blockage (void)
6942 rtx asm_op, clob;
6944 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6945 rtvec_alloc (0), rtvec_alloc (0),
6946 rtvec_alloc (0), UNKNOWN_LOCATION);
6947 MEM_VOLATILE_P (asm_op) = 1;
6949 clob = gen_rtx_SCRATCH (VOIDmode);
6950 clob = gen_rtx_MEM (BLKmode, clob);
6951 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6953 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6956 /* Do not propagate memory accesses across this point. */
6958 static void
6959 expand_memory_blockage (void)
6961 if (targetm.have_memory_blockage ())
6962 emit_insn (targetm.gen_memory_blockage ());
6963 else
6964 expand_asm_memory_blockage ();
6967 /* Generate asm volatile("" : : : "memory") as a memory blockage, at the
6968 same time clobbering the register set specified by REGS. */
6970 void
6971 expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
6973 rtx asm_op, clob_mem;
6975 unsigned int num_of_regs = 0;
6976 for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6977 if (TEST_HARD_REG_BIT (regs, i))
6978 num_of_regs++;
6980 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6981 rtvec_alloc (0), rtvec_alloc (0),
6982 rtvec_alloc (0), UNKNOWN_LOCATION);
6983 MEM_VOLATILE_P (asm_op) = 1;
6985 rtvec v = rtvec_alloc (num_of_regs + 2);
6987 clob_mem = gen_rtx_SCRATCH (VOIDmode);
6988 clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
6989 clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);
6991 RTVEC_ELT (v, 0) = asm_op;
6992 RTVEC_ELT (v, 1) = clob_mem;
6994 if (num_of_regs > 0)
6996 unsigned int j = 2;
6997 for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6998 if (TEST_HARD_REG_BIT (regs, i))
7000 RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
7001 j++;
7003 gcc_assert (j == (num_of_regs + 2));
7006 emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
7009 /* This routine will either emit the mem_thread_fence pattern or issue a
7010 sync_synchronize to generate a fence for memory model MEMMODEL. */
7012 void
7013 expand_mem_thread_fence (enum memmodel model)
7015 if (is_mm_relaxed (model))
7016 return;
7017 if (targetm.have_mem_thread_fence ())
7019 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
7020 expand_memory_blockage ();
7022 else if (targetm.have_memory_barrier ())
7023 emit_insn (targetm.gen_memory_barrier ());
7024 else if (synchronize_libfunc != NULL_RTX)
7025 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
7026 else
7027 expand_memory_blockage ();
7030 /* Emit a signal fence with given memory model. */
7032 void
7033 expand_mem_signal_fence (enum memmodel model)
7035 /* No machine barrier is required to implement a signal fence, but
7036 a compiler memory barrier must be issued, except for relaxed MM. */
7037 if (!is_mm_relaxed (model))
7038 expand_memory_blockage ();
7041 /* This function expands the atomic load operation:
7042 return the atomically loaded value in MEM.
7044 MEMMODEL is the memory model variant to use.
7045 TARGET is an optional place to stick the return value. */
7047 rtx
7048 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
7050 machine_mode mode = GET_MODE (mem);
7051 enum insn_code icode;
7053 /* If the target supports the load directly, great. */
7054 icode = direct_optab_handler (atomic_load_optab, mode);
7055 if (icode != CODE_FOR_nothing)
7057 class expand_operand ops[3];
7058 rtx_insn *last = get_last_insn ();
7059 if (is_mm_seq_cst (model))
7060 expand_memory_blockage ();
7062 create_output_operand (&ops[0], target, mode);
7063 create_fixed_operand (&ops[1], mem);
7064 create_integer_operand (&ops[2], model);
7065 if (maybe_expand_insn (icode, 3, ops))
7067 if (!is_mm_relaxed (model))
7068 expand_memory_blockage ();
7069 return ops[0].value;
7071 delete_insns_since (last);
7074 /* If the size of the object is greater than word size on this target,
7075 then we assume that a load will not be atomic. We could try to
7076 emulate a load with a compare-and-swap operation, but the store that
7077 this might perform would be incorrect if this is a volatile atomic
7078 load or targets read-only-mapped memory. */
7079 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
7080 /* If there is no atomic load, leave the library call. */
7081 return NULL_RTX;
7083 /* Otherwise assume loads are atomic, and emit the proper barriers. */
7084 if (!target || target == const0_rtx)
7085 target = gen_reg_rtx (mode);
7087 /* For SEQ_CST, emit a barrier before the load. */
7088 if (is_mm_seq_cst (model))
7089 expand_mem_thread_fence (model);
7091 emit_move_insn (target, mem);
7093 /* Emit the appropriate barrier after the load. */
7094 expand_mem_thread_fence (model);
7096 return target;
7099 /* This function expands the atomic store operation:
7100 Atomically store VAL in MEM.
7101 MEMMODEL is the memory model variant to use.
7102 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
7103 This function returns const0_rtx if a pattern was emitted. */
7105 rtx
7106 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
7108 machine_mode mode = GET_MODE (mem);
7109 enum insn_code icode;
7110 class expand_operand ops[3];
7112 /* If the target supports the store directly, great. */
7113 icode = direct_optab_handler (atomic_store_optab, mode);
7114 if (icode != CODE_FOR_nothing)
7116 rtx_insn *last = get_last_insn ();
7117 if (!is_mm_relaxed (model))
7118 expand_memory_blockage ();
7119 create_fixed_operand (&ops[0], mem);
7120 create_input_operand (&ops[1], val, mode);
7121 create_integer_operand (&ops[2], model);
7122 if (maybe_expand_insn (icode, 3, ops))
7124 if (is_mm_seq_cst (model))
7125 expand_memory_blockage ();
7126 return const0_rtx;
7128 delete_insns_since (last);
7131 /* If using __sync_lock_release is a viable alternative, try it.
7132 Note that this will not be set to true if we are expanding a generic
7133 __atomic_store_n. */
7134 if (use_release)
7136 icode = direct_optab_handler (sync_lock_release_optab, mode);
7137 if (icode != CODE_FOR_nothing)
7139 create_fixed_operand (&ops[0], mem);
7140 create_input_operand (&ops[1], const0_rtx, mode);
7141 if (maybe_expand_insn (icode, 2, ops))
7143 /* lock_release is only a release barrier. */
7144 if (is_mm_seq_cst (model))
7145 expand_mem_thread_fence (model);
7146 return const0_rtx;
7151 /* If the size of the object is greater than word size on this target,
7152 a default store will not be atomic. */
7153 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
7155 /* If loads are atomic or we are called to provide a __sync builtin,
7156 we can try an atomic_exchange and throw away the result. Otherwise,
7157 don't do anything so that we do not create an inconsistency between
7158 loads and stores. */
7159 if (can_atomic_load_p (mode) || is_mm_sync (model))
7161 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
7162 if (!target)
7163 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
7164 val);
7165 if (target)
7166 return const0_rtx;
7168 return NULL_RTX;
7171 /* Otherwise assume stores are atomic, and emit the proper barriers. */
7172 expand_mem_thread_fence (model);
7174 emit_move_insn (mem, val);
7176 /* For SEQ_CST, also emit a barrier after the store. */
7177 if (is_mm_seq_cst (model))
7178 expand_mem_thread_fence (model);
7180 return const0_rtx;
7184 /* Structure containing the pointers and values required to process the
7185 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7187 struct atomic_op_functions
7189 direct_optab mem_fetch_before;
7190 direct_optab mem_fetch_after;
7191 direct_optab mem_no_result;
7192 optab fetch_before;
7193 optab fetch_after;
7194 direct_optab no_result;
7195 enum rtx_code reverse_code;
7199 /* Fill in structure pointed to by OP with the various optab entries for an
7200 operation of type CODE. */
7202 static void
7203 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
7205 gcc_assert (op != NULL);
7207 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
7208 in the source code during compilation, and the optab entries are not
7209 computable until runtime. Fill in the values at runtime. */
7210 switch (code)
7212 case PLUS:
7213 op->mem_fetch_before = atomic_fetch_add_optab;
7214 op->mem_fetch_after = atomic_add_fetch_optab;
7215 op->mem_no_result = atomic_add_optab;
7216 op->fetch_before = sync_old_add_optab;
7217 op->fetch_after = sync_new_add_optab;
7218 op->no_result = sync_add_optab;
7219 op->reverse_code = MINUS;
7220 break;
7221 case MINUS:
7222 op->mem_fetch_before = atomic_fetch_sub_optab;
7223 op->mem_fetch_after = atomic_sub_fetch_optab;
7224 op->mem_no_result = atomic_sub_optab;
7225 op->fetch_before = sync_old_sub_optab;
7226 op->fetch_after = sync_new_sub_optab;
7227 op->no_result = sync_sub_optab;
7228 op->reverse_code = PLUS;
7229 break;
7230 case XOR:
7231 op->mem_fetch_before = atomic_fetch_xor_optab;
7232 op->mem_fetch_after = atomic_xor_fetch_optab;
7233 op->mem_no_result = atomic_xor_optab;
7234 op->fetch_before = sync_old_xor_optab;
7235 op->fetch_after = sync_new_xor_optab;
7236 op->no_result = sync_xor_optab;
7237 op->reverse_code = XOR;
7238 break;
7239 case AND:
7240 op->mem_fetch_before = atomic_fetch_and_optab;
7241 op->mem_fetch_after = atomic_and_fetch_optab;
7242 op->mem_no_result = atomic_and_optab;
7243 op->fetch_before = sync_old_and_optab;
7244 op->fetch_after = sync_new_and_optab;
7245 op->no_result = sync_and_optab;
7246 op->reverse_code = UNKNOWN;
7247 break;
7248 case IOR:
7249 op->mem_fetch_before = atomic_fetch_or_optab;
7250 op->mem_fetch_after = atomic_or_fetch_optab;
7251 op->mem_no_result = atomic_or_optab;
7252 op->fetch_before = sync_old_ior_optab;
7253 op->fetch_after = sync_new_ior_optab;
7254 op->no_result = sync_ior_optab;
7255 op->reverse_code = UNKNOWN;
7256 break;
7257 case NOT:
7258 op->mem_fetch_before = atomic_fetch_nand_optab;
7259 op->mem_fetch_after = atomic_nand_fetch_optab;
7260 op->mem_no_result = atomic_nand_optab;
7261 op->fetch_before = sync_old_nand_optab;
7262 op->fetch_after = sync_new_nand_optab;
7263 op->no_result = sync_nand_optab;
7264 op->reverse_code = UNKNOWN;
7265 break;
7266 default:
7267 gcc_unreachable ();
7271 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
7272 using memory order MODEL. If AFTER is true the operation needs to return
7273 the value of *MEM after the operation, otherwise the previous value.
7274 TARGET is an optional place to place the result. The result is unused if
7275 it is const0_rtx.
7276 Return the result if there is a better sequence, otherwise NULL_RTX. */
7278 static rtx
7279 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7280 enum memmodel model, bool after)
7282 /* If the value is prefetched, or not used, it may be possible to replace
7283 the sequence with a native exchange operation. */
7284 if (!after || target == const0_rtx)
7286 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
7287 if (code == AND && val == const0_rtx)
7289 if (target == const0_rtx)
7290 target = gen_reg_rtx (GET_MODE (mem));
7291 return maybe_emit_atomic_exchange (target, mem, val, model);
7294 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
7295 if (code == IOR && val == constm1_rtx)
7297 if (target == const0_rtx)
7298 target = gen_reg_rtx (GET_MODE (mem));
7299 return maybe_emit_atomic_exchange (target, mem, val, model);
7303 return NULL_RTX;
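/* For example, __atomic_fetch_and (&x, 0, model) stores 0 and returns the
   previous value, which is exactly __atomic_exchange_n (&x, 0, model);
   likewise __atomic_fetch_or (&x, -1, model) matches an exchange of -1.
   The !after check above restricts this to the fetch-before forms (or to
   results that are unused).  */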
7306 /* Try to emit an instruction for a specific operation variation.
7307 OPTAB contains the OP functions.
7308 TARGET is an optional place to return the result. const0_rtx means unused.
7309 MEM is the memory location to operate on.
7310 VAL is the value to use in the operation.
7311 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
7312 MODEL is the memory model, if used.
7313 AFTER is true if the returned result is the value after the operation. */
7315 static rtx
7316 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
7317 rtx val, bool use_memmodel, enum memmodel model, bool after)
7319 machine_mode mode = GET_MODE (mem);
7320 class expand_operand ops[4];
7321 enum insn_code icode;
7322 int op_counter = 0;
7323 int num_ops;
7325 /* Check to see if there is a result returned. */
7326 if (target == const0_rtx)
7328 if (use_memmodel)
7330 icode = direct_optab_handler (optab->mem_no_result, mode);
7331 create_integer_operand (&ops[2], model);
7332 num_ops = 3;
7334 else
7336 icode = direct_optab_handler (optab->no_result, mode);
7337 num_ops = 2;
7340 /* Otherwise, we need to generate a result. */
7341 else
7343 if (use_memmodel)
7345 icode = direct_optab_handler (after ? optab->mem_fetch_after
7346 : optab->mem_fetch_before, mode);
7347 create_integer_operand (&ops[3], model);
7348 num_ops = 4;
7350 else
7352 icode = optab_handler (after ? optab->fetch_after
7353 : optab->fetch_before, mode);
7354 num_ops = 3;
7356 create_output_operand (&ops[op_counter++], target, mode);
7358 if (icode == CODE_FOR_nothing)
7359 return NULL_RTX;
7361 create_fixed_operand (&ops[op_counter++], mem);
7362 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7363 create_convert_operand_to (&ops[op_counter++], val, mode, true);
7365 if (maybe_expand_insn (icode, num_ops, ops))
7366 return (target == const0_rtx ? const0_rtx : ops[0].value);
7368 return NULL_RTX;
7372 /* This function expands an atomic fetch_OP or OP_fetch operation:
7373 TARGET is an optional place to stick the return value. const0_rtx indicates
7374 the result is unused.
7375 atomically fetch MEM, perform the operation with VAL and return it to MEM.
7376 CODE is the operation being performed (OP)
7377 MEMMODEL is the memory model variant to use.
7378 AFTER is true to return the result of the operation (OP_fetch).
7379 AFTER is false to return the value before the operation (fetch_OP).
7381 This function will *only* generate instructions if there is a direct
7382 optab. No compare and swap loops or libcalls will be generated. */
7384 static rtx
7385 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
7386 enum rtx_code code, enum memmodel model,
7387 bool after)
7389 machine_mode mode = GET_MODE (mem);
7390 struct atomic_op_functions optab;
7391 rtx result;
7392 bool unused_result = (target == const0_rtx);
7394 get_atomic_op_for_code (&optab, code);
7396 /* Check to see if there are any better instructions. */
7397 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
7398 if (result)
7399 return result;
7401 /* Check for the case where the result isn't used and try those patterns. */
7402 if (unused_result)
7404 /* Try the memory model variant first. */
7405 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
7406 if (result)
7407 return result;
7409 /* Next try the old style without a memory model. */
7410 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
7411 if (result)
7412 return result;
7414 /* There is no no-result pattern, so try patterns with a result. */
7415 target = NULL_RTX;
7418 /* Try the __atomic version. */
7419 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
7420 if (result)
7421 return result;
7423 /* Try the older __sync version. */
7424 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
7425 if (result)
7426 return result;
7428 /* If the fetch value can be calculated from the other variation of fetch,
7429 try that operation. */
7430 if (after || unused_result || optab.reverse_code != UNKNOWN)
7432 /* Try the __atomic version, then the older __sync version. */
7433 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
7434 if (!result)
7435 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
7437 if (result)
7439 /* If the result isn't used, no need to do compensation code. */
7440 if (unused_result)
7441 return result;
7443 /* Issue compensation code. Fetch_after == fetch_before OP val.
7444 Fetch_before == fetch_after REVERSE_OP val. */
7445 if (!after)
7446 code = optab.reverse_code;
7447 if (code == NOT)
7449 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
7450 true, OPTAB_LIB_WIDEN);
7451 result = expand_simple_unop (mode, NOT, result, target, true);
7453 else
7454 result = expand_simple_binop (mode, code, result, val, target,
7455 true, OPTAB_LIB_WIDEN);
7456 return result;
7460 /* No direct opcode can be generated. */
7461 return NULL_RTX;
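/* A worked example of the compensation step above (sketch only).
   Expanding fetch_add (AFTER == false) on a target that only provides
   an add_fetch-style pattern yields OLD + VAL; the reverse code of
   PLUS is MINUS, so the pre-operation value is recovered as
   (OLD + VAL) - VAL. For NAND (CODE == NOT) no reverse code exists
   (reverse_code == UNKNOWN), so only the forward direction can be
   compensated, as ~(RESULT & VAL) in the NOT branch above. */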
7466 /* This function expands an atomic fetch_OP or OP_fetch operation:
7467 TARGET is an optional place to stick the return value. const0_rtx indicates
7468 the result is unused.
7469 Atomically fetch MEM, perform the operation with VAL, and store it back to MEM.
7470 CODE is the operation being performed (OP).
7471 MEMMODEL is the memory model variant to use.
7472 AFTER is true to return the result of the operation (OP_fetch).
7473 AFTER is false to return the value before the operation (fetch_OP). */
7474 rtx
7475 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7476 enum memmodel model, bool after)
7478 machine_mode mode = GET_MODE (mem);
7479 rtx result;
7480 bool unused_result = (target == const0_rtx);
7482 /* If loads are not atomic for the required size and we are not called to
7483 provide a __sync builtin, do not do anything so that we stay consistent
7484 with atomic loads of the same size. */
7485 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
7486 return NULL_RTX;
7488 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
7489 after);
7491 if (result)
7492 return result;
7494 /* Add/sub can be implemented by doing the reverse operation with -(val). */
7495 if (code == PLUS || code == MINUS)
7497 rtx tmp;
7498 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
7500 start_sequence ();
7501 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
7502 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
7503 model, after);
7504 if (result)
7506 /* The reversed operation worked, so emit the insns and return. */
7507 tmp = get_insns ();
7508 end_sequence ();
7509 emit_insn (tmp);
7510 return result;
7513 /* The reversed operation did not work, so throw away the negation code and continue. */
7514 end_sequence ();
7517 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
7518 if (!can_compare_and_swap_p (mode, false))
7520 rtx libfunc;
7521 bool fixup = false;
7522 enum rtx_code orig_code = code;
7523 struct atomic_op_functions optab;
7525 get_atomic_op_for_code (&optab, code);
7526 libfunc = optab_libfunc (after ? optab.fetch_after
7527 : optab.fetch_before, mode);
7528 if (libfunc == NULL
7529 && (after || unused_result || optab.reverse_code != UNKNOWN))
7531 fixup = true;
7532 if (!after)
7533 code = optab.reverse_code;
7534 libfunc = optab_libfunc (after ? optab.fetch_before
7535 : optab.fetch_after, mode);
7537 if (libfunc != NULL)
7539 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7540 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
7541 addr, ptr_mode, val, mode);
7543 if (!unused_result && fixup)
7544 result = expand_simple_binop (mode, code, result, val, target,
7545 true, OPTAB_LIB_WIDEN);
7546 return result;
7549 /* We need the original code for any further attempts. */
7550 code = orig_code;
7553 /* If nothing else has succeeded, default to a compare and swap loop. */
7554 if (can_compare_and_swap_p (mode, true))
7556 rtx_insn *insn;
7557 rtx t0 = gen_reg_rtx (mode), t1;
7559 start_sequence ();
7561 /* If the result is used, get a register for it. */
7562 if (!unused_result)
7564 if (!target || !register_operand (target, mode))
7565 target = gen_reg_rtx (mode);
7566 /* If fetch_before, copy the value now. */
7567 if (!after)
7568 emit_move_insn (target, t0);
7570 else
7571 target = const0_rtx;
7573 t1 = t0;
7574 if (code == NOT)
7576 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7577 true, OPTAB_LIB_WIDEN);
7578 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7580 else
7581 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
7582 OPTAB_LIB_WIDEN);
7584 /* For after, copy the value now. */
7585 if (!unused_result && after)
7586 emit_move_insn (target, t1);
7587 insn = get_insns ();
7588 end_sequence ();
7590 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7591 return target;
7594 return NULL_RTX;
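/* An illustrative call chain (sketch; the exact caller lives in the
   builtin expanders, not shown here). A source-level
     __atomic_fetch_add (&x, 5, __ATOMIC_SEQ_CST)
   reaches this function roughly as
     expand_atomic_fetch_op (target, mem, GEN_INT (5), PLUS,
                             MEMMODEL_SEQ_CST, false);
   whereas __atomic_add_fetch passes AFTER == true. A NULL_RTX return
   tells the caller to fall back to an external library call. */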
7597 /* Return true if OPERAND is suitable for operand number OPNO of
7598 instruction ICODE. */
7600 bool
7601 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7603 return (!insn_data[(int) icode].operand[opno].predicate
7604 || (insn_data[(int) icode].operand[opno].predicate
7605 (operand, insn_data[(int) icode].operand[opno].mode)));
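/* Example (illustrative; CODE_FOR_addsi3 is a target-specific name and
   may not exist everywhere): a caller can check whether an immediate
   satisfies operand 2's predicate before committing to a pattern:
     if (insn_operand_matches (CODE_FOR_addsi3, 2, GEN_INT (123)))
       ... the constant can be used directly ... */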
7608 /* TARGET is a target of a multiword operation that we are going to
7609 implement as a series of word-mode operations. Return true if
7610 TARGET is suitable for this purpose. */
7612 bool
7613 valid_multiword_target_p (rtx target)
7615 machine_mode mode;
7616 int i, size;
7618 mode = GET_MODE (target);
7619 if (!GET_MODE_SIZE (mode).is_constant (&size))
7620 return false;
7621 for (i = 0; i < size; i += UNITS_PER_WORD)
7622 if (!validate_subreg (word_mode, mode, target, i))
7623 return false;
7624 return true;
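/* A concrete reading (sketch): on a 32-bit target, an 8-byte DImode
   TARGET is checked as two word-sized pieces, i.e. validate_subreg is
   called for byte offsets 0 and 4; any mode whose size is not a
   compile-time constant (a variable-length vector, say) fails the
   is_constant test and is rejected. */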
7627 /* Make OP describe an input operand that has value INTVAL and that has
7628 no inherent mode. This function should only be used for operands that
7629 are always expand-time constants. The backend may request that INTVAL
7630 be copied into a different kind of rtx, but it must specify the mode
7631 of that rtx if so. */
7633 void
7634 create_integer_operand (class expand_operand *op, poly_int64 intval)
7636 create_expand_operand (op, EXPAND_INTEGER,
7637 gen_int_mode (intval, MAX_MODE_INT),
7638 VOIDmode, false, intval);
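/* Usage sketch (illustrative): the memory-model arguments of the
   atomic patterns above are passed this way, e.g.
     create_integer_operand (&ops[2], model);
   The MAX_MODE_INT rtx built here is only a placeholder; the
   EXPAND_INTEGER case of maybe_legitimize_operand re-creates the value
   with gen_int_mode once the operand's true mode is known. */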
7641 /* Like maybe_legitimize_operand, but do not change the code of the
7642 current rtx value. */
7644 static bool
7645 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
7646 class expand_operand *op)
7648 /* See if the operand matches in its current form. */
7649 if (insn_operand_matches (icode, opno, op->value))
7650 return true;
7652 /* If the operand is a memory whose address has no side effects,
7653 try forcing the address into a non-virtual pseudo register.
7654 The check for side effects is important because copy_to_mode_reg
7655 cannot handle things like auto-modified addresses. */
7656 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
7658 rtx addr, mem;
7660 mem = op->value;
7661 addr = XEXP (mem, 0);
7662 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
7663 && !side_effects_p (addr))
7665 rtx_insn *last;
7666 machine_mode mode;
7668 last = get_last_insn ();
7669 mode = get_address_mode (mem);
7670 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7671 if (insn_operand_matches (icode, opno, mem))
7673 op->value = mem;
7674 return true;
7676 delete_insns_since (last);
7680 return false;
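/* An illustrative failure case (sketch): an auto-modified memory
   operand such as (mem (post_inc (reg R))) has a side-effecting
   address, so the force-to-register retry above is skipped; copying
   the address into a pseudo would silently drop the post-increment.
   A plain computed address like (plus (reg R) (const_int 64)) can be
   retried through a fresh pseudo safely. */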
7683 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7684 on success, storing the new operand value back in OP. */
7686 static bool
7687 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7688 class expand_operand *op)
7690 machine_mode mode, imode, tmode;
7692 mode = op->mode;
7693 switch (op->type)
7695 case EXPAND_FIXED:
7697 temporary_volatile_ok v (true);
7698 return maybe_legitimize_operand_same_code (icode, opno, op);
7701 case EXPAND_OUTPUT:
7702 gcc_assert (mode != VOIDmode);
7703 if (op->value
7704 && op->value != const0_rtx
7705 && GET_MODE (op->value) == mode
7706 && maybe_legitimize_operand_same_code (icode, opno, op))
7707 return true;
7709 op->value = gen_reg_rtx (mode);
7710 op->target = 0;
7711 break;
7713 case EXPAND_INPUT:
7714 input:
7715 gcc_assert (mode != VOIDmode);
7716 gcc_assert (GET_MODE (op->value) == VOIDmode
7717 || GET_MODE (op->value) == mode);
7718 if (maybe_legitimize_operand_same_code (icode, opno, op))
7719 return true;
7721 op->value = copy_to_mode_reg (mode, op->value);
7722 break;
7724 case EXPAND_CONVERT_TO:
7725 gcc_assert (mode != VOIDmode);
7726 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7727 goto input;
7729 case EXPAND_CONVERT_FROM:
7730 if (GET_MODE (op->value) != VOIDmode)
7731 mode = GET_MODE (op->value);
7732 else
7733 /* The caller must tell us what mode this value has. */
7734 gcc_assert (mode != VOIDmode);
7736 imode = insn_data[(int) icode].operand[opno].mode;
7737 tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
7738 ? GET_MODE_INNER (imode) : imode);
7739 if (tmode != VOIDmode && tmode != mode)
7741 op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
7742 mode = tmode;
7744 if (imode != VOIDmode && imode != mode)
7746 gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
7747 op->value = expand_vector_broadcast (imode, op->value);
7748 mode = imode;
7750 goto input;
7752 case EXPAND_ADDRESS:
7753 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7754 op->value);
7755 goto input;
7757 case EXPAND_INTEGER:
7758 mode = insn_data[(int) icode].operand[opno].mode;
7759 if (mode != VOIDmode
7760 && known_eq (trunc_int_for_mode (op->int_value, mode),
7761 op->int_value))
7763 op->value = gen_int_mode (op->int_value, mode);
7764 goto input;
7766 break;
7768 return insn_operand_matches (icode, opno, op->value);
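/* A sketch of the EXPAND_CONVERT_FROM vector path above: if a caller
   supplies an SImode scalar for an operand whose instruction mode is
   V4SImode, the scalar is first converted to the vector's inner mode
   and then duplicated into all lanes via
     expand_vector_broadcast (V4SImode, op->value);
   which lets a scalar source feed a vector pattern. */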
7771 /* Make OP describe an input operand that should have the same value
7772 as VALUE, after any mode conversion that the target might request.
7773 TYPE is the type of VALUE. */
7775 void
7776 create_convert_operand_from_type (class expand_operand *op,
7777 rtx value, tree type)
7779 create_convert_operand_from (op, value, TYPE_MODE (type),
7780 TYPE_UNSIGNED (type));
7783 /* Return true if the requirements on operands OP1 and OP2 of instruction
7784 ICODE are similar enough for the result of legitimizing OP1 to be
7785 reusable for OP2. OPNO1 and OPNO2 are the operand numbers associated
7786 with OP1 and OP2 respectively. */
7788 static inline bool
7789 can_reuse_operands_p (enum insn_code icode,
7790 unsigned int opno1, unsigned int opno2,
7791 const class expand_operand *op1,
7792 const class expand_operand *op2)
7794 /* Check requirements that are common to all types. */
7795 if (op1->type != op2->type
7796 || op1->mode != op2->mode
7797 || (insn_data[(int) icode].operand[opno1].mode
7798 != insn_data[(int) icode].operand[opno2].mode))
7799 return false;
7801 /* Check the requirements for specific types. */
7802 switch (op1->type)
7804 case EXPAND_OUTPUT:
7805 /* Outputs must remain distinct. */
7806 return false;
7808 case EXPAND_FIXED:
7809 case EXPAND_INPUT:
7810 case EXPAND_ADDRESS:
7811 case EXPAND_INTEGER:
7812 return true;
7814 case EXPAND_CONVERT_TO:
7815 case EXPAND_CONVERT_FROM:
7816 return op1->unsigned_p == op2->unsigned_p;
7818 gcc_unreachable ();
7821 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7822 of instruction ICODE. Return true on success, leaving the new operand
7823 values in the OPS themselves. Emit no code on failure. */
7825 bool
7826 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7827 unsigned int nops, class expand_operand *ops)
7829 rtx_insn *last = get_last_insn ();
7830 rtx *orig_values = XALLOCAVEC (rtx, nops);
7831 for (unsigned int i = 0; i < nops; i++)
7833 orig_values[i] = ops[i].value;
7835 /* First try reusing the result of an earlier legitimization.
7836 This avoids duplicate rtl and ensures that tied operands
7837 remain tied.
7839 This search is linear, but NOPS is bounded at compile time
7840 to a small number (currently a single digit). */
7841 unsigned int j = 0;
7842 for (; j < i; ++j)
7843 if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
7844 && rtx_equal_p (orig_values[j], orig_values[i])
7845 && ops[j].value
7846 && insn_operand_matches (icode, opno + i, ops[j].value))
7848 ops[i].value = copy_rtx (ops[j].value);
7849 break;
7852 /* Otherwise try legitimizing the operand on its own. */
7853 if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
7855 delete_insns_since (last);
7856 return false;
7859 return true;
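/* A worked example of the reuse logic above (sketch): if a pattern
   receives the same pseudo for two input operands and the first has
   already been legitimized into a new register, the
   can_reuse_operands_p / rtx_equal_p test lets the second operand take
   copy_rtx of that result, so the operands stay tied instead of being
   loaded into two different registers. */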
7862 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7863 as its operands. Return the instruction pattern on success,
7864 and emit any necessary set-up code. Return null and emit no
7865 code on failure. */
7867 rtx_insn *
7868 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7869 class expand_operand *ops)
7871 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7872 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7873 return NULL;
7875 switch (nops)
7877 case 1:
7878 return GEN_FCN (icode) (ops[0].value);
7879 case 2:
7880 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7881 case 3:
7882 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7883 case 4:
7884 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7885 ops[3].value);
7886 case 5:
7887 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7888 ops[3].value, ops[4].value);
7889 case 6:
7890 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7891 ops[3].value, ops[4].value, ops[5].value);
7892 case 7:
7893 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7894 ops[3].value, ops[4].value, ops[5].value,
7895 ops[6].value);
7896 case 8:
7897 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7898 ops[3].value, ops[4].value, ops[5].value,
7899 ops[6].value, ops[7].value);
7900 case 9:
7901 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7902 ops[3].value, ops[4].value, ops[5].value,
7903 ops[6].value, ops[7].value, ops[8].value);
7905 gcc_unreachable ();
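/* The canonical calling convention (illustrative sketch; ICODE, TARGET
   and the operand rtxes come from the caller):

     class expand_operand ops[3];
     create_output_operand (&ops[0], target, mode);
     create_input_operand (&ops[1], op0, mode);
     create_input_operand (&ops[2], op1, mode);
     if (maybe_expand_insn (icode, 3, ops))
       return ops[0].value;

   maybe_gen_insn returns the pattern without emitting it; wrappers
   such as maybe_expand_insn below take care of emit_insn. */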
7908 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7909 as its operands. Return true on success and emit no code on failure. */
7911 bool
7912 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7913 class expand_operand *ops)
7915 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7916 if (pat)
7918 emit_insn (pat);
7919 return true;
7921 return false;
7924 /* Like maybe_expand_insn, but for jumps. */
7926 bool
7927 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7928 class expand_operand *ops)
7930 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7931 if (pat)
7933 emit_jump_insn (pat);
7934 return true;
7936 return false;
7939 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7940 as its operands. */
7942 void
7943 expand_insn (enum insn_code icode, unsigned int nops,
7944 class expand_operand *ops)
7946 if (!maybe_expand_insn (icode, nops, ops))
7947 gcc_unreachable ();
7950 /* Like expand_insn, but for jumps. */
7952 void
7953 expand_jump_insn (enum insn_code icode, unsigned int nops,
7954 class expand_operand *ops)
7956 if (!maybe_expand_jump_insn (icode, nops, ops))
7957 gcc_unreachable ();