/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
                rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUIV note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
                > GET_MODE_UNIT_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0),
                           copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
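
/* Illustrative sketch (editorial, not from the original source): if a
   multi-insn sequence computes r60 = r58 + r59, the last insn that sets
   r60 gets a note of roughly this shape:

     REG_EQUAL (plus:SI (reg:SI 58) (reg:SI 59))

   so later passes can treat r60 as equal to the whole expression rather
   than just its final step.  Register numbers and mode are hypothetical.  */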
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or
     paradoxical SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
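
/* Illustrative note (editorial): when widening a QImode operand to SImode
   for an AND whose result will be truncated back to QImode, the upper 24
   bits of the inputs cannot affect the low 8 bits of the result, so
   NO_EXTEND lets us take a cheap lowpart or paradoxical SUBREG instead of
   emitting a real zero- or sign-extension.  A right shift, by contrast,
   pulls high bits into the low part, so there NO_EXTEND must be zero.  */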
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to
      the operation, but wider than all the other arguments to the
      operation.  Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                 nops  OP0     OP1     WIDE_OP
   widening-sum                  2     oprnd0  -       oprnd1
   widening-dot-product          3     oprnd0  oprnd1  oprnd2
   widening-mult                 2     oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1     oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value is generated there, if it is
   convenient to do so.  In all cases an rtx is returned for the locus
   of the value; this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vec_duplicate_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
                                 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
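
/* Illustrative sketch (editorial): broadcasting (const_int 5) into V4SImode
   simply yields the constant vector [5, 5, 5, 5].  For a variable SImode
   value in r100, a target with a vec_duplicate pattern gets

     (set (reg:V4SI 101) (vec_duplicate:V4SI (reg:SI 100)))

   and otherwise we fall back to a vec_init whose PARALLEL repeats r100
   four times.  Register numbers are hypothetical.  */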
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
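
/* Illustrative sketch (editorial): for a 64-bit logical left shift by a
   variable N with 0 < N < 32 on a 32-bit target (so OUTOF is the low word
   and INTO the high word), the sequence above computes

     carries      = outof_input >> (32 - N);
     into_target  = (into_input << N) | carries;
     outof_target = outof_input << N;

   with the 32 - N computation adjusted as described so that no word-mode
   shift count ever equals BITS_PER_WORD.  */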
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
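
/* Illustrative sketch (editorial): for a variable doubleword shift the
   routine below amounts to

     if (n < BITS_PER_WORD)
       <subword case: both result words depend on both input words>
     else
       <superword case: one input word supplies all surviving bits>

   where the test is resolved at compile time when possible, otherwise
   with conditional moves, or with an explicit branch as a last resort.  */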
static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label,
                           profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:

                                    _______________________
                                   [__op0_high_|__op0_low__]
                                    _______________________
                               *   [__op1_high_|__op1_low__]
                    _______________________________________

                                    _______________________
    (1)                            [__op0_low__*__op1_low__]
                        _______________________
    (2a)               [__op0_low__*__op1_high_]
                        _______________________
    (2b)               [__op0_high_*__op1_low__]
            _______________________
    (3)    [__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
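
/* A worked sketch of the decomposition (editorial illustration in plain C,
   assuming 32-bit words; uint32_t/uint64_t stand in for word_mode/MODE,
   and a, b are the two uint64_t operands):

     uint32_t a_lo = a, a_hi = a >> 32;         // operand halves
     uint32_t b_lo = b, b_hi = b >> 32;
     uint64_t low = (uint64_t) a_lo * b_lo;     // (1): widening multiply
     uint32_t mid = a_lo * b_hi + a_hi * b_lo;  // (2a)+(2b): non-widening
     uint64_t product = low + ((uint64_t) mid << 32);

   Partial product (3) and the high words of (2a)/(2b) only affect bits
   above bit 63 and are discarded, exactly as described above.  */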
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */

rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
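
/* Illustrative note (editorial): on a RISC target where materializing a
   wide constant such as 0x12345678 takes two instructions, using it
   directly as an operand of every expanded multiply would repeat that
   cost; once rtx_cost of the constant in operand position exceeds its
   plain set_src_cost, the code above loads it into a pseudo so later
   passes can share or hoist the load.  Constant and costs are
   hypothetical.  */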
/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value is generated there, if it is
   convenient to do so.  In all cases an rtx is returned for the locus
   of the value; this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  enum insn_code icode;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, an out-of-range constant op1 might have been created in a
     different mode than MODE.  Such constants are invalid, so force them
     into a register to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN)
    {
      if (convert_optab_p (binoptab))
        {
          machine_mode from_mode = widened_mode (mode, op0, op1);
          icode = find_widening_optab_handler (binoptab, mode, from_mode);
        }
      else
        icode = optab_handler (binoptab, mode);
      if (icode != CODE_FOR_nothing)
        {
          temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
                                        target, unsignedp, methods, last);
          if (temp)
            return temp;
        }
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }
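
  /* Illustrative note (editorial): the identity used above is
     rotl (x, n) == rotr (x, bits - n); e.g. a 32-bit rotate left by 5
     becomes a rotate right by 27.  For a variable count the replacement
     count is computed as bits - n, or simply as -n when the target
     truncates shift counts to bits - 1 anyway.  */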

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
                                  ? umul_widen_optab
                                  : smul_widen_optab),
                                 wider_mode, mode) != CODE_FOR_nothing))
    {
      /* *_widen_optab needs to determine operand mode, make sure at least
         one operand has non-VOID mode.  */
      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
        op0 = force_reg (mode, op0);
      temp = expand_binop (wider_mode,
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab
          && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
                  > GET_MODE_BITSIZE (inner_mode)))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
        machine_mode next_mode;
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 next_mode, mode)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, int_mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, int_mode),
                                operand_subword_force (op1, i, int_mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }
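
  /* Illustrative sketch (editorial): a DImode AND on a 32-bit target
     becomes two independent SImode ANDs, one per word, e.g.

       (set (subreg:SI (reg:DI 100) 0) (and:SI ... low words ...))
       (set (subreg:SI (reg:DI 100) 4) (and:SI ... high words ...))

     Bitwise operations never carry between words, which is why this
     word-at-a-time split needs no glue code.  Registers are
     hypothetical.  */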

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
                  ? as_a <scalar_int_mode> (GET_MODE (op1))
                  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (int_mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, int_mode);
          into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

          outof_input = operand_subword_force (op0, outof_word, int_mode);
          into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is
         not a REG, first because having a register instead may open
         optimization opportunities, and second because if target and op0
         happen to be MEMs designating the same location, we would risk
         clobbering it too early in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1,
                                  into_temp2, into_target, unsignedp,
                                  next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
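
  /* Illustrative sketch (editorial, little-endian words assumed): a 64-bit
     rotate left by 40 on a 32-bit target is a rotate by BITS_PER_WORD + 8,
     so each result word mixes both input words:

       into_target  = (outof_input << 8) | (into_input  >> 24);
       outof_target = (into_input  << 8) | (outof_input >> 24);

     A rotate by exactly BITS_PER_WORD degenerates into the word swap
     handled above.  */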

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
          rtx op0_piece = operand_subword_force (xop0, index, int_mode);
          rtx op1_piece = operand_subword_force (xop1, index, int_mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx_insn *temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                int_mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
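
  /* Illustrative sketch (editorial): for a two-word addition the carry out
     of the low word is detected with an unsigned comparison, since an
     overflowing unsigned add wraps below its operand:

       low_result  = low0 + low1;
       carry       = (low_result < low0);   // the LT store-flag above
       high_result = high0 + high1 + carry;

     Subtraction uses GT the same way to detect a borrow.  */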
1709 /* Attempt to synthesize double word multiplies using a sequence of word
1710 mode multiplications. We first attempt to generate a sequence using a
1711 more efficient unsigned widening multiply, and if that fails we then
1712 try using a signed widening multiply. */
1714 if (binoptab == smul_optab
1715 && is_int_mode (mode, &int_mode)
1716 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1717 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1718 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1720 rtx product = NULL_RTX;
1721 if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
1722 != CODE_FOR_nothing)
1724 product = expand_doubleword_mult (int_mode, op0, op1, target,
1725 true, methods);
1726 if (!product)
1727 delete_insns_since (last);
1730 if (product == NULL_RTX
1731 && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
1732 != CODE_FOR_nothing))
1734 product = expand_doubleword_mult (int_mode, op0, op1, target,
1735 false, methods);
1736 if (!product)
1737 delete_insns_since (last);
1740 if (product != NULL_RTX)
1742 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
1744 rtx_insn *move = emit_move_insn (target ? target : product,
1745 product);
1746 set_dst_reg_note (move,
1747 REG_EQUAL,
1748 gen_rtx_fmt_ee (MULT, int_mode,
1749 copy_rtx (op0),
1750 copy_rtx (op1)),
1751 target ? target : product);
1753 return product;
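/* Illustrative sketch, not part of GCC, assuming 64-bit words and a
   128-bit integer type: the double-word product takes one widening
   multiply of the low halves plus the two truncated cross products
   added into the high word (the a_hi * b_hi term lies entirely above
   the result and is dropped):

     typedef unsigned long long u64;
     typedef unsigned __int128 u128;

     static u128
     mul_doubleword (u64 a_lo, u64 a_hi, u64 b_lo, u64 b_hi)
     {
       u128 p = (u128) a_lo * b_lo;                /* widening multiply */
       u64 hi = (u64) (p >> 64)
                + a_hi * b_lo + a_lo * b_hi;       /* truncated products */
       return ((u128) hi << 64) | (u64) p;
     }  */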
1757 /* It can't be open-coded in this mode.
1758 Use a library call if one is available and caller says that's ok. */
1760 libfunc = optab_libfunc (binoptab, mode);
1761 if (libfunc
1762 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1764 rtx_insn *insns;
1765 rtx op1x = op1;
1766 machine_mode op1_mode = mode;
1767 rtx value;
1769 start_sequence ();
1771 if (shift_optab_p (binoptab))
1773 op1_mode = targetm.libgcc_shift_count_mode ();
1774 /* Specify unsigned here,
1775 since negative shift counts are meaningless. */
1776 op1x = convert_to_mode (op1_mode, op1, 1);
1779 if (GET_MODE (op0) != VOIDmode
1780 && GET_MODE (op0) != mode)
1781 op0 = convert_to_mode (mode, op0, unsignedp);
1783 /* Pass 1 for NO_QUEUE so we don't lose any increments
1784 if the libcall is cse'd or moved. */
1785 value = emit_library_call_value (libfunc,
1786 NULL_RTX, LCT_CONST, mode,
1787 op0, mode, op1x, op1_mode);
1789 insns = get_insns ();
1790 end_sequence ();
1792 bool trapv = trapv_binoptab_p (binoptab);
1793 target = gen_reg_rtx (mode);
1794 emit_libcall_block_1 (insns, target, value,
1795 trapv ? NULL_RTX
1796 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1797 mode, op0, op1), trapv);
1799 return target;
1802 delete_insns_since (last);
1804 /* It can't be done in this mode. Can we do it in a wider mode? */
1806 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1807 || methods == OPTAB_MUST_WIDEN))
1809 /* Caller says, don't even try. */
1810 delete_insns_since (entry_last);
1811 return 0;
1814 /* Compute the value of METHODS to pass to recursive calls.
1815 Don't allow widening to be tried recursively. */
1817 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1819 /* Look for a wider mode of the same class for which it appears we can do
1820 the operation. */
1822 if (CLASS_HAS_WIDER_MODES_P (mclass))
1824 /* This code doesn't make sense for conversion optabs, since we
1825 wouldn't then want to extend the operands to be the same size
1826 as the result. */
1827 gcc_assert (!convert_optab_p (binoptab));
1828 FOR_EACH_WIDER_MODE (wider_mode, mode)
1830 if (optab_handler (binoptab, wider_mode)
1831 || (methods == OPTAB_LIB
1832 && optab_libfunc (binoptab, wider_mode)))
1834 rtx xop0 = op0, xop1 = op1;
1835 int no_extend = 0;
1837 /* For certain integer operations, we need not actually extend
1838 the narrow operands, as long as we will truncate
1839 the results to the same narrowness. */
1841 if ((binoptab == ior_optab || binoptab == and_optab
1842 || binoptab == xor_optab
1843 || binoptab == add_optab || binoptab == sub_optab
1844 || binoptab == smul_optab || binoptab == ashl_optab)
1845 && mclass == MODE_INT)
1846 no_extend = 1;
1848 xop0 = widen_operand (xop0, wider_mode, mode,
1849 unsignedp, no_extend);
1851 /* The second operand of a shift must always be extended. */
1852 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1853 no_extend && binoptab != ashl_optab);
1855 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1856 unsignedp, methods);
1857 if (temp)
1859 if (mclass != MODE_INT
1860 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1862 if (target == 0)
1863 target = gen_reg_rtx (mode);
1864 convert_move (target, temp, 0);
1865 return target;
1867 else
1868 return gen_lowpart (mode, temp);
1870 else
1871 delete_insns_since (last);
1876 delete_insns_since (entry_last);
1877 return 0;
1880 /* Expand a binary operator which has both signed and unsigned forms.
1881 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1882 signed operations.
1884 If we widen unsigned operands, we may use a signed wider operation instead
1885 of an unsigned wider operation, since the result would be the same. */
1888 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1889 rtx op0, rtx op1, rtx target, int unsignedp,
1890 enum optab_methods methods)
1892 rtx temp;
1893 optab direct_optab = unsignedp ? uoptab : soptab;
1894 bool save_enable;
1896 /* Do it without widening, if possible. */
1897 temp = expand_binop (mode, direct_optab, op0, op1, target,
1898 unsignedp, OPTAB_DIRECT);
1899 if (temp || methods == OPTAB_DIRECT)
1900 return temp;
1902 /* Try widening to a signed int. Disable any direct use of any
1903 signed insn in the current mode. */
1904 save_enable = swap_optab_enable (soptab, mode, false);
1906 temp = expand_binop (mode, soptab, op0, op1, target,
1907 unsignedp, OPTAB_WIDEN);
1909 /* For unsigned operands, try widening to an unsigned int. */
1910 if (!temp && unsignedp)
1911 temp = expand_binop (mode, uoptab, op0, op1, target,
1912 unsignedp, OPTAB_WIDEN);
1913 if (temp || methods == OPTAB_WIDEN)
1914 goto egress;
1916 /* Use the right width libcall if that exists. */
1917 temp = expand_binop (mode, direct_optab, op0, op1, target,
1918 unsignedp, OPTAB_LIB);
1919 if (temp || methods == OPTAB_LIB)
1920 goto egress;
1922 /* Must widen and use a libcall, with either the signed or unsigned form. */
1923 temp = expand_binop (mode, soptab, op0, op1, target,
1924 unsignedp, methods);
1925 if (!temp && unsignedp)
1926 temp = expand_binop (mode, uoptab, op0, op1, target,
1927 unsignedp, methods);
1929 egress:
1930 /* Undo the fiddling above. */
1931 if (save_enable)
1932 swap_optab_enable (soptab, mode, true);
1933 return temp;
1936 /* Generate code to perform an operation specified by UNOPTAB
1937 on operand OP0, with two results to TARG0 and TARG1.
1938 We assume that the order of the operands for the instruction
1939 is TARG0, TARG1, OP0.
1941 Either TARG0 or TARG1 may be zero, but what that means is that
1942 the result is not actually wanted. We will generate it into
1943 a dummy pseudo-reg and discard it. They may not both be zero.
1945 Returns 1 if this operation can be performed; 0 if not. */
1948 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1949 int unsignedp)
1951 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1952 enum mode_class mclass;
1953 machine_mode wider_mode;
1954 rtx_insn *entry_last = get_last_insn ();
1955 rtx_insn *last;
1957 mclass = GET_MODE_CLASS (mode);
1959 if (!targ0)
1960 targ0 = gen_reg_rtx (mode);
1961 if (!targ1)
1962 targ1 = gen_reg_rtx (mode);
1964 /* Record where to go back to if we fail. */
1965 last = get_last_insn ();
1967 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1969 struct expand_operand ops[3];
1970 enum insn_code icode = optab_handler (unoptab, mode);
1972 create_fixed_operand (&ops[0], targ0);
1973 create_fixed_operand (&ops[1], targ1);
1974 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1975 if (maybe_expand_insn (icode, 3, ops))
1976 return 1;
1979 /* It can't be done in this mode. Can we do it in a wider mode? */
1981 if (CLASS_HAS_WIDER_MODES_P (mclass))
1983 FOR_EACH_WIDER_MODE (wider_mode, mode)
1985 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1987 rtx t0 = gen_reg_rtx (wider_mode);
1988 rtx t1 = gen_reg_rtx (wider_mode);
1989 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1991 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1993 convert_move (targ0, t0, unsignedp);
1994 convert_move (targ1, t1, unsignedp);
1995 return 1;
1997 else
1998 delete_insns_since (last);
2003 delete_insns_since (entry_last);
2004 return 0;
2007 /* Generate code to perform an operation specified by BINOPTAB
2008 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2009 We assume that the order of the operands for the instruction
2010 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2011 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2013 Either TARG0 or TARG1 may be zero, but what that means is that
2014 the result is not actually wanted. We will generate it into
2015 a dummy pseudo-reg and discard it. They may not both be zero.
2017 Returns 1 if this operation can be performed; 0 if not. */
2020 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2021 int unsignedp)
2023 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2024 enum mode_class mclass;
2025 machine_mode wider_mode;
2026 rtx_insn *entry_last = get_last_insn ();
2027 rtx_insn *last;
2029 mclass = GET_MODE_CLASS (mode);
2031 if (!targ0)
2032 targ0 = gen_reg_rtx (mode);
2033 if (!targ1)
2034 targ1 = gen_reg_rtx (mode);
2036 /* Record where to go back to if we fail. */
2037 last = get_last_insn ();
2039 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2041 struct expand_operand ops[4];
2042 enum insn_code icode = optab_handler (binoptab, mode);
2043 machine_mode mode0 = insn_data[icode].operand[1].mode;
2044 machine_mode mode1 = insn_data[icode].operand[2].mode;
2045 rtx xop0 = op0, xop1 = op1;
2047 /* If we are optimizing, force expensive constants into a register. */
2048 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2049 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2051 create_fixed_operand (&ops[0], targ0);
2052 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2053 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2054 create_fixed_operand (&ops[3], targ1);
2055 if (maybe_expand_insn (icode, 4, ops))
2056 return 1;
2057 delete_insns_since (last);
2060 /* It can't be done in this mode. Can we do it in a wider mode? */
2062 if (CLASS_HAS_WIDER_MODES_P (mclass))
2064 FOR_EACH_WIDER_MODE (wider_mode, mode)
2066 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2068 rtx t0 = gen_reg_rtx (wider_mode);
2069 rtx t1 = gen_reg_rtx (wider_mode);
2070 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2071 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2073 if (expand_twoval_binop (binoptab, cop0, cop1,
2074 t0, t1, unsignedp))
2076 convert_move (targ0, t0, unsignedp);
2077 convert_move (targ1, t1, unsignedp);
2078 return 1;
2080 else
2081 delete_insns_since (last);
2086 delete_insns_since (entry_last);
2087 return 0;
2090 /* Expand the two-valued library call indicated by BINOPTAB, but
2091 preserve only one of the values. If TARG0 is non-NULL, the first
2092 value is placed into TARG0; otherwise the second value is placed
2093 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2094 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2095 This routine assumes that the value returned by the library call is
2096 as if the return value were of an integral mode twice as wide as the
2097 mode of OP0. Returns 1 if the call was successful. */
2099 bool
2100 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2101 rtx targ0, rtx targ1, enum rtx_code code)
2103 machine_mode mode;
2104 machine_mode libval_mode;
2105 rtx libval;
2106 rtx_insn *insns;
2107 rtx libfunc;
2109 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2110 gcc_assert (!targ0 != !targ1);
2112 mode = GET_MODE (op0);
2113 libfunc = optab_libfunc (binoptab, mode);
2114 if (!libfunc)
2115 return false;
2117 /* The value returned by the library function will have twice as
2118 many bits as the nominal MODE. */
2119 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2120 start_sequence ();
2121 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2122 libval_mode,
2123 op0, mode,
2124 op1, mode);
2125 /* Get the part of VAL containing the value that we want. */
2126 libval = simplify_gen_subreg (mode, libval, libval_mode,
2127 targ0 ? 0 : GET_MODE_SIZE (mode));
2128 insns = get_insns ();
2129 end_sequence ();
2130 /* Move the result into the desired location. */
2131 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2132 gen_rtx_fmt_ee (code, mode, op0, op1));
2134 return true;
2138 /* Wrapper around expand_unop which takes an rtx code to specify
2139 the operation to perform, not an optab pointer. All other
2140 arguments are the same. */
2142 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2143 rtx target, int unsignedp)
2145 optab unop = code_to_optab (code);
2146 gcc_assert (unop);
2148 return expand_unop (mode, unop, op0, target, unsignedp);
2151 /* Try calculating
2152 (clz:narrow x)
2154 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2156 A similar operation can be used for clrsb. UNOPTAB says which operation
2157 we are trying to expand. */
2158 static rtx
2159 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2161 opt_scalar_int_mode wider_mode_iter;
2162 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2164 scalar_int_mode wider_mode = wider_mode_iter.require ();
2165 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2167 rtx xop0, temp;
2168 rtx_insn *last;
2170 last = get_last_insn ();
2172 if (target == 0)
2173 target = gen_reg_rtx (mode);
2174 xop0 = widen_operand (op0, wider_mode, mode,
2175 unoptab != clrsb_optab, false);
2176 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2177 unoptab != clrsb_optab);
2178 if (temp != 0)
2179 temp = expand_binop
2180 (wider_mode, sub_optab, temp,
2181 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2182 - GET_MODE_PRECISION (mode),
2183 wider_mode),
2184 target, true, OPTAB_DIRECT);
2185 if (temp == 0)
2186 delete_insns_since (last);
2188 return temp;
2191 return 0;
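/* Worked example of the identity above, illustrative only: an 8-bit
   clz obtained from the 32-bit builtin, valid for x != 0:

     static int
     clz8 (unsigned char x)
     {
       return __builtin_clz ((unsigned int) x) - (32 - 8);
     }  */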
2194 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2195 quantities, choosing which based on whether the high word is nonzero. */
2196 static rtx
2197 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2199 rtx xop0 = force_reg (mode, op0);
2200 rtx subhi = gen_highpart (word_mode, xop0);
2201 rtx sublo = gen_lowpart (word_mode, xop0);
2202 rtx_code_label *hi0_label = gen_label_rtx ();
2203 rtx_code_label *after_label = gen_label_rtx ();
2204 rtx_insn *seq;
2205 rtx temp, result;
2207 /* If we were not given a target, use a word_mode register, not a
2208 'mode' register. The result will fit, and nobody is expecting
2209 anything bigger (the return type of __builtin_clz* is int). */
2210 if (!target)
2211 target = gen_reg_rtx (word_mode);
2213 /* In any case, write to a word_mode scratch in both branches of the
2214 conditional, so we can ensure there is a single move insn setting
2215 'target' to tag a REG_EQUAL note on. */
2216 result = gen_reg_rtx (word_mode);
2218 start_sequence ();
2220 /* If the high word is not equal to zero,
2221 then clz of the full value is clz of the high word. */
2222 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2223 word_mode, true, hi0_label);
2225 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2226 if (!temp)
2227 goto fail;
2229 if (temp != result)
2230 convert_move (result, temp, true);
2232 emit_jump_insn (targetm.gen_jump (after_label));
2233 emit_barrier ();
2235 /* Else clz of the full value is clz of the low word plus the number
2236 of bits in the high word. */
2237 emit_label (hi0_label);
2239 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2240 if (!temp)
2241 goto fail;
2242 temp = expand_binop (word_mode, add_optab, temp,
2243 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2244 result, true, OPTAB_DIRECT);
2245 if (!temp)
2246 goto fail;
2247 if (temp != result)
2248 convert_move (result, temp, true);
2250 emit_label (after_label);
2251 convert_move (target, result, true);
2253 seq = get_insns ();
2254 end_sequence ();
2256 add_equal_note (seq, target, CLZ, xop0, 0);
2257 emit_insn (seq);
2258 return target;
2260 fail:
2261 end_sequence ();
2262 return 0;
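/* Illustrative sketch, not part of GCC, assuming 64-bit words: the
   branchy sequence above computes

     static int
     clz_2words (unsigned long long hi, unsigned long long lo)
     {
       return hi != 0 ? __builtin_clzll (hi)
                      : 64 + __builtin_clzll (lo);  /* both zero: undefined */
     }  */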
2265 /* Try calculating popcount of a double-word quantity as two popcount's of
2266 word-sized quantities and summing up the results. */
2267 static rtx
2268 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2270 rtx t0, t1, t;
2271 rtx_insn *seq;
2273 start_sequence ();
2275 t0 = expand_unop_direct (word_mode, popcount_optab,
2276 operand_subword_force (op0, 0, mode), NULL_RTX,
2277 true);
2278 t1 = expand_unop_direct (word_mode, popcount_optab,
2279 operand_subword_force (op0, 1, mode), NULL_RTX,
2280 true);
2281 if (!t0 || !t1)
2283 end_sequence ();
2284 return NULL_RTX;
2287 /* If we were not given a target, use a word_mode register, not a
2288 'mode' register. The result will fit, and nobody is expecting
2289 anything bigger (the return type of __builtin_popcount* is int). */
2290 if (!target)
2291 target = gen_reg_rtx (word_mode);
2293 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2295 seq = get_insns ();
2296 end_sequence ();
2298 add_equal_note (seq, t, POPCOUNT, op0, 0);
2299 emit_insn (seq);
2300 return t;
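/* Illustrative sketch, not part of GCC, assuming 64-bit words: the
   sequence above is simply

     static int
     popcount_2words (unsigned long long hi, unsigned long long lo)
     {
       return __builtin_popcountll (hi) + __builtin_popcountll (lo);
     }  */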
2303 /* Try calculating
2304 (parity:wide x)
2306 (parity:narrow (low (x) ^ high (x))) */
2307 static rtx
2308 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2310 rtx t = expand_binop (word_mode, xor_optab,
2311 operand_subword_force (op0, 0, mode),
2312 operand_subword_force (op0, 1, mode),
2313 NULL_RTX, 0, OPTAB_DIRECT);
2314 return expand_unop (word_mode, parity_optab, t, target, true);
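/* Illustrative sketch, not part of GCC, assuming 64-bit words: parity
   is invariant under xor-folding the two halves together, so

     static int
     parity_2words (unsigned long long hi, unsigned long long lo)
     {
       return __builtin_parityll (hi ^ lo);
     }  */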
2317 /* Try calculating
2318 (bswap:narrow x)
2320 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2321 static rtx
2322 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2324 rtx x;
2325 rtx_insn *last;
2326 opt_scalar_int_mode wider_mode_iter;
2328 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2329 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2330 != CODE_FOR_nothing)
2331 break;
2333 if (!wider_mode_iter.exists ())
2334 return NULL_RTX;
2336 scalar_int_mode wider_mode = wider_mode_iter.require ();
2337 last = get_last_insn ();
2339 x = widen_operand (op0, wider_mode, mode, true, true);
2340 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2342 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2343 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2344 if (x != 0)
2345 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2346 GET_MODE_BITSIZE (wider_mode)
2347 - GET_MODE_BITSIZE (mode),
2348 NULL_RTX, true);
2350 if (x != 0)
2352 if (target == 0)
2353 target = gen_reg_rtx (mode);
2354 emit_move_insn (target, gen_lowpart (mode, x));
2356 else
2357 delete_insns_since (last);
2359 return target;
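/* Worked example of the identity above, illustrative only: a 16-bit
   byte swap via the 32-bit builtin, shifting the result back down:

     static unsigned short
     bswap16_via_32 (unsigned short x)
     {
       return (unsigned short) (__builtin_bswap32 (x) >> (32 - 16));
     }  */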
2362 /* Try calculating bswap as two bswaps of two word-sized operands. */
2364 static rtx
2365 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2367 rtx t0, t1;
2369 t1 = expand_unop (word_mode, bswap_optab,
2370 operand_subword_force (op, 0, mode), NULL_RTX, true);
2371 t0 = expand_unop (word_mode, bswap_optab,
2372 operand_subword_force (op, 1, mode), NULL_RTX, true);
2374 if (target == 0 || !valid_multiword_target_p (target))
2375 target = gen_reg_rtx (mode);
2376 if (REG_P (target))
2377 emit_clobber (target);
2378 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2379 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2381 return target;
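/* Illustrative sketch, not part of GCC, assuming 64-bit words: a
   double-word byte swap is two word swaps with the words exchanged,
   which is why t0 above (the swap of subword 1) is stored into
   subword 0 of the target:

     static void
     bswap_2words (unsigned long long dst[2], const unsigned long long src[2])
     {
       dst[0] = __builtin_bswap64 (src[1]);
       dst[1] = __builtin_bswap64 (src[0]);
     }  */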
2384 /* Try calculating (parity x) as (and (popcount x) 1), where
2385 popcount can also be done in a wider mode. */
2386 static rtx
2387 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2389 enum mode_class mclass = GET_MODE_CLASS (mode);
2390 opt_scalar_int_mode wider_mode_iter;
2391 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2393 scalar_int_mode wider_mode = wider_mode_iter.require ();
2394 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2396 rtx xop0, temp;
2397 rtx_insn *last;
2399 last = get_last_insn ();
2401 if (target == 0 || GET_MODE (target) != wider_mode)
2402 target = gen_reg_rtx (wider_mode);
2404 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2405 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2406 true);
2407 if (temp != 0)
2408 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2409 target, true, OPTAB_DIRECT);
2411 if (temp)
2413 if (mclass != MODE_INT
2414 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2415 return convert_to_mode (mode, temp, 0);
2416 else
2417 return gen_lowpart (mode, temp);
2419 else
2420 delete_insns_since (last);
2423 return 0;
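/* Illustrative sketch, not part of GCC: the (and (popcount x) 1) form
   above, written directly for 32 bits:

     static int
     parity32 (unsigned int x)
     {
       return __builtin_popcount (x) & 1;
     }  */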
2426 /* Try calculating ctz(x) as K - clz(x & -x) ,
2427 where K is GET_MODE_PRECISION(mode) - 1.
2429 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2430 don't have to worry about what the hardware does in that case. (If
2431 the clz instruction produces the usual value at 0, which is K, the
2432 result of this code sequence will be -1; expand_ffs, below, relies
2433 on this. It might be nice to have it be K instead, for consistency
2434 with the (very few) processors that provide a ctz with a defined
2435 value, but that would take one more instruction, and it would be
2436 less convenient for expand_ffs anyway.) */
2438 static rtx
2439 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2441 rtx_insn *seq;
2442 rtx temp;
2444 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2445 return 0;
2447 start_sequence ();
2449 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2450 if (temp)
2451 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2452 true, OPTAB_DIRECT);
2453 if (temp)
2454 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2455 if (temp)
2456 temp = expand_binop (mode, sub_optab,
2457 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2458 temp, target,
2459 true, OPTAB_DIRECT);
2460 if (temp == 0)
2462 end_sequence ();
2463 return 0;
2466 seq = get_insns ();
2467 end_sequence ();
2469 add_equal_note (seq, temp, CTZ, op0, 0);
2470 emit_insn (seq);
2471 return temp;
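/* Worked example of the K - clz (x & -x) identity, illustrative only
   and valid for x != 0; x & -x isolates the lowest set bit:

     static int
     ctz32 (unsigned int x)
     {
       return 31 - __builtin_clz (x & -x);
     }  */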
2475 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2476 else with the sequence used by expand_clz.
2478 The ffs builtin promises to return zero for a zero value and ctz/clz
2479 may have an undefined value in that case. If they do not give us a
2480 convenient value, we have to generate a test and branch. */
2481 static rtx
2482 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2484 HOST_WIDE_INT val = 0;
2485 bool defined_at_zero = false;
2486 rtx temp;
2487 rtx_insn *seq;
2489 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2491 start_sequence ();
2493 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2494 if (!temp)
2495 goto fail;
2497 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2499 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2501 start_sequence ();
2502 temp = expand_ctz (mode, op0, 0);
2503 if (!temp)
2504 goto fail;
2506 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2508 defined_at_zero = true;
2509 val = (GET_MODE_PRECISION (mode) - 1) - val;
2512 else
2513 return 0;
2515 if (defined_at_zero && val == -1)
2516 /* No correction needed at zero. */;
2517 else
2519 /* We don't try to do anything clever with the situation found
2520 on some processors (e.g. Alpha) where ctz(0:mode) ==
2521 bitsize(mode). If someone can think of a way to send N to -1
2522 and leave alone all values in the range 0..N-1 (where N is a
2523 power of two), cheaper than this test-and-branch, please add it.
2525 The test-and-branch is done after the operation itself, in case
2526 the operation sets condition codes that can be recycled for this.
2527 (This is true on i386, for instance.) */
2529 rtx_code_label *nonzero_label = gen_label_rtx ();
2530 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2531 mode, true, nonzero_label);
2533 convert_move (temp, GEN_INT (-1), false);
2534 emit_label (nonzero_label);
2537 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2538 to produce a value in the range 0..bitsize. */
2539 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2540 target, false, OPTAB_DIRECT);
2541 if (!temp)
2542 goto fail;
2544 seq = get_insns ();
2545 end_sequence ();
2547 add_equal_note (seq, temp, FFS, op0, 0);
2548 emit_insn (seq);
2549 return temp;
2551 fail:
2552 end_sequence ();
2553 return 0;
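/* Illustrative sketch, not part of GCC: with the zero case handled by
   the test and branch above, the whole expansion behaves like

     static int
     ffs32 (unsigned int x)
     {
       return x != 0 ? __builtin_ctz (x) + 1 : 0;
     }  */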
2556 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2557 conditions, VAL may already be a SUBREG against which we cannot generate
2558 a further SUBREG. In this case, we expect forcing the value into a
2559 register will work around the situation. */
2561 static rtx
2562 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2563 machine_mode imode)
2565 rtx ret;
2566 ret = lowpart_subreg (omode, val, imode);
2567 if (ret == NULL)
2569 val = force_reg (imode, val);
2570 ret = lowpart_subreg (omode, val, imode);
2571 gcc_assert (ret != NULL);
2573 return ret;
2576 /* Expand a floating point absolute value or negation operation via a
2577 logical operation on the sign bit. */
2579 static rtx
2580 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2581 rtx op0, rtx target)
2583 const struct real_format *fmt;
2584 int bitpos, word, nwords, i;
2585 scalar_int_mode imode;
2586 rtx temp;
2587 rtx_insn *insns;
2589 /* The format has to have a simple sign bit. */
2590 fmt = REAL_MODE_FORMAT (mode);
2591 if (fmt == NULL)
2592 return NULL_RTX;
2594 bitpos = fmt->signbit_rw;
2595 if (bitpos < 0)
2596 return NULL_RTX;
2598 /* Don't create negative zeros if the format doesn't support them. */
2599 if (code == NEG && !fmt->has_signed_zero)
2600 return NULL_RTX;
2602 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2604 if (!int_mode_for_mode (mode).exists (&imode))
2605 return NULL_RTX;
2606 word = 0;
2607 nwords = 1;
2609 else
2611 imode = word_mode;
2613 if (FLOAT_WORDS_BIG_ENDIAN)
2614 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2615 else
2616 word = bitpos / BITS_PER_WORD;
2617 bitpos = bitpos % BITS_PER_WORD;
2618 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2621 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2622 if (code == ABS)
2623 mask = ~mask;
2625 if (target == 0
2626 || target == op0
2627 || (nwords > 1 && !valid_multiword_target_p (target)))
2628 target = gen_reg_rtx (mode);
2630 if (nwords > 1)
2632 start_sequence ();
2634 for (i = 0; i < nwords; ++i)
2636 rtx targ_piece = operand_subword (target, i, 1, mode);
2637 rtx op0_piece = operand_subword_force (op0, i, mode);
2639 if (i == word)
2641 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2642 op0_piece,
2643 immed_wide_int_const (mask, imode),
2644 targ_piece, 1, OPTAB_LIB_WIDEN);
2645 if (temp != targ_piece)
2646 emit_move_insn (targ_piece, temp);
2648 else
2649 emit_move_insn (targ_piece, op0_piece);
2652 insns = get_insns ();
2653 end_sequence ();
2655 emit_insn (insns);
2657 else
2659 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2660 gen_lowpart (imode, op0),
2661 immed_wide_int_const (mask, imode),
2662 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2663 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2665 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2666 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2667 target);
2670 return target;
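/* Illustrative sketch, not part of GCC, assuming IEEE single precision
   and <string.h>: the single-word path above is the RTL analogue of
   flipping or clearing the sign bit through an integer view:

     static float
     fnegabs_bits (float x, int is_abs)
     {
       unsigned int u;
       memcpy (&u, &x, sizeof u);
       if (is_abs)
         u &= ~0x80000000u;         /* ABS: and with ~mask */
       else
         u ^= 0x80000000u;          /* NEG: xor with mask */
       memcpy (&x, &u, sizeof x);
       return x;
     }  */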
2673 /* As expand_unop, but will fail rather than attempt the operation in a
2674 different mode or with a libcall. */
2675 static rtx
2676 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2677 int unsignedp)
2679 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2681 struct expand_operand ops[2];
2682 enum insn_code icode = optab_handler (unoptab, mode);
2683 rtx_insn *last = get_last_insn ();
2684 rtx_insn *pat;
2686 create_output_operand (&ops[0], target, mode);
2687 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2688 pat = maybe_gen_insn (icode, 2, ops);
2689 if (pat)
2691 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2692 && ! add_equal_note (pat, ops[0].value,
2693 optab_to_code (unoptab),
2694 ops[1].value, NULL_RTX))
2696 delete_insns_since (last);
2697 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2700 emit_insn (pat);
2702 return ops[0].value;
2705 return 0;
2708 /* Generate code to perform an operation specified by UNOPTAB
2709 on operand OP0, with result having machine-mode MODE.
2711 UNSIGNEDP is for the case where we have to widen the operands
2712 to perform the operation. It says to use zero-extension.
2714 If TARGET is nonzero, the value
2715 is generated there, if it is convenient to do so.
2716 In all cases an rtx is returned for the locus of the value;
2717 this may or may not be TARGET. */
2720 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2721 int unsignedp)
2723 enum mode_class mclass = GET_MODE_CLASS (mode);
2724 machine_mode wider_mode;
2725 scalar_int_mode int_mode;
2726 scalar_float_mode float_mode;
2727 rtx temp;
2728 rtx libfunc;
2730 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2731 if (temp)
2732 return temp;
2734 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2736 /* Widening (or narrowing) clz needs special treatment. */
2737 if (unoptab == clz_optab)
2739 if (is_a <scalar_int_mode> (mode, &int_mode))
2741 temp = widen_leading (int_mode, op0, target, unoptab);
2742 if (temp)
2743 return temp;
2745 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2746 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2748 temp = expand_doubleword_clz (int_mode, op0, target);
2749 if (temp)
2750 return temp;
2754 goto try_libcall;
2757 if (unoptab == clrsb_optab)
2759 if (is_a <scalar_int_mode> (mode, &int_mode))
2761 temp = widen_leading (int_mode, op0, target, unoptab);
2762 if (temp)
2763 return temp;
2765 goto try_libcall;
2768 if (unoptab == popcount_optab
2769 && is_a <scalar_int_mode> (mode, &int_mode)
2770 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2771 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2772 && optimize_insn_for_speed_p ())
2774 temp = expand_doubleword_popcount (int_mode, op0, target);
2775 if (temp)
2776 return temp;
2779 if (unoptab == parity_optab
2780 && is_a <scalar_int_mode> (mode, &int_mode)
2781 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2782 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2783 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2784 && optimize_insn_for_speed_p ())
2786 temp = expand_doubleword_parity (int_mode, op0, target);
2787 if (temp)
2788 return temp;
2791 /* Widening (or narrowing) bswap needs special treatment. */
2792 if (unoptab == bswap_optab)
2794 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2795 or ROTATERT. First try these directly; if this fails, then try the
2796 obvious pair of shifts with allowed widening, as this will probably
2797 always be more efficient than the other fallback methods.
2798 if (mode == HImode)
2800 rtx_insn *last;
2801 rtx temp1, temp2;
2803 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2805 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2806 unsignedp, OPTAB_DIRECT);
2807 if (temp)
2808 return temp;
2811 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2813 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2814 unsignedp, OPTAB_DIRECT);
2815 if (temp)
2816 return temp;
2819 last = get_last_insn ();
2821 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2822 unsignedp, OPTAB_WIDEN);
2823 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2824 unsignedp, OPTAB_WIDEN);
2825 if (temp1 && temp2)
2827 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2828 unsignedp, OPTAB_WIDEN);
2829 if (temp)
2830 return temp;
2833 delete_insns_since (last);
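/* Illustrative sketch, not part of GCC: for 16 bits both the rotate
   and the shift/ior fallback above compute

     static unsigned short
     bswap16_shifts (unsigned short x)
     {
       return (unsigned short) ((x << 8) | (x >> 8));
     }  */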
2836 if (is_a <scalar_int_mode> (mode, &int_mode))
2838 temp = widen_bswap (int_mode, op0, target);
2839 if (temp)
2840 return temp;
2842 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2843 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2845 temp = expand_doubleword_bswap (mode, op0, target);
2846 if (temp)
2847 return temp;
2851 goto try_libcall;
2854 if (CLASS_HAS_WIDER_MODES_P (mclass))
2855 FOR_EACH_WIDER_MODE (wider_mode, mode)
2857 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2859 rtx xop0 = op0;
2860 rtx_insn *last = get_last_insn ();
2862 /* For certain operations, we need not actually extend
2863 the narrow operand, as long as we will truncate the
2864 results to the same narrowness. */
2866 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2867 (unoptab == neg_optab
2868 || unoptab == one_cmpl_optab)
2869 && mclass == MODE_INT);
2871 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2872 unsignedp);
2874 if (temp)
2876 if (mclass != MODE_INT
2877 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2879 if (target == 0)
2880 target = gen_reg_rtx (mode);
2881 convert_move (target, temp, 0);
2882 return target;
2884 else
2885 return gen_lowpart (mode, temp);
2887 else
2888 delete_insns_since (last);
2892 /* These can be done a word at a time. */
2893 if (unoptab == one_cmpl_optab
2894 && is_int_mode (mode, &int_mode)
2895 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
2896 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2898 int i;
2899 rtx_insn *insns;
2901 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2902 target = gen_reg_rtx (int_mode);
2904 start_sequence ();
2906 /* Do the actual arithmetic. */
2907 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
2909 rtx target_piece = operand_subword (target, i, 1, int_mode);
2910 rtx x = expand_unop (word_mode, unoptab,
2911 operand_subword_force (op0, i, int_mode),
2912 target_piece, unsignedp);
2914 if (target_piece != x)
2915 emit_move_insn (target_piece, x);
2918 insns = get_insns ();
2919 end_sequence ();
2921 emit_insn (insns);
2922 return target;
2925 if (optab_to_code (unoptab) == NEG)
2927 /* Try negating floating point values by flipping the sign bit. */
2928 if (is_a <scalar_float_mode> (mode, &float_mode))
2930 temp = expand_absneg_bit (NEG, float_mode, op0, target);
2931 if (temp)
2932 return temp;
2935 /* If there is no negation pattern, and we have no negative zero,
2936 try subtracting from zero. */
2937 if (!HONOR_SIGNED_ZEROS (mode))
2939 temp = expand_binop (mode, (unoptab == negv_optab
2940 ? subv_optab : sub_optab),
2941 CONST0_RTX (mode), op0, target,
2942 unsignedp, OPTAB_DIRECT);
2943 if (temp)
2944 return temp;
2948 /* Try calculating parity (x) as popcount (x) % 2. */
2949 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
2951 temp = expand_parity (int_mode, op0, target);
2952 if (temp)
2953 return temp;
2956 /* Try implementing ffs (x) in terms of clz (x). */
2957 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
2959 temp = expand_ffs (int_mode, op0, target);
2960 if (temp)
2961 return temp;
2964 /* Try implementing ctz (x) in terms of clz (x). */
2965 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
2967 temp = expand_ctz (int_mode, op0, target);
2968 if (temp)
2969 return temp;
2972 try_libcall:
2973 /* Now try a library call in this mode. */
2974 libfunc = optab_libfunc (unoptab, mode);
2975 if (libfunc)
2977 rtx_insn *insns;
2978 rtx value;
2979 rtx eq_value;
2980 machine_mode outmode = mode;
2982 /* All of these functions return small values. Thus we choose to
2983 have them return something that isn't a double-word. */
2984 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2985 || unoptab == clrsb_optab || unoptab == popcount_optab
2986 || unoptab == parity_optab)
2987 outmode
2988 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2989 optab_libfunc (unoptab, mode)));
2991 start_sequence ();
2993 /* Pass 1 for NO_QUEUE so we don't lose any increments
2994 if the libcall is cse'd or moved. */
2995 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2996 op0, mode);
2997 insns = get_insns ();
2998 end_sequence ();
3000 target = gen_reg_rtx (outmode);
3001 bool trapv = trapv_unoptab_p (unoptab);
3002 if (trapv)
3003 eq_value = NULL_RTX;
3004 else
3006 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3007 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3008 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3009 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3010 eq_value = simplify_gen_unary (ZERO_EXTEND,
3011 outmode, eq_value, mode);
3013 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3015 return target;
3018 /* It can't be done in this mode. Can we do it in a wider mode? */
3020 if (CLASS_HAS_WIDER_MODES_P (mclass))
3022 FOR_EACH_WIDER_MODE (wider_mode, mode)
3024 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3025 || optab_libfunc (unoptab, wider_mode))
3027 rtx xop0 = op0;
3028 rtx_insn *last = get_last_insn ();
3030 /* For certain operations, we need not actually extend
3031 the narrow operand, as long as we will truncate the
3032 results to the same narrowness. */
3033 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3034 (unoptab == neg_optab
3035 || unoptab == one_cmpl_optab
3036 || unoptab == bswap_optab)
3037 && mclass == MODE_INT);
3039 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3040 unsignedp);
3042 /* If we are generating clz using wider mode, adjust the
3043 result. Similarly for clrsb. */
3044 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3045 && temp != 0)
3047 scalar_int_mode wider_int_mode
3048 = as_a <scalar_int_mode> (wider_mode);
3049 int_mode = as_a <scalar_int_mode> (mode);
3050 temp = expand_binop
3051 (wider_mode, sub_optab, temp,
3052 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3053 - GET_MODE_PRECISION (int_mode),
3054 wider_int_mode),
3055 target, true, OPTAB_DIRECT);
3058 /* Likewise for bswap. */
3059 if (unoptab == bswap_optab && temp != 0)
3061 scalar_int_mode wider_int_mode
3062 = as_a <scalar_int_mode> (wider_mode);
3063 int_mode = as_a <scalar_int_mode> (mode);
3064 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3065 == GET_MODE_BITSIZE (wider_int_mode)
3066 && GET_MODE_PRECISION (int_mode)
3067 == GET_MODE_BITSIZE (int_mode));
3069 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3070 GET_MODE_BITSIZE (wider_int_mode)
3071 - GET_MODE_BITSIZE (int_mode),
3072 NULL_RTX, true);
3075 if (temp)
3077 if (mclass != MODE_INT)
3079 if (target == 0)
3080 target = gen_reg_rtx (mode);
3081 convert_move (target, temp, 0);
3082 return target;
3084 else
3085 return gen_lowpart (mode, temp);
3087 else
3088 delete_insns_since (last);
3093 /* One final attempt at implementing negation via subtraction,
3094 this time allowing widening of the operand. */
3095 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3097 rtx temp;
3098 temp = expand_binop (mode,
3099 unoptab == negv_optab ? subv_optab : sub_optab,
3100 CONST0_RTX (mode), op0,
3101 target, unsignedp, OPTAB_LIB_WIDEN);
3102 if (temp)
3103 return temp;
3106 return 0;
3109 /* Emit code to compute the absolute value of OP0, with result to
3110 TARGET if convenient. (TARGET may be 0.) The return value says
3111 where the result actually is to be found.
3113 MODE is the mode of the operand; the mode of the result is
3114 different but can be deduced from MODE.
3119 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3120 int result_unsignedp)
3122 rtx temp;
3124 if (GET_MODE_CLASS (mode) != MODE_INT
3125 || ! flag_trapv)
3126 result_unsignedp = 1;
3128 /* First try to do it with a special abs instruction. */
3129 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3130 op0, target, 0);
3131 if (temp != 0)
3132 return temp;
3134 /* For floating point modes, try clearing the sign bit. */
3135 scalar_float_mode float_mode;
3136 if (is_a <scalar_float_mode> (mode, &float_mode))
3138 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3139 if (temp)
3140 return temp;
3143 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3144 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3145 && !HONOR_SIGNED_ZEROS (mode))
3147 rtx_insn *last = get_last_insn ();
3149 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3150 op0, NULL_RTX, 0);
3151 if (temp != 0)
3152 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3153 OPTAB_WIDEN);
3155 if (temp != 0)
3156 return temp;
3158 delete_insns_since (last);
3161 /* If this machine has expensive jumps, we can do integer absolute
3162 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3163 where W is the width of MODE. */
3165 scalar_int_mode int_mode;
3166 if (is_int_mode (mode, &int_mode)
3167 && BRANCH_COST (optimize_insn_for_speed_p (),
3168 false) >= 2)
3170 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3171 GET_MODE_PRECISION (int_mode) - 1,
3172 NULL_RTX, 0);
3174 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3175 OPTAB_LIB_WIDEN);
3176 if (temp != 0)
3177 temp = expand_binop (int_mode,
3178 result_unsignedp ? sub_optab : subv_optab,
3179 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3181 if (temp != 0)
3182 return temp;
3185 return NULL_RTX;
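/* Worked example of the shift trick above for W = 32, illustrative
   only (like abs itself, it overflows for INT_MIN):

     static int
     iabs32 (int x)
     {
       int m = x >> 31;             /* 0 if x >= 0, -1 if x < 0 */
       return (x ^ m) - m;
     }  */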
3189 expand_abs (machine_mode mode, rtx op0, rtx target,
3190 int result_unsignedp, int safe)
3192 rtx temp;
3193 rtx_code_label *op1;
3195 if (GET_MODE_CLASS (mode) != MODE_INT
3196 || ! flag_trapv)
3197 result_unsignedp = 1;
3199 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3200 if (temp != 0)
3201 return temp;
3203 /* If that does not win, use conditional jump and negate. */
3205 /* It is safe to use the target if it is the same
3206 as the source, provided it is also a pseudo register. */
3207 if (op0 == target && REG_P (op0)
3208 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3209 safe = 1;
3211 op1 = gen_label_rtx ();
3212 if (target == 0 || ! safe
3213 || GET_MODE (target) != mode
3214 || (MEM_P (target) && MEM_VOLATILE_P (target))
3215 || (REG_P (target)
3216 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3217 target = gen_reg_rtx (mode);
3219 emit_move_insn (target, op0);
3220 NO_DEFER_POP;
3222 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3223 NULL_RTX, NULL, op1,
3224 profile_probability::uninitialized ());
3226 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3227 target, target, 0);
3228 if (op0 != target)
3229 emit_move_insn (target, op0);
3230 emit_label (op1);
3231 OK_DEFER_POP;
3232 return target;
3235 /* Emit code to compute the one's complement absolute value of OP0
3236 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3237 (TARGET may be NULL_RTX.) The return value says where the result
3238 actually is to be found.
3240 MODE is the mode of the operand; the mode of the result is
3241 different but can be deduced from MODE. */
3244 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3246 rtx temp;
3248 /* Not applicable for floating point modes. */
3249 if (FLOAT_MODE_P (mode))
3250 return NULL_RTX;
3252 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3253 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3255 rtx_insn *last = get_last_insn ();
3257 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3258 if (temp != 0)
3259 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3260 OPTAB_WIDEN);
3262 if (temp != 0)
3263 return temp;
3265 delete_insns_since (last);
3268 /* If this machine has expensive jumps, we can do one's complement
3269 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3271 scalar_int_mode int_mode;
3272 if (is_int_mode (mode, &int_mode)
3273 && BRANCH_COST (optimize_insn_for_speed_p (),
3274 false) >= 2)
3276 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3277 GET_MODE_PRECISION (int_mode) - 1,
3278 NULL_RTX, 0);
3280 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3281 OPTAB_LIB_WIDEN);
3283 if (temp != 0)
3284 return temp;
3287 return NULL_RTX;
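/* Worked example of the jump-free form above for W = 32, illustrative
   only:

     static int
     ones_abs32 (int x)
     {
       int m = x >> 31;
       return x ^ m;                /* x if x >= 0, ~x otherwise */
     }  */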
3290 /* A subroutine of expand_copysign, perform the copysign operation using the
3291 abs and neg primitives advertised to exist on the target. The assumption
3292 is that we have a split register file, and leaving op0 in fp registers,
3293 and not playing with subregs so much, will help the register allocator. */
3295 static rtx
3296 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3297 int bitpos, bool op0_is_abs)
3299 scalar_int_mode imode;
3300 enum insn_code icode;
3301 rtx sign;
3302 rtx_code_label *label;
3304 if (target == op1)
3305 target = NULL_RTX;
3307 /* Check if the back end provides an insn that handles signbit for the
3308 argument's mode. */
3309 icode = optab_handler (signbit_optab, mode);
3310 if (icode != CODE_FOR_nothing)
3312 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3313 sign = gen_reg_rtx (imode);
3314 emit_unop_insn (icode, sign, op1, UNKNOWN);
3316 else
3318 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3320 if (!int_mode_for_mode (mode).exists (&imode))
3321 return NULL_RTX;
3322 op1 = gen_lowpart (imode, op1);
3324 else
3326 int word;
3328 imode = word_mode;
3329 if (FLOAT_WORDS_BIG_ENDIAN)
3330 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3331 else
3332 word = bitpos / BITS_PER_WORD;
3333 bitpos = bitpos % BITS_PER_WORD;
3334 op1 = operand_subword_force (op1, word, mode);
3337 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3338 sign = expand_binop (imode, and_optab, op1,
3339 immed_wide_int_const (mask, imode),
3340 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3343 if (!op0_is_abs)
3345 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3346 if (op0 == NULL)
3347 return NULL_RTX;
3348 target = op0;
3350 else
3352 if (target == NULL_RTX)
3353 target = copy_to_reg (op0);
3354 else
3355 emit_move_insn (target, op0);
3358 label = gen_label_rtx ();
3359 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3361 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3362 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3363 else
3364 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3365 if (op0 != target)
3366 emit_move_insn (target, op0);
3368 emit_label (label);
3370 return target;
3374 /* A subroutine of expand_copysign, perform the entire copysign operation
3375 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3376 is true if op0 is known to have its sign bit clear. */
3378 static rtx
3379 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3380 int bitpos, bool op0_is_abs)
3382 scalar_int_mode imode;
3383 int word, nwords, i;
3384 rtx temp;
3385 rtx_insn *insns;
3387 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3389 if (!int_mode_for_mode (mode).exists (&imode))
3390 return NULL_RTX;
3391 word = 0;
3392 nwords = 1;
3394 else
3396 imode = word_mode;
3398 if (FLOAT_WORDS_BIG_ENDIAN)
3399 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3400 else
3401 word = bitpos / BITS_PER_WORD;
3402 bitpos = bitpos % BITS_PER_WORD;
3403 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3406 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3408 if (target == 0
3409 || target == op0
3410 || target == op1
3411 || (nwords > 1 && !valid_multiword_target_p (target)))
3412 target = gen_reg_rtx (mode);
3414 if (nwords > 1)
3416 start_sequence ();
3418 for (i = 0; i < nwords; ++i)
3420 rtx targ_piece = operand_subword (target, i, 1, mode);
3421 rtx op0_piece = operand_subword_force (op0, i, mode);
3423 if (i == word)
3425 if (!op0_is_abs)
3426 op0_piece
3427 = expand_binop (imode, and_optab, op0_piece,
3428 immed_wide_int_const (~mask, imode),
3429 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3430 op1 = expand_binop (imode, and_optab,
3431 operand_subword_force (op1, i, mode),
3432 immed_wide_int_const (mask, imode),
3433 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3435 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3436 targ_piece, 1, OPTAB_LIB_WIDEN);
3437 if (temp != targ_piece)
3438 emit_move_insn (targ_piece, temp);
3440 else
3441 emit_move_insn (targ_piece, op0_piece);
3444 insns = get_insns ();
3445 end_sequence ();
3447 emit_insn (insns);
3449 else
3451 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3452 immed_wide_int_const (mask, imode),
3453 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3455 op0 = gen_lowpart (imode, op0);
3456 if (!op0_is_abs)
3457 op0 = expand_binop (imode, and_optab, op0,
3458 immed_wide_int_const (~mask, imode),
3459 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3461 temp = expand_binop (imode, ior_optab, op0, op1,
3462 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3463 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3466 return target;
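/* Illustrative sketch, not part of GCC, assuming IEEE single precision
   and <string.h>: the single-word path above combines the operands as
   (op0 & ~mask) | (op1 & mask), i.e.

     static float
     copysignf_bits (float x, float y)
     {
       unsigned int ux, uy;
       memcpy (&ux, &x, sizeof ux);
       memcpy (&uy, &y, sizeof uy);
       ux = (ux & ~0x80000000u) | (uy & 0x80000000u);
       memcpy (&x, &ux, sizeof x);
       return x;
     }  */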
3469 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3470 scalar floating point mode. Return NULL if we do not know how to
3471 expand the operation inline. */
3474 expand_copysign (rtx op0, rtx op1, rtx target)
3476 scalar_float_mode mode;
3477 const struct real_format *fmt;
3478 bool op0_is_abs;
3479 rtx temp;
3481 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3482 gcc_assert (GET_MODE (op1) == mode);
3484 /* First try to do it with a special instruction. */
3485 temp = expand_binop (mode, copysign_optab, op0, op1,
3486 target, 0, OPTAB_DIRECT);
3487 if (temp)
3488 return temp;
3490 fmt = REAL_MODE_FORMAT (mode);
3491 if (fmt == NULL || !fmt->has_signed_zero)
3492 return NULL_RTX;
3494 op0_is_abs = false;
3495 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3497 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3498 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3499 op0_is_abs = true;
3502 if (fmt->signbit_ro >= 0
3503 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3504 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3505 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3507 temp = expand_copysign_absneg (mode, op0, op1, target,
3508 fmt->signbit_ro, op0_is_abs);
3509 if (temp)
3510 return temp;
3513 if (fmt->signbit_rw < 0)
3514 return NULL_RTX;
3515 return expand_copysign_bit (mode, op0, op1, target,
3516 fmt->signbit_rw, op0_is_abs);
3519 /* Generate an instruction whose insn-code is INSN_CODE,
3520 with two operands: an output TARGET and an input OP0.
3521 TARGET *must* be nonzero, and the output is always stored there.
3522 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3523 the value that is stored into TARGET.
3525 Return false if expansion failed. */
3527 bool
3528 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3529 enum rtx_code code)
3531 struct expand_operand ops[2];
3532 rtx_insn *pat;
3534 create_output_operand (&ops[0], target, GET_MODE (target));
3535 create_input_operand (&ops[1], op0, GET_MODE (op0));
3536 pat = maybe_gen_insn (icode, 2, ops);
3537 if (!pat)
3538 return false;
3540 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3541 && code != UNKNOWN)
3542 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3544 emit_insn (pat);
3546 if (ops[0].value != target)
3547 emit_move_insn (target, ops[0].value);
3548 return true;
3550 /* Generate an instruction whose insn-code is INSN_CODE,
3551 with two operands: an output TARGET and an input OP0.
3552 TARGET *must* be nonzero, and the output is always stored there.
3553 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3554 the value that is stored into TARGET. */
3556 void
3557 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3559 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3560 gcc_assert (ok);
3563 struct no_conflict_data
3565 rtx target;
3566 rtx_insn *first, *insn;
3567 bool must_stay;
3570 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3571 the currently examined clobber / store has to stay in the list of
3572 insns that constitute the actual libcall block. */
3573 static void
3574 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3576 struct no_conflict_data *p = (struct no_conflict_data *) p0;
3578 /* If this insn directly contributes to setting the target, it must stay. */
3579 if (reg_overlap_mentioned_p (p->target, dest))
3580 p->must_stay = true;
3581 /* If we haven't committed to keeping any other insns in the list yet,
3582 there is nothing more to check. */
3583 else if (p->insn == p->first)
3584 return;
3585 /* If this insn sets / clobbers a register that feeds one of the insns
3586 already in the list, this insn has to stay too. */
3587 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3588 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3589 || reg_used_between_p (dest, p->first, p->insn)
3590 /* Likewise if this insn depends on a register set by a previous
3591 insn in the list, or if it sets a result (presumably a hard
3592 register) that is set or clobbered by a previous insn.
3593 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3594 SET_DEST perform the former check on the address, and the latter
3595 check on the MEM. */
3596 || (GET_CODE (set) == SET
3597 && (modified_in_p (SET_SRC (set), p->first)
3598 || modified_in_p (SET_DEST (set), p->first)
3599 || modified_between_p (SET_SRC (set), p->first, p->insn)
3600 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3601 p->must_stay = true;
3605 /* Emit code to make a call to a constant function or a library call.
3607 INSNS is a list containing all insns emitted in the call.
3608 These insns leave the result in RESULT. Our job is to copy RESULT
3609 to TARGET, which is logically equivalent to EQUIV.
3611 We first emit any insns that set a pseudo on the assumption that these are
3612 loading constants into registers; doing so allows them to be safely cse'ed
3613 between blocks. Then we emit all the other insns in the block, followed by
3614 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3615 note with an operand of EQUIV. */
3617 static void
3618 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3619 bool equiv_may_trap)
3621 rtx final_dest = target;
3622 rtx_insn *next, *last, *insn;
3624 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3625 into a MEM later. Protect the libcall block from this change. */
3626 if (! REG_P (target) || REG_USERVAR_P (target))
3627 target = gen_reg_rtx (GET_MODE (target));
3629 /* If we're using non-call exceptions, a libcall corresponding to an
3630 operation that may trap may also trap. */
3631 /* ??? See the comment in front of make_reg_eh_region_note. */
3632 if (cfun->can_throw_non_call_exceptions
3633 && (equiv_may_trap || may_trap_p (equiv)))
3635 for (insn = insns; insn; insn = NEXT_INSN (insn))
3636 if (CALL_P (insn))
3638 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3639 if (note)
3641 int lp_nr = INTVAL (XEXP (note, 0));
3642 if (lp_nr == 0 || lp_nr == INT_MIN)
3643 remove_note (insn, note);
3647 else
3649 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3650 reg note to indicate that this call cannot throw or execute a nonlocal
3651 goto (unless there is already a REG_EH_REGION note, in which case
3652 we update it). */
3653 for (insn = insns; insn; insn = NEXT_INSN (insn))
3654 if (CALL_P (insn))
3655 make_reg_eh_region_note_nothrow_nononlocal (insn);
3658 /* First emit all insns that set pseudos. Remove them from the list as
3659 we go. Avoid insns that set pseudos which were referenced in previous
3660 insns. These can be generated by move_by_pieces, for example,
3661 to update an address. Similarly, avoid insns that reference things
3662 set in previous insns. */
3664 for (insn = insns; insn; insn = next)
3666 rtx set = single_set (insn);
3668 next = NEXT_INSN (insn);
3670 if (set != 0 && REG_P (SET_DEST (set))
3671 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3673 struct no_conflict_data data;
3675 data.target = const0_rtx;
3676 data.first = insns;
3677 data.insn = insn;
3678 data.must_stay = 0;
3679 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3680 if (! data.must_stay)
3682 if (PREV_INSN (insn))
3683 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3684 else
3685 insns = next;
3687 if (next)
3688 SET_PREV_INSN (next) = PREV_INSN (insn);
3690 add_insn (insn);
3694 /* Some ports use a loop to copy large arguments onto the stack.
3695 Don't move anything outside such a loop. */
3696 if (LABEL_P (insn))
3697 break;
3700 /* Write the remaining insns followed by the final copy. */
3701 for (insn = insns; insn; insn = next)
3703 next = NEXT_INSN (insn);
3705 add_insn (insn);
3708 last = emit_move_insn (target, result);
3709 if (equiv)
3710 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3712 if (final_dest != target)
3713 emit_move_insn (final_dest, target);
3716 void
3717 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3719 emit_libcall_block_1 (insns, target, result, equiv, false);
3722 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3723 PURPOSE describes how this comparison will be used. CODE is the rtx
3724 comparison code we will be using.
3726 ??? Actually, CODE is slightly weaker than that. A target is still
3727 required to implement all of the normal bcc operations, but not
3728 required to implement all (or any) of the unordered bcc operations. */
3730 int
3731 can_compare_p (enum rtx_code code, machine_mode mode,
3732 enum can_compare_purpose purpose)
3734 rtx test;
3735 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3736 do
3738 enum insn_code icode;
3740 if (purpose == ccp_jump
3741 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3742 && insn_operand_matches (icode, 0, test))
3743 return 1;
3744 if (purpose == ccp_store_flag
3745 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3746 && insn_operand_matches (icode, 1, test))
3747 return 1;
3748 if (purpose == ccp_cmov
3749 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3750 return 1;
3752 mode = GET_MODE_WIDER_MODE (mode).else_void ();
3753 PUT_MODE (test, mode);
3755 while (mode != VOIDmode);
3757 return 0;
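/* Illustrative usage sketch (not taken from the callers in this file):
   a caller that wants to know whether a DImode equality can feed a
   conditional branch would test

     if (can_compare_p (EQ, DImode, ccp_jump))
       ...

   and, thanks to the widening loop above, this also succeeds when only
   a wider-mode cbranch pattern exists.  */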
3760 /* This function is called when we are going to emit a compare instruction that
3761 compares the values found in X and Y, using the rtl operator COMPARISON.
3763 If they have mode BLKmode, then SIZE specifies the size of both operands.
3765 UNSIGNEDP nonzero says that the operands are unsigned;
3766 this matters if they need to be widened (as given by METHODS).
3768 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3769 if we failed to produce one.
3771 *PMODE is the mode of the inputs (in case they are const_int).
3773 This function performs all the setup necessary so that the caller only has
3774 to emit a single comparison insn. This setup can involve doing a BLKmode
3775 comparison or emitting a library call to perform the comparison if no insn
3776 is available to handle it.
3777 The values which are passed in through pointers can be modified; the caller
3778 should perform the comparison on the modified values. Constant
3779 comparisons must have already been folded. */
3781 static void
3782 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3783 int unsignedp, enum optab_methods methods,
3784 rtx *ptest, machine_mode *pmode)
3786 machine_mode mode = *pmode;
3787 rtx libfunc, test;
3788 machine_mode cmp_mode;
3789 enum mode_class mclass;
3791 /* The other methods are not needed. */
3792 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3793 || methods == OPTAB_LIB_WIDEN);
3795 /* If we are optimizing, force expensive constants into a register. */
3796 if (CONSTANT_P (x) && optimize
3797 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3798 > COSTS_N_INSNS (1)))
3799 x = force_reg (mode, x);
3801 if (CONSTANT_P (y) && optimize
3802 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3803 > COSTS_N_INSNS (1)))
3804 y = force_reg (mode, y);
3806 #if HAVE_cc0
3807 /* Make sure we have a canonical comparison. The RTL
3808 documentation states that canonical comparisons are required only
3809 for targets which have cc0. */
3810 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3811 #endif
3813 /* Don't let both operands fail to indicate the mode. */
3814 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3815 x = force_reg (mode, x);
3816 if (mode == VOIDmode)
3817 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3819 /* Handle all BLKmode compares. */
3821 if (mode == BLKmode)
3823 machine_mode result_mode;
3824 enum insn_code cmp_code;
3825 rtx result;
3826 rtx opalign
3827 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3829 gcc_assert (size);
3831 /* Try to use a memory block compare insn - any of cmpmem, cmpstr
3832 or cmpstrn will do. */
3833 opt_scalar_int_mode cmp_mode_iter;
3834 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
3836 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
3837 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3838 if (cmp_code == CODE_FOR_nothing)
3839 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3840 if (cmp_code == CODE_FOR_nothing)
3841 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3842 if (cmp_code == CODE_FOR_nothing)
3843 continue;
3845 /* Must make sure the size fits the insn's mode. */
3846 if (CONST_INT_P (size)
3847 ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
3848 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
3849 > GET_MODE_BITSIZE (cmp_mode)))
3850 continue;
3852 result_mode = insn_data[cmp_code].operand[0].mode;
3853 result = gen_reg_rtx (result_mode);
3854 size = convert_to_mode (cmp_mode, size, 1);
3855 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3857 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3858 *pmode = result_mode;
3859 return;
3862 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3863 goto fail;
3865 /* Otherwise call a library function. */
3866 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3868 x = result;
3869 y = const0_rtx;
3870 mode = TYPE_MODE (integer_type_node);
3871 methods = OPTAB_LIB_WIDEN;
3872 unsignedp = false;
3875 /* Don't allow operands to the compare to trap, as that can put the
3876 compare and branch in different basic blocks. */
3877 if (cfun->can_throw_non_call_exceptions)
3879 if (may_trap_p (x))
3880 x = copy_to_reg (x);
3881 if (may_trap_p (y))
3882 y = copy_to_reg (y);
3885 if (GET_MODE_CLASS (mode) == MODE_CC)
3887 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3888 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3889 gcc_assert (icode != CODE_FOR_nothing
3890 && insn_operand_matches (icode, 0, test));
3891 *ptest = test;
3892 return;
3895 mclass = GET_MODE_CLASS (mode);
3896 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3897 FOR_EACH_MODE_FROM (cmp_mode, mode)
3899 enum insn_code icode;
3900 icode = optab_handler (cbranch_optab, cmp_mode);
3901 if (icode != CODE_FOR_nothing
3902 && insn_operand_matches (icode, 0, test))
3904 rtx_insn *last = get_last_insn ();
3905 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3906 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3907 if (op0 && op1
3908 && insn_operand_matches (icode, 1, op0)
3909 && insn_operand_matches (icode, 2, op1))
3911 XEXP (test, 0) = op0;
3912 XEXP (test, 1) = op1;
3913 *ptest = test;
3914 *pmode = cmp_mode;
3915 return;
3917 delete_insns_since (last);
3920 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3921 break;
3924 if (methods != OPTAB_LIB_WIDEN)
3925 goto fail;
3927 if (!SCALAR_FLOAT_MODE_P (mode))
3929 rtx result;
3930 machine_mode ret_mode;
3932 /* Handle a libcall just for the mode we are using. */
3933 libfunc = optab_libfunc (cmp_optab, mode);
3934 gcc_assert (libfunc);
3936 /* If we want unsigned, and this mode has a distinct unsigned
3937 comparison routine, use that. */
3938 if (unsignedp)
3940 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3941 if (ulibfunc)
3942 libfunc = ulibfunc;
3945 ret_mode = targetm.libgcc_cmp_return_mode ();
3946 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3947 ret_mode, x, mode, y, mode);
3949 /* There are two kinds of comparison routines. Biased routines
3950 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3951 of gcc expect that the comparison operation is equivalent
3952 to the modified comparison. For signed comparisons compare the
3953 result against 1 in the biased case, and zero in the unbiased
3954 case. For unsigned comparisons always compare against 1 after
3955 biasing the unbiased result by adding 1. This gives us a way to
3956 represent LTU.
3957 The comparisons in the fixed-point helper library are always
3958 biased. */
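/* Illustrative sketch of the two conventions (the libfunc names depend
   on the mode): a biased routine such as libgcc's __ucmpdi2 returns 0
   if a < b, 1 if a == b and 2 if a > b, so a signed (a < b) becomes
   (result < 1).  An unbiased routine returns -1/0/1, so an unsigned
   (a < b) is tested as (result + 1 < 1) using LTU, which is why the
   code below adds 1 before comparing against 1.  */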
3959 x = result;
3960 y = const1_rtx;
3962 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3964 if (unsignedp)
3965 x = plus_constant (ret_mode, result, 1);
3966 else
3967 y = const0_rtx;
3970 *pmode = ret_mode;
3971 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3972 ptest, pmode);
3974 else
3975 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3977 return;
3979 fail:
3980 *ptest = NULL_RTX;
3983 /* Before emitting an insn with code ICODE, make sure that X, which is going
3984 to be used for operand OPNUM of the insn, is converted from mode MODE to
3985 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3986 that it is accepted by the operand predicate. Return the new value. */
3988 rtx
3989 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3990 machine_mode wider_mode, int unsignedp)
3992 if (mode != wider_mode)
3993 x = convert_modes (wider_mode, mode, x, unsignedp);
3995 if (!insn_operand_matches (icode, opnum, x))
3997 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3998 if (reload_completed)
3999 return NULL_RTX;
4000 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4001 return NULL_RTX;
4002 x = copy_to_mode_reg (op_mode, x);
4005 return x;
4008 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4009 we can do the branch. */
4011 static void
4012 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4013 profile_probability prob)
4015 machine_mode optab_mode;
4016 enum mode_class mclass;
4017 enum insn_code icode;
4018 rtx_insn *insn;
4020 mclass = GET_MODE_CLASS (mode);
4021 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4022 icode = optab_handler (cbranch_optab, optab_mode);
4024 gcc_assert (icode != CODE_FOR_nothing);
4025 gcc_assert (insn_operand_matches (icode, 0, test));
4026 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4027 XEXP (test, 1), label));
4028 if (prob.initialized_p ()
4029 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4030 && insn
4031 && JUMP_P (insn)
4032 && any_condjump_p (insn)
4033 && !find_reg_note (insn, REG_BR_PROB, 0))
4034 add_reg_br_prob_note (insn, prob);
4037 /* Generate code to compare X with Y so that the condition codes are
4038 set and to jump to LABEL if the condition is true. If X is a
4039 constant and Y is not a constant, then the comparison is swapped to
4040 ensure that the comparison RTL has the canonical form.
4042 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4043 need to be widened. UNSIGNEDP is also used to select the proper
4044 branch condition code.
4046 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4048 MODE is the mode of the inputs (in case they are const_int).
4050 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4051 It will be potentially converted into an unsigned variant based on
4052 UNSIGNEDP to select a proper jump instruction.
4054 PROB is the probability of jumping to LABEL. */
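/* Usage sketch (illustrative): to branch to LABEL when the SImode
   pseudos X and Y compare as X < Y unsigned, a caller would write

     emit_cmp_and_jump_insns (x, y, LT, NULL_RTX, SImode, 1, label,
                              profile_probability::uninitialized ());

   LT is turned into LTU internally because UNSIGNEDP is nonzero.  */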
4056 void
4057 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4058 machine_mode mode, int unsignedp, rtx label,
4059 profile_probability prob)
4061 rtx op0 = x, op1 = y;
4062 rtx test;
4064 /* Swap operands and condition to ensure canonical RTL. */
4065 if (swap_commutative_operands_p (x, y)
4066 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4068 op0 = y, op1 = x;
4069 comparison = swap_condition (comparison);
4072 /* If OP0 is still a constant, then both X and Y must be constants
4073 or the opposite comparison is not supported. Force X into a register
4074 to create canonical RTL. */
4075 if (CONSTANT_P (op0))
4076 op0 = force_reg (mode, op0);
4078 if (unsignedp)
4079 comparison = unsigned_condition (comparison);
4081 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4082 &test, &mode);
4083 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4087 /* Emit a library call comparison between floating point X and Y.
4088 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4090 static void
4091 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4092 rtx *ptest, machine_mode *pmode)
4094 enum rtx_code swapped = swap_condition (comparison);
4095 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4096 machine_mode orig_mode = GET_MODE (x);
4097 machine_mode mode;
4098 rtx true_rtx, false_rtx;
4099 rtx value, target, equiv;
4100 rtx_insn *insns;
4101 rtx libfunc = 0;
4102 bool reversed_p = false;
4103 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4105 FOR_EACH_MODE_FROM (mode, orig_mode)
4107 if (code_to_optab (comparison)
4108 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4109 break;
4111 if (code_to_optab (swapped)
4112 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4114 std::swap (x, y);
4115 comparison = swapped;
4116 break;
4119 if (code_to_optab (reversed)
4120 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4122 comparison = reversed;
4123 reversed_p = true;
4124 break;
4128 gcc_assert (mode != VOIDmode);
4130 if (mode != orig_mode)
4132 x = convert_to_mode (mode, x, 0);
4133 y = convert_to_mode (mode, y, 0);
4136 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4137 the RTL. This allows the RTL optimizers to delete the libcall if the
4138 condition can be determined at compile-time. */
4139 if (comparison == UNORDERED
4140 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4142 true_rtx = const_true_rtx;
4143 false_rtx = const0_rtx;
4145 else
4147 switch (comparison)
4149 case EQ:
4150 true_rtx = const0_rtx;
4151 false_rtx = const_true_rtx;
4152 break;
4154 case NE:
4155 true_rtx = const_true_rtx;
4156 false_rtx = const0_rtx;
4157 break;
4159 case GT:
4160 true_rtx = const1_rtx;
4161 false_rtx = const0_rtx;
4162 break;
4164 case GE:
4165 true_rtx = const0_rtx;
4166 false_rtx = constm1_rtx;
4167 break;
4169 case LT:
4170 true_rtx = constm1_rtx;
4171 false_rtx = const0_rtx;
4172 break;
4174 case LE:
4175 true_rtx = const0_rtx;
4176 false_rtx = const1_rtx;
4177 break;
4179 default:
4180 gcc_unreachable ();
4184 if (comparison == UNORDERED)
4186 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4187 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4188 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4189 temp, const_true_rtx, equiv);
4191 else
4193 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4194 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4195 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4196 equiv, true_rtx, false_rtx);
4199 start_sequence ();
4200 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4201 cmp_mode, x, mode, y, mode);
4202 insns = get_insns ();
4203 end_sequence ();
4205 target = gen_reg_rtx (cmp_mode);
4206 emit_libcall_block (insns, target, value, equiv);
4208 if (comparison == UNORDERED
4209 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4210 || reversed_p)
4211 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4212 else
4213 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4215 *pmode = cmp_mode;
4218 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4220 void
4221 emit_indirect_jump (rtx loc)
4223 if (!targetm.have_indirect_jump ())
4224 sorry ("indirect jumps are not available on this target");
4225 else
4227 struct expand_operand ops[1];
4228 create_address_operand (&ops[0], loc);
4229 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4230 emit_barrier ();
4235 /* Emit a conditional move instruction if the machine supports one for that
4236 condition and machine mode.
4238 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4239 the mode to use should they be constants. If it is VOIDmode, they cannot
4240 both be constants.
4242 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4243 should be stored there. MODE is the mode to use should they be constants.
4244 If it is VOIDmode, they cannot both be constants.
4246 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4247 is not supported. */
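/* Usage sketch (illustrative): to attempt TARGET = (A > B) ? X : Y
   for SImode pseudos, a caller would write

     rtx r = emit_conditional_move (target, GT, a, b, SImode,
                                    x, y, SImode, 0);

   and must cope with R being NULL_RTX when no movcc pattern fits.  */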
4249 rtx
4250 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4251 machine_mode cmode, rtx op2, rtx op3,
4252 machine_mode mode, int unsignedp)
4254 rtx comparison;
4255 rtx_insn *last;
4256 enum insn_code icode;
4257 enum rtx_code reversed;
4259 /* If the two source operands are identical, that's just a move. */
4261 if (rtx_equal_p (op2, op3))
4263 if (!target)
4264 target = gen_reg_rtx (mode);
4266 emit_move_insn (target, op3);
4267 return target;
4270 /* If one operand is constant, make it the second one. Only do this
4271 if the other operand is not constant as well. */
4273 if (swap_commutative_operands_p (op0, op1))
4275 std::swap (op0, op1);
4276 code = swap_condition (code);
4279 /* get_condition will prefer to generate LT and GT even if the old
4280 comparison was against zero, so undo that canonicalization here since
4281 comparisons against zero are cheaper. */
4282 if (code == LT && op1 == const1_rtx)
4283 code = LE, op1 = const0_rtx;
4284 else if (code == GT && op1 == constm1_rtx)
4285 code = GE, op1 = const0_rtx;
4287 if (cmode == VOIDmode)
4288 cmode = GET_MODE (op0);
4290 enum rtx_code orig_code = code;
4291 bool swapped = false;
4292 if (swap_commutative_operands_p (op2, op3)
4293 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4294 != UNKNOWN))
4296 std::swap (op2, op3);
4297 code = reversed;
4298 swapped = true;
4301 if (mode == VOIDmode)
4302 mode = GET_MODE (op2);
4304 icode = direct_optab_handler (movcc_optab, mode);
4306 if (icode == CODE_FOR_nothing)
4307 return NULL_RTX;
4309 if (!target)
4310 target = gen_reg_rtx (mode);
4312 for (int pass = 0; ; pass++)
4314 code = unsignedp ? unsigned_condition (code) : code;
4315 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4317 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4318 punt and let the caller figure out how best to deal with this
4319 situation. */
4320 if (COMPARISON_P (comparison))
4322 saved_pending_stack_adjust save;
4323 save_pending_stack_adjust (&save);
4324 last = get_last_insn ();
4325 do_pending_stack_adjust ();
4326 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4327 GET_CODE (comparison), NULL_RTX, unsignedp,
4328 OPTAB_WIDEN, &comparison, &cmode);
4329 if (comparison)
4331 struct expand_operand ops[4];
4333 create_output_operand (&ops[0], target, mode);
4334 create_fixed_operand (&ops[1], comparison);
4335 create_input_operand (&ops[2], op2, mode);
4336 create_input_operand (&ops[3], op3, mode);
4337 if (maybe_expand_insn (icode, 4, ops))
4339 if (ops[0].value != target)
4340 convert_move (target, ops[0].value, false);
4341 return target;
4344 delete_insns_since (last);
4345 restore_pending_stack_adjust (&save);
4348 if (pass == 1)
4349 return NULL_RTX;
4351 /* If the preferred op2/op3 order is not usable, retry with the other
4352 operand order; perhaps it will expand successfully. */
4353 if (swapped)
4354 code = orig_code;
4355 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4356 NULL))
4357 != UNKNOWN)
4358 code = reversed;
4359 else
4360 return NULL_RTX;
4361 std::swap (op2, op3);
4366 /* Emit a conditional negate or bitwise complement using the
4367 negcc or notcc optabs if available. Return NULL_RTX if such operations
4368 are not available. Otherwise return the RTX holding the result.
4369 TARGET is the desired destination of the result. COND is the comparison
4370 on which to negate. If COND is true, move into TARGET the negation
4371 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4372 CODE is either NEG or NOT. MODE is the machine mode in which the
4373 operation is performed. */
4375 rtx
4376 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4377 machine_mode mode, rtx cond, rtx op1,
4378 rtx op2)
4380 optab op = unknown_optab;
4381 if (code == NEG)
4382 op = negcc_optab;
4383 else if (code == NOT)
4384 op = notcc_optab;
4385 else
4386 gcc_unreachable ();
4388 insn_code icode = direct_optab_handler (op, mode);
4390 if (icode == CODE_FOR_nothing)
4391 return NULL_RTX;
4393 if (!target)
4394 target = gen_reg_rtx (mode);
4396 rtx_insn *last = get_last_insn ();
4397 struct expand_operand ops[4];
4399 create_output_operand (&ops[0], target, mode);
4400 create_fixed_operand (&ops[1], cond);
4401 create_input_operand (&ops[2], op1, mode);
4402 create_input_operand (&ops[3], op2, mode);
4404 if (maybe_expand_insn (icode, 4, ops))
4406 if (ops[0].value != target)
4407 convert_move (target, ops[0].value, false);
4409 return target;
4411 delete_insns_since (last);
4412 return NULL_RTX;
4415 /* Emit a conditional addition instruction if the machine supports one for that
4416 condition and machine mode.
4418 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4419 the mode to use should they be constants. If it is VOIDmode, they cannot
4420 both be constants.
4422 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4423 should be stored there. MODE is the mode to use should they be constants.
4424 If it is VOIDmode, they cannot both be constants.
4426 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4427 is not supported. */
4429 rtx
4430 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4431 machine_mode cmode, rtx op2, rtx op3,
4432 machine_mode mode, int unsignedp)
4434 rtx comparison;
4435 rtx_insn *last;
4436 enum insn_code icode;
4438 /* If one operand is constant, make it the second one. Only do this
4439 if the other operand is not constant as well. */
4441 if (swap_commutative_operands_p (op0, op1))
4443 std::swap (op0, op1);
4444 code = swap_condition (code);
4447 /* get_condition will prefer to generate LT and GT even if the old
4448 comparison was against zero, so undo that canonicalization here since
4449 comparisons against zero are cheaper. */
4450 if (code == LT && op1 == const1_rtx)
4451 code = LE, op1 = const0_rtx;
4452 else if (code == GT && op1 == constm1_rtx)
4453 code = GE, op1 = const0_rtx;
4455 if (cmode == VOIDmode)
4456 cmode = GET_MODE (op0);
4458 if (mode == VOIDmode)
4459 mode = GET_MODE (op2);
4461 icode = optab_handler (addcc_optab, mode);
4463 if (icode == CODE_FOR_nothing)
4464 return 0;
4466 if (!target)
4467 target = gen_reg_rtx (mode);
4469 code = unsignedp ? unsigned_condition (code) : code;
4470 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4472 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4473 return NULL and let the caller figure out how best to deal with this
4474 situation. */
4475 if (!COMPARISON_P (comparison))
4476 return NULL_RTX;
4478 do_pending_stack_adjust ();
4479 last = get_last_insn ();
4480 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4481 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4482 &comparison, &cmode);
4483 if (comparison)
4485 struct expand_operand ops[4];
4487 create_output_operand (&ops[0], target, mode);
4488 create_fixed_operand (&ops[1], comparison);
4489 create_input_operand (&ops[2], op2, mode);
4490 create_input_operand (&ops[3], op3, mode);
4491 if (maybe_expand_insn (icode, 4, ops))
4493 if (ops[0].value != target)
4494 convert_move (target, ops[0].value, false);
4495 return target;
4498 delete_insns_since (last);
4499 return NULL_RTX;
4502 /* These functions attempt to generate an insn body, rather than
4503 emitting the insn, but if the gen function already emits them, we
4504 make no attempt to turn them back into naked patterns. */
4506 /* Generate and return an insn body to add Y to X. */
4508 rtx_insn *
4509 gen_add2_insn (rtx x, rtx y)
4511 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4513 gcc_assert (insn_operand_matches (icode, 0, x));
4514 gcc_assert (insn_operand_matches (icode, 1, x));
4515 gcc_assert (insn_operand_matches (icode, 2, y));
4517 return GEN_FCN (icode) (x, x, y);
4520 /* Generate and return an insn body to add r1 and c,
4521 storing the result in r0. */
4523 rtx_insn *
4524 gen_add3_insn (rtx r0, rtx r1, rtx c)
4526 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4528 if (icode == CODE_FOR_nothing
4529 || !insn_operand_matches (icode, 0, r0)
4530 || !insn_operand_matches (icode, 1, r1)
4531 || !insn_operand_matches (icode, 2, c))
4532 return NULL;
4534 return GEN_FCN (icode) (r0, r1, c);
4537 int
4538 have_add2_insn (rtx x, rtx y)
4540 enum insn_code icode;
4542 gcc_assert (GET_MODE (x) != VOIDmode);
4544 icode = optab_handler (add_optab, GET_MODE (x));
4546 if (icode == CODE_FOR_nothing)
4547 return 0;
4549 if (!insn_operand_matches (icode, 0, x)
4550 || !insn_operand_matches (icode, 1, x)
4551 || !insn_operand_matches (icode, 2, y))
4552 return 0;
4554 return 1;
4557 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4559 rtx_insn *
4560 gen_addptr3_insn (rtx x, rtx y, rtx z)
4562 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4564 gcc_assert (insn_operand_matches (icode, 0, x));
4565 gcc_assert (insn_operand_matches (icode, 1, y));
4566 gcc_assert (insn_operand_matches (icode, 2, z));
4568 return GEN_FCN (icode) (x, y, z);
4571 /* Return true if the target implements an addptr pattern and X, Y,
4572 and Z are valid for the pattern predicates. */
4574 int
4575 have_addptr3_insn (rtx x, rtx y, rtx z)
4577 enum insn_code icode;
4579 gcc_assert (GET_MODE (x) != VOIDmode);
4581 icode = optab_handler (addptr3_optab, GET_MODE (x));
4583 if (icode == CODE_FOR_nothing)
4584 return 0;
4586 if (!insn_operand_matches (icode, 0, x)
4587 || !insn_operand_matches (icode, 1, y)
4588 || !insn_operand_matches (icode, 2, z))
4589 return 0;
4591 return 1;
4594 /* Generate and return an insn body to subtract Y from X. */
4596 rtx_insn *
4597 gen_sub2_insn (rtx x, rtx y)
4599 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4601 gcc_assert (insn_operand_matches (icode, 0, x));
4602 gcc_assert (insn_operand_matches (icode, 1, x));
4603 gcc_assert (insn_operand_matches (icode, 2, y));
4605 return GEN_FCN (icode) (x, x, y);
4608 /* Generate and return an insn body to subtract C from R1,
4609 storing the result in R0. */
4611 rtx_insn *
4612 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4614 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4616 if (icode == CODE_FOR_nothing
4617 || !insn_operand_matches (icode, 0, r0)
4618 || !insn_operand_matches (icode, 1, r1)
4619 || !insn_operand_matches (icode, 2, c))
4620 return NULL;
4622 return GEN_FCN (icode) (r0, r1, c);
4625 int
4626 have_sub2_insn (rtx x, rtx y)
4628 enum insn_code icode;
4630 gcc_assert (GET_MODE (x) != VOIDmode);
4632 icode = optab_handler (sub_optab, GET_MODE (x));
4634 if (icode == CODE_FOR_nothing)
4635 return 0;
4637 if (!insn_operand_matches (icode, 0, x)
4638 || !insn_operand_matches (icode, 1, x)
4639 || !insn_operand_matches (icode, 2, y))
4640 return 0;
4642 return 1;
4645 /* Generate the body of an insn to extend Y (with mode MFROM)
4646 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4648 rtx_insn *
4649 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4650 machine_mode mfrom, int unsignedp)
4652 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4653 return GEN_FCN (icode) (x, y);
4656 /* Generate code to convert FROM to floating point
4657 and store in TO. FROM must be fixed point and not VOIDmode.
4658 UNSIGNEDP nonzero means regard FROM as unsigned.
4659 Normally this is done by correcting the final value
4660 if it is negative. */
4662 void
4663 expand_float (rtx to, rtx from, int unsignedp)
4665 enum insn_code icode;
4666 rtx target = to;
4667 scalar_mode from_mode, to_mode;
4668 machine_mode fmode, imode;
4669 bool can_do_signed = false;
4671 /* Crash now, because we won't be able to decide which mode to use. */
4672 gcc_assert (GET_MODE (from) != VOIDmode);
4674 /* Look for an insn to do the conversion. Do it in the specified
4675 modes if possible; otherwise convert either input, output or both to
4676 wider mode. If the integer mode is wider than the mode of FROM,
4677 we can do the conversion signed even if the input is unsigned. */
4679 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4680 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4682 int doing_unsigned = unsignedp;
4684 if (fmode != GET_MODE (to)
4685 && (significand_size (fmode)
4686 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
4687 continue;
4689 icode = can_float_p (fmode, imode, unsignedp);
4690 if (icode == CODE_FOR_nothing && unsignedp)
4692 enum insn_code scode = can_float_p (fmode, imode, 0);
4693 if (scode != CODE_FOR_nothing)
4694 can_do_signed = true;
4695 if (imode != GET_MODE (from))
4696 icode = scode, doing_unsigned = 0;
4699 if (icode != CODE_FOR_nothing)
4701 if (imode != GET_MODE (from))
4702 from = convert_to_mode (imode, from, unsignedp);
4704 if (fmode != GET_MODE (to))
4705 target = gen_reg_rtx (fmode);
4707 emit_unop_insn (icode, target, from,
4708 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4710 if (target != to)
4711 convert_move (to, target, 0);
4712 return;
4716 /* Unsigned integer, and no way to convert directly. Convert as signed,
4717 then conditionally adjust the result. */
4718 if (unsignedp
4719 && can_do_signed
4720 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
4721 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
4723 opt_scalar_mode fmode_iter;
4724 rtx_code_label *label = gen_label_rtx ();
4725 rtx temp;
4726 REAL_VALUE_TYPE offset;
4728 /* Look for a usable floating mode FMODE wider than the source and at
4729 least as wide as the target. Using FMODE will avoid rounding woes
4730 with unsigned values greater than the signed maximum value. */
4732 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
4734 scalar_mode fmode = fmode_iter.require ();
4735 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
4736 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
4737 break;
4740 if (!fmode_iter.exists (&fmode))
4742 /* There is no such mode. Pretend the target is wide enough. */
4743 fmode = to_mode;
4745 /* Avoid double-rounding when TO is narrower than FROM. */
4746 if ((significand_size (fmode) + 1)
4747 < GET_MODE_PRECISION (from_mode))
4749 rtx temp1;
4750 rtx_code_label *neglabel = gen_label_rtx ();
4752 /* Don't use TARGET if it isn't a register, is a hard register,
4753 or is the wrong mode. */
4754 if (!REG_P (target)
4755 || REGNO (target) < FIRST_PSEUDO_REGISTER
4756 || GET_MODE (target) != fmode)
4757 target = gen_reg_rtx (fmode);
4759 imode = from_mode;
4760 do_pending_stack_adjust ();
4762 /* Test whether the sign bit is set. */
4763 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4764 0, neglabel);
4766 /* The sign bit is not set. Convert as signed. */
4767 expand_float (target, from, 0);
4768 emit_jump_insn (targetm.gen_jump (label));
4769 emit_barrier ();
4771 /* The sign bit is set.
4772 Convert to a usable (positive signed) value by shifting right
4773 one bit, while remembering if a nonzero bit was shifted
4774 out; i.e., compute (from & 1) | (from >> 1). */
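/* In C terms the trick is, for a 64-bit unsigned U converted to double
   (a sketch; the real code works on arbitrary modes):

     double d = (double) (long long) ((u >> 1) | (u & 1));
     d = d + d;

   The OR-ed in low bit acts as a sticky bit, so the single rounding in
   the signed conversion already yields the correctly rounded result
   after doubling.  */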
4776 emit_label (neglabel);
4777 temp = expand_binop (imode, and_optab, from, const1_rtx,
4778 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4779 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4780 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4781 OPTAB_LIB_WIDEN);
4782 expand_float (target, temp, 0);
4784 /* Multiply by 2 to undo the shift above. */
4785 temp = expand_binop (fmode, add_optab, target, target,
4786 target, 0, OPTAB_LIB_WIDEN);
4787 if (temp != target)
4788 emit_move_insn (target, temp);
4790 do_pending_stack_adjust ();
4791 emit_label (label);
4792 goto done;
4796 /* If we are about to do some arithmetic to correct for an
4797 unsigned operand, do it in a pseudo-register. */
4799 if (to_mode != fmode
4800 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4801 target = gen_reg_rtx (fmode);
4803 /* Convert as signed integer to floating. */
4804 expand_float (target, from, 0);
4806 /* If FROM is negative (and therefore TO is negative),
4807 correct its value by 2**bitwidth. */
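/* In C terms this branch is roughly (a sketch for a 32-bit FROM):

     double d = (double) (int) from;
     if ((int) from < 0)
       d += 4294967296.0;   /* 2**32 */

   which mirrors the compare-and-jump plus addition emitted below.  */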
4809 do_pending_stack_adjust ();
4810 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
4811 0, label);
4814 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
4815 temp = expand_binop (fmode, add_optab, target,
4816 const_double_from_real_value (offset, fmode),
4817 target, 0, OPTAB_LIB_WIDEN);
4818 if (temp != target)
4819 emit_move_insn (target, temp);
4821 do_pending_stack_adjust ();
4822 emit_label (label);
4823 goto done;
4826 /* No hardware instruction available; call a library routine. */
4828 rtx libfunc;
4829 rtx_insn *insns;
4830 rtx value;
4831 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4833 if (is_narrower_int_mode (GET_MODE (from), SImode))
4834 from = convert_to_mode (SImode, from, unsignedp);
4836 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4837 gcc_assert (libfunc);
4839 start_sequence ();
4841 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4842 GET_MODE (to), from, GET_MODE (from));
4843 insns = get_insns ();
4844 end_sequence ();
4846 emit_libcall_block (insns, target, value,
4847 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4848 GET_MODE (to), from));
4851 done:
4853 /* Copy result to requested destination
4854 if we have been computing in a temp location. */
4856 if (target != to)
4858 if (GET_MODE (target) == GET_MODE (to))
4859 emit_move_insn (to, target);
4860 else
4861 convert_move (to, target, 0);
4865 /* Generate code to convert FROM to fixed point and store in TO. FROM
4866 must be floating point. */
4868 void
4869 expand_fix (rtx to, rtx from, int unsignedp)
4871 enum insn_code icode;
4872 rtx target = to;
4873 machine_mode fmode, imode;
4874 opt_scalar_mode fmode_iter;
4875 bool must_trunc = false;
4877 /* We first try to find a pair of modes, one real and one integer, at
4878 least as wide as FROM and TO, respectively, in which we can open-code
4879 this conversion. If the integer mode is wider than the mode of TO,
4880 we can do the conversion either signed or unsigned. */
4882 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4883 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
4885 int doing_unsigned = unsignedp;
4887 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4888 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4889 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4891 if (icode != CODE_FOR_nothing)
4893 rtx_insn *last = get_last_insn ();
4894 if (fmode != GET_MODE (from))
4895 from = convert_to_mode (fmode, from, 0);
4897 if (must_trunc)
4899 rtx temp = gen_reg_rtx (GET_MODE (from));
4900 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4901 temp, 0);
4904 if (imode != GET_MODE (to))
4905 target = gen_reg_rtx (imode);
4907 if (maybe_emit_unop_insn (icode, target, from,
4908 doing_unsigned ? UNSIGNED_FIX : FIX))
4910 if (target != to)
4911 convert_move (to, target, unsignedp);
4912 return;
4914 delete_insns_since (last);
4918 /* For an unsigned conversion, there is one more way to do it.
4919 If we have a signed conversion, we generate code that compares
4920 the real value to the largest representable positive number. If it
4921 is smaller, the conversion is done normally. Otherwise, subtract
4922 one plus the highest signed number, convert, and add it back.
4924 We only need to check all real modes, since we know we didn't find
4925 anything with a wider integer mode.
4927 This code used to extend the FP value into a mode wider than the destination.
4928 That is needed for decimal float modes, which cannot accurately
4929 represent one plus the highest signed number of the same size, but
4930 not for binary modes. Consider, for instance, conversion from SFmode
4931 into DImode.
4933 The hot path through the code deals with inputs smaller than 2^63
4934 and does just the conversion, so there are no bits to lose.
4936 On the other path we know the value is positive and in the range 2^63..2^64-1
4937 inclusive (for any other input, overflow happens and the result is undefined).
4938 So we know that the most significant bit set in the mantissa corresponds to
4939 2^63. Subtracting 2^63 cannot cause any rounding, as it simply
4940 clears that bit. The rest is trivial. */
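/* As a C sketch for double -> 64-bit unsigned (the emitted RTL has the
   same shape for other mode pairs):

     if (x < 0x1p63)
       u = (unsigned long long) (long long) x;
     else
       u = (unsigned long long) (long long) (x - 0x1p63)
           ^ (1ULL << 63);

   Subtracting 2**63 clears bit 63 exactly, and the XOR adds it back
   after the signed conversion.  */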
4942 scalar_int_mode to_mode;
4943 if (unsignedp
4944 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
4945 && HWI_COMPUTABLE_MODE_P (to_mode))
4946 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
4948 scalar_mode fmode = fmode_iter.require ();
4949 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
4950 0, &must_trunc)
4951 && (!DECIMAL_FLOAT_MODE_P (fmode)
4952 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
4954 int bitsize;
4955 REAL_VALUE_TYPE offset;
4956 rtx limit;
4957 rtx_code_label *lab1, *lab2;
4958 rtx_insn *insn;
4960 bitsize = GET_MODE_PRECISION (to_mode);
4961 real_2expN (&offset, bitsize - 1, fmode);
4962 limit = const_double_from_real_value (offset, fmode);
4963 lab1 = gen_label_rtx ();
4964 lab2 = gen_label_rtx ();
4966 if (fmode != GET_MODE (from))
4967 from = convert_to_mode (fmode, from, 0);
4969 /* See if we need to do the subtraction. */
4970 do_pending_stack_adjust ();
4971 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
4972 GET_MODE (from), 0, lab1);
4974 /* If not, do the signed "fix" and branch around fixup code. */
4975 expand_fix (to, from, 0);
4976 emit_jump_insn (targetm.gen_jump (lab2));
4977 emit_barrier ();
4979 /* Otherwise, subtract 2**(N-1), convert to signed number,
4980 then add 2**(N-1). Do the addition using XOR since this
4981 will often generate better code. */
4982 emit_label (lab1);
4983 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4984 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4985 expand_fix (to, target, 0);
4986 target = expand_binop (to_mode, xor_optab, to,
4987 gen_int_mode
4988 (HOST_WIDE_INT_1 << (bitsize - 1),
4989 to_mode),
4990 to, 1, OPTAB_LIB_WIDEN);
4992 if (target != to)
4993 emit_move_insn (to, target);
4995 emit_label (lab2);
4997 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
4999 /* Make a place for a REG_NOTE and add it. */
5000 insn = emit_move_insn (to, to);
5001 set_dst_reg_note (insn, REG_EQUAL,
5002 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5003 copy_rtx (from)),
5004 to);
5007 return;
5011 /* We can't do it with an insn, so use a library call. But first ensure
5012 that the mode of TO is at least as wide as SImode, since those are the
5013 only library calls we know about. */
5015 if (is_narrower_int_mode (GET_MODE (to), SImode))
5017 target = gen_reg_rtx (SImode);
5019 expand_fix (target, from, unsignedp);
5021 else
5023 rtx_insn *insns;
5024 rtx value;
5025 rtx libfunc;
5027 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5028 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5029 gcc_assert (libfunc);
5031 start_sequence ();
5033 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5034 GET_MODE (to), from, GET_MODE (from));
5035 insns = get_insns ();
5036 end_sequence ();
5038 emit_libcall_block (insns, target, value,
5039 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5040 GET_MODE (to), from));
5043 if (target != to)
5045 if (GET_MODE (to) == GET_MODE (target))
5046 emit_move_insn (to, target);
5047 else
5048 convert_move (to, target, 0);
5053 /* Promote integer arguments for a libcall if necessary.
5054 emit_library_call_value cannot do the promotion because it does not
5055 know if it should do a signed or unsigned promotion. This is because
5056 there are no tree types defined for libcalls. */
5058 static rtx
5059 prepare_libcall_arg (rtx arg, int uintp)
5061 scalar_int_mode mode;
5062 machine_mode arg_mode;
5063 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5065 /* If we need to promote the integer function argument we need to do
5066 it here instead of inside emit_library_call_value because in
5067 emit_library_call_value we don't know if we should do a signed or
5068 unsigned promotion. */
5070 int unsigned_p = 0;
5071 arg_mode = promote_function_mode (NULL_TREE, mode,
5072 &unsigned_p, NULL_TREE, 0);
5073 if (arg_mode != mode)
5074 return convert_to_mode (arg_mode, arg, uintp);
5076 return arg;
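/* For example, on a target whose promote_function_mode widens subword
   arguments to SImode, an HImode ARG is extended here (zero- or
   sign-extended according to UINTP) before the libcall is emitted.  */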
5079 /* Generate code to convert FROM to TO, where FROM and/or TO has a fixed-point mode.
5080 If UINTP is true, either TO or FROM is an unsigned integer.
5081 If SATP is true, we need to saturate the result. */
5083 void
5084 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5086 machine_mode to_mode = GET_MODE (to);
5087 machine_mode from_mode = GET_MODE (from);
5088 convert_optab tab;
5089 enum rtx_code this_code;
5090 enum insn_code code;
5091 rtx_insn *insns;
5092 rtx value;
5093 rtx libfunc;
5095 if (to_mode == from_mode)
5097 emit_move_insn (to, from);
5098 return;
5101 if (uintp)
5103 tab = satp ? satfractuns_optab : fractuns_optab;
5104 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5106 else
5108 tab = satp ? satfract_optab : fract_optab;
5109 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5111 code = convert_optab_handler (tab, to_mode, from_mode);
5112 if (code != CODE_FOR_nothing)
5114 emit_unop_insn (code, to, from, this_code);
5115 return;
5118 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5119 gcc_assert (libfunc);
5121 from = prepare_libcall_arg (from, uintp);
5122 from_mode = GET_MODE (from);
5124 start_sequence ();
5125 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5126 from, from_mode);
5127 insns = get_insns ();
5128 end_sequence ();
5130 emit_libcall_block (insns, to, value,
5131 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5134 /* Generate code to convert FROM to fixed point and store in TO. FROM
5135 must be floating point, TO must be signed. Use the conversion optab
5136 TAB to do the conversion. */
5138 bool
5139 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5141 enum insn_code icode;
5142 rtx target = to;
5143 machine_mode fmode, imode;
5145 /* We first try to find a pair of modes, one real and one integer, at
5146 least as wide as FROM and TO, respectively, in which we can open-code
5147 this conversion. If the integer mode is wider than the mode of TO,
5148 we can do the conversion either signed or unsigned. */
5150 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5151 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5153 icode = convert_optab_handler (tab, imode, fmode);
5154 if (icode != CODE_FOR_nothing)
5156 rtx_insn *last = get_last_insn ();
5157 if (fmode != GET_MODE (from))
5158 from = convert_to_mode (fmode, from, 0);
5160 if (imode != GET_MODE (to))
5161 target = gen_reg_rtx (imode);
5163 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5165 delete_insns_since (last);
5166 continue;
5168 if (target != to)
5169 convert_move (to, target, 0);
5170 return true;
5174 return false;
5177 /* Report whether we have an instruction to perform the operation
5178 specified by CODE on operands of mode MODE. */
5179 bool
5180 have_insn_for (enum rtx_code code, machine_mode mode)
5182 return (code_to_optab (code)
5183 && (optab_handler (code_to_optab (code), mode)
5184 != CODE_FOR_nothing));
5187 /* Print information about the current contents of the optabs on
5188 STDERR. */
5190 DEBUG_FUNCTION void
5191 debug_optab_libfuncs (void)
5193 int i, j, k;
5195 /* Dump the arithmetic optabs. */
5196 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5197 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5199 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5200 if (l)
5202 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5203 fprintf (stderr, "%s\t%s:\t%s\n",
5204 GET_RTX_NAME (optab_to_code ((optab) i)),
5205 GET_MODE_NAME (j),
5206 XSTR (l, 0));
5210 /* Dump the conversion optabs. */
5211 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5212 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5213 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5215 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5216 (machine_mode) k);
5217 if (l)
5219 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5220 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5221 GET_RTX_NAME (optab_to_code ((optab) i)),
5222 GET_MODE_NAME (j),
5223 GET_MODE_NAME (k),
5224 XSTR (l, 0));
5229 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5230 CODE. Return 0 on failure. */
5232 rtx_insn *
5233 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5235 machine_mode mode = GET_MODE (op1);
5236 enum insn_code icode;
5237 rtx_insn *insn;
5238 rtx trap_rtx;
5240 if (mode == VOIDmode)
5241 return 0;
5243 icode = optab_handler (ctrap_optab, mode);
5244 if (icode == CODE_FOR_nothing)
5245 return 0;
5247 /* Some targets only accept a zero trap code. */
5248 if (!insn_operand_matches (icode, 3, tcode))
5249 return 0;
5251 do_pending_stack_adjust ();
5252 start_sequence ();
5253 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5254 &trap_rtx, &mode);
5255 if (!trap_rtx)
5256 insn = NULL;
5257 else
5258 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5259 tcode);
5261 /* If that failed, then give up. */
5262 if (insn == 0)
5264 end_sequence ();
5265 return 0;
5268 emit_insn (insn);
5269 insn = get_insns ();
5270 end_sequence ();
5271 return insn;
5274 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5275 or unsigned operation code. */
5277 enum rtx_code
5278 get_rtx_code (enum tree_code tcode, bool unsignedp)
5280 enum rtx_code code;
5281 switch (tcode)
5283 case EQ_EXPR:
5284 code = EQ;
5285 break;
5286 case NE_EXPR:
5287 code = NE;
5288 break;
5289 case LT_EXPR:
5290 code = unsignedp ? LTU : LT;
5291 break;
5292 case LE_EXPR:
5293 code = unsignedp ? LEU : LE;
5294 break;
5295 case GT_EXPR:
5296 code = unsignedp ? GTU : GT;
5297 break;
5298 case GE_EXPR:
5299 code = unsignedp ? GEU : GE;
5300 break;
5302 case UNORDERED_EXPR:
5303 code = UNORDERED;
5304 break;
5305 case ORDERED_EXPR:
5306 code = ORDERED;
5307 break;
5308 case UNLT_EXPR:
5309 code = UNLT;
5310 break;
5311 case UNLE_EXPR:
5312 code = UNLE;
5313 break;
5314 case UNGT_EXPR:
5315 code = UNGT;
5316 break;
5317 case UNGE_EXPR:
5318 code = UNGE;
5319 break;
5320 case UNEQ_EXPR:
5321 code = UNEQ;
5322 break;
5323 case LTGT_EXPR:
5324 code = LTGT;
5325 break;
5327 case BIT_AND_EXPR:
5328 code = AND;
5329 break;
5331 case BIT_IOR_EXPR:
5332 code = IOR;
5333 break;
5335 default:
5336 gcc_unreachable ();
5338 return code;
5341 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5342 select signed or unsigned operators. OPNO holds the index of the
5343 first comparison operand for insn ICODE. Do not generate the
5344 compare instruction itself. */
5346 static rtx
5347 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5348 tree t_op0, tree t_op1, bool unsignedp,
5349 enum insn_code icode, unsigned int opno)
5351 struct expand_operand ops[2];
5352 rtx rtx_op0, rtx_op1;
5353 machine_mode m0, m1;
5354 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5356 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5358 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5359 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5360 cases, use the original mode. */
5361 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5362 EXPAND_STACK_PARM);
5363 m0 = GET_MODE (rtx_op0);
5364 if (m0 == VOIDmode)
5365 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5367 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5368 EXPAND_STACK_PARM);
5369 m1 = GET_MODE (rtx_op1);
5370 if (m1 == VOIDmode)
5371 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5373 create_input_operand (&ops[0], rtx_op0, m0);
5374 create_input_operand (&ops[1], rtx_op1, m1);
5375 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5376 gcc_unreachable ();
5377 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5380 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5381 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5382 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5383 shift. */
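/* Example: for a V4SImode selector {1, 2, 3, 4} with the second operand
   all zeroes, element 0 comes from index 1 and successive indices step
   by one; indices >= 4 all read the zero vector and are interchangeable.
   The permutation is therefore a shift by 1 * 32 bits, and GEN_INT (32)
   is returned.  */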
5384 static rtx
5385 shift_amt_for_vec_perm_mask (rtx sel)
5387 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5388 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5390 if (GET_CODE (sel) != CONST_VECTOR)
5391 return NULL_RTX;
5393 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5394 if (first >= nelt)
5395 return NULL_RTX;
5396 for (i = 1; i < nelt; i++)
5398 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5399 unsigned int expected = i + first;
5400 /* Indices into the second vector are all equivalent. */
5401 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5402 return NULL_RTX;
5405 return GEN_INT (first * bitsize);
5408 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5410 static rtx
5411 expand_vec_perm_1 (enum insn_code icode, rtx target,
5412 rtx v0, rtx v1, rtx sel)
5414 machine_mode tmode = GET_MODE (target);
5415 machine_mode smode = GET_MODE (sel);
5416 struct expand_operand ops[4];
5418 create_output_operand (&ops[0], target, tmode);
5419 create_input_operand (&ops[3], sel, smode);
5421 /* Make an effort to preserve v0 == v1. The target expander is able to
5422 rely on this to determine if we're permuting a single input operand. */
5423 if (rtx_equal_p (v0, v1))
5425 if (!insn_operand_matches (icode, 1, v0))
5426 v0 = force_reg (tmode, v0);
5427 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5428 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5430 create_fixed_operand (&ops[1], v0);
5431 create_fixed_operand (&ops[2], v0);
5433 else
5435 create_input_operand (&ops[1], v0, tmode);
5436 create_input_operand (&ops[2], v1, tmode);
5439 if (maybe_expand_insn (icode, 4, ops))
5440 return ops[0].value;
5441 return NULL_RTX;
5444 /* Generate instructions for vec_perm optab given its mode
5445 and three operands. */
5447 rtx
5448 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5450 enum insn_code icode;
5451 machine_mode qimode;
5452 unsigned int i, w, e, u;
5453 rtx tmp, sel_qi = NULL;
5454 rtvec vec;
5456 if (!target || GET_MODE (target) != mode)
5457 target = gen_reg_rtx (mode);
5459 w = GET_MODE_SIZE (mode);
5460 e = GET_MODE_NUNITS (mode);
5461 u = GET_MODE_UNIT_SIZE (mode);
5463 /* Set QIMODE to a different vector mode with byte elements.
5464 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5465 if (GET_MODE_INNER (mode) == QImode
5466 || !mode_for_vector (QImode, w).exists (&qimode)
5467 || !VECTOR_MODE_P (qimode))
5468 qimode = VOIDmode;
5470 /* If the input is a constant, expand it specially. */
5471 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5472 if (GET_CODE (sel) == CONST_VECTOR)
5474 /* See if this can be handled with a vec_shr. We only do this if the
5475 second vector is all zeroes. */
5476 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5477 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5478 ? optab_handler (vec_shr_optab, qimode)
5479 : CODE_FOR_nothing);
5480 rtx shift_amt = NULL_RTX;
5481 if (v1 == CONST0_RTX (GET_MODE (v1))
5482 && (shift_code != CODE_FOR_nothing
5483 || shift_code_qi != CODE_FOR_nothing))
5485 shift_amt = shift_amt_for_vec_perm_mask (sel);
5486 if (shift_amt)
5488 struct expand_operand ops[3];
5489 if (shift_code != CODE_FOR_nothing)
5491 create_output_operand (&ops[0], target, mode);
5492 create_input_operand (&ops[1], v0, mode);
5493 create_convert_operand_from_type (&ops[2], shift_amt,
5494 sizetype);
5495 if (maybe_expand_insn (shift_code, 3, ops))
5496 return ops[0].value;
5498 if (shift_code_qi != CODE_FOR_nothing)
5500 tmp = gen_reg_rtx (qimode);
5501 create_output_operand (&ops[0], tmp, qimode);
5502 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5503 qimode);
5504 create_convert_operand_from_type (&ops[2], shift_amt,
5505 sizetype);
5506 if (maybe_expand_insn (shift_code_qi, 3, ops))
5507 return gen_lowpart (mode, ops[0].value);
5512 icode = direct_optab_handler (vec_perm_const_optab, mode);
5513 if (icode != CODE_FOR_nothing)
5515 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5516 if (tmp)
5517 return tmp;
5520 /* Fall back to a constant byte-based permutation. */
5521 if (qimode != VOIDmode)
5523 vec = rtvec_alloc (w);
5524 for (i = 0; i < e; ++i)
5526 unsigned int j, this_e;
5528 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5529 this_e &= 2 * e - 1;
5530 this_e *= u;
5532 for (j = 0; j < u; ++j)
5533 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5535 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5537 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5538 if (icode != CODE_FOR_nothing)
5540 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5541 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5542 gen_lowpart (qimode, v1), sel_qi);
5543 if (tmp)
5544 return gen_lowpart (mode, tmp);
5549 /* Otherwise expand as a fully variable permutation. */
5550 icode = direct_optab_handler (vec_perm_optab, mode);
5551 if (icode != CODE_FOR_nothing)
5553 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5554 if (tmp)
5555 return tmp;
5558 /* As a special case to aid several targets, lower the element-based
5559 permutation to a byte-based permutation and try again. */
5560 if (qimode == VOIDmode)
5561 return NULL_RTX;
5562 icode = direct_optab_handler (vec_perm_optab, qimode);
5563 if (icode == CODE_FOR_nothing)
5564 return NULL_RTX;
5566 if (sel_qi == NULL)
5568 /* Multiply each element by its byte size. */
5569 machine_mode selmode = GET_MODE (sel);
5570 if (u == 2)
5571 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5572 NULL, 0, OPTAB_DIRECT);
5573 else
5574 sel = expand_simple_binop (selmode, ASHIFT, sel,
5575 GEN_INT (exact_log2 (u)),
5576 NULL, 0, OPTAB_DIRECT);
5577 gcc_assert (sel != NULL);
5579 /* Broadcast the low byte of each element into each of its bytes. */
5580 vec = rtvec_alloc (w);
5581 for (i = 0; i < w; ++i)
5583 int this_e = i / u * u;
5584 if (BYTES_BIG_ENDIAN)
5585 this_e += u - 1;
5586 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5588 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5589 sel = gen_lowpart (qimode, sel);
5590 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5591 gcc_assert (sel != NULL);
5593 /* Add the byte offset to each byte element. */
5594 /* Note that the definition of the indices here is memory ordering,
5595 so there should be no difference between big and little endian. */
5596 vec = rtvec_alloc (w);
5597 for (i = 0; i < w; ++i)
5598 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5599 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5600 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5601 sel, 0, OPTAB_DIRECT);
5602 gcc_assert (sel_qi != NULL);
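/* Worked example (little-endian, V4SImode lowered to V16QImode): the
   element selector {1, 0, 3, 2} is scaled by u = 4 to {4, 0, 12, 8};
   broadcasting each element's low byte gives
   {4,4,4,4, 0,0,0,0, 12,12,12,12, 8,8,8,8}; adding the byte offsets
   {0,1,2,3, 0,1,2,3, ...} yields the byte selector
   {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}.  */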
5605 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5606 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5607 gen_lowpart (qimode, v1), sel_qi);
5608 if (tmp)
5609 tmp = gen_lowpart (mode, tmp);
5610 return tmp;
5613 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5614 three operands. */
5616 rtx
5617 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5618 rtx target)
5620 struct expand_operand ops[4];
5621 machine_mode mode = TYPE_MODE (vec_cond_type);
5622 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5623 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5624 rtx mask, rtx_op1, rtx_op2;
5626 if (icode == CODE_FOR_nothing)
5627 return 0;
5629 mask = expand_normal (op0);
5630 rtx_op1 = expand_normal (op1);
5631 rtx_op2 = expand_normal (op2);
5633 mask = force_reg (mask_mode, mask);
5634 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5636 create_output_operand (&ops[0], target, mode);
5637 create_input_operand (&ops[1], rtx_op1, mode);
5638 create_input_operand (&ops[2], rtx_op2, mode);
5639 create_input_operand (&ops[3], mask, mask_mode);
5640 expand_insn (icode, 4, ops);
5642 return ops[0].value;
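/* Example (illustrative): a gimple statement such as

     _5 = VEC_COND_EXPR <mask_3, a_1, b_2>;

   where mask_3 is already a vector boolean reaches the function above
   and expands to a single vcond_mask insn, selecting a_1 elements
   where the mask is true and b_2 elements elsewhere.  */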
5645 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5646 three operands. */
5649 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5650 rtx target)
5652 struct expand_operand ops[6];
5653 enum insn_code icode;
5654 rtx comparison, rtx_op1, rtx_op2;
5655 machine_mode mode = TYPE_MODE (vec_cond_type);
5656 machine_mode cmp_op_mode;
5657 bool unsignedp;
5658 tree op0a, op0b;
5659 enum tree_code tcode;
5661 if (COMPARISON_CLASS_P (op0))
5663 op0a = TREE_OPERAND (op0, 0);
5664 op0b = TREE_OPERAND (op0, 1);
5665 tcode = TREE_CODE (op0);
5667 else
5669 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5670 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5671 != CODE_FOR_nothing)
5672 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5673 op2, target);
5674 /* Fake op0 < 0. */
5675 else
5677 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5678 == MODE_VECTOR_INT);
5679 op0a = op0;
5680 op0b = build_zero_cst (TREE_TYPE (op0));
5681 tcode = LT_EXPR;
5684 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5685 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5688 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5689 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5691 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5692 if (icode == CODE_FOR_nothing)
5694 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5695 icode = get_vcond_eq_icode (mode, cmp_op_mode);
5696 if (icode == CODE_FOR_nothing)
5697 return 0;
5700 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5701 icode, 4);
5702 rtx_op1 = expand_normal (op1);
5703 rtx_op2 = expand_normal (op2);
5705 create_output_operand (&ops[0], target, mode);
5706 create_input_operand (&ops[1], rtx_op1, mode);
5707 create_input_operand (&ops[2], rtx_op2, mode);
5708 create_fixed_operand (&ops[3], comparison);
5709 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5710 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5711 expand_insn (icode, 6, ops);
5712 return ops[0].value;
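/* Example (illustrative): vectorized source code such as

     r[i] = a[i] > b[i] ? x[i] : y[i];

   reaches expand_vec_cond_expr as VEC_COND_EXPR <a > b, x, y> and maps
   onto the target's vcond pattern.  When op0 is a mask rather than a
   comparison and no vcond_mask pattern exists, the code above rewrites
   it as the comparison "op0 < 0" on the integer mask vector.  */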
5715 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
5716 Use TARGET for the result if nonnull and convenient. */
5719 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
5721 struct expand_operand ops[3];
5722 enum insn_code icode;
5723 machine_mode emode = GET_MODE_INNER (vmode);
5725 icode = direct_optab_handler (vec_series_optab, vmode);
5726 gcc_assert (icode != CODE_FOR_nothing);
5728 create_output_operand (&ops[0], target, vmode);
5729 create_input_operand (&ops[1], op0, emode);
5730 create_input_operand (&ops[2], op1, emode);
5732 expand_insn (icode, 3, ops);
5733 return ops[0].value;
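/* Example (illustrative): VEC_SERIES_EXPR <3, 2> in a four-element
   integer mode yields the vector { 3, 5, 7, 9 }.  */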
5736 /* Generate insns for a vector comparison into a mask. */
5739 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5741 struct expand_operand ops[4];
5742 enum insn_code icode;
5743 rtx comparison;
5744 machine_mode mask_mode = TYPE_MODE (type);
5745 machine_mode vmode;
5746 bool unsignedp;
5747 tree op0a, op0b;
5748 enum tree_code tcode;
5750 op0a = TREE_OPERAND (exp, 0);
5751 op0b = TREE_OPERAND (exp, 1);
5752 tcode = TREE_CODE (exp);
5754 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5755 vmode = TYPE_MODE (TREE_TYPE (op0a));
5757 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5758 if (icode == CODE_FOR_nothing)
5760 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5761 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5762 if (icode == CODE_FOR_nothing)
5763 return 0;
5766 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5767 unsignedp, icode, 2);
5768 create_output_operand (&ops[0], target, mask_mode);
5769 create_fixed_operand (&ops[1], comparison);
5770 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5771 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5772 expand_insn (icode, 4, ops);
5773 return ops[0].value;
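/* Example (illustrative): the comparison a_1 < b_2 on two V4SImode
   operands expands to the target's vec_cmp pattern, producing a result
   in MASK_MODE whose exact element representation (e.g. all-ones
   integers or a dedicated predicate mode) is target-defined.  */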
5776 /* Expand a highpart multiply. */
5779 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5780 rtx target, bool uns_p)
5782 struct expand_operand eops[3];
5783 enum insn_code icode;
5784 int method, i, nunits;
5785 machine_mode wmode;
5786 rtx m1, m2, perm;
5787 optab tab1, tab2;
5788 rtvec v;
5790 method = can_mult_highpart_p (mode, uns_p);
5791 switch (method)
5793 case 0:
5794 return NULL_RTX;
5795 case 1:
5796 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5797 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5798 OPTAB_LIB_WIDEN);
5799 case 2:
5800 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5801 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5802 break;
5803 case 3:
5804 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5805 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5806 if (BYTES_BIG_ENDIAN)
5807 std::swap (tab1, tab2);
5808 break;
5809 default:
5810 gcc_unreachable ();
5813 icode = optab_handler (tab1, mode);
5814 nunits = GET_MODE_NUNITS (mode);
5815 wmode = insn_data[icode].operand[0].mode;
5816 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5817 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5819 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5820 create_input_operand (&eops[1], op0, mode);
5821 create_input_operand (&eops[2], op1, mode);
5822 expand_insn (icode, 3, eops);
5823 m1 = gen_lowpart (mode, eops[0].value);
5825 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5826 create_input_operand (&eops[1], op0, mode);
5827 create_input_operand (&eops[2], op1, mode);
5828 expand_insn (optab_handler (tab2, mode), 3, eops);
5829 m2 = gen_lowpart (mode, eops[0].value);
5831 v = rtvec_alloc (nunits);
5832 if (method == 2)
5834 for (i = 0; i < nunits; ++i)
5835 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5836 + ((i & 1) ? nunits : 0));
5837 perm = gen_rtx_CONST_VECTOR (mode, v);
5839 else
5841 int base = BYTES_BIG_ENDIAN ? 0 : 1;
5842 perm = gen_const_vec_series (mode, GEN_INT (base), GEN_INT (2));
5845 return expand_vec_perm (mode, m1, m2, perm, target);
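/* Worked example for method 2 above (illustrative): for V4SImode on a
   little-endian target, the even widening multiply produces the 64-bit
   products of elements { 0, 2 } and the odd multiply those of elements
   { 1, 3 }.  Reinterpreted as V4SImode, the high 32 bits of each
   product occupy the odd subwords, so the final selector is
   { 1, 5, 3, 7 }, picking the high halves of all four products in
   element order.  */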
5848 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5849 pattern. */
5851 static void
5852 find_cc_set (rtx x, const_rtx pat, void *data)
5854 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5855 && GET_CODE (pat) == SET)
5857 rtx *p_cc_reg = (rtx *) data;
5858 gcc_assert (!*p_cc_reg);
5859 *p_cc_reg = x;
5863 /* This is a helper function for the other atomic operations. This function
5864 emits a loop that contains SEQ that iterates until a compare-and-swap
5865 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5866 a set of instructions that takes a value from OLD_REG as an input and
5867 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5868 set to the current contents of MEM. After SEQ, a compare-and-swap will
5869 attempt to update MEM with NEW_REG. The function returns true when the
5870 loop was generated successfully. */
5872 static bool
5873 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5875 machine_mode mode = GET_MODE (mem);
5876 rtx_code_label *label;
5877 rtx cmp_reg, success, oldval;
5879 /* The loop we want to generate looks like
5881 cmp_reg = mem;
5882 label:
5883 old_reg = cmp_reg;
5884 seq;
5885 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5886 if (success)
5887 goto label;
5889 Note that we only do the plain load from memory once. Subsequent
5890 iterations use the value loaded by the compare-and-swap pattern. */
5892 label = gen_label_rtx ();
5893 cmp_reg = gen_reg_rtx (mode);
5895 emit_move_insn (cmp_reg, mem);
5896 emit_label (label);
5897 emit_move_insn (old_reg, cmp_reg);
5898 if (seq)
5899 emit_insn (seq);
5901 success = NULL_RTX;
5902 oldval = cmp_reg;
5903 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5904 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5905 MEMMODEL_RELAXED))
5906 return false;
5908 if (oldval != cmp_reg)
5909 emit_move_insn (cmp_reg, oldval);
5911 /* Mark this jump predicted not taken. */
5912 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5913 GET_MODE (success), 1, label,
5914 profile_probability::guessed_never ());
5915 return true;
5919 /* This function tries to emit an atomic_exchange instruction. VAL is written
5920 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5921 using TARGET if possible. */
5923 static rtx
5924 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5926 machine_mode mode = GET_MODE (mem);
5927 enum insn_code icode;
5929 /* If the target supports the exchange directly, great. */
5930 icode = direct_optab_handler (atomic_exchange_optab, mode);
5931 if (icode != CODE_FOR_nothing)
5933 struct expand_operand ops[4];
5935 create_output_operand (&ops[0], target, mode);
5936 create_fixed_operand (&ops[1], mem);
5937 create_input_operand (&ops[2], val, mode);
5938 create_integer_operand (&ops[3], model);
5939 if (maybe_expand_insn (icode, 4, ops))
5940 return ops[0].value;
5943 return NULL_RTX;
5946 /* This function tries to implement an atomic exchange operation using
5947 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5948 The previous contents of *MEM are returned, using TARGET if possible.
5949 Since this instruction is an acquire barrier only, stronger memory
5950 models may require additional barriers to be emitted. */
5952 static rtx
5953 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5954 enum memmodel model)
5956 machine_mode mode = GET_MODE (mem);
5957 enum insn_code icode;
5958 rtx_insn *last_insn = get_last_insn ();
5960 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5962 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5963 exists, and the memory model is stronger than acquire, add a release
5964 barrier before the instruction. */
5966 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5967 expand_mem_thread_fence (model);
5969 if (icode != CODE_FOR_nothing)
5971 struct expand_operand ops[3];
5972 create_output_operand (&ops[0], target, mode);
5973 create_fixed_operand (&ops[1], mem);
5974 create_input_operand (&ops[2], val, mode);
5975 if (maybe_expand_insn (icode, 3, ops))
5976 return ops[0].value;
5979 /* If an external test-and-set libcall is provided, use that instead of
5980 any external compare-and-swap that we might get from the compare-and-
5981 swap-loop expansion later. */
5982 if (!can_compare_and_swap_p (mode, false))
5984 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5985 if (libfunc != NULL)
5987 rtx addr;
5989 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5990 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5991 mode, addr, ptr_mode,
5992 val, mode);
5996 /* If the test_and_set can't be emitted, eliminate any barrier that might
5997 have been emitted. */
5998 delete_insns_since (last_insn);
5999 return NULL_RTX;
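/* Example (illustrative): for a seq-cst exchange on a target whose
   only pattern is the acquire-only sync_lock_test_and_set, the code
   above first emits a fence for the model and then the exchange:

     <mem_thread_fence (seq-cst)>
     old = sync_lock_test_and_set (mem, val);
*/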
6002 /* This function tries to implement an atomic exchange operation using a
6003 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6004 *MEM are returned, using TARGET if possible. No memory model is required
6005 since a compare_and_swap loop is seq-cst. */
6007 static rtx
6008 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6010 machine_mode mode = GET_MODE (mem);
6012 if (can_compare_and_swap_p (mode, true))
6014 if (!target || !register_operand (target, mode))
6015 target = gen_reg_rtx (mode);
6016 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6017 return target;
6020 return NULL_RTX;
6023 /* This function tries to implement an atomic test-and-set operation
6024 using the atomic_test_and_set instruction pattern. A boolean value
6025 is returned from the operation, using TARGET if possible. */
6027 static rtx
6028 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6030 machine_mode pat_bool_mode;
6031 struct expand_operand ops[3];
6033 if (!targetm.have_atomic_test_and_set ())
6034 return NULL_RTX;
6036 /* While we always get QImode from __atomic_test_and_set, we get
6037 other memory modes from __sync_lock_test_and_set. Note that we
6038 use no endian adjustment here. This matches the 4.6 behavior
6039 in the Sparc backend. */
6040 enum insn_code icode = targetm.code_for_atomic_test_and_set;
6041 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6042 if (GET_MODE (mem) != QImode)
6043 mem = adjust_address_nv (mem, QImode, 0);
6045 pat_bool_mode = insn_data[icode].operand[0].mode;
6046 create_output_operand (&ops[0], target, pat_bool_mode);
6047 create_fixed_operand (&ops[1], mem);
6048 create_integer_operand (&ops[2], model);
6050 if (maybe_expand_insn (icode, 3, ops))
6051 return ops[0].value;
6052 return NULL_RTX;
6055 /* This function expands the legacy __sync_lock_test_and_set operation, which is
6056 generally an atomic exchange. Some limited targets only allow the
6057 constant 1 to be stored. This is an ACQUIRE operation.
6059 TARGET is an optional place to stick the return value.
6060 MEM is where VAL is stored. */
6063 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6065 rtx ret;
6067 /* Try an atomic_exchange first. */
6068 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6069 if (ret)
6070 return ret;
6072 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6073 MEMMODEL_SYNC_ACQUIRE);
6074 if (ret)
6075 return ret;
6077 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6078 if (ret)
6079 return ret;
6081 /* If there are no other options, try atomic_test_and_set if the value
6082 being stored is 1. */
6083 if (val == const1_rtx)
6084 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6086 return ret;
6089 /* This function expands the atomic test_and_set operation:
6090 atomically store a boolean TRUE into MEM and return the previous value.
6092 MEMMODEL is the memory model variant to use.
6093 TARGET is an optional place to stick the return value. */
6096 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6098 machine_mode mode = GET_MODE (mem);
6099 rtx ret, trueval, subtarget;
6101 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6102 if (ret)
6103 return ret;
6105 /* Be binary compatible with non-default settings of trueval, and different
6106 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6107 another only has atomic-exchange. */
6108 if (targetm.atomic_test_and_set_trueval == 1)
6110 trueval = const1_rtx;
6111 subtarget = target ? target : gen_reg_rtx (mode);
6113 else
6115 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6116 subtarget = gen_reg_rtx (mode);
6119 /* Try the atomic-exchange optab... */
6120 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6122 /* ... then an atomic-compare-and-swap loop ... */
6123 if (!ret)
6124 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6126 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6127 if (!ret)
6128 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6130 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6131 things with the value 1. Thus we try again with the constant 1. */
6132 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6133 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6135 /* Failing all else, assume a single threaded environment and simply
6136 perform the operation. */
6137 if (!ret)
6139 /* If the result is ignored, skip the move to target. */
6140 if (subtarget != const0_rtx)
6141 emit_move_insn (subtarget, mem);
6143 emit_move_insn (mem, trueval);
6144 ret = subtarget;
6147 /* Recall that we have to return a boolean value; rectify if trueval
6148 is not exactly one. */
6149 if (targetm.atomic_test_and_set_trueval != 1)
6150 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6152 return ret;
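/* Example (illustrative): on a target whose
   targetm.atomic_test_and_set_trueval is, say, 0xff rather than 1, the
   fallback paths exchange 0xff into MEM and then normalize the result
   for the caller with the equivalent of

     ret = (old != 0);

   so a 0/1 boolean is returned regardless of the trueval setting.  */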
6155 /* This function expands the atomic exchange operation:
6156 atomically store VAL in MEM and return the previous value in MEM.
6158 MEMMODEL is the memory model variant to use.
6159 TARGET is an optional place to stick the return value. */
6162 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6164 machine_mode mode = GET_MODE (mem);
6165 rtx ret;
6167 /* If loads are not atomic for the required size and we are not called to
6168 provide a __sync builtin, do not do anything so that we stay consistent
6169 with atomic loads of the same size. */
6170 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6171 return NULL_RTX;
6173 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6175 /* Next try a compare-and-swap loop for the exchange. */
6176 if (!ret)
6177 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6179 return ret;
6182 /* This function expands the atomic compare exchange operation:
6184 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6185 *PTARGET_OVAL is an optional place to store the old value from memory.
6186 Both target parameters may be NULL or const0_rtx to indicate that we do
6187 not care about that return value. Both target parameters are updated on
6188 success to the actual location of the corresponding result.
6190 MEMMODEL is the memory model variant to use.
6192 The return value of the function is true for success. */
6194 bool
6195 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6196 rtx mem, rtx expected, rtx desired,
6197 bool is_weak, enum memmodel succ_model,
6198 enum memmodel fail_model)
6200 machine_mode mode = GET_MODE (mem);
6201 struct expand_operand ops[8];
6202 enum insn_code icode;
6203 rtx target_oval, target_bool = NULL_RTX;
6204 rtx libfunc;
6206 /* If loads are not atomic for the required size and we are not called to
6207 provide a __sync builtin, do not do anything so that we stay consistent
6208 with atomic loads of the same size. */
6209 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6210 return false;
6212 /* Load expected into a register for the compare and swap. */
6213 if (MEM_P (expected))
6214 expected = copy_to_reg (expected);
6216 /* Make sure we always have some place to put the return oldval.
6217 Further, make sure that place is distinct from the input expected,
6218 just in case we need that path down below. */
6219 if (ptarget_oval && *ptarget_oval == const0_rtx)
6220 ptarget_oval = NULL;
6222 if (ptarget_oval == NULL
6223 || (target_oval = *ptarget_oval) == NULL
6224 || reg_overlap_mentioned_p (expected, target_oval))
6225 target_oval = gen_reg_rtx (mode);
6227 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6228 if (icode != CODE_FOR_nothing)
6230 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6232 if (ptarget_bool && *ptarget_bool == const0_rtx)
6233 ptarget_bool = NULL;
6235 /* Make sure we always have a place for the bool operand. */
6236 if (ptarget_bool == NULL
6237 || (target_bool = *ptarget_bool) == NULL
6238 || GET_MODE (target_bool) != bool_mode)
6239 target_bool = gen_reg_rtx (bool_mode);
6241 /* Emit the compare_and_swap. */
6242 create_output_operand (&ops[0], target_bool, bool_mode);
6243 create_output_operand (&ops[1], target_oval, mode);
6244 create_fixed_operand (&ops[2], mem);
6245 create_input_operand (&ops[3], expected, mode);
6246 create_input_operand (&ops[4], desired, mode);
6247 create_integer_operand (&ops[5], is_weak);
6248 create_integer_operand (&ops[6], succ_model);
6249 create_integer_operand (&ops[7], fail_model);
6250 if (maybe_expand_insn (icode, 8, ops))
6252 /* Return success/failure. */
6253 target_bool = ops[0].value;
6254 target_oval = ops[1].value;
6255 goto success;
6259 /* Otherwise fall back to the original __sync_val_compare_and_swap
6260 which is always seq-cst. */
6261 icode = optab_handler (sync_compare_and_swap_optab, mode);
6262 if (icode != CODE_FOR_nothing)
6264 rtx cc_reg;
6266 create_output_operand (&ops[0], target_oval, mode);
6267 create_fixed_operand (&ops[1], mem);
6268 create_input_operand (&ops[2], expected, mode);
6269 create_input_operand (&ops[3], desired, mode);
6270 if (!maybe_expand_insn (icode, 4, ops))
6271 return false;
6273 target_oval = ops[0].value;
6275 /* If the caller isn't interested in the boolean return value,
6276 skip the computation of it. */
6277 if (ptarget_bool == NULL)
6278 goto success;
6280 /* Otherwise, work out if the compare-and-swap succeeded. */
6281 cc_reg = NULL_RTX;
6282 if (have_insn_for (COMPARE, CCmode))
6283 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6284 if (cc_reg)
6286 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6287 const0_rtx, VOIDmode, 0, 1);
6288 goto success;
6290 goto success_bool_from_val;
6293 /* Also check for library support for __sync_val_compare_and_swap. */
6294 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6295 if (libfunc != NULL)
6297 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6298 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6299 mode, addr, ptr_mode,
6300 expected, mode, desired, mode);
6301 emit_move_insn (target_oval, target);
6303 /* Compute the boolean return value only if requested. */
6304 if (ptarget_bool)
6305 goto success_bool_from_val;
6306 else
6307 goto success;
6310 /* Failure. */
6311 return false;
6313 success_bool_from_val:
6314 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6315 expected, VOIDmode, 1, 1);
6316 success:
6317 /* Make sure that the oval output winds up where the caller asked. */
6318 if (ptarget_oval)
6319 *ptarget_oval = target_oval;
6320 if (ptarget_bool)
6321 *ptarget_bool = target_bool;
6322 return true;
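/* For reference (an illustrative user-level view), this routine is the
   expansion behind calls such as

     ok = __atomic_compare_exchange_n (mem, &expected, desired,
                                       weak, succ_model, fail_model);

   with *PTARGET_BOOL receiving the success flag and *PTARGET_OVAL the
   value that was in *mem before the operation.  */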
6325 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6327 static void
6328 expand_asm_memory_blockage (void)
6330 rtx asm_op, clob;
6332 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6333 rtvec_alloc (0), rtvec_alloc (0),
6334 rtvec_alloc (0), UNKNOWN_LOCATION);
6335 MEM_VOLATILE_P (asm_op) = 1;
6337 clob = gen_rtx_SCRATCH (VOIDmode);
6338 clob = gen_rtx_MEM (BLKmode, clob);
6339 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6341 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6344 /* Do not propagate memory accesses across this point. */
6346 static void
6347 expand_memory_blockage (void)
6349 if (targetm.have_memory_blockage ())
6350 emit_insn (targetm.gen_memory_blockage ());
6351 else
6352 expand_asm_memory_blockage ();
6355 /* This routine will either emit the mem_thread_fence pattern or issue a
6356 sync_synchronize to generate a fence for memory model MEMMODEL. */
6358 void
6359 expand_mem_thread_fence (enum memmodel model)
6361 if (is_mm_relaxed (model))
6362 return;
6363 if (targetm.have_mem_thread_fence ())
6365 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6366 expand_memory_blockage ();
6368 else if (targetm.have_memory_barrier ())
6369 emit_insn (targetm.gen_memory_barrier ());
6370 else if (synchronize_libfunc != NULL_RTX)
6371 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
6372 else
6373 expand_memory_blockage ();
6376 /* Emit a signal fence with given memory model. */
6378 void
6379 expand_mem_signal_fence (enum memmodel model)
6381 /* No machine barrier is required to implement a signal fence, but
6382 a compiler memory barrier must be issued, except for relaxed MM. */
6383 if (!is_mm_relaxed (model))
6384 expand_memory_blockage ();
6387 /* This function expands the atomic load operation:
6388 return the atomically loaded value in MEM.
6390 MEMMODEL is the memory model variant to use.
6391 TARGET is an optional place to stick the return value. */
6394 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6396 machine_mode mode = GET_MODE (mem);
6397 enum insn_code icode;
6399 /* If the target supports the load directly, great. */
6400 icode = direct_optab_handler (atomic_load_optab, mode);
6401 if (icode != CODE_FOR_nothing)
6403 struct expand_operand ops[3];
6404 rtx_insn *last = get_last_insn ();
6405 if (is_mm_seq_cst (model))
6406 expand_memory_blockage ();
6408 create_output_operand (&ops[0], target, mode);
6409 create_fixed_operand (&ops[1], mem);
6410 create_integer_operand (&ops[2], model);
6411 if (maybe_expand_insn (icode, 3, ops))
6413 if (!is_mm_relaxed (model))
6414 expand_memory_blockage ();
6415 return ops[0].value;
6417 delete_insns_since (last);
6420 /* If the size of the object is greater than word size on this target,
6421 then we assume that a load will not be atomic. We could try to
6422 emulate a load with a compare-and-swap operation, but the store such
6423 an emulation would perform could be incorrect if this is a volatile
6424 atomic load or targets read-only-mapped memory. */
6425 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6426 /* If there is no atomic load, leave it to the library call. */
6427 return NULL_RTX;
6429 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6430 if (!target || target == const0_rtx)
6431 target = gen_reg_rtx (mode);
6433 /* For SEQ_CST, emit a barrier before the load. */
6434 if (is_mm_seq_cst (model))
6435 expand_mem_thread_fence (model);
6437 emit_move_insn (target, mem);
6439 /* Emit the appropriate barrier after the load. */
6440 expand_mem_thread_fence (model);
6442 return target;
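/* The fallback above for word-size-or-smaller loads amounts to this
   illustrative sketch:

     fence;           (seq-cst only)
     target = mem;
     fence;           (as required by MODEL; no-op when relaxed)

   relying on the assumption that plain loads at this size are
   atomic.  */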
6445 /* This function expands the atomic store operation:
6446 Atomically store VAL in MEM.
6447 MEMMODEL is the memory model variant to use.
6448 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6449 The function returns const0_rtx if a pattern was emitted. */
6452 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6454 machine_mode mode = GET_MODE (mem);
6455 enum insn_code icode;
6456 struct expand_operand ops[3];
6458 /* If the target supports the store directly, great. */
6459 icode = direct_optab_handler (atomic_store_optab, mode);
6460 if (icode != CODE_FOR_nothing)
6462 rtx_insn *last = get_last_insn ();
6463 if (!is_mm_relaxed (model))
6464 expand_memory_blockage ();
6465 create_fixed_operand (&ops[0], mem);
6466 create_input_operand (&ops[1], val, mode);
6467 create_integer_operand (&ops[2], model);
6468 if (maybe_expand_insn (icode, 3, ops))
6470 if (is_mm_seq_cst (model))
6471 expand_memory_blockage ();
6472 return const0_rtx;
6474 delete_insns_since (last);
6477 /* If using __sync_lock_release is a viable alternative, try it.
6478 Note that this will not be set to true if we are expanding a generic
6479 __atomic_store_n. */
6480 if (use_release)
6482 icode = direct_optab_handler (sync_lock_release_optab, mode);
6483 if (icode != CODE_FOR_nothing)
6485 create_fixed_operand (&ops[0], mem);
6486 create_input_operand (&ops[1], const0_rtx, mode);
6487 if (maybe_expand_insn (icode, 2, ops))
6489 /* lock_release is only a release barrier. */
6490 if (is_mm_seq_cst (model))
6491 expand_mem_thread_fence (model);
6492 return const0_rtx;
6497 /* If the size of the object is greater than word size on this target,
6498 a default store will not be atomic. */
6499 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6501 /* If loads are atomic or we are called to provide a __sync builtin,
6502 we can try an atomic_exchange and throw away the result. Otherwise,
6503 don't do anything so that we do not create an inconsistency between
6504 loads and stores. */
6505 if (can_atomic_load_p (mode) || is_mm_sync (model))
6507 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6508 if (!target)
6509 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6510 val);
6511 if (target)
6512 return const0_rtx;
6514 return NULL_RTX;
6517 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6518 expand_mem_thread_fence (model);
6520 emit_move_insn (mem, val);
6522 /* For SEQ_CST, also emit a barrier after the store. */
6523 if (is_mm_seq_cst (model))
6524 expand_mem_thread_fence (model);
6526 return const0_rtx;
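/* The final fallback above mirrors the load case (illustrative
   sketch):

     fence;           (as required by MODEL; no-op when relaxed)
     mem = val;
     fence;           (seq-cst only)

   again assuming plain stores of word size or less are atomic.  */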
6530 /* Structure containing the pointers and values required to process the
6531 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6533 struct atomic_op_functions
6535 direct_optab mem_fetch_before;
6536 direct_optab mem_fetch_after;
6537 direct_optab mem_no_result;
6538 optab fetch_before;
6539 optab fetch_after;
6540 direct_optab no_result;
6541 enum rtx_code reverse_code;
6545 /* Fill in structure pointed to by OP with the various optab entries for an
6546 operation of type CODE. */
6548 static void
6549 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6551 gcc_assert (op != NULL);
6553 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6554 in the source code during compilation, and the optab entries are not
6555 computable until runtime. Fill in the values at runtime. */
6556 switch (code)
6558 case PLUS:
6559 op->mem_fetch_before = atomic_fetch_add_optab;
6560 op->mem_fetch_after = atomic_add_fetch_optab;
6561 op->mem_no_result = atomic_add_optab;
6562 op->fetch_before = sync_old_add_optab;
6563 op->fetch_after = sync_new_add_optab;
6564 op->no_result = sync_add_optab;
6565 op->reverse_code = MINUS;
6566 break;
6567 case MINUS:
6568 op->mem_fetch_before = atomic_fetch_sub_optab;
6569 op->mem_fetch_after = atomic_sub_fetch_optab;
6570 op->mem_no_result = atomic_sub_optab;
6571 op->fetch_before = sync_old_sub_optab;
6572 op->fetch_after = sync_new_sub_optab;
6573 op->no_result = sync_sub_optab;
6574 op->reverse_code = PLUS;
6575 break;
6576 case XOR:
6577 op->mem_fetch_before = atomic_fetch_xor_optab;
6578 op->mem_fetch_after = atomic_xor_fetch_optab;
6579 op->mem_no_result = atomic_xor_optab;
6580 op->fetch_before = sync_old_xor_optab;
6581 op->fetch_after = sync_new_xor_optab;
6582 op->no_result = sync_xor_optab;
6583 op->reverse_code = XOR;
6584 break;
6585 case AND:
6586 op->mem_fetch_before = atomic_fetch_and_optab;
6587 op->mem_fetch_after = atomic_and_fetch_optab;
6588 op->mem_no_result = atomic_and_optab;
6589 op->fetch_before = sync_old_and_optab;
6590 op->fetch_after = sync_new_and_optab;
6591 op->no_result = sync_and_optab;
6592 op->reverse_code = UNKNOWN;
6593 break;
6594 case IOR:
6595 op->mem_fetch_before = atomic_fetch_or_optab;
6596 op->mem_fetch_after = atomic_or_fetch_optab;
6597 op->mem_no_result = atomic_or_optab;
6598 op->fetch_before = sync_old_ior_optab;
6599 op->fetch_after = sync_new_ior_optab;
6600 op->no_result = sync_ior_optab;
6601 op->reverse_code = UNKNOWN;
6602 break;
6603 case NOT:
6604 op->mem_fetch_before = atomic_fetch_nand_optab;
6605 op->mem_fetch_after = atomic_nand_fetch_optab;
6606 op->mem_no_result = atomic_nand_optab;
6607 op->fetch_before = sync_old_nand_optab;
6608 op->fetch_after = sync_new_nand_optab;
6609 op->no_result = sync_nand_optab;
6610 op->reverse_code = UNKNOWN;
6611 break;
6612 default:
6613 gcc_unreachable ();
6617 /* See if there is a more efficient way to implement the operation "*MEM CODE VAL"
6618 using memory order MODEL. If AFTER is true the operation needs to return
6619 the value of *MEM after the operation, otherwise the previous value.
6620 TARGET is an optional place to store the result. The result is unused if
6621 it is const0_rtx.
6622 Return the result if there is a better sequence, otherwise NULL_RTX. */
6624 static rtx
6625 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6626 enum memmodel model, bool after)
6628 /* If the value is prefetched, or not used, it may be possible to replace
6629 the sequence with a native exchange operation. */
6630 if (!after || target == const0_rtx)
6632 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6633 if (code == AND && val == const0_rtx)
6635 if (target == const0_rtx)
6636 target = gen_reg_rtx (GET_MODE (mem));
6637 return maybe_emit_atomic_exchange (target, mem, val, model);
6640 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6641 if (code == IOR && val == constm1_rtx)
6643 if (target == const0_rtx)
6644 target = gen_reg_rtx (GET_MODE (mem));
6645 return maybe_emit_atomic_exchange (target, mem, val, model);
6649 return NULL_RTX;
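/* Example (illustrative): because x & 0 == 0 and x | -1 == -1 for any
   x, calls such as

     old = __atomic_fetch_and (&x, 0, model);
     old = __atomic_fetch_or (&x, -1, model);

   can be expanded as atomic exchanges of 0 and -1 respectively.  */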
6652 /* Try to emit an instruction for a specific operation variation.
6653 OPTAB contains the OP functions.
6654 TARGET is an optional place to return the result. const0_rtx means unused.
6655 MEM is the memory location to operate on.
6656 VAL is the value to use in the operation.
6657 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6658 MODEL is the memory model, if used.
6659 AFTER is true if the returned result is the value after the operation. */
6661 static rtx
6662 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6663 rtx val, bool use_memmodel, enum memmodel model, bool after)
6665 machine_mode mode = GET_MODE (mem);
6666 struct expand_operand ops[4];
6667 enum insn_code icode;
6668 int op_counter = 0;
6669 int num_ops;
6671 /* Check to see if there is a result returned. */
6672 if (target == const0_rtx)
6674 if (use_memmodel)
6676 icode = direct_optab_handler (optab->mem_no_result, mode);
6677 create_integer_operand (&ops[2], model);
6678 num_ops = 3;
6680 else
6682 icode = direct_optab_handler (optab->no_result, mode);
6683 num_ops = 2;
6686 /* Otherwise, we need to generate a result. */
6687 else
6689 if (use_memmodel)
6691 icode = direct_optab_handler (after ? optab->mem_fetch_after
6692 : optab->mem_fetch_before, mode);
6693 create_integer_operand (&ops[3], model);
6694 num_ops = 4;
6696 else
6698 icode = optab_handler (after ? optab->fetch_after
6699 : optab->fetch_before, mode);
6700 num_ops = 3;
6702 create_output_operand (&ops[op_counter++], target, mode);
6704 if (icode == CODE_FOR_nothing)
6705 return NULL_RTX;
6707 create_fixed_operand (&ops[op_counter++], mem);
6708 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6709 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6711 if (maybe_expand_insn (icode, num_ops, ops))
6712 return (target == const0_rtx ? const0_rtx : ops[0].value);
6714 return NULL_RTX;
6718 /* This function expands an atomic fetch_OP or OP_fetch operation:
6719 TARGET is an optional place to stick the return value. const0_rtx indicates
6720 the result is unused.
6721 atomically fetch MEM, perform the operation with VAL and return it to MEM.
6722 CODE is the operation being performed (OP).
6723 MEMMODEL is the memory model variant to use.
6724 AFTER is true to return the result of the operation (OP_fetch).
6725 AFTER is false to return the value before the operation (fetch_OP).
6727 This function will *only* generate instructions if there is a direct
6728 optab. No compare and swap loops or libcalls will be generated. */
6730 static rtx
6731 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6732 enum rtx_code code, enum memmodel model,
6733 bool after)
6735 machine_mode mode = GET_MODE (mem);
6736 struct atomic_op_functions optab;
6737 rtx result;
6738 bool unused_result = (target == const0_rtx);
6740 get_atomic_op_for_code (&optab, code);
6742 /* Check to see if there are any better instructions. */
6743 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6744 if (result)
6745 return result;
6747 /* Check for the case where the result isn't used and try those patterns. */
6748 if (unused_result)
6750 /* Try the memory model variant first. */
6751 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6752 if (result)
6753 return result;
6755 /* Next try the old style without a memory model. */
6756 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6757 if (result)
6758 return result;
6760 /* There is no no-result pattern, so try patterns with a result. */
6761 target = NULL_RTX;
6764 /* Try the __atomic version. */
6765 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6766 if (result)
6767 return result;
6769 /* Try the older __sync version. */
6770 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6771 if (result)
6772 return result;
6774 /* If the fetch value can be calculated from the other variation of fetch,
6775 try that operation. */
6776 if (after || unused_result || optab.reverse_code != UNKNOWN)
6778 /* Try the __atomic version, then the older __sync version. */
6779 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6780 if (!result)
6781 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6783 if (result)
6785 /* If the result isn't used, no need to do compensation code. */
6786 if (unused_result)
6787 return result;
6789 /* Issue compensation code. Fetch_after == fetch_before OP val.
6790 Fetch_before == after REVERSE_OP val. */
6791 if (!after)
6792 code = optab.reverse_code;
6793 if (code == NOT)
6795 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6796 true, OPTAB_LIB_WIDEN);
6797 result = expand_simple_unop (mode, NOT, result, target, true);
6799 else
6800 result = expand_simple_binop (mode, code, result, val, target,
6801 true, OPTAB_LIB_WIDEN);
6802 return result;
6806 /* No direct opcode can be generated. */
6807 return NULL_RTX;
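/* Examples of the compensation above (illustrative): if only a
   fetch_add pattern exists but the caller wants add_fetch, the missing
   value is recovered as

     add_fetch (mem, val) == fetch_add (mem, val) + val;

   and conversely fetch_add (mem, val) == add_fetch (mem, val) - val.
   NAND has no reverse operation, so only the first direction is used:
   nand_fetch == ~(fetch_nand (mem, val) & val).  */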
6812 /* This function expands an atomic fetch_OP or OP_fetch operation:
6813 TARGET is an optional place to stick the return value. const0_rtx indicates
6814 the result is unused.
6815 atomically fetch MEM, perform the operation with VAL and return it to MEM.
6816 CODE is the operation being performed (OP).
6817 MEMMODEL is the memory model variant to use.
6818 AFTER is true to return the result of the operation (OP_fetch).
6819 AFTER is false to return the value before the operation (fetch_OP). */
6821 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6822 enum memmodel model, bool after)
6824 machine_mode mode = GET_MODE (mem);
6825 rtx result;
6826 bool unused_result = (target == const0_rtx);
6828 /* If loads are not atomic for the required size and we are not called to
6829 provide a __sync builtin, do not do anything so that we stay consistent
6830 with atomic loads of the same size. */
6831 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6832 return NULL_RTX;
6834 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6835 after);
6837 if (result)
6838 return result;
6840 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6841 if (code == PLUS || code == MINUS)
6843 rtx tmp;
6844 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6846 start_sequence ();
6847 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6848 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6849 model, after);
6850 if (result)
6852 /* The reverse operation worked, so emit the insns and return. */
6853 tmp = get_insns ();
6854 end_sequence ();
6855 emit_insn (tmp);
6856 return result;
6859 /* The reverse operation failed, so throw away the negation code and continue. */
6860 end_sequence ();
6863 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6864 if (!can_compare_and_swap_p (mode, false))
6866 rtx libfunc;
6867 bool fixup = false;
6868 enum rtx_code orig_code = code;
6869 struct atomic_op_functions optab;
6871 get_atomic_op_for_code (&optab, code);
6872 libfunc = optab_libfunc (after ? optab.fetch_after
6873 : optab.fetch_before, mode);
6874 if (libfunc == NULL
6875 && (after || unused_result || optab.reverse_code != UNKNOWN))
6877 fixup = true;
6878 if (!after)
6879 code = optab.reverse_code;
6880 libfunc = optab_libfunc (after ? optab.fetch_before
6881 : optab.fetch_after, mode);
6883 if (libfunc != NULL)
6885 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6886 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6887 addr, ptr_mode, val, mode);
6889 if (!unused_result && fixup)
6890 result = expand_simple_binop (mode, code, result, val, target,
6891 true, OPTAB_LIB_WIDEN);
6892 return result;
6895 /* We need the original code for any further attempts. */
6896 code = orig_code;
6899 /* If nothing else has succeeded, default to a compare and swap loop. */
6900 if (can_compare_and_swap_p (mode, true))
6902 rtx_insn *insn;
6903 rtx t0 = gen_reg_rtx (mode), t1;
6905 start_sequence ();
6907 /* If the result is used, get a register for it. */
6908 if (!unused_result)
6910 if (!target || !register_operand (target, mode))
6911 target = gen_reg_rtx (mode);
6912 /* If fetch_before, copy the value now. */
6913 if (!after)
6914 emit_move_insn (target, t0);
6916 else
6917 target = const0_rtx;
6919 t1 = t0;
6920 if (code == NOT)
6922 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6923 true, OPTAB_LIB_WIDEN);
6924 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6926 else
6927 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6928 OPTAB_LIB_WIDEN);
6930 /* For after, copy the value now. */
6931 if (!unused_result && after)
6932 emit_move_insn (target, t1);
6933 insn = get_insns ();
6934 end_sequence ();
6936 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6937 return target;
6940 return NULL_RTX;
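/* Example (illustrative): a target providing only atomic_fetch_add can
   still expand __atomic_fetch_sub (&x, n, model) through the negation
   path above, as __atomic_fetch_add (&x, -n, model).  */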
6943 /* Return true if OPERAND is suitable for operand number OPNO of
6944 instruction ICODE. */
6946 bool
6947 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6949 return (!insn_data[(int) icode].operand[opno].predicate
6950 || (insn_data[(int) icode].operand[opno].predicate
6951 (operand, insn_data[(int) icode].operand[opno].mode)));
6954 /* TARGET is a target of a multiword operation that we are going to
6955 implement as a series of word-mode operations. Return true if
6956 TARGET is suitable for this purpose. */
6958 bool
6959 valid_multiword_target_p (rtx target)
6961 machine_mode mode;
6962 int i;
6964 mode = GET_MODE (target);
6965 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6966 if (!validate_subreg (word_mode, mode, target, i))
6967 return false;
6968 return true;
6971 /* Like maybe_legitimize_operand, but do not change the code of the
6972 current rtx value. */
6974 static bool
6975 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6976 struct expand_operand *op)
6978 /* See if the operand matches in its current form. */
6979 if (insn_operand_matches (icode, opno, op->value))
6980 return true;
6982 /* If the operand is a memory whose address has no side effects,
6983 try forcing the address into a non-virtual pseudo register.
6984 The check for side effects is important because copy_to_mode_reg
6985 cannot handle things like auto-modified addresses. */
6986 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6988 rtx addr, mem;
6990 mem = op->value;
6991 addr = XEXP (mem, 0);
6992 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6993 && !side_effects_p (addr))
6995 rtx_insn *last;
6996 machine_mode mode;
6998 last = get_last_insn ();
6999 mode = get_address_mode (mem);
7000 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7001 if (insn_operand_matches (icode, opno, mem))
7003 op->value = mem;
7004 return true;
7006 delete_insns_since (last);
7010 return false;
7013 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7014 on success, storing the new operand value back in OP. */
7016 static bool
7017 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7018 struct expand_operand *op)
7020 machine_mode mode, imode;
7021 bool old_volatile_ok, result;
7023 mode = op->mode;
7024 switch (op->type)
7026 case EXPAND_FIXED:
7027 old_volatile_ok = volatile_ok;
7028 volatile_ok = true;
7029 result = maybe_legitimize_operand_same_code (icode, opno, op);
7030 volatile_ok = old_volatile_ok;
7031 return result;
7033 case EXPAND_OUTPUT:
7034 gcc_assert (mode != VOIDmode);
7035 if (op->value
7036 && op->value != const0_rtx
7037 && GET_MODE (op->value) == mode
7038 && maybe_legitimize_operand_same_code (icode, opno, op))
7039 return true;
7041 op->value = gen_reg_rtx (mode);
7042 op->target = 0;
7043 break;
7045 case EXPAND_INPUT:
7046 input:
7047 gcc_assert (mode != VOIDmode);
7048 gcc_assert (GET_MODE (op->value) == VOIDmode
7049 || GET_MODE (op->value) == mode);
7050 if (maybe_legitimize_operand_same_code (icode, opno, op))
7051 return true;
7053 op->value = copy_to_mode_reg (mode, op->value);
7054 break;
7056 case EXPAND_CONVERT_TO:
7057 gcc_assert (mode != VOIDmode);
7058 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7059 goto input;
7061 case EXPAND_CONVERT_FROM:
7062 if (GET_MODE (op->value) != VOIDmode)
7063 mode = GET_MODE (op->value);
7064 else
7065 /* The caller must tell us what mode this value has. */
7066 gcc_assert (mode != VOIDmode);
7068 imode = insn_data[(int) icode].operand[opno].mode;
7069 if (imode != VOIDmode && imode != mode)
7071 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7072 mode = imode;
7074 goto input;
7076 case EXPAND_ADDRESS:
7077 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7078 op->value);
7079 goto input;
7081 case EXPAND_INTEGER:
7082 mode = insn_data[(int) icode].operand[opno].mode;
7083 if (mode != VOIDmode && const_int_operand (op->value, mode))
7084 goto input;
7085 break;
7087 return insn_operand_matches (icode, opno, op->value);
7090 /* Make OP describe an input operand that should have the same value
7091 as VALUE, after any mode conversion that the target might request.
7092 TYPE is the type of VALUE. */
7094 void
7095 create_convert_operand_from_type (struct expand_operand *op,
7096 rtx value, tree type)
7098 create_convert_operand_from (op, value, TYPE_MODE (type),
7099 TYPE_UNSIGNED (type));
7102 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7103 of instruction ICODE. Return true on success, leaving the new operand
7104 values in the OPS themselves. Emit no code on failure. */
7106 bool
7107 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7108 unsigned int nops, struct expand_operand *ops)
7110 rtx_insn *last;
7111 unsigned int i;
7113 last = get_last_insn ();
7114 for (i = 0; i < nops; i++)
7115 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7117 delete_insns_since (last);
7118 return false;
7120 return true;
7123 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7124 as its operands. Return the instruction pattern on success,
7125 and emit any necessary set-up code. Return null and emit no
7126 code on failure. */
7128 rtx_insn *
7129 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7130 struct expand_operand *ops)
7132 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7133 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7134 return NULL;
7136 switch (nops)
7138 case 1:
7139 return GEN_FCN (icode) (ops[0].value);
7140 case 2:
7141 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7142 case 3:
7143 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7144 case 4:
7145 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7146 ops[3].value);
7147 case 5:
7148 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7149 ops[3].value, ops[4].value);
7150 case 6:
7151 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7152 ops[3].value, ops[4].value, ops[5].value);
7153 case 7:
7154 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7155 ops[3].value, ops[4].value, ops[5].value,
7156 ops[6].value);
7157 case 8:
7158 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7159 ops[3].value, ops[4].value, ops[5].value,
7160 ops[6].value, ops[7].value);
7161 case 9:
7162 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7163 ops[3].value, ops[4].value, ops[5].value,
7164 ops[6].value, ops[7].value, ops[8].value);
7166 gcc_unreachable ();
7169 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7170 as its operands. Return true on success and emit no code on failure. */
7172 bool
7173 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7174 struct expand_operand *ops)
7176 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7177 if (pat)
7179 emit_insn (pat);
7180 return true;
7182 return false;
7185 /* Like maybe_expand_insn, but for jumps. */
7187 bool
7188 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7189 struct expand_operand *ops)
7191 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7192 if (pat)
7194 emit_jump_insn (pat);
7195 return true;
7197 return false;
7200 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7201 as its operands. */
7203 void
7204 expand_insn (enum insn_code icode, unsigned int nops,
7205 struct expand_operand *ops)
7207 if (!maybe_expand_insn (icode, nops, ops))
7208 gcc_unreachable ();
7211 /* Like expand_insn, but for jumps. */
7213 void
7214 expand_jump_insn (enum insn_code icode, unsigned int nops,
7215 struct expand_operand *ops)
7217 if (!maybe_expand_jump_insn (icode, nops, ops))
7218 gcc_unreachable ();