[40/77] Use scalar_int_mode for extraction_insn fields
[official-gcc.git] / gcc / optabs.c
/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"
static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
                rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target),
                           copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
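
/* For example, when expanding a widening multiply to SImode where op0 is
   (const_int 3) and op1 is an HImode register: m0 is VOIDmode (integer
   constants carry no mode), so op1's HImode is returned as the from_mode,
   the narrower known mode being the only useful information available.  */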
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or
     paradoxical SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
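
/* The NO_EXTEND case is what makes widening cheap for bitwise operations:
   an AND computed in a wider mode with garbage in the high bits still has
   the correct low-order bits, which are all that survive when the result
   is truncated back to OLDMODE.  A right shift, by contrast, pulls the
   (undefined) high bits down into the result, so it must really extend.  */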
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the
      operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab
    = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
                                 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
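
/* For example, broadcasting (const_int 5) into V4SImode folds to the
   constant (const_vector:V4SI [5 5 5 5]) with no insns emitted, whereas
   broadcasting an SImode register goes through the target's vec_init
   pattern, if it has one, to fill all four lanes from the register.  */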
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
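
/* Concretely, for a 64-bit logical right shift by 40 on a 32-bit target:
   the low result word is the high input word shifted right by 8
   (SUPERWORD_OP1 = 40 - 32), and the high result word is simply zero.
   For the arithmetic variant the high result word is instead the high
   input word shifted right by 31, i.e. a word full of sign-bit copies.  */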
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
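
/* As a worked example, take a 64-bit left shift by OP1 (0 < OP1 < 32) on
   a 32-bit target.  Here OUTOF is the low word and INTO the high word:
       INTO_TARGET  = (INTO_INPUT << OP1) | (OUTOF_INPUT >> (32 - OP1))
       OUTOF_TARGET = OUTOF_INPUT << OP1.
   When word shift counts are truncated to 5 bits (shift_mask == 31),
   32 - OP1 is not a safe count, so the carry shift is performed as
   (OUTOF_INPUT >> 1) >> (OP1 ^ 31), which equals >> (32 - OP1) for every
   count 1 <= OP1 <= 31.  */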
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label,
                           profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
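
/* In the common SHIFT_COUNT_TRUNCATED case (shift_mask == BITS_PER_WORD - 1)
   the dispatch test is simply OP1 & BITS_PER_WORD: e.g. for a 64-bit shift
   on a 32-bit target, counts 0-31 take the subword path and counts 32-63
   take the superword path, with OP1 itself reused as SUPERWORD_OP1 because
   the word shifter masks it down to OP1 % 32 anyway.  */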
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                              _______________________
                             [__op0_high_|__op0_low__]
                              _______________________
        *                    [__op1_high_|__op1_low__]
       _______________________________________________
                              _______________________
    (1)                      [__op0_low__*__op1_low__]
                      _______________________
    (2a)             [__op0_low__*__op1_high_]
                      _______________________
    (2b)             [__op0_high_*__op1_low__]
              _______________________
    (3)      [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
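
/* A worked example of the decomposition above, using 4-bit words for
   brevity (so a doubleword holds 8 bits): 0x57 * 0x23 = 0xbe5.
   Here op0_low = 7, op0_high = 5, op1_low = 3, op1_high = 2:
       (1)  7 * 3 = 0x15   (unsigned widening; both words kept)
       (2b) 5 * 3 = 0xf    (non-widening; only the low word matters)
       (2a) 2 * 7 = 0xe    (likewise)
   adjust = (0xf + 0xe) & 0xf = 0xd; adding the high word of (1) gives
   0xd + 1 = 0xe, so the result is high word 0xe, low word 5, i.e. 0xe5,
   which is indeed the low byte of 0xbe5.  */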
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
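
/* For example, an immediate that does not fit the target's encodable range
   for this operation would cost extra instructions to synthesize at each
   use; forcing it into a register lets the expensive materialization happen
   once, keeping the operation itself to a single insn.  */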
/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different than the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}
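
/* Note the interplay with add_equal_note here: a multi-insn expansion only
   gets a REG_EQUAL note when the target is distinct from both inputs, so
   on failure the whole sequence is rolled back and re-expanded into a
   fresh pseudo, which guarantees the note can be attached on the retry.  */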
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  scalar_int_mode int_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, constant invalid op1 might be expanded from different
     mode than MODE.  As those are invalid, force them to a register
     to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
                                      widened_mode (mode, op0, op1), 1)
         != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && is_int_mode (mode, &int_mode))
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (int_mode);

      if (CONST_INT_P (op1))
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (int_mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
      && (convert_optab_handler ((unsignedp
                                  ? umul_widen_optab
                                  : smul_widen_optab),
                                 wider_mode, mode) != CODE_FOR_nothing))
    {
      temp = expand_binop (wider_mode,
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }
  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          machine_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
                  > GET_MODE_BITSIZE (inner_mode)))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }
  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    FOR_EACH_WIDER_MODE (wider_mode, mode)
      {
        machine_mode next_mode;
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 next_mode, mode, 0)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);
  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, int_mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, int_mode),
                                operand_subword_force (op1, i, int_mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }
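
  /* Bitwise operations have no carries between words, so e.g. a TImode AND
     on a 64-bit target simply becomes two independent DImode ANDs, one per
     subword; addition and subtraction need the carry-propagating loop
     further below instead.  */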
  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && is_int_mode (mode, &int_mode)
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      scalar_int_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (int_mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = (GET_MODE (op1) != VOIDmode
                  ? as_a <scalar_int_mode> (GET_MODE (op1))
                  : word_mode);

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (int_mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, int_mode);
          into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

          outof_input = operand_subword_force (op0, outof_word, int_mode);
          into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }
  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && is_int_mode (mode, &int_mode)
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is
         not a REG, first because having a register instead may open
         optimization opportunities, and second because if target and op0
         happen to be MEMs designating the same location, we would risk
         clobbering it too early in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (int_mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, int_mode);
      into_target = operand_subword (target, 1 - outof_word, 1, int_mode);

      outof_input = operand_subword_force (op0, outof_word, int_mode);
      into_input = operand_subword_force (op0, 1 - outof_word, int_mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1,
                                  into_temp2, into_target, unsignedp,
                                  next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }
  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && is_int_mode (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif
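
      /* The carry-out of each word operation is detected with an unsigned
         comparison: for addition, X = A + B wraps iff X < A (the LT test
         emitted below); for subtraction, X = A - B borrows iff X > A.  */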
      /* Prepare the operands.  */
      xop0 = force_reg (int_mode, op0);
      xop1 = force_reg (int_mode, op1);

      xtarget = gen_reg_rtx (int_mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
          rtx op0_piece = operand_subword_force (xop0, index, int_mode);
          rtx op1_piece = operand_subword_force (xop1, index, int_mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx_insn *temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                int_mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }
1685 /* Attempt to synthesize double word multiplies using a sequence of word
1686 mode multiplications. We first attempt to generate a sequence using a
1687 more efficient unsigned widening multiply, and if that fails we then
1688 try using a signed widening multiply. */
1690 if (binoptab == smul_optab
1691 && is_int_mode (mode, &int_mode)
1692 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1693 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1694 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1696 rtx product = NULL_RTX;
1697 if (widening_optab_handler (umul_widen_optab, int_mode, word_mode)
1698 != CODE_FOR_nothing)
1700 product = expand_doubleword_mult (int_mode, op0, op1, target,
1701 true, methods);
1702 if (!product)
1703 delete_insns_since (last);
1706 if (product == NULL_RTX
1707 && (widening_optab_handler (smul_widen_optab, int_mode, word_mode)
1708 != CODE_FOR_nothing))
1710 product = expand_doubleword_mult (int_mode, op0, op1, target,
1711 false, methods);
1712 if (!product)
1713 delete_insns_since (last);
1716 if (product != NULL_RTX)
1718 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
1720 rtx_insn *move = emit_move_insn (target ? target : product,
1721 product);
1722 set_dst_reg_note (move,
1723 REG_EQUAL,
1724 gen_rtx_fmt_ee (MULT, int_mode,
1725 copy_rtx (op0),
1726 copy_rtx (op1)),
1727 target ? target : product);
1729 return product;
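/* Illustration (not compiled): the shape of the sequence
   expand_doubleword_mult arranges, assuming 32-bit words and
   <stdint.h> types; the function name is hypothetical.  Only the low
   2*w bits of the product are wanted, so the ahi * bhi term drops
   out entirely.

     uint64_t
     doubleword_mult_sketch (uint64_t a, uint64_t b)
     {
       uint32_t alo = (uint32_t) a, ahi = (uint32_t) (a >> 32);
       uint32_t blo = (uint32_t) b, bhi = (uint32_t) (b >> 32);

       uint64_t p = (uint64_t) alo * blo;       // the widening multiply
       uint32_t cross = alo * bhi + ahi * blo;  // word-mode multiplies
       return p + ((uint64_t) cross << 32);
     }
   */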
1733 /* It can't be open-coded in this mode.
1734 Use a library call if one is available and caller says that's ok. */
1736 libfunc = optab_libfunc (binoptab, mode);
1737 if (libfunc
1738 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1740 rtx_insn *insns;
1741 rtx op1x = op1;
1742 machine_mode op1_mode = mode;
1743 rtx value;
1745 start_sequence ();
1747 if (shift_optab_p (binoptab))
1749 op1_mode = targetm.libgcc_shift_count_mode ();
1750 /* Specify unsigned here,
1751 since negative shift counts are meaningless. */
1752 op1x = convert_to_mode (op1_mode, op1, 1);
1755 if (GET_MODE (op0) != VOIDmode
1756 && GET_MODE (op0) != mode)
1757 op0 = convert_to_mode (mode, op0, unsignedp);
1759 /* Pass 1 for NO_QUEUE so we don't lose any increments
1760 if the libcall is cse'd or moved. */
1761 value = emit_library_call_value (libfunc,
1762 NULL_RTX, LCT_CONST, mode, 2,
1763 op0, mode, op1x, op1_mode);
1765 insns = get_insns ();
1766 end_sequence ();
1768 bool trapv = trapv_binoptab_p (binoptab);
1769 target = gen_reg_rtx (mode);
1770 emit_libcall_block_1 (insns, target, value,
1771 trapv ? NULL_RTX
1772 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1773 mode, op0, op1), trapv);
1775 return target;
1778 delete_insns_since (last);
1780 /* It can't be done in this mode. Can we do it in a wider mode? */
1782 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1783 || methods == OPTAB_MUST_WIDEN))
1785 /* Caller says, don't even try. */
1786 delete_insns_since (entry_last);
1787 return 0;
1790 /* Compute the value of METHODS to pass to recursive calls.
1791 Don't allow widening to be tried recursively. */
1793 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1795 /* Look for a wider mode of the same class for which it appears we can do
1796 the operation. */
1798 if (CLASS_HAS_WIDER_MODES_P (mclass))
1800 FOR_EACH_WIDER_MODE (wider_mode, mode)
1802 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
1803 != CODE_FOR_nothing
1804 || (methods == OPTAB_LIB
1805 && optab_libfunc (binoptab, wider_mode)))
1807 rtx xop0 = op0, xop1 = op1;
1808 int no_extend = 0;
1810 /* For certain integer operations, we need not actually extend
1811 the narrow operands, as long as we will truncate
1812 the results to the same narrowness. */
1814 if ((binoptab == ior_optab || binoptab == and_optab
1815 || binoptab == xor_optab
1816 || binoptab == add_optab || binoptab == sub_optab
1817 || binoptab == smul_optab || binoptab == ashl_optab)
1818 && mclass == MODE_INT)
1819 no_extend = 1;
1821 xop0 = widen_operand (xop0, wider_mode, mode,
1822 unsignedp, no_extend);
1824 /* The second operand of a shift must always be extended. */
1825 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1826 no_extend && binoptab != ashl_optab);
1828 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1829 unsignedp, methods);
1830 if (temp)
1832 if (mclass != MODE_INT
1833 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1835 if (target == 0)
1836 target = gen_reg_rtx (mode);
1837 convert_move (target, temp, 0);
1838 return target;
1840 else
1841 return gen_lowpart (mode, temp);
1843 else
1844 delete_insns_since (last);
1849 delete_insns_since (entry_last);
1850 return 0;
1853 /* Expand a binary operator which has both signed and unsigned forms.
1854 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1855 signed operations.
1857 If we widen unsigned operands, we may use a signed wider operation instead
1858 of an unsigned wider operation, since the result would be the same. */
1860 rtx
1861 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1862 rtx op0, rtx op1, rtx target, int unsignedp,
1863 enum optab_methods methods)
1865 rtx temp;
1866 optab direct_optab = unsignedp ? uoptab : soptab;
1867 bool save_enable;
1869 /* Do it without widening, if possible. */
1870 temp = expand_binop (mode, direct_optab, op0, op1, target,
1871 unsignedp, OPTAB_DIRECT);
1872 if (temp || methods == OPTAB_DIRECT)
1873 return temp;
1875 /* Try widening to a signed int. Disable any direct use of any
1876 signed insn in the current mode. */
1877 save_enable = swap_optab_enable (soptab, mode, false);
1879 temp = expand_binop (mode, soptab, op0, op1, target,
1880 unsignedp, OPTAB_WIDEN);
1882 /* For unsigned operands, try widening to an unsigned int. */
1883 if (!temp && unsignedp)
1884 temp = expand_binop (mode, uoptab, op0, op1, target,
1885 unsignedp, OPTAB_WIDEN);
1886 if (temp || methods == OPTAB_WIDEN)
1887 goto egress;
1889 /* Use the right width libcall if that exists. */
1890 temp = expand_binop (mode, direct_optab, op0, op1, target,
1891 unsignedp, OPTAB_LIB);
1892 if (temp || methods == OPTAB_LIB)
1893 goto egress;
1895 /* Must widen and use a libcall; use either signed or unsigned. */
1896 temp = expand_binop (mode, soptab, op0, op1, target,
1897 unsignedp, methods);
1898 if (!temp && unsignedp)
1899 temp = expand_binop (mode, uoptab, op0, op1, target,
1900 unsignedp, methods);
1902 egress:
1903 /* Undo the fiddling above. */
1904 if (save_enable)
1905 swap_optab_enable (soptab, mode, true);
1906 return temp;
1909 /* Generate code to perform an operation specified by UNOPPTAB
1910 on operand OP0, with two results to TARG0 and TARG1.
1911 We assume that the order of the operands for the instruction
1912 is TARG0, TARG1, OP0.
1914 Either TARG0 or TARG1 may be zero, but what that means is that
1915 the result is not actually wanted. We will generate it into
1916 a dummy pseudo-reg and discard it. They may not both be zero.
1918 Returns 1 if this operation can be performed; 0 if not. */
1920 int
1921 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1922 int unsignedp)
1924 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1925 enum mode_class mclass;
1926 machine_mode wider_mode;
1927 rtx_insn *entry_last = get_last_insn ();
1928 rtx_insn *last;
1930 mclass = GET_MODE_CLASS (mode);
1932 if (!targ0)
1933 targ0 = gen_reg_rtx (mode);
1934 if (!targ1)
1935 targ1 = gen_reg_rtx (mode);
1937 /* Record where to go back to if we fail. */
1938 last = get_last_insn ();
1940 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1942 struct expand_operand ops[3];
1943 enum insn_code icode = optab_handler (unoptab, mode);
1945 create_fixed_operand (&ops[0], targ0);
1946 create_fixed_operand (&ops[1], targ1);
1947 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1948 if (maybe_expand_insn (icode, 3, ops))
1949 return 1;
1952 /* It can't be done in this mode. Can we do it in a wider mode? */
1954 if (CLASS_HAS_WIDER_MODES_P (mclass))
1956 FOR_EACH_WIDER_MODE (wider_mode, mode)
1958 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1960 rtx t0 = gen_reg_rtx (wider_mode);
1961 rtx t1 = gen_reg_rtx (wider_mode);
1962 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1964 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1966 convert_move (targ0, t0, unsignedp);
1967 convert_move (targ1, t1, unsignedp);
1968 return 1;
1970 else
1971 delete_insns_since (last);
1976 delete_insns_since (entry_last);
1977 return 0;
1980 /* Generate code to perform an operation specified by BINOPTAB
1981 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1982 We assume that the order of the operands for the instruction
1983 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1984 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1986 Either TARG0 or TARG1 may be zero, but what that means is that
1987 the result is not actually wanted. We will generate it into
1988 a dummy pseudo-reg and discard it. They may not both be zero.
1990 Returns 1 if this operation can be performed; 0 if not. */
1992 int
1993 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1994 int unsignedp)
1996 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1997 enum mode_class mclass;
1998 machine_mode wider_mode;
1999 rtx_insn *entry_last = get_last_insn ();
2000 rtx_insn *last;
2002 mclass = GET_MODE_CLASS (mode);
2004 if (!targ0)
2005 targ0 = gen_reg_rtx (mode);
2006 if (!targ1)
2007 targ1 = gen_reg_rtx (mode);
2009 /* Record where to go back to if we fail. */
2010 last = get_last_insn ();
2012 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2014 struct expand_operand ops[4];
2015 enum insn_code icode = optab_handler (binoptab, mode);
2016 machine_mode mode0 = insn_data[icode].operand[1].mode;
2017 machine_mode mode1 = insn_data[icode].operand[2].mode;
2018 rtx xop0 = op0, xop1 = op1;
2020 /* If we are optimizing, force expensive constants into a register. */
2021 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2022 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2024 create_fixed_operand (&ops[0], targ0);
2025 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2026 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2027 create_fixed_operand (&ops[3], targ1);
2028 if (maybe_expand_insn (icode, 4, ops))
2029 return 1;
2030 delete_insns_since (last);
2033 /* It can't be done in this mode. Can we do it in a wider mode? */
2035 if (CLASS_HAS_WIDER_MODES_P (mclass))
2037 FOR_EACH_WIDER_MODE (wider_mode, mode)
2039 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2041 rtx t0 = gen_reg_rtx (wider_mode);
2042 rtx t1 = gen_reg_rtx (wider_mode);
2043 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2044 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2046 if (expand_twoval_binop (binoptab, cop0, cop1,
2047 t0, t1, unsignedp))
2049 convert_move (targ0, t0, unsignedp);
2050 convert_move (targ1, t1, unsignedp);
2051 return 1;
2053 else
2054 delete_insns_since (last);
2059 delete_insns_since (entry_last);
2060 return 0;
2063 /* Expand the two-valued library call indicated by BINOPTAB, but
2064 preserve only one of the values. If TARG0 is non-NULL, the first
2065 value is placed into TARG0; otherwise the second value is placed
2066 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2067 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2068 This routine assumes that the value returned by the library call is
2069 as if the return value was of an integral mode twice as wide as the
2070 mode of OP0. Returns 1 if the call was successful. */
2072 bool
2073 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2074 rtx targ0, rtx targ1, enum rtx_code code)
2076 machine_mode mode;
2077 machine_mode libval_mode;
2078 rtx libval;
2079 rtx_insn *insns;
2080 rtx libfunc;
2082 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2083 gcc_assert (!targ0 != !targ1);
2085 mode = GET_MODE (op0);
2086 libfunc = optab_libfunc (binoptab, mode);
2087 if (!libfunc)
2088 return false;
2090 /* The value returned by the library function will have twice as
2091 many bits as the nominal MODE. */
2092 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2093 start_sequence ();
2094 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2095 libval_mode, 2,
2096 op0, mode,
2097 op1, mode);
2098 /* Get the part of VAL containing the value that we want. */
2099 libval = simplify_gen_subreg (mode, libval, libval_mode,
2100 targ0 ? 0 : GET_MODE_SIZE (mode));
2101 insns = get_insns ();
2102 end_sequence ();
2103 /* Move the result into the desired location. */
2104 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2105 gen_rtx_fmt_ee (code, mode, op0, op1));
2107 return true;
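/* Illustration (not compiled): the return-value layout assumed above
   for, say, a divmod-style libcall on 32-bit operands.  The contract
   below is hypothetical; which half holds which value is a property
   of the particular libfunc, and the subreg offset (0 or
   GET_MODE_SIZE (mode)) selects the half that is wanted.

     uint64_t
     divmod_sketch (uint32_t num, uint32_t den)
     {
       // Both results packed into one value twice as wide as the mode.
       return ((uint64_t) (num % den) << 32) | (num / den);
     }
   */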
2111 /* Wrapper around expand_unop which takes an rtx code to specify
2112 the operation to perform, not an optab pointer. All other
2113 arguments are the same. */
2114 rtx
2115 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2116 rtx target, int unsignedp)
2118 optab unop = code_to_optab (code);
2119 gcc_assert (unop);
2121 return expand_unop (mode, unop, op0, target, unsignedp);
2124 /* Try calculating
2125 (clz:narrow x)
2126 as
2127 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2129 A similar operation can be used for clrsb. UNOPTAB says which operation
2130 we are trying to expand. */
2131 static rtx
2132 widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
2134 enum mode_class mclass = GET_MODE_CLASS (mode);
2135 if (CLASS_HAS_WIDER_MODES_P (mclass))
2137 machine_mode wider_mode;
2138 FOR_EACH_WIDER_MODE (wider_mode, mode)
2140 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2142 rtx xop0, temp;
2143 rtx_insn *last;
2145 last = get_last_insn ();
2147 if (target == 0)
2148 target = gen_reg_rtx (mode);
2149 xop0 = widen_operand (op0, wider_mode, mode,
2150 unoptab != clrsb_optab, false);
2151 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2152 unoptab != clrsb_optab);
2153 if (temp != 0)
2154 temp = expand_binop
2155 (wider_mode, sub_optab, temp,
2156 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2157 - GET_MODE_PRECISION (mode),
2158 wider_mode),
2159 target, true, OPTAB_DIRECT);
2160 if (temp == 0)
2161 delete_insns_since (last);
2163 return temp;
2167 return 0;
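/* Illustration (not compiled): the adjustment above on concrete
   types, assuming a 32-bit clz is used for a 16-bit operand and that
   __builtin_clz stands in for the wider-mode optab; the function
   name is hypothetical.  For clrsb the operand is sign- rather than
   zero-extended, which is why unsignedp is (unoptab != clrsb_optab)
   in the calls above.

     int
     clz16_sketch (uint16_t x)   // x != 0, like __builtin_clz
     {
       return __builtin_clz ((uint32_t) x) - (32 - 16);
     }
   */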
2170 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2171 quantities, choosing which based on whether the high word is nonzero. */
2172 static rtx
2173 expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
2175 rtx xop0 = force_reg (mode, op0);
2176 rtx subhi = gen_highpart (word_mode, xop0);
2177 rtx sublo = gen_lowpart (word_mode, xop0);
2178 rtx_code_label *hi0_label = gen_label_rtx ();
2179 rtx_code_label *after_label = gen_label_rtx ();
2180 rtx_insn *seq;
2181 rtx temp, result;
2183 /* If we were not given a target, use a word_mode register, not a
2184 'mode' register. The result will fit, and nobody is expecting
2185 anything bigger (the return type of __builtin_clz* is int). */
2186 if (!target)
2187 target = gen_reg_rtx (word_mode);
2189 /* In any case, write to a word_mode scratch in both branches of the
2190 conditional, so we can ensure there is a single move insn setting
2191 'target' to tag a REG_EQUAL note on. */
2192 result = gen_reg_rtx (word_mode);
2194 start_sequence ();
2196 /* If the high word is not equal to zero,
2197 then clz of the full value is clz of the high word. */
2198 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2199 word_mode, true, hi0_label);
2201 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2202 if (!temp)
2203 goto fail;
2205 if (temp != result)
2206 convert_move (result, temp, true);
2208 emit_jump_insn (targetm.gen_jump (after_label));
2209 emit_barrier ();
2211 /* Else clz of the full value is clz of the low word plus the number
2212 of bits in the high word. */
2213 emit_label (hi0_label);
2215 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2216 if (!temp)
2217 goto fail;
2218 temp = expand_binop (word_mode, add_optab, temp,
2219 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2220 result, true, OPTAB_DIRECT);
2221 if (!temp)
2222 goto fail;
2223 if (temp != result)
2224 convert_move (result, temp, true);
2226 emit_label (after_label);
2227 convert_move (target, result, true);
2229 seq = get_insns ();
2230 end_sequence ();
2232 add_equal_note (seq, target, CLZ, xop0, 0);
2233 emit_insn (seq);
2234 return target;
2236 fail:
2237 end_sequence ();
2238 return 0;
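/* Illustration (not compiled): the compare-and-branch sequence above
   in plain C, assuming 32-bit words; the function name is
   hypothetical.

     int
     clz64_sketch (uint64_t x)   // x != 0, like __builtin_clzll
     {
       uint32_t hi = (uint32_t) (x >> 32);
       if (hi != 0)
         return __builtin_clz (hi);                // clz of the high word
       return 32 + __builtin_clz ((uint32_t) x);   // low word + word size
     }
   */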
2241 /* Try calculating popcount of a double-word quantity as two popcount's of
2242 word-sized quantities and summing up the results. */
2243 static rtx
2244 expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
2246 rtx t0, t1, t;
2247 rtx_insn *seq;
2249 start_sequence ();
2251 t0 = expand_unop_direct (word_mode, popcount_optab,
2252 operand_subword_force (op0, 0, mode), NULL_RTX,
2253 true);
2254 t1 = expand_unop_direct (word_mode, popcount_optab,
2255 operand_subword_force (op0, 1, mode), NULL_RTX,
2256 true);
2257 if (!t0 || !t1)
2259 end_sequence ();
2260 return NULL_RTX;
2263 /* If we were not given a target, use a word_mode register, not a
2264 'mode' register. The result will fit, and nobody is expecting
2265 anything bigger (the return type of __builtin_popcount* is int). */
2266 if (!target)
2267 target = gen_reg_rtx (word_mode);
2269 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2271 seq = get_insns ();
2272 end_sequence ();
2274 add_equal_note (seq, t, POPCOUNT, op0, 0);
2275 emit_insn (seq);
2276 return t;
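/* Illustration (not compiled): the double-word popcount above,
   assuming 32-bit words; the function name is hypothetical.

     int
     popcount64_sketch (uint64_t x)
     {
       return __builtin_popcount ((uint32_t) x)
              + __builtin_popcount ((uint32_t) (x >> 32));
     }
   */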
2279 /* Try calculating
2280 (parity:wide x)
2281 as
2282 (parity:narrow (low (x) ^ high (x))) */
2283 static rtx
2284 expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
2286 rtx t = expand_binop (word_mode, xor_optab,
2287 operand_subword_force (op0, 0, mode),
2288 operand_subword_force (op0, 1, mode),
2289 NULL_RTX, 0, OPTAB_DIRECT);
2290 return expand_unop (word_mode, parity_optab, t, target, true);
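/* Illustration (not compiled): xor preserves parity (equal bits
   cancel pairwise), so the parity of a double word is the parity of
   the xor of its halves; the function name is hypothetical.

     int
     parity64_sketch (uint64_t x)
     {
       return __builtin_parity ((uint32_t) x ^ (uint32_t) (x >> 32));
     }
   */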
2293 /* Try calculating
2294 (bswap:narrow x)
2295 as
2296 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2297 static rtx
2298 widen_bswap (machine_mode mode, rtx op0, rtx target)
2300 enum mode_class mclass = GET_MODE_CLASS (mode);
2301 machine_mode wider_mode;
2302 rtx x;
2303 rtx_insn *last;
2305 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2306 return NULL_RTX;
2308 FOR_EACH_WIDER_MODE (wider_mode, mode)
2309 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2310 goto found;
2311 return NULL_RTX;
2313 found:
2314 last = get_last_insn ();
2316 x = widen_operand (op0, wider_mode, mode, true, true);
2317 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2319 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2320 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2321 if (x != 0)
2322 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2323 GET_MODE_BITSIZE (wider_mode)
2324 - GET_MODE_BITSIZE (mode),
2325 NULL_RTX, true);
2327 if (x != 0)
2329 if (target == 0)
2330 target = gen_reg_rtx (mode);
2331 emit_move_insn (target, gen_lowpart (mode, x));
2333 else
2334 delete_insns_since (last);
2336 return target;
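/* Illustration (not compiled): byte-swapping a 16-bit value with a
   32-bit bswap, as in the shift above; zero-extension parks the two
   interesting bytes at the bottom, the wide bswap moves them to the
   top, and the right shift brings them back down.  The function name
   is hypothetical.

     uint16_t
     bswap16_sketch (uint16_t x)
     {
       return (uint16_t) (__builtin_bswap32 ((uint32_t) x) >> (32 - 16));
     }
   */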
2339 /* Try calculating bswap as two bswaps of two word-sized operands. */
2341 static rtx
2342 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2344 rtx t0, t1;
2346 t1 = expand_unop (word_mode, bswap_optab,
2347 operand_subword_force (op, 0, mode), NULL_RTX, true);
2348 t0 = expand_unop (word_mode, bswap_optab,
2349 operand_subword_force (op, 1, mode), NULL_RTX, true);
2351 if (target == 0 || !valid_multiword_target_p (target))
2352 target = gen_reg_rtx (mode);
2353 if (REG_P (target))
2354 emit_clobber (target);
2355 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2356 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2358 return target;
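/* Illustration (not compiled): the net effect of the code above,
   assuming 32-bit words: each half is byte-swapped and the halves
   trade places.  The function name is hypothetical.

     uint64_t
     bswap64_sketch (uint64_t x)
     {
       uint32_t lo = (uint32_t) x, hi = (uint32_t) (x >> 32);
       return ((uint64_t) __builtin_bswap32 (lo) << 32)
              | __builtin_bswap32 (hi);
     }
   */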
2361 /* Try calculating (parity x) as (and (popcount x) 1), where
2362 popcount can also be done in a wider mode. */
2363 static rtx
2364 expand_parity (machine_mode mode, rtx op0, rtx target)
2366 enum mode_class mclass = GET_MODE_CLASS (mode);
2367 if (CLASS_HAS_WIDER_MODES_P (mclass))
2369 machine_mode wider_mode;
2370 FOR_EACH_MODE_FROM (wider_mode, mode)
2372 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2374 rtx xop0, temp;
2375 rtx_insn *last;
2377 last = get_last_insn ();
2379 if (target == 0 || GET_MODE (target) != wider_mode)
2380 target = gen_reg_rtx (wider_mode);
2382 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2383 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2384 true);
2385 if (temp != 0)
2386 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2387 target, true, OPTAB_DIRECT);
2389 if (temp)
2391 if (mclass != MODE_INT
2392 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2393 return convert_to_mode (mode, temp, 0);
2394 else
2395 return gen_lowpart (mode, temp);
2397 else
2398 delete_insns_since (last);
2402 return 0;
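/* Illustration (not compiled): parity as the low bit of popcount,
   which may itself be computed in a wider mode as above; the
   function name is hypothetical.

     int
     parity_sketch (uint32_t x)
     {
       return __builtin_popcount (x) & 1;
     }
   */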
2405 /* Try calculating ctz(x) as K - clz(x & -x),
2406 where K is GET_MODE_PRECISION(mode) - 1.
2408 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2409 don't have to worry about what the hardware does in that case. (If
2410 the clz instruction produces the usual value at 0, which is K, the
2411 result of this code sequence will be -1; expand_ffs, below, relies
2412 on this. It might be nice to have it be K instead, for consistency
2413 with the (very few) processors that provide a ctz with a defined
2414 value, but that would take one more instruction, and it would be
2415 less convenient for expand_ffs anyway.) */
2417 static rtx
2418 expand_ctz (machine_mode mode, rtx op0, rtx target)
2420 rtx_insn *seq;
2421 rtx temp;
2423 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2424 return 0;
2426 start_sequence ();
2428 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2429 if (temp)
2430 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2431 true, OPTAB_DIRECT);
2432 if (temp)
2433 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2434 if (temp)
2435 temp = expand_binop (mode, sub_optab,
2436 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2437 temp, target,
2438 true, OPTAB_DIRECT);
2439 if (temp == 0)
2441 end_sequence ();
2442 return 0;
2445 seq = get_insns ();
2446 end_sequence ();
2448 add_equal_note (seq, temp, CTZ, op0, 0);
2449 emit_insn (seq);
2450 return temp;
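/* Illustration (not compiled): the identity above on a 32-bit
   operand; the function name is hypothetical.  x & -x isolates the
   lowest set bit, clz locates it from the top, and K = 31 converts
   that to a count from the bottom.

     int
     ctz_sketch (uint32_t x)   // x != 0, like __builtin_ctz
     {
       return 31 - __builtin_clz (x & -x);
     }
   */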
2454 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2455 else with the sequence used by expand_clz.
2457 The ffs builtin promises to return zero for a zero value and ctz/clz
2458 may have an undefined value in that case. If they do not give us a
2459 convenient value, we have to generate a test and branch. */
2460 static rtx
2461 expand_ffs (machine_mode mode, rtx op0, rtx target)
2463 HOST_WIDE_INT val = 0;
2464 bool defined_at_zero = false;
2465 rtx temp;
2466 rtx_insn *seq;
2468 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2470 start_sequence ();
2472 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2473 if (!temp)
2474 goto fail;
2476 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2478 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2480 start_sequence ();
2481 temp = expand_ctz (mode, op0, 0);
2482 if (!temp)
2483 goto fail;
2485 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2487 defined_at_zero = true;
2488 val = (GET_MODE_PRECISION (mode) - 1) - val;
2491 else
2492 return 0;
2494 if (defined_at_zero && val == -1)
2495 /* No correction needed at zero. */;
2496 else
2498 /* We don't try to do anything clever with the situation found
2499 on some processors (e.g. Alpha) where ctz(0:mode) ==
2500 bitsize(mode). If someone can think of a way to send N to -1
2501 and leave alone all values in the range 0..N-1 (where N is a
2502 power of two), cheaper than this test-and-branch, please add it.
2504 The test-and-branch is done after the operation itself, in case
2505 the operation sets condition codes that can be recycled for this.
2506 (This is true on i386, for instance.) */
2508 rtx_code_label *nonzero_label = gen_label_rtx ();
2509 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2510 mode, true, nonzero_label);
2512 convert_move (temp, GEN_INT (-1), false);
2513 emit_label (nonzero_label);
2516 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2517 to produce a value in the range 0..bitsize. */
2518 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2519 target, false, OPTAB_DIRECT);
2520 if (!temp)
2521 goto fail;
2523 seq = get_insns ();
2524 end_sequence ();
2526 add_equal_note (seq, temp, FFS, op0, 0);
2527 emit_insn (seq);
2528 return temp;
2530 fail:
2531 end_sequence ();
2532 return 0;
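/* Illustration (not compiled): ffs from ctz with the zero fixup
   above; the function name is hypothetical.  When ctz at zero is not
   conveniently -1, the emitted compare-and-branch forces the
   pre-increment value to -1, so the final add maps -1..31 to the
   required 0..32.

     int
     ffs_sketch (uint32_t x)
     {
       int t = (x != 0) ? (int) __builtin_ctz (x) : -1;
       return t + 1;
     }
   */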
2535 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2536 conditions, VAL may already be a SUBREG against which we cannot generate
2537 a further SUBREG. In this case, we expect forcing the value into a
2538 register will work around the situation. */
2540 static rtx
2541 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2542 machine_mode imode)
2544 rtx ret;
2545 ret = lowpart_subreg (omode, val, imode);
2546 if (ret == NULL)
2548 val = force_reg (imode, val);
2549 ret = lowpart_subreg (omode, val, imode);
2550 gcc_assert (ret != NULL);
2552 return ret;
2555 /* Expand a floating point absolute value or negation operation via a
2556 logical operation on the sign bit. */
2558 static rtx
2559 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2560 rtx op0, rtx target)
2562 const struct real_format *fmt;
2563 int bitpos, word, nwords, i;
2564 machine_mode imode;
2565 rtx temp;
2566 rtx_insn *insns;
2568 /* The format has to have a simple sign bit. */
2569 fmt = REAL_MODE_FORMAT (mode);
2570 if (fmt == NULL)
2571 return NULL_RTX;
2573 bitpos = fmt->signbit_rw;
2574 if (bitpos < 0)
2575 return NULL_RTX;
2577 /* Don't create negative zeros if the format doesn't support them. */
2578 if (code == NEG && !fmt->has_signed_zero)
2579 return NULL_RTX;
2581 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2583 if (!int_mode_for_mode (mode).exists (&imode))
2584 return NULL_RTX;
2585 word = 0;
2586 nwords = 1;
2588 else
2590 imode = word_mode;
2592 if (FLOAT_WORDS_BIG_ENDIAN)
2593 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2594 else
2595 word = bitpos / BITS_PER_WORD;
2596 bitpos = bitpos % BITS_PER_WORD;
2597 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2600 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2601 if (code == ABS)
2602 mask = ~mask;
2604 if (target == 0
2605 || target == op0
2606 || (nwords > 1 && !valid_multiword_target_p (target)))
2607 target = gen_reg_rtx (mode);
2609 if (nwords > 1)
2611 start_sequence ();
2613 for (i = 0; i < nwords; ++i)
2615 rtx targ_piece = operand_subword (target, i, 1, mode);
2616 rtx op0_piece = operand_subword_force (op0, i, mode);
2618 if (i == word)
2620 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2621 op0_piece,
2622 immed_wide_int_const (mask, imode),
2623 targ_piece, 1, OPTAB_LIB_WIDEN);
2624 if (temp != targ_piece)
2625 emit_move_insn (targ_piece, temp);
2627 else
2628 emit_move_insn (targ_piece, op0_piece);
2631 insns = get_insns ();
2632 end_sequence ();
2634 emit_insn (insns);
2636 else
2638 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2639 gen_lowpart (imode, op0),
2640 immed_wide_int_const (mask, imode),
2641 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2642 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2644 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2645 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2646 target);
2649 return target;
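/* Illustration (not compiled): flipping or clearing the sign bit of
   a binary32 float through its integer image, assuming the IEEE
   layout with the sign in bit 31, <stdint.h>/<string.h>, and a
   hypothetical function name.

     float
     fneg_sketch (float x)
     {
       uint32_t bits;
       memcpy (&bits, &x, sizeof bits);  // gen_lowpart to imode
       bits ^= 0x80000000u;              // NEG: xor with the mask;
                                         // ABS would and with ~mask
       memcpy (&x, &bits, sizeof bits);
       return x;
     }
   */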
2652 /* As expand_unop, but will fail rather than attempt the operation in a
2653 different mode or with a libcall. */
2654 static rtx
2655 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2656 int unsignedp)
2658 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2660 struct expand_operand ops[2];
2661 enum insn_code icode = optab_handler (unoptab, mode);
2662 rtx_insn *last = get_last_insn ();
2663 rtx_insn *pat;
2665 create_output_operand (&ops[0], target, mode);
2666 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2667 pat = maybe_gen_insn (icode, 2, ops);
2668 if (pat)
2670 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2671 && ! add_equal_note (pat, ops[0].value,
2672 optab_to_code (unoptab),
2673 ops[1].value, NULL_RTX))
2675 delete_insns_since (last);
2676 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2679 emit_insn (pat);
2681 return ops[0].value;
2684 return 0;
2687 /* Generate code to perform an operation specified by UNOPTAB
2688 on operand OP0, with result having machine-mode MODE.
2690 UNSIGNEDP is for the case where we have to widen the operands
2691 to perform the operation. It says to use zero-extension.
2693 If TARGET is nonzero, the value
2694 is generated there, if it is convenient to do so.
2695 In all cases an rtx is returned for the locus of the value;
2696 this may or may not be TARGET. */
2698 rtx
2699 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2700 int unsignedp)
2702 enum mode_class mclass = GET_MODE_CLASS (mode);
2703 machine_mode wider_mode;
2704 scalar_int_mode int_mode;
2705 scalar_float_mode float_mode;
2706 rtx temp;
2707 rtx libfunc;
2709 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2710 if (temp)
2711 return temp;
2713 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2715 /* Widening (or narrowing) clz needs special treatment. */
2716 if (unoptab == clz_optab)
2718 temp = widen_leading (mode, op0, target, unoptab);
2719 if (temp)
2720 return temp;
2722 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2723 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2725 temp = expand_doubleword_clz (mode, op0, target);
2726 if (temp)
2727 return temp;
2730 goto try_libcall;
2733 if (unoptab == clrsb_optab)
2735 temp = widen_leading (mode, op0, target, unoptab);
2736 if (temp)
2737 return temp;
2738 goto try_libcall;
2741 if (unoptab == popcount_optab
2742 && is_a <scalar_int_mode> (mode, &int_mode)
2743 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2744 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2745 && optimize_insn_for_speed_p ())
2747 temp = expand_doubleword_popcount (int_mode, op0, target);
2748 if (temp)
2749 return temp;
2752 if (unoptab == parity_optab
2753 && is_a <scalar_int_mode> (mode, &int_mode)
2754 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2755 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2756 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2757 && optimize_insn_for_speed_p ())
2759 temp = expand_doubleword_parity (int_mode, op0, target);
2760 if (temp)
2761 return temp;
2764 /* Widening (or narrowing) bswap needs special treatment. */
2765 if (unoptab == bswap_optab)
2767 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2768 or ROTATERT. First try these directly; if this fails, then try the
2769 obvious pair of shifts with allowed widening, as this will probably
2770 always be more efficient than the other fallback methods. */
2771 if (mode == HImode)
2773 rtx_insn *last;
2774 rtx temp1, temp2;
2776 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2778 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2779 unsignedp, OPTAB_DIRECT);
2780 if (temp)
2781 return temp;
2784 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2786 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2787 unsignedp, OPTAB_DIRECT);
2788 if (temp)
2789 return temp;
2792 last = get_last_insn ();
2794 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2795 unsignedp, OPTAB_WIDEN);
2796 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2797 unsignedp, OPTAB_WIDEN);
2798 if (temp1 && temp2)
2800 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2801 unsignedp, OPTAB_WIDEN);
2802 if (temp)
2803 return temp;
2806 delete_insns_since (last);
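/* Illustration (not compiled): on a 16-bit value a byte swap, a
   rotate by 8 in either direction, and the shift pair above all
   compute the same thing; the function name is hypothetical.

     uint16_t
     bswap16_rot_sketch (uint16_t x)
     {
       return (uint16_t) ((x << 8) | (x >> 8));  // ashl | lshr
     }
   */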
2809 temp = widen_bswap (mode, op0, target);
2810 if (temp)
2811 return temp;
2813 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2814 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2816 temp = expand_doubleword_bswap (mode, op0, target);
2817 if (temp)
2818 return temp;
2821 goto try_libcall;
2824 if (CLASS_HAS_WIDER_MODES_P (mclass))
2825 FOR_EACH_WIDER_MODE (wider_mode, mode)
2827 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2829 rtx xop0 = op0;
2830 rtx_insn *last = get_last_insn ();
2832 /* For certain operations, we need not actually extend
2833 the narrow operand, as long as we will truncate the
2834 results to the same narrowness. */
2836 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2837 (unoptab == neg_optab
2838 || unoptab == one_cmpl_optab)
2839 && mclass == MODE_INT);
2841 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2842 unsignedp);
2844 if (temp)
2846 if (mclass != MODE_INT
2847 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2849 if (target == 0)
2850 target = gen_reg_rtx (mode);
2851 convert_move (target, temp, 0);
2852 return target;
2854 else
2855 return gen_lowpart (mode, temp);
2857 else
2858 delete_insns_since (last);
2862 /* These can be done a word at a time. */
2863 if (unoptab == one_cmpl_optab
2864 && is_int_mode (mode, &int_mode)
2865 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
2866 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2868 int i;
2869 rtx_insn *insns;
2871 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2872 target = gen_reg_rtx (int_mode);
2874 start_sequence ();
2876 /* Do the actual arithmetic. */
2877 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
2879 rtx target_piece = operand_subword (target, i, 1, int_mode);
2880 rtx x = expand_unop (word_mode, unoptab,
2881 operand_subword_force (op0, i, int_mode),
2882 target_piece, unsignedp);
2884 if (target_piece != x)
2885 emit_move_insn (target_piece, x);
2888 insns = get_insns ();
2889 end_sequence ();
2891 emit_insn (insns);
2892 return target;
2895 if (optab_to_code (unoptab) == NEG)
2897 /* Try negating floating point values by flipping the sign bit. */
2898 if (is_a <scalar_float_mode> (mode, &float_mode))
2900 temp = expand_absneg_bit (NEG, float_mode, op0, target);
2901 if (temp)
2902 return temp;
2905 /* If there is no negation pattern, and we have no negative zero,
2906 try subtracting from zero. */
2907 if (!HONOR_SIGNED_ZEROS (mode))
2909 temp = expand_binop (mode, (unoptab == negv_optab
2910 ? subv_optab : sub_optab),
2911 CONST0_RTX (mode), op0, target,
2912 unsignedp, OPTAB_DIRECT);
2913 if (temp)
2914 return temp;
2918 /* Try calculating parity (x) as popcount (x) % 2. */
2919 if (unoptab == parity_optab)
2921 temp = expand_parity (mode, op0, target);
2922 if (temp)
2923 return temp;
2926 /* Try implementing ffs (x) in terms of clz (x). */
2927 if (unoptab == ffs_optab)
2929 temp = expand_ffs (mode, op0, target);
2930 if (temp)
2931 return temp;
2934 /* Try implementing ctz (x) in terms of clz (x). */
2935 if (unoptab == ctz_optab)
2937 temp = expand_ctz (mode, op0, target);
2938 if (temp)
2939 return temp;
2942 try_libcall:
2943 /* Now try a library call in this mode. */
2944 libfunc = optab_libfunc (unoptab, mode);
2945 if (libfunc)
2947 rtx_insn *insns;
2948 rtx value;
2949 rtx eq_value;
2950 machine_mode outmode = mode;
2952 /* All of these functions return small values. Thus we choose to
2953 have them return something that isn't a double-word. */
2954 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2955 || unoptab == clrsb_optab || unoptab == popcount_optab
2956 || unoptab == parity_optab)
2957 outmode
2958 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2959 optab_libfunc (unoptab, mode)));
2961 start_sequence ();
2963 /* Pass 1 for NO_QUEUE so we don't lose any increments
2964 if the libcall is cse'd or moved. */
2965 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2966 1, op0, mode);
2967 insns = get_insns ();
2968 end_sequence ();
2970 target = gen_reg_rtx (outmode);
2971 bool trapv = trapv_unoptab_p (unoptab);
2972 if (trapv)
2973 eq_value = NULL_RTX;
2974 else
2976 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2977 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
2978 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2979 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
2980 eq_value = simplify_gen_unary (ZERO_EXTEND,
2981 outmode, eq_value, mode);
2983 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
2985 return target;
2988 /* It can't be done in this mode. Can we do it in a wider mode? */
2990 if (CLASS_HAS_WIDER_MODES_P (mclass))
2992 FOR_EACH_WIDER_MODE (wider_mode, mode)
2994 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
2995 || optab_libfunc (unoptab, wider_mode))
2997 rtx xop0 = op0;
2998 rtx_insn *last = get_last_insn ();
3000 /* For certain operations, we need not actually extend
3001 the narrow operand, as long as we will truncate the
3002 results to the same narrowness. */
3003 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3004 (unoptab == neg_optab
3005 || unoptab == one_cmpl_optab
3006 || unoptab == bswap_optab)
3007 && mclass == MODE_INT);
3009 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3010 unsignedp);
3012 /* If we are generating clz using wider mode, adjust the
3013 result. Similarly for clrsb. */
3014 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3015 && temp != 0)
3017 scalar_int_mode wider_int_mode
3018 = as_a <scalar_int_mode> (wider_mode);
3019 int_mode = as_a <scalar_int_mode> (mode);
3020 temp = expand_binop
3021 (wider_mode, sub_optab, temp,
3022 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3023 - GET_MODE_PRECISION (int_mode),
3024 wider_int_mode),
3025 target, true, OPTAB_DIRECT);
3028 /* Likewise for bswap. */
3029 if (unoptab == bswap_optab && temp != 0)
3031 scalar_int_mode wider_int_mode
3032 = as_a <scalar_int_mode> (wider_mode);
3033 int_mode = as_a <scalar_int_mode> (mode);
3034 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3035 == GET_MODE_BITSIZE (wider_int_mode)
3036 && GET_MODE_PRECISION (int_mode)
3037 == GET_MODE_BITSIZE (int_mode));
3039 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3040 GET_MODE_BITSIZE (wider_int_mode)
3041 - GET_MODE_BITSIZE (int_mode),
3042 NULL_RTX, true);
3045 if (temp)
3047 if (mclass != MODE_INT)
3049 if (target == 0)
3050 target = gen_reg_rtx (mode);
3051 convert_move (target, temp, 0);
3052 return target;
3054 else
3055 return gen_lowpart (mode, temp);
3057 else
3058 delete_insns_since (last);
3063 /* One final attempt at implementing negation via subtraction,
3064 this time allowing widening of the operand. */
3065 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3067 rtx temp;
3068 temp = expand_binop (mode,
3069 unoptab == negv_optab ? subv_optab : sub_optab,
3070 CONST0_RTX (mode), op0,
3071 target, unsignedp, OPTAB_LIB_WIDEN);
3072 if (temp)
3073 return temp;
3076 return 0;
3079 /* Emit code to compute the absolute value of OP0, with result to
3080 TARGET if convenient. (TARGET may be 0.) The return value says
3081 where the result actually is to be found.
3083 MODE is the mode of the operand; the mode of the result is
3084 different but can be deduced from MODE.
3088 rtx
3089 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3090 int result_unsignedp)
3092 rtx temp;
3094 if (GET_MODE_CLASS (mode) != MODE_INT
3095 || ! flag_trapv)
3096 result_unsignedp = 1;
3098 /* First try to do it with a special abs instruction. */
3099 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3100 op0, target, 0);
3101 if (temp != 0)
3102 return temp;
3104 /* For floating point modes, try clearing the sign bit. */
3105 scalar_float_mode float_mode;
3106 if (is_a <scalar_float_mode> (mode, &float_mode))
3108 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3109 if (temp)
3110 return temp;
3113 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3114 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3115 && !HONOR_SIGNED_ZEROS (mode))
3117 rtx_insn *last = get_last_insn ();
3119 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3120 op0, NULL_RTX, 0);
3121 if (temp != 0)
3122 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3123 OPTAB_WIDEN);
3125 if (temp != 0)
3126 return temp;
3128 delete_insns_since (last);
3131 /* If this machine has expensive jumps, we can do integer absolute
3132 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3133 where W is the width of MODE. */
3135 scalar_int_mode int_mode;
3136 if (is_int_mode (mode, &int_mode)
3137 && BRANCH_COST (optimize_insn_for_speed_p (),
3138 false) >= 2)
3140 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3141 GET_MODE_PRECISION (int_mode) - 1,
3142 NULL_RTX, 0);
3144 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3145 OPTAB_LIB_WIDEN);
3146 if (temp != 0)
3147 temp = expand_binop (int_mode,
3148 result_unsignedp ? sub_optab : subv_optab,
3149 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3151 if (temp != 0)
3152 return temp;
3155 return NULL_RTX;
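/* Illustration (not compiled): the branch-free absolute value above
   for a 32-bit int; the function name is hypothetical.  The
   arithmetic right shift replicates the sign bit, so s is 0 or -1;
   the xor conditionally complements x and the subtract supplies the
   +1 that completes the negation.

     int32_t
     abs_sketch (int32_t x)
     {
       int32_t s = x >> 31;   // 0 if x >= 0, -1 if x < 0
       return (x ^ s) - s;
     }
   */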
3158 rtx
3159 expand_abs (machine_mode mode, rtx op0, rtx target,
3160 int result_unsignedp, int safe)
3162 rtx temp;
3163 rtx_code_label *op1;
3165 if (GET_MODE_CLASS (mode) != MODE_INT
3166 || ! flag_trapv)
3167 result_unsignedp = 1;
3169 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3170 if (temp != 0)
3171 return temp;
3173 /* If that does not win, use conditional jump and negate. */
3175 /* It is safe to use the target if it is the same
3176 as the source, provided it is also a pseudo register. */
3177 if (op0 == target && REG_P (op0)
3178 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3179 safe = 1;
3181 op1 = gen_label_rtx ();
3182 if (target == 0 || ! safe
3183 || GET_MODE (target) != mode
3184 || (MEM_P (target) && MEM_VOLATILE_P (target))
3185 || (REG_P (target)
3186 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3187 target = gen_reg_rtx (mode);
3189 emit_move_insn (target, op0);
3190 NO_DEFER_POP;
3192 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3193 NULL_RTX, NULL, op1,
3194 profile_probability::uninitialized ());
3196 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3197 target, target, 0);
3198 if (op0 != target)
3199 emit_move_insn (target, op0);
3200 emit_label (op1);
3201 OK_DEFER_POP;
3202 return target;
3205 /* Emit code to compute the one's complement absolute value of OP0
3206 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3207 (TARGET may be NULL_RTX.) The return value says where the result
3208 actually is to be found.
3210 MODE is the mode of the operand; the mode of the result is
3211 different but can be deduced from MODE. */
3213 rtx
3214 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3216 rtx temp;
3218 /* Not applicable for floating point modes. */
3219 if (FLOAT_MODE_P (mode))
3220 return NULL_RTX;
3222 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3223 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3225 rtx_insn *last = get_last_insn ();
3227 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3228 if (temp != 0)
3229 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3230 OPTAB_WIDEN);
3232 if (temp != 0)
3233 return temp;
3235 delete_insns_since (last);
3238 /* If this machine has expensive jumps, we can do one's complement
3239 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3241 scalar_int_mode int_mode;
3242 if (is_int_mode (mode, &int_mode)
3243 && BRANCH_COST (optimize_insn_for_speed_p (),
3244 false) >= 2)
3246 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3247 GET_MODE_PRECISION (int_mode) - 1,
3248 NULL_RTX, 0);
3250 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3251 OPTAB_LIB_WIDEN);
3253 if (temp != 0)
3254 return temp;
3257 return NULL_RTX;
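/* Illustration (not compiled): the jump-free form above for 32-bit
   ints; the function name is hypothetical.  Negative inputs come out
   bitwise complemented, non-negative inputs pass through unchanged.

     int32_t
     one_cmpl_abs_sketch (int32_t x)
     {
       return x ^ (x >> 31);
     }
   */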
3260 /* A subroutine of expand_copysign, perform the copysign operation using the
3261 abs and neg primitives advertised to exist on the target. The assumption
3262 is that we have a split register file, and leaving op0 in fp registers,
3263 and not playing with subregs so much, will help the register allocator. */
3265 static rtx
3266 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3267 int bitpos, bool op0_is_abs)
3269 scalar_int_mode imode;
3270 enum insn_code icode;
3271 rtx sign;
3272 rtx_code_label *label;
3274 if (target == op1)
3275 target = NULL_RTX;
3277 /* Check if the back end provides an insn that handles signbit for the
3278 argument's mode. */
3279 icode = optab_handler (signbit_optab, mode);
3280 if (icode != CODE_FOR_nothing)
3282 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3283 sign = gen_reg_rtx (imode);
3284 emit_unop_insn (icode, sign, op1, UNKNOWN);
3286 else
3288 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3290 if (!int_mode_for_mode (mode).exists (&imode))
3291 return NULL_RTX;
3292 op1 = gen_lowpart (imode, op1);
3294 else
3296 int word;
3298 imode = word_mode;
3299 if (FLOAT_WORDS_BIG_ENDIAN)
3300 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3301 else
3302 word = bitpos / BITS_PER_WORD;
3303 bitpos = bitpos % BITS_PER_WORD;
3304 op1 = operand_subword_force (op1, word, mode);
3307 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3308 sign = expand_binop (imode, and_optab, op1,
3309 immed_wide_int_const (mask, imode),
3310 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3313 if (!op0_is_abs)
3315 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3316 if (op0 == NULL)
3317 return NULL_RTX;
3318 target = op0;
3320 else
3322 if (target == NULL_RTX)
3323 target = copy_to_reg (op0);
3324 else
3325 emit_move_insn (target, op0);
3328 label = gen_label_rtx ();
3329 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3331 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3332 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3333 else
3334 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3335 if (op0 != target)
3336 emit_move_insn (target, op0);
3338 emit_label (label);
3340 return target;
3344 /* A subroutine of expand_copysign, perform the entire copysign operation
3345 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3346 is true if op0 is known to have its sign bit clear. */
3348 static rtx
3349 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3350 int bitpos, bool op0_is_abs)
3352 scalar_int_mode imode;
3353 int word, nwords, i;
3354 rtx temp;
3355 rtx_insn *insns;
3357 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3359 if (!int_mode_for_mode (mode).exists (&imode))
3360 return NULL_RTX;
3361 word = 0;
3362 nwords = 1;
3364 else
3366 imode = word_mode;
3368 if (FLOAT_WORDS_BIG_ENDIAN)
3369 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3370 else
3371 word = bitpos / BITS_PER_WORD;
3372 bitpos = bitpos % BITS_PER_WORD;
3373 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3376 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3378 if (target == 0
3379 || target == op0
3380 || target == op1
3381 || (nwords > 1 && !valid_multiword_target_p (target)))
3382 target = gen_reg_rtx (mode);
3384 if (nwords > 1)
3386 start_sequence ();
3388 for (i = 0; i < nwords; ++i)
3390 rtx targ_piece = operand_subword (target, i, 1, mode);
3391 rtx op0_piece = operand_subword_force (op0, i, mode);
3393 if (i == word)
3395 if (!op0_is_abs)
3396 op0_piece
3397 = expand_binop (imode, and_optab, op0_piece,
3398 immed_wide_int_const (~mask, imode),
3399 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3400 op1 = expand_binop (imode, and_optab,
3401 operand_subword_force (op1, i, mode),
3402 immed_wide_int_const (mask, imode),
3403 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3405 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3406 targ_piece, 1, OPTAB_LIB_WIDEN);
3407 if (temp != targ_piece)
3408 emit_move_insn (targ_piece, temp);
3410 else
3411 emit_move_insn (targ_piece, op0_piece);
3414 insns = get_insns ();
3415 end_sequence ();
3417 emit_insn (insns);
3419 else
3421 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3422 immed_wide_int_const (mask, imode),
3423 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3425 op0 = gen_lowpart (imode, op0);
3426 if (!op0_is_abs)
3427 op0 = expand_binop (imode, and_optab, op0,
3428 immed_wide_int_const (~mask, imode),
3429 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3431 temp = expand_binop (imode, ior_optab, op0, op1,
3432 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3433 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3436 return target;
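/* Illustration (not compiled): copysign on binary32 through integer
   masks, assuming the sign in bit 31, <stdint.h>/<string.h>, and a
   hypothetical function name.

     float
     copysignf_sketch (float x, float y)
     {
       uint32_t xb, yb;
       memcpy (&xb, &x, sizeof xb);
       memcpy (&yb, &y, sizeof yb);
       xb &= 0x7fffffffu;        // clear x's sign (skipped if op0_is_abs)
       xb |= yb & 0x80000000u;   // or in y's sign
       memcpy (&x, &xb, sizeof xb);
       return x;
     }
   */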
3439 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3440 scalar floating point mode. Return NULL if we do not know how to
3441 expand the operation inline. */
3443 rtx
3444 expand_copysign (rtx op0, rtx op1, rtx target)
3446 scalar_float_mode mode;
3447 const struct real_format *fmt;
3448 bool op0_is_abs;
3449 rtx temp;
3451 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3452 gcc_assert (GET_MODE (op1) == mode);
3454 /* First try to do it with a special instruction. */
3455 temp = expand_binop (mode, copysign_optab, op0, op1,
3456 target, 0, OPTAB_DIRECT);
3457 if (temp)
3458 return temp;
3460 fmt = REAL_MODE_FORMAT (mode);
3461 if (fmt == NULL || !fmt->has_signed_zero)
3462 return NULL_RTX;
3464 op0_is_abs = false;
3465 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3467 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3468 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3469 op0_is_abs = true;
3472 if (fmt->signbit_ro >= 0
3473 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3474 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3475 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3477 temp = expand_copysign_absneg (mode, op0, op1, target,
3478 fmt->signbit_ro, op0_is_abs);
3479 if (temp)
3480 return temp;
3483 if (fmt->signbit_rw < 0)
3484 return NULL_RTX;
3485 return expand_copysign_bit (mode, op0, op1, target,
3486 fmt->signbit_rw, op0_is_abs);
3489 /* Generate an instruction whose insn-code is INSN_CODE,
3490 with two operands: an output TARGET and an input OP0.
3491 TARGET *must* be nonzero, and the output is always stored there.
3492 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3493 the value that is stored into TARGET.
3495 Return false if expansion failed. */
3497 bool
3498 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3499 enum rtx_code code)
3501 struct expand_operand ops[2];
3502 rtx_insn *pat;
3504 create_output_operand (&ops[0], target, GET_MODE (target));
3505 create_input_operand (&ops[1], op0, GET_MODE (op0));
3506 pat = maybe_gen_insn (icode, 2, ops);
3507 if (!pat)
3508 return false;
3510 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3511 && code != UNKNOWN)
3512 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3514 emit_insn (pat);
3516 if (ops[0].value != target)
3517 emit_move_insn (target, ops[0].value);
3518 return true;
3520 /* Generate an instruction whose insn-code is INSN_CODE,
3521 with two operands: an output TARGET and an input OP0.
3522 TARGET *must* be nonzero, and the output is always stored there.
3523 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3524 the value that is stored into TARGET. */
3526 void
3527 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3529 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3530 gcc_assert (ok);
3533 struct no_conflict_data
3535 rtx target;
3536 rtx_insn *first, *insn;
3537 bool must_stay;
3540 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3541 the currently examined clobber / store has to stay in the list of
3542 insns that constitute the actual libcall block. */
3543 static void
3544 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3546 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3548 /* If this insn directly contributes to setting the target, it must stay. */
3549 if (reg_overlap_mentioned_p (p->target, dest))
3550 p->must_stay = true;
3551 /* If we haven't committed to keeping any other insns in the list yet,
3552 there is nothing more to check. */
3553 else if (p->insn == p->first)
3554 return;
3555 /* If this insn sets / clobbers a register that feeds one of the insns
3556 already in the list, this insn has to stay too. */
3557 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3558 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3559 || reg_used_between_p (dest, p->first, p->insn)
3560 /* Likewise if this insn depends on a register set by a previous
3561 insn in the list, or if it sets a result (presumably a hard
3562 register) that is set or clobbered by a previous insn.
3563 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3564 SET_DEST perform the former check on the address, and the latter
3565 check on the MEM. */
3566 || (GET_CODE (set) == SET
3567 && (modified_in_p (SET_SRC (set), p->first)
3568 || modified_in_p (SET_DEST (set), p->first)
3569 || modified_between_p (SET_SRC (set), p->first, p->insn)
3570 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3571 p->must_stay = true;
3575 /* Emit code to make a call to a constant function or a library call.
3577 INSNS is a list containing all insns emitted in the call.
3578 These insns leave the result in RESULT. Our job is to copy RESULT
3579 to TARGET, which is logically equivalent to EQUIV.
3581 We first emit any insns that set a pseudo on the assumption that these are
3582 loading constants into registers; doing so allows them to be safely cse'ed
3583 between blocks. Then we emit all the other insns in the block, followed by
3584 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3585 note with an operand of EQUIV. */
3587 static void
3588 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3589 bool equiv_may_trap)
3591 rtx final_dest = target;
3592 rtx_insn *next, *last, *insn;
3594 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3595 into a MEM later. Protect the libcall block from this change. */
3596 if (! REG_P (target) || REG_USERVAR_P (target))
3597 target = gen_reg_rtx (GET_MODE (target));
3599 /* If we're using non-call exceptions, a libcall corresponding to an
3600 operation that may trap may also trap. */
3601 /* ??? See the comment in front of make_reg_eh_region_note. */
3602 if (cfun->can_throw_non_call_exceptions
3603 && (equiv_may_trap || may_trap_p (equiv)))
3605 for (insn = insns; insn; insn = NEXT_INSN (insn))
3606 if (CALL_P (insn))
3608 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3609 if (note)
3611 int lp_nr = INTVAL (XEXP (note, 0));
3612 if (lp_nr == 0 || lp_nr == INT_MIN)
3613 remove_note (insn, note);
3617 else
3619 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3620 reg note to indicate that this call cannot throw or execute a nonlocal
3621 goto (unless there is already a REG_EH_REGION note, in which case
3622 we update it). */
3623 for (insn = insns; insn; insn = NEXT_INSN (insn))
3624 if (CALL_P (insn))
3625 make_reg_eh_region_note_nothrow_nononlocal (insn);
3628 /* First emit all insns that set pseudos. Remove them from the list as
3629 we go. Avoid insns that set pseudos which were referenced in previous
3630 insns. These can be generated by move_by_pieces, for example,
3631 to update an address. Similarly, avoid insns that reference things
3632 set in previous insns. */
3634 for (insn = insns; insn; insn = next)
3636 rtx set = single_set (insn);
3638 next = NEXT_INSN (insn);
3640 if (set != 0 && REG_P (SET_DEST (set))
3641 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3643 struct no_conflict_data data;
3645 data.target = const0_rtx;
3646 data.first = insns;
3647 data.insn = insn;
3648 data.must_stay = 0;
3649 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3650 if (! data.must_stay)
3652 if (PREV_INSN (insn))
3653 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3654 else
3655 insns = next;
3657 if (next)
3658 SET_PREV_INSN (next) = PREV_INSN (insn);
3660 add_insn (insn);
3664 /* Some ports use a loop to copy large arguments onto the stack.
3665 Don't move anything outside such a loop. */
3666 if (LABEL_P (insn))
3667 break;
3670 /* Write the remaining insns followed by the final copy. */
3671 for (insn = insns; insn; insn = next)
3673 next = NEXT_INSN (insn);
3675 add_insn (insn);
3678 last = emit_move_insn (target, result);
3679 if (equiv)
3680 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3682 if (final_dest != target)
3683 emit_move_insn (final_dest, target);
3686 void
3687 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3689 emit_libcall_block_1 (insns, target, result, equiv, false);
3692 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3693 PURPOSE describes how this comparison will be used. CODE is the rtx
3694 comparison code we will be using.
3696 ??? Actually, CODE is slightly weaker than that. A target is still
3697 required to implement all of the normal bcc operations, but not
3698 required to implement all (or any) of the unordered bcc operations. */
3700 int
3701 can_compare_p (enum rtx_code code, machine_mode mode,
3702 enum can_compare_purpose purpose)
3704 rtx test;
3705 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3708 enum insn_code icode;
3710 if (purpose == ccp_jump
3711 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3712 && insn_operand_matches (icode, 0, test))
3713 return 1;
3714 if (purpose == ccp_store_flag
3715 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3716 && insn_operand_matches (icode, 1, test))
3717 return 1;
3718 if (purpose == ccp_cmov
3719 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3720 return 1;
3722 mode = GET_MODE_WIDER_MODE (mode).else_void ();
3723 PUT_MODE (test, mode);
3725 while (mode != VOIDmode);
3727 return 0;
3730 /* This function is called when we are going to emit a compare instruction that
3731 compares the values found in X and Y, using the rtl operator COMPARISON.
3733 If they have mode BLKmode, then SIZE specifies the size of both operands.
3735 UNSIGNEDP nonzero says that the operands are unsigned;
3736 this matters if they need to be widened (as given by METHODS).
3738 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3739 if we failed to produce one.
3741 *PMODE is the mode of the inputs (in case they are const_int).
3743 This function performs all the setup necessary so that the caller only has
3744 to emit a single comparison insn. This setup can involve doing a BLKmode
3745 comparison or emitting a library call to perform the comparison if no insn
3746 is available to handle it.
3747 The values which are passed in through pointers can be modified; the caller
3748 should perform the comparison on the modified values. Constant
3749 comparisons must have already been folded. */
3751 static void
3752 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3753 int unsignedp, enum optab_methods methods,
3754 rtx *ptest, machine_mode *pmode)
3756 machine_mode mode = *pmode;
3757 rtx libfunc, test;
3758 machine_mode cmp_mode;
3759 enum mode_class mclass;
3761 /* The other methods are not needed. */
3762 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3763 || methods == OPTAB_LIB_WIDEN);
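/* Overall strategy: BLKmode and MODE_CC comparisons are handled specially
   below; otherwise we search for a cbranch pattern in MODE or any wider
   mode, and finally (for OPTAB_LIB_WIDEN only) fall back to a comparison
   libcall whose result is itself compared against a constant. */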
3765 /* If we are optimizing, force expensive constants into a register. */
3766 if (CONSTANT_P (x) && optimize
3767 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3768 > COSTS_N_INSNS (1)))
3769 x = force_reg (mode, x);
3771 if (CONSTANT_P (y) && optimize
3772 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3773 > COSTS_N_INSNS (1)))
3774 y = force_reg (mode, y);
3776 #if HAVE_cc0
3777 /* Make sure we have a canonical comparison. The RTL
3778 documentation states that canonical comparisons are required only
3779 for targets which have cc0. */
3780 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3781 #endif
3783 /* Don't let both operands fail to indicate the mode. */
3784 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3785 x = force_reg (mode, x);
3786 if (mode == VOIDmode)
3787 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3789 /* Handle all BLKmode compares. */
3791 if (mode == BLKmode)
3793 machine_mode result_mode;
3794 enum insn_code cmp_code;
3795 rtx result;
3796 rtx opalign
3797 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3799 gcc_assert (size);
3801 /* Try to use a memory block compare insn - any of cmpmem,
3802 cmpstr or cmpstrn will do. */
3803 FOR_EACH_MODE_IN_CLASS (cmp_mode, MODE_INT)
3805 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3806 if (cmp_code == CODE_FOR_nothing)
3807 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3808 if (cmp_code == CODE_FOR_nothing)
3809 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3810 if (cmp_code == CODE_FOR_nothing)
3811 continue;
3813 /* Must make sure the size fits the insn's mode. */
3814 if (CONST_INT_P (size)
3815 ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
3816 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
3817 > GET_MODE_BITSIZE (cmp_mode)))
3818 continue;
3820 result_mode = insn_data[cmp_code].operand[0].mode;
3821 result = gen_reg_rtx (result_mode);
3822 size = convert_to_mode (cmp_mode, size, 1);
3823 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3825 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3826 *pmode = result_mode;
3827 return;
3830 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3831 goto fail;
3833 /* Otherwise call a library function. */
3834 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3836 x = result;
3837 y = const0_rtx;
3838 mode = TYPE_MODE (integer_type_node);
3839 methods = OPTAB_LIB_WIDEN;
3840 unsignedp = false;
3843 /* Don't allow operands to the compare to trap, as that can put the
3844 compare and branch in different basic blocks. */
3845 if (cfun->can_throw_non_call_exceptions)
3847 if (may_trap_p (x))
3848 x = copy_to_reg (x);
3849 if (may_trap_p (y))
3850 y = copy_to_reg (y);
3853 if (GET_MODE_CLASS (mode) == MODE_CC)
3855 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3856 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3857 gcc_assert (icode != CODE_FOR_nothing
3858 && insn_operand_matches (icode, 0, test));
3859 *ptest = test;
3860 return;
3863 mclass = GET_MODE_CLASS (mode);
3864 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3865 FOR_EACH_MODE_FROM (cmp_mode, mode)
3867 enum insn_code icode;
3868 icode = optab_handler (cbranch_optab, cmp_mode);
3869 if (icode != CODE_FOR_nothing
3870 && insn_operand_matches (icode, 0, test))
3872 rtx_insn *last = get_last_insn ();
3873 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3874 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3875 if (op0 && op1
3876 && insn_operand_matches (icode, 1, op0)
3877 && insn_operand_matches (icode, 2, op1))
3879 XEXP (test, 0) = op0;
3880 XEXP (test, 1) = op1;
3881 *ptest = test;
3882 *pmode = cmp_mode;
3883 return;
3885 delete_insns_since (last);
3888 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3889 break;
3892 if (methods != OPTAB_LIB_WIDEN)
3893 goto fail;
3895 if (!SCALAR_FLOAT_MODE_P (mode))
3897 rtx result;
3898 machine_mode ret_mode;
3900 /* Handle a libcall just for the mode we are using. */
3901 libfunc = optab_libfunc (cmp_optab, mode);
3902 gcc_assert (libfunc);
3904 /* If we want unsigned, and this mode has a distinct unsigned
3905 comparison routine, use that. */
3906 if (unsignedp)
3908 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3909 if (ulibfunc)
3910 libfunc = ulibfunc;
3913 ret_mode = targetm.libgcc_cmp_return_mode ();
3914 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3915 ret_mode, 2, x, mode, y, mode);
3917 /* There are two kinds of comparison routines. Biased routines
3918 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3919 of gcc expect that the comparison operation is equivalent
3920 to the modified comparison. For signed comparisons compare the
3921 result against 1 in the biased case, and zero in the unbiased
3922 case. For unsigned comparisons always compare against 1 after
3923 biasing the unbiased result by adding 1. This gives us a way to
3924 represent LTU.
3925 The comparisons in the fixed-point helper library are always
3926 biased. */
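/* Concretely: a biased routine returns 0, 1 or 2 for less, equal and
   greater, so e.g. LT turns into result < 1.  An unbiased signed routine
   returns -1, 0 or 1, so LT turns into result < 0.  For LTU the unbiased
   result is first biased by adding 1 and then compared against 1. */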
3927 x = result;
3928 y = const1_rtx;
3930 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3932 if (unsignedp)
3933 x = plus_constant (ret_mode, result, 1);
3934 else
3935 y = const0_rtx;
3938 *pmode = ret_mode;
3939 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3940 ptest, pmode);
3942 else
3943 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3945 return;
3947 fail:
3948 *ptest = NULL_RTX;
3951 /* Before emitting an insn with code ICODE, make sure that X, which is going
3952 to be used for operand OPNUM of the insn, is converted from mode MODE to
3953 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3954 that it is accepted by the operand predicate. Return the new value, or NULL_RTX on failure. */
3957 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3958 machine_mode wider_mode, int unsignedp)
3960 if (mode != wider_mode)
3961 x = convert_modes (wider_mode, mode, x, unsignedp);
3963 if (!insn_operand_matches (icode, opnum, x))
3965 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3966 if (reload_completed)
3967 return NULL_RTX;
3968 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3969 return NULL_RTX;
3970 x = copy_to_mode_reg (op_mode, x);
3973 return x;
3976 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3977 we can do the branch. */
3979 static void
3980 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
3981 profile_probability prob)
3983 machine_mode optab_mode;
3984 enum mode_class mclass;
3985 enum insn_code icode;
3986 rtx_insn *insn;
3988 mclass = GET_MODE_CLASS (mode);
3989 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
3990 icode = optab_handler (cbranch_optab, optab_mode);
3992 gcc_assert (icode != CODE_FOR_nothing);
3993 gcc_assert (insn_operand_matches (icode, 0, test));
3994 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
3995 XEXP (test, 1), label));
3996 if (prob.initialized_p ()
3997 && profile_status_for_fn (cfun) != PROFILE_ABSENT
3998 && insn
3999 && JUMP_P (insn)
4000 && any_condjump_p (insn)
4001 && !find_reg_note (insn, REG_BR_PROB, 0))
4002 add_reg_br_prob_note (insn, prob);
4005 /* Generate code to compare X with Y so that the condition codes are
4006 set and to jump to LABEL if the condition is true. If X is a
4007 constant and Y is not a constant, then the comparison is swapped to
4008 ensure that the comparison RTL has the canonical form.
4010 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4011 need to be widened. UNSIGNEDP is also used to select the proper
4012 branch condition code.
4014 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4016 MODE is the mode of the inputs (in case they are const_int).
4018 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4019 It will be potentially converted into an unsigned variant based on
4020 UNSIGNEDP to select a proper jump instruction.
4022 PROB is the probability of jumping to LABEL. */
4024 void
4025 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4026 machine_mode mode, int unsignedp, rtx label,
4027 profile_probability prob)
4029 rtx op0 = x, op1 = y;
4030 rtx test;
4032 /* Swap operands and condition to ensure canonical RTL. */
4033 if (swap_commutative_operands_p (x, y)
4034 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4036 op0 = y, op1 = x;
4037 comparison = swap_condition (comparison);
4040 /* If OP0 is still a constant, then both X and Y must be constants
4041 or the opposite comparison is not supported. Force X into a register
4042 to create canonical RTL. */
4043 if (CONSTANT_P (op0))
4044 op0 = force_reg (mode, op0);
4046 if (unsignedp)
4047 comparison = unsigned_condition (comparison);
4049 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4050 &test, &mode);
4051 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4055 /* Emit a library call comparison between floating point X and Y.
4056 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4058 static void
4059 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4060 rtx *ptest, machine_mode *pmode)
4062 enum rtx_code swapped = swap_condition (comparison);
4063 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4064 machine_mode orig_mode = GET_MODE (x);
4065 machine_mode mode, cmp_mode;
4066 rtx true_rtx, false_rtx;
4067 rtx value, target, equiv;
4068 rtx_insn *insns;
4069 rtx libfunc = 0;
4070 bool reversed_p = false;
4071 cmp_mode = targetm.libgcc_cmp_return_mode ();
4073 FOR_EACH_MODE_FROM (mode, orig_mode)
4075 if (code_to_optab (comparison)
4076 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4077 break;
4079 if (code_to_optab (swapped)
4080 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4082 std::swap (x, y);
4083 comparison = swapped;
4084 break;
4087 if (code_to_optab (reversed)
4088 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4090 comparison = reversed;
4091 reversed_p = true;
4092 break;
4096 gcc_assert (mode != VOIDmode);
4098 if (mode != orig_mode)
4100 x = convert_to_mode (mode, x, 0);
4101 y = convert_to_mode (mode, y, 0);
4104 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4105 the RTL. This allows the RTL optimizers to delete the libcall if the
4106 condition can be determined at compile-time. */
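/* TRUE_RTX and FALSE_RTX below are representative values the comparison
   libfunc returns when COMPARISON does and does not hold; e.g. the LT
   routine yields a negative value (-1 here) when X < Y and zero when
   it does not. */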
4107 if (comparison == UNORDERED
4108 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4110 true_rtx = const_true_rtx;
4111 false_rtx = const0_rtx;
4113 else
4115 switch (comparison)
4117 case EQ:
4118 true_rtx = const0_rtx;
4119 false_rtx = const_true_rtx;
4120 break;
4122 case NE:
4123 true_rtx = const_true_rtx;
4124 false_rtx = const0_rtx;
4125 break;
4127 case GT:
4128 true_rtx = const1_rtx;
4129 false_rtx = const0_rtx;
4130 break;
4132 case GE:
4133 true_rtx = const0_rtx;
4134 false_rtx = constm1_rtx;
4135 break;
4137 case LT:
4138 true_rtx = constm1_rtx;
4139 false_rtx = const0_rtx;
4140 break;
4142 case LE:
4143 true_rtx = const0_rtx;
4144 false_rtx = const1_rtx;
4145 break;
4147 default:
4148 gcc_unreachable ();
4152 if (comparison == UNORDERED)
4154 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4155 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4156 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4157 temp, const_true_rtx, equiv);
4159 else
4161 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4162 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4163 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4164 equiv, true_rtx, false_rtx);
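/* So for LT, for instance, the note describes the libcall result as
   (if_then_else (lt x y) (const_int -1) (const_int 0)). */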
4167 start_sequence ();
4168 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4169 cmp_mode, 2, x, mode, y, mode);
4170 insns = get_insns ();
4171 end_sequence ();
4173 target = gen_reg_rtx (cmp_mode);
4174 emit_libcall_block (insns, target, value, equiv);
4176 if (comparison == UNORDERED
4177 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4178 || reversed_p)
4179 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4180 else
4181 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4183 *pmode = cmp_mode;
4186 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4188 void
4189 emit_indirect_jump (rtx loc)
4191 if (!targetm.have_indirect_jump ())
4192 sorry ("indirect jumps are not available on this target");
4193 else
4195 struct expand_operand ops[1];
4196 create_address_operand (&ops[0], loc);
4197 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4198 emit_barrier ();
4203 /* Emit a conditional move instruction if the machine supports one for that
4204 condition and machine mode.
4206 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4207 the mode to use should they be constants. If it is VOIDmode, they cannot
4208 both be constants.
4210 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4211 should be stored there. MODE is the mode to use should they be constants.
4212 If it is VOIDmode, they cannot both be constants.
4214 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4215 is not supported. */
4218 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4219 machine_mode cmode, rtx op2, rtx op3,
4220 machine_mode mode, int unsignedp)
4222 rtx comparison;
4223 rtx_insn *last;
4224 enum insn_code icode;
4225 enum rtx_code reversed;
4227 /* If the two source operands are identical, that's just a move. */
4229 if (rtx_equal_p (op2, op3))
4231 if (!target)
4232 target = gen_reg_rtx (mode);
4234 emit_move_insn (target, op3);
4235 return target;
4238 /* If one operand is constant, make it the second one. Only do this
4239 if the other operand is not constant as well. */
4241 if (swap_commutative_operands_p (op0, op1))
4243 std::swap (op0, op1);
4244 code = swap_condition (code);
4247 /* get_condition will prefer to generate LT and GT even if the old
4248 comparison was against zero, so undo that canonicalization here since
4249 comparisons against zero are cheaper. */
4250 if (code == LT && op1 == const1_rtx)
4251 code = LE, op1 = const0_rtx;
4252 else if (code == GT && op1 == constm1_rtx)
4253 code = GE, op1 = const0_rtx;
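/* I.e. rewrite x < 1 as x <= 0, and x > -1 as x >= 0. */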
4255 if (cmode == VOIDmode)
4256 cmode = GET_MODE (op0);
4258 enum rtx_code orig_code = code;
4259 bool swapped = false;
4260 if (swap_commutative_operands_p (op2, op3)
4261 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4262 != UNKNOWN))
4264 std::swap (op2, op3);
4265 code = reversed;
4266 swapped = true;
4269 if (mode == VOIDmode)
4270 mode = GET_MODE (op2);
4272 icode = direct_optab_handler (movcc_optab, mode);
4274 if (icode == CODE_FOR_nothing)
4275 return NULL_RTX;
4277 if (!target)
4278 target = gen_reg_rtx (mode);
4280 for (int pass = 0; ; pass++)
4282 code = unsignedp ? unsigned_condition (code) : code;
4283 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4285 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4286 punt and let the caller figure out how best to deal with this
4287 situation. */
4288 if (COMPARISON_P (comparison))
4290 saved_pending_stack_adjust save;
4291 save_pending_stack_adjust (&save);
4292 last = get_last_insn ();
4293 do_pending_stack_adjust ();
4294 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4295 GET_CODE (comparison), NULL_RTX, unsignedp,
4296 OPTAB_WIDEN, &comparison, &cmode);
4297 if (comparison)
4299 struct expand_operand ops[4];
4301 create_output_operand (&ops[0], target, mode);
4302 create_fixed_operand (&ops[1], comparison);
4303 create_input_operand (&ops[2], op2, mode);
4304 create_input_operand (&ops[3], op3, mode);
4305 if (maybe_expand_insn (icode, 4, ops))
4307 if (ops[0].value != target)
4308 convert_move (target, ops[0].value, false);
4309 return target;
4312 delete_insns_since (last);
4313 restore_pending_stack_adjust (&save);
4316 if (pass == 1)
4317 return NULL_RTX;
4319 /* If the preferred op2/op3 order is not usable, retry with the other
4320 operand order; perhaps it will expand successfully. */
4321 if (swapped)
4322 code = orig_code;
4323 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4324 NULL))
4325 != UNKNOWN)
4326 code = reversed;
4327 else
4328 return NULL_RTX;
4329 std::swap (op2, op3);
4334 /* Emit a conditional negate or bitwise complement using the
4335 negcc or notcc optabs if available. Return NULL_RTX if such operations
4336 are not available. Otherwise return the RTX holding the result.
4337 TARGET is the desired destination of the result. COND is the comparison
4338 on which to negate. If COND is true, move into TARGET the negation
4339 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4340 CODE is either NEG or NOT. MODE is the machine mode in which the
4341 operation is performed. */
4344 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4345 machine_mode mode, rtx cond, rtx op1,
4346 rtx op2)
4348 optab op = unknown_optab;
4349 if (code == NEG)
4350 op = negcc_optab;
4351 else if (code == NOT)
4352 op = notcc_optab;
4353 else
4354 gcc_unreachable ();
4356 insn_code icode = direct_optab_handler (op, mode);
4358 if (icode == CODE_FOR_nothing)
4359 return NULL_RTX;
4361 if (!target)
4362 target = gen_reg_rtx (mode);
4364 rtx_insn *last = get_last_insn ();
4365 struct expand_operand ops[4];
4367 create_output_operand (&ops[0], target, mode);
4368 create_fixed_operand (&ops[1], cond);
4369 create_input_operand (&ops[2], op1, mode);
4370 create_input_operand (&ops[3], op2, mode);
4372 if (maybe_expand_insn (icode, 4, ops))
4374 if (ops[0].value != target)
4375 convert_move (target, ops[0].value, false);
4377 return target;
4379 delete_insns_since (last);
4380 return NULL_RTX;
4383 /* Emit a conditional addition instruction if the machine supports one for that
4384 condition and machine mode.
4386 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4387 the mode to use should they be constants. If it is VOIDmode, they cannot
4388 both be constants.
4390 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4391 should be stored there. MODE is the mode to use should they be constants.
4392 If it is VOIDmode, they cannot both be constants.
4394 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4395 is not supported. */
4398 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4399 machine_mode cmode, rtx op2, rtx op3,
4400 machine_mode mode, int unsignedp)
4402 rtx comparison;
4403 rtx_insn *last;
4404 enum insn_code icode;
4406 /* If one operand is constant, make it the second one. Only do this
4407 if the other operand is not constant as well. */
4409 if (swap_commutative_operands_p (op0, op1))
4411 std::swap (op0, op1);
4412 code = swap_condition (code);
4415 /* get_condition will prefer to generate LT and GT even if the old
4416 comparison was against zero, so undo that canonicalization here since
4417 comparisons against zero are cheaper. */
4418 if (code == LT && op1 == const1_rtx)
4419 code = LE, op1 = const0_rtx;
4420 else if (code == GT && op1 == constm1_rtx)
4421 code = GE, op1 = const0_rtx;
4423 if (cmode == VOIDmode)
4424 cmode = GET_MODE (op0);
4426 if (mode == VOIDmode)
4427 mode = GET_MODE (op2);
4429 icode = optab_handler (addcc_optab, mode);
4431 if (icode == CODE_FOR_nothing)
4432 return 0;
4434 if (!target)
4435 target = gen_reg_rtx (mode);
4437 code = unsignedp ? unsigned_condition (code) : code;
4438 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4440 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4441 return NULL and let the caller figure out how best to deal with this
4442 situation. */
4443 if (!COMPARISON_P (comparison))
4444 return NULL_RTX;
4446 do_pending_stack_adjust ();
4447 last = get_last_insn ();
4448 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4449 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4450 &comparison, &cmode);
4451 if (comparison)
4453 struct expand_operand ops[4];
4455 create_output_operand (&ops[0], target, mode);
4456 create_fixed_operand (&ops[1], comparison);
4457 create_input_operand (&ops[2], op2, mode);
4458 create_input_operand (&ops[3], op3, mode);
4459 if (maybe_expand_insn (icode, 4, ops))
4461 if (ops[0].value != target)
4462 convert_move (target, ops[0].value, false);
4463 return target;
4466 delete_insns_since (last);
4467 return NULL_RTX;
4470 /* These functions attempt to generate an insn body, rather than
4471 emitting the insn, but if the gen function already emits them, we
4472 make no attempt to turn them back into naked patterns. */
4474 /* Generate and return an insn body to add Y to X. */
4476 rtx_insn *
4477 gen_add2_insn (rtx x, rtx y)
4479 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4481 gcc_assert (insn_operand_matches (icode, 0, x));
4482 gcc_assert (insn_operand_matches (icode, 1, x));
4483 gcc_assert (insn_operand_matches (icode, 2, y));
4485 return GEN_FCN (icode) (x, x, y);
4488 /* Generate and return an insn body to add r1 and c,
4489 storing the result in r0. */
4491 rtx_insn *
4492 gen_add3_insn (rtx r0, rtx r1, rtx c)
4494 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4496 if (icode == CODE_FOR_nothing
4497 || !insn_operand_matches (icode, 0, r0)
4498 || !insn_operand_matches (icode, 1, r1)
4499 || !insn_operand_matches (icode, 2, c))
4500 return NULL;
4502 return GEN_FCN (icode) (r0, r1, c);
4506 have_add2_insn (rtx x, rtx y)
4508 enum insn_code icode;
4510 gcc_assert (GET_MODE (x) != VOIDmode);
4512 icode = optab_handler (add_optab, GET_MODE (x));
4514 if (icode == CODE_FOR_nothing)
4515 return 0;
4517 if (!insn_operand_matches (icode, 0, x)
4518 || !insn_operand_matches (icode, 1, x)
4519 || !insn_operand_matches (icode, 2, y))
4520 return 0;
4522 return 1;
4525 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4527 rtx_insn *
4528 gen_addptr3_insn (rtx x, rtx y, rtx z)
4530 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4532 gcc_assert (insn_operand_matches (icode, 0, x));
4533 gcc_assert (insn_operand_matches (icode, 1, y));
4534 gcc_assert (insn_operand_matches (icode, 2, z));
4536 return GEN_FCN (icode) (x, y, z);
4539 /* Return true if the target implements an addptr pattern and X, Y,
4540 and Z are valid for the pattern predicates. */
4543 have_addptr3_insn (rtx x, rtx y, rtx z)
4545 enum insn_code icode;
4547 gcc_assert (GET_MODE (x) != VOIDmode);
4549 icode = optab_handler (addptr3_optab, GET_MODE (x));
4551 if (icode == CODE_FOR_nothing)
4552 return 0;
4554 if (!insn_operand_matches (icode, 0, x)
4555 || !insn_operand_matches (icode, 1, y)
4556 || !insn_operand_matches (icode, 2, z))
4557 return 0;
4559 return 1;
4562 /* Generate and return an insn body to subtract Y from X. */
4564 rtx_insn *
4565 gen_sub2_insn (rtx x, rtx y)
4567 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4569 gcc_assert (insn_operand_matches (icode, 0, x));
4570 gcc_assert (insn_operand_matches (icode, 1, x));
4571 gcc_assert (insn_operand_matches (icode, 2, y));
4573 return GEN_FCN (icode) (x, x, y);
4576 /* Generate and return an insn body to subtract c from r1,
4577 storing the result in r0. */
4579 rtx_insn *
4580 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4582 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4584 if (icode == CODE_FOR_nothing
4585 || !insn_operand_matches (icode, 0, r0)
4586 || !insn_operand_matches (icode, 1, r1)
4587 || !insn_operand_matches (icode, 2, c))
4588 return NULL;
4590 return GEN_FCN (icode) (r0, r1, c);
4594 have_sub2_insn (rtx x, rtx y)
4596 enum insn_code icode;
4598 gcc_assert (GET_MODE (x) != VOIDmode);
4600 icode = optab_handler (sub_optab, GET_MODE (x));
4602 if (icode == CODE_FOR_nothing)
4603 return 0;
4605 if (!insn_operand_matches (icode, 0, x)
4606 || !insn_operand_matches (icode, 1, x)
4607 || !insn_operand_matches (icode, 2, y))
4608 return 0;
4610 return 1;
4613 /* Generate the body of an insn to extend Y (with mode MFROM)
4614 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4616 rtx_insn *
4617 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4618 machine_mode mfrom, int unsignedp)
4620 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4621 return GEN_FCN (icode) (x, y);
4624 /* Generate code to convert FROM to floating point
4625 and store in TO. FROM must be fixed point and not VOIDmode.
4626 UNSIGNEDP nonzero means regard FROM as unsigned.
4627 Normally this is done by correcting the final value
4628 if it is negative. */
4630 void
4631 expand_float (rtx to, rtx from, int unsignedp)
4633 enum insn_code icode;
4634 rtx target = to;
4635 machine_mode fmode, imode;
4636 bool can_do_signed = false;
4638 /* Crash now, because we won't be able to decide which mode to use. */
4639 gcc_assert (GET_MODE (from) != VOIDmode);
4641 /* Look for an insn to do the conversion. Do it in the specified
4642 modes if possible; otherwise convert either input, output or both to
4643 wider mode. If the integer mode is wider than the mode of FROM,
4644 we can do the conversion signed even if the input is unsigned. */
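/* For example, an unsigned SImode source can go through a signed
   DImode-to-float instruction, since every 32-bit unsigned value is
   representable as a nonnegative 64-bit signed value. */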
4646 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4647 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4649 int doing_unsigned = unsignedp;
4651 if (fmode != GET_MODE (to)
4652 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4653 continue;
4655 icode = can_float_p (fmode, imode, unsignedp);
4656 if (icode == CODE_FOR_nothing && unsignedp)
4658 enum insn_code scode = can_float_p (fmode, imode, 0);
4659 if (scode != CODE_FOR_nothing)
4660 can_do_signed = true;
4661 if (imode != GET_MODE (from))
4662 icode = scode, doing_unsigned = 0;
4665 if (icode != CODE_FOR_nothing)
4667 if (imode != GET_MODE (from))
4668 from = convert_to_mode (imode, from, unsignedp);
4670 if (fmode != GET_MODE (to))
4671 target = gen_reg_rtx (fmode);
4673 emit_unop_insn (icode, target, from,
4674 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4676 if (target != to)
4677 convert_move (to, target, 0);
4678 return;
4682 /* Unsigned integer, and no way to convert directly. Convert as signed,
4683 then conditionally adjust the result. */
4684 if (unsignedp && can_do_signed)
4686 rtx_code_label *label = gen_label_rtx ();
4687 rtx temp;
4688 REAL_VALUE_TYPE offset;
4690 /* Look for a usable floating mode FMODE wider than the source and at
4691 least as wide as the target. Using FMODE will avoid rounding woes
4692 with unsigned values greater than the signed maximum value. */
4694 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4695 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4696 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4697 break;
4699 if (fmode == VOIDmode)
4701 /* There is no such mode. Pretend the target is wide enough. */
4702 fmode = GET_MODE (to);
4704 /* Avoid double-rounding when TO is narrower than FROM. */
4705 if ((significand_size (fmode) + 1)
4706 < GET_MODE_PRECISION (GET_MODE (from)))
4708 rtx temp1;
4709 rtx_code_label *neglabel = gen_label_rtx ();
4711 /* Don't use TARGET if it isn't a register, is a hard register,
4712 or is the wrong mode. */
4713 if (!REG_P (target)
4714 || REGNO (target) < FIRST_PSEUDO_REGISTER
4715 || GET_MODE (target) != fmode)
4716 target = gen_reg_rtx (fmode);
4718 imode = GET_MODE (from);
4719 do_pending_stack_adjust ();
4721 /* Test whether the sign bit is set. */
4722 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4723 0, neglabel);
4725 /* The sign bit is not set. Convert as signed. */
4726 expand_float (target, from, 0);
4727 emit_jump_insn (targetm.gen_jump (label));
4728 emit_barrier ();
4730 /* The sign bit is set.
4731 Convert to a usable (positive signed) value by shifting right
4732 one bit, while remembering if a nonzero bit was shifted
4733 out; i.e., compute (from & 1) | (from >> 1). */
4735 emit_label (neglabel);
4736 temp = expand_binop (imode, and_optab, from, const1_rtx,
4737 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4738 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4739 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4740 OPTAB_LIB_WIDEN);
4741 expand_float (target, temp, 0);
4743 /* Multiply by 2 to undo the shift above. */
4744 temp = expand_binop (fmode, add_optab, target, target,
4745 target, 0, OPTAB_LIB_WIDEN);
4746 if (temp != target)
4747 emit_move_insn (target, temp);
4749 do_pending_stack_adjust ();
4750 emit_label (label);
4751 goto done;
4755 /* If we are about to do some arithmetic to correct for an
4756 unsigned operand, do it in a pseudo-register. */
4758 if (GET_MODE (to) != fmode
4759 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4760 target = gen_reg_rtx (fmode);
4762 /* Convert as signed integer to floating. */
4763 expand_float (target, from, 0);
4765 /* If FROM is negative (and therefore TO is negative),
4766 correct its value by 2**bitwidth. */
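/* E.g. with a 32-bit FROM, an unsigned value U >= 2**31 was read as the
   signed value U - 2**32, so adding 2**32 restores the intended value. */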
4768 do_pending_stack_adjust ();
4769 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4770 0, label);
4773 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
4774 temp = expand_binop (fmode, add_optab, target,
4775 const_double_from_real_value (offset, fmode),
4776 target, 0, OPTAB_LIB_WIDEN);
4777 if (temp != target)
4778 emit_move_insn (target, temp);
4780 do_pending_stack_adjust ();
4781 emit_label (label);
4782 goto done;
4785 /* No hardware instruction available; call a library routine. */
4787 rtx libfunc;
4788 rtx_insn *insns;
4789 rtx value;
4790 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4792 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
4793 from = convert_to_mode (SImode, from, unsignedp);
4795 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4796 gcc_assert (libfunc);
4798 start_sequence ();
4800 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4801 GET_MODE (to), 1, from,
4802 GET_MODE (from));
4803 insns = get_insns ();
4804 end_sequence ();
4806 emit_libcall_block (insns, target, value,
4807 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4808 GET_MODE (to), from));
4811 done:
4813 /* Copy result to requested destination
4814 if we have been computing in a temp location. */
4816 if (target != to)
4818 if (GET_MODE (target) == GET_MODE (to))
4819 emit_move_insn (to, target);
4820 else
4821 convert_move (to, target, 0);
4825 /* Generate code to convert FROM to fixed point and store in TO. FROM
4826 must be floating point. */
4828 void
4829 expand_fix (rtx to, rtx from, int unsignedp)
4831 enum insn_code icode;
4832 rtx target = to;
4833 machine_mode fmode, imode;
4834 bool must_trunc = false;
4836 /* We first try to find a pair of modes, one real and one integer, at
4837 least as wide as FROM and TO, respectively, in which we can open-code
4838 this conversion. If the integer mode is wider than the mode of TO,
4839 we can do the conversion either signed or unsigned. */
4841 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4842 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
4844 int doing_unsigned = unsignedp;
4846 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4847 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4848 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4850 if (icode != CODE_FOR_nothing)
4852 rtx_insn *last = get_last_insn ();
4853 if (fmode != GET_MODE (from))
4854 from = convert_to_mode (fmode, from, 0);
4856 if (must_trunc)
4858 rtx temp = gen_reg_rtx (GET_MODE (from));
4859 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4860 temp, 0);
4863 if (imode != GET_MODE (to))
4864 target = gen_reg_rtx (imode);
4866 if (maybe_emit_unop_insn (icode, target, from,
4867 doing_unsigned ? UNSIGNED_FIX : FIX))
4869 if (target != to)
4870 convert_move (to, target, unsignedp);
4871 return;
4873 delete_insns_since (last);
4877 /* For an unsigned conversion, there is one more way to do it.
4878 If we have a signed conversion, we generate code that compares
4879 the real value to the largest representable positive number. If it
4880 is smaller, the conversion is done normally. Otherwise, subtract
4881 one plus the highest signed number, convert, and add it back.
4883 We only need to check all real modes, since we know we didn't find
4884 anything with a wider integer mode.
4886 This code used to extend FP value into mode wider than the destination.
4887 This is needed for decimal float modes which cannot accurately
4888 represent one plus the highest signed number of the same size, but
4889 not for binary modes. Consider, for instance, conversion from SFmode
4890 into DImode.
4892 The hot path through the code is dealing with inputs smaller than 2^63
4893 and doing just the conversion, so there are no bits to lose.
4895 In the other path we know the value is positive in the range 2^63..2^64-1
4896 inclusive (for any other input, overflow happens and the result is undefined).
4897 So we know that the most significant bit set in the mantissa corresponds to
4898 2^63. The subtraction of 2^63 should not generate any rounding as it
4899 simply clears out that bit. The rest is trivial. */
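/* For example, for DFmode to unsigned DImode: inputs below 2**63 take
   the signed fix directly; larger inputs have 2**63 subtracted first,
   are fixed as signed, and then have bit 63 set again with XOR. */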
4901 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4902 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4903 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
4904 && (!DECIMAL_FLOAT_MODE_P (fmode)
4905 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
4907 int bitsize;
4908 REAL_VALUE_TYPE offset;
4909 rtx limit;
4910 rtx_code_label *lab1, *lab2;
4911 rtx_insn *insn;
4913 bitsize = GET_MODE_PRECISION (GET_MODE (to));
4914 real_2expN (&offset, bitsize - 1, fmode);
4915 limit = const_double_from_real_value (offset, fmode);
4916 lab1 = gen_label_rtx ();
4917 lab2 = gen_label_rtx ();
4919 if (fmode != GET_MODE (from))
4920 from = convert_to_mode (fmode, from, 0);
4922 /* See if we need to do the subtraction. */
4923 do_pending_stack_adjust ();
4924 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4925 0, lab1);
4927 /* If not, do the signed "fix" and branch around fixup code. */
4928 expand_fix (to, from, 0);
4929 emit_jump_insn (targetm.gen_jump (lab2));
4930 emit_barrier ();
4932 /* Otherwise, subtract 2**(N-1), convert to signed number,
4933 then add 2**(N-1). Do the addition using XOR since this
4934 will often generate better code. */
4935 emit_label (lab1);
4936 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4937 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4938 expand_fix (to, target, 0);
4939 target = expand_binop (GET_MODE (to), xor_optab, to,
4940 gen_int_mode
4941 (HOST_WIDE_INT_1 << (bitsize - 1),
4942 GET_MODE (to)),
4943 to, 1, OPTAB_LIB_WIDEN);
4945 if (target != to)
4946 emit_move_insn (to, target);
4948 emit_label (lab2);
4950 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
4952 /* Make a place for a REG_NOTE and add it. */
4953 insn = emit_move_insn (to, to);
4954 set_dst_reg_note (insn, REG_EQUAL,
4955 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
4956 copy_rtx (from)),
4957 to);
4960 return;
4963 /* We can't do it with an insn, so use a library call. But first ensure
4964 that the mode of TO is at least as wide as SImode, since those are the
4965 only library calls we know about. */
4967 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
4969 target = gen_reg_rtx (SImode);
4971 expand_fix (target, from, unsignedp);
4973 else
4975 rtx_insn *insns;
4976 rtx value;
4977 rtx libfunc;
4979 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4980 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4981 gcc_assert (libfunc);
4983 start_sequence ();
4985 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4986 GET_MODE (to), 1, from,
4987 GET_MODE (from));
4988 insns = get_insns ();
4989 end_sequence ();
4991 emit_libcall_block (insns, target, value,
4992 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4993 GET_MODE (to), from));
4996 if (target != to)
4998 if (GET_MODE (to) == GET_MODE (target))
4999 emit_move_insn (to, target);
5000 else
5001 convert_move (to, target, 0);
5006 /* Promote integer arguments for a libcall if necessary.
5007 emit_library_call_value cannot do the promotion because it does not
5008 know if it should do a signed or unsigned promotion. This is because
5009 there are no tree types defined for libcalls. */
5011 static rtx
5012 prepare_libcall_arg (rtx arg, int uintp)
5014 scalar_int_mode mode;
5015 machine_mode arg_mode;
5016 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5018 /* If we need to promote the integer function argument we need to do
5019 it here instead of inside emit_library_call_value because in
5020 emit_library_call_value we don't know if we should do a signed or
5021 unsigned promotion. */
5023 int unsigned_p = 0;
5024 arg_mode = promote_function_mode (NULL_TREE, mode,
5025 &unsigned_p, NULL_TREE, 0);
5026 if (arg_mode != mode)
5027 return convert_to_mode (arg_mode, arg, uintp);
5029 return arg;
5032 /* Generate code to convert FROM to TO, where at least one of the two is
5033 a fixed-point value. If UINTP is true, either TO or FROM is an
5034 unsigned integer. If SATP is true, we need to saturate the result. */
5036 void
5037 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5039 machine_mode to_mode = GET_MODE (to);
5040 machine_mode from_mode = GET_MODE (from);
5041 convert_optab tab;
5042 enum rtx_code this_code;
5043 enum insn_code code;
5044 rtx_insn *insns;
5045 rtx value;
5046 rtx libfunc;
5048 if (to_mode == from_mode)
5050 emit_move_insn (to, from);
5051 return;
5054 if (uintp)
5056 tab = satp ? satfractuns_optab : fractuns_optab;
5057 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5059 else
5061 tab = satp ? satfract_optab : fract_optab;
5062 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5064 code = convert_optab_handler (tab, to_mode, from_mode);
5065 if (code != CODE_FOR_nothing)
5067 emit_unop_insn (code, to, from, this_code);
5068 return;
5071 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5072 gcc_assert (libfunc);
5074 from = prepare_libcall_arg (from, uintp);
5075 from_mode = GET_MODE (from);
5077 start_sequence ();
5078 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5079 1, from, from_mode);
5080 insns = get_insns ();
5081 end_sequence ();
5083 emit_libcall_block (insns, to, value,
5084 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5087 /* Generate code to convert FROM to fixed point and store in TO. FROM
5088 must be floating point, TO must be signed. Use the conversion optab
5089 TAB to do the conversion. */
5091 bool
5092 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5094 enum insn_code icode;
5095 rtx target = to;
5096 machine_mode fmode, imode;
5098 /* We first try to find a pair of modes, one real and one integer, at
5099 least as wide as FROM and TO, respectively, in which we can open-code
5100 this conversion. If the integer mode is wider than the mode of TO,
5101 we can do the conversion either signed or unsigned. */
5103 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5104 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5106 icode = convert_optab_handler (tab, imode, fmode);
5107 if (icode != CODE_FOR_nothing)
5109 rtx_insn *last = get_last_insn ();
5110 if (fmode != GET_MODE (from))
5111 from = convert_to_mode (fmode, from, 0);
5113 if (imode != GET_MODE (to))
5114 target = gen_reg_rtx (imode);
5116 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5118 delete_insns_since (last);
5119 continue;
5121 if (target != to)
5122 convert_move (to, target, 0);
5123 return true;
5127 return false;
5130 /* Report whether we have an instruction to perform the operation
5131 specified by CODE on operands of mode MODE. */
5133 have_insn_for (enum rtx_code code, machine_mode mode)
5135 return (code_to_optab (code)
5136 && (optab_handler (code_to_optab (code), mode)
5137 != CODE_FOR_nothing));
5140 /* Print information about the current contents of the optabs on
5141 STDERR. */
5143 DEBUG_FUNCTION void
5144 debug_optab_libfuncs (void)
5146 int i, j, k;
5148 /* Dump the arithmetic optabs. */
5149 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5150 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5152 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5153 if (l)
5155 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5156 fprintf (stderr, "%s\t%s:\t%s\n",
5157 GET_RTX_NAME (optab_to_code ((optab) i)),
5158 GET_MODE_NAME (j),
5159 XSTR (l, 0));
5163 /* Dump the conversion optabs. */
5164 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5165 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5166 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5168 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5169 (machine_mode) k);
5170 if (l)
5172 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5173 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5174 GET_RTX_NAME (optab_to_code ((optab) i)),
5175 GET_MODE_NAME (j),
5176 GET_MODE_NAME (k),
5177 XSTR (l, 0));
5182 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5183 CODE. Return 0 on failure. */
5185 rtx_insn *
5186 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5188 machine_mode mode = GET_MODE (op1);
5189 enum insn_code icode;
5190 rtx_insn *insn;
5191 rtx trap_rtx;
5193 if (mode == VOIDmode)
5194 return 0;
5196 icode = optab_handler (ctrap_optab, mode);
5197 if (icode == CODE_FOR_nothing)
5198 return 0;
5200 /* Some targets only accept a zero trap code. */
5201 if (!insn_operand_matches (icode, 3, tcode))
5202 return 0;
5204 do_pending_stack_adjust ();
5205 start_sequence ();
5206 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5207 &trap_rtx, &mode);
5208 if (!trap_rtx)
5209 insn = NULL;
5210 else
5211 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5212 tcode);
5214 /* If that failed, then give up. */
5215 if (insn == 0)
5217 end_sequence ();
5218 return 0;
5221 emit_insn (insn);
5222 insn = get_insns ();
5223 end_sequence ();
5224 return insn;
5227 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5228 or unsigned operation code. */
5230 enum rtx_code
5231 get_rtx_code (enum tree_code tcode, bool unsignedp)
5233 enum rtx_code code;
5234 switch (tcode)
5236 case EQ_EXPR:
5237 code = EQ;
5238 break;
5239 case NE_EXPR:
5240 code = NE;
5241 break;
5242 case LT_EXPR:
5243 code = unsignedp ? LTU : LT;
5244 break;
5245 case LE_EXPR:
5246 code = unsignedp ? LEU : LE;
5247 break;
5248 case GT_EXPR:
5249 code = unsignedp ? GTU : GT;
5250 break;
5251 case GE_EXPR:
5252 code = unsignedp ? GEU : GE;
5253 break;
5255 case UNORDERED_EXPR:
5256 code = UNORDERED;
5257 break;
5258 case ORDERED_EXPR:
5259 code = ORDERED;
5260 break;
5261 case UNLT_EXPR:
5262 code = UNLT;
5263 break;
5264 case UNLE_EXPR:
5265 code = UNLE;
5266 break;
5267 case UNGT_EXPR:
5268 code = UNGT;
5269 break;
5270 case UNGE_EXPR:
5271 code = UNGE;
5272 break;
5273 case UNEQ_EXPR:
5274 code = UNEQ;
5275 break;
5276 case LTGT_EXPR:
5277 code = LTGT;
5278 break;
5280 case BIT_AND_EXPR:
5281 code = AND;
5282 break;
5284 case BIT_IOR_EXPR:
5285 code = IOR;
5286 break;
5288 default:
5289 gcc_unreachable ();
5291 return code;
5294 /* Return a comparison rtx of mode CMP_MODE for TCODE applied to T_OP0 and T_OP1. Use UNSIGNEDP to
5295 select signed or unsigned operators. OPNO holds the index of the
5296 first comparison operand for insn ICODE. Do not generate the
5297 compare instruction itself. */
5299 static rtx
5300 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5301 tree t_op0, tree t_op1, bool unsignedp,
5302 enum insn_code icode, unsigned int opno)
5304 struct expand_operand ops[2];
5305 rtx rtx_op0, rtx_op1;
5306 machine_mode m0, m1;
5307 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5309 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5311 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5312 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5313 cases, use the original mode. */
5314 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5315 EXPAND_STACK_PARM);
5316 m0 = GET_MODE (rtx_op0);
5317 if (m0 == VOIDmode)
5318 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5320 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5321 EXPAND_STACK_PARM);
5322 m1 = GET_MODE (rtx_op1);
5323 if (m1 == VOIDmode)
5324 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5326 create_input_operand (&ops[0], rtx_op0, m0);
5327 create_input_operand (&ops[1], rtx_op1, m1);
5328 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5329 gcc_unreachable ();
5330 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5333 /* Check whether the vec_perm mask SEL is a constant equivalent to a shift of the first
5334 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5335 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5336 shift. */
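/* E.g. with four elements per vector, the constant mask {2, 3, 4, 5}
   selects the last two elements of the first vector followed by two
   zeros from the second, i.e. a shift by two element widths. */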
5337 static rtx
5338 shift_amt_for_vec_perm_mask (rtx sel)
5340 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5341 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5343 if (GET_CODE (sel) != CONST_VECTOR)
5344 return NULL_RTX;
5346 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5347 if (first >= nelt)
5348 return NULL_RTX;
5349 for (i = 1; i < nelt; i++)
5351 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5352 unsigned int expected = i + first;
5353 /* Indices into the second vector are all equivalent. */
5354 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5355 return NULL_RTX;
5358 return GEN_INT (first * bitsize);
5361 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5363 static rtx
5364 expand_vec_perm_1 (enum insn_code icode, rtx target,
5365 rtx v0, rtx v1, rtx sel)
5367 machine_mode tmode = GET_MODE (target);
5368 machine_mode smode = GET_MODE (sel);
5369 struct expand_operand ops[4];
5371 create_output_operand (&ops[0], target, tmode);
5372 create_input_operand (&ops[3], sel, smode);
5374 /* Make an effort to preserve v0 == v1. The target expander is able to
5375 rely on this to determine if we're permuting a single input operand. */
5376 if (rtx_equal_p (v0, v1))
5378 if (!insn_operand_matches (icode, 1, v0))
5379 v0 = force_reg (tmode, v0);
5380 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5381 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5383 create_fixed_operand (&ops[1], v0);
5384 create_fixed_operand (&ops[2], v0);
5386 else
5388 create_input_operand (&ops[1], v0, tmode);
5389 create_input_operand (&ops[2], v1, tmode);
5392 if (maybe_expand_insn (icode, 4, ops))
5393 return ops[0].value;
5394 return NULL_RTX;
5397 /* Generate instructions for vec_perm optab given its mode
5398 and three operands. */
5401 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5403 enum insn_code icode;
5404 machine_mode qimode;
5405 unsigned int i, w, e, u;
5406 rtx tmp, sel_qi = NULL;
5407 rtvec vec;
5409 if (!target || GET_MODE (target) != mode)
5410 target = gen_reg_rtx (mode);
5412 w = GET_MODE_SIZE (mode);
5413 e = GET_MODE_NUNITS (mode);
5414 u = GET_MODE_UNIT_SIZE (mode);
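/* Here W is the vector size in bytes, E the number of elements, and U
   the size of one element in bytes. */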
5416 /* Set QIMODE to a different vector mode with byte elements.
5417 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5418 qimode = VOIDmode;
5419 if (GET_MODE_INNER (mode) != QImode)
5421 qimode = mode_for_vector (QImode, w);
5422 if (!VECTOR_MODE_P (qimode))
5423 qimode = VOIDmode;
5426 /* If the input is a constant, expand it specially. */
5427 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5428 if (GET_CODE (sel) == CONST_VECTOR)
5430 /* See if this can be handled with a vec_shr. We only do this if the
5431 second vector is all zeroes. */
5432 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5433 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5434 ? optab_handler (vec_shr_optab, qimode)
5435 : CODE_FOR_nothing);
5436 rtx shift_amt = NULL_RTX;
5437 if (v1 == CONST0_RTX (GET_MODE (v1))
5438 && (shift_code != CODE_FOR_nothing
5439 || shift_code_qi != CODE_FOR_nothing))
5441 shift_amt = shift_amt_for_vec_perm_mask (sel);
5442 if (shift_amt)
5444 struct expand_operand ops[3];
5445 if (shift_code != CODE_FOR_nothing)
5447 create_output_operand (&ops[0], target, mode);
5448 create_input_operand (&ops[1], v0, mode);
5449 create_convert_operand_from_type (&ops[2], shift_amt,
5450 sizetype);
5451 if (maybe_expand_insn (shift_code, 3, ops))
5452 return ops[0].value;
5454 if (shift_code_qi != CODE_FOR_nothing)
5456 tmp = gen_reg_rtx (qimode);
5457 create_output_operand (&ops[0], tmp, qimode);
5458 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5459 qimode);
5460 create_convert_operand_from_type (&ops[2], shift_amt,
5461 sizetype);
5462 if (maybe_expand_insn (shift_code_qi, 3, ops))
5463 return gen_lowpart (mode, ops[0].value);
5468 icode = direct_optab_handler (vec_perm_const_optab, mode);
5469 if (icode != CODE_FOR_nothing)
5471 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5472 if (tmp)
5473 return tmp;
5476 /* Fall back to a constant byte-based permutation. */
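/* E.g. for V4SImode, selector element 1 expands to the byte indices
   4, 5, 6 and 7. */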
5477 if (qimode != VOIDmode)
5479 vec = rtvec_alloc (w);
5480 for (i = 0; i < e; ++i)
5482 unsigned int j, this_e;
5484 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5485 this_e &= 2 * e - 1;
5486 this_e *= u;
5488 for (j = 0; j < u; ++j)
5489 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5491 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5493 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5494 if (icode != CODE_FOR_nothing)
5496 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5497 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5498 gen_lowpart (qimode, v1), sel_qi);
5499 if (tmp)
5500 return gen_lowpart (mode, tmp);
5505 /* Otherwise expand as a fully variable permutation. */
5506 icode = direct_optab_handler (vec_perm_optab, mode);
5507 if (icode != CODE_FOR_nothing)
5509 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5510 if (tmp)
5511 return tmp;
5514 /* As a special case to aid several targets, lower the element-based
5515 permutation to a byte-based permutation and try again. */
5516 if (qimode == VOIDmode)
5517 return NULL_RTX;
5518 icode = direct_optab_handler (vec_perm_optab, qimode);
5519 if (icode == CODE_FOR_nothing)
5520 return NULL_RTX;
5522 if (sel_qi == NULL)
5524 /* Multiply each element by its byte size. */
5525 machine_mode selmode = GET_MODE (sel);
5526 if (u == 2)
5527 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5528 NULL, 0, OPTAB_DIRECT);
5529 else
5530 sel = expand_simple_binop (selmode, ASHIFT, sel,
5531 GEN_INT (exact_log2 (u)),
5532 NULL, 0, OPTAB_DIRECT);
5533 gcc_assert (sel != NULL);
5535 /* Broadcast the low byte of each element into each of its bytes. */
5536 vec = rtvec_alloc (w);
5537 for (i = 0; i < w; ++i)
5539 int this_e = i / u * u;
5540 if (BYTES_BIG_ENDIAN)
5541 this_e += u - 1;
5542 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5544 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5545 sel = gen_lowpart (qimode, sel);
5546 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5547 gcc_assert (sel != NULL);
5549 /* Add the byte offset to each byte element. */
5550 /* Note that the definition of the indices here is memory ordering,
5551 so there should be no difference between big and little endian. */
5552 vec = rtvec_alloc (w);
5553 for (i = 0; i < w; ++i)
5554 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5555 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5556 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5557 sel, 0, OPTAB_DIRECT);
5558 gcc_assert (sel_qi != NULL);
5561 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5562 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5563 gen_lowpart (qimode, v1), sel_qi);
5564 if (tmp)
5565 tmp = gen_lowpart (mode, tmp);
5566 return tmp;
5569 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5570 three operands. */
5573 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5574 rtx target)
5576 struct expand_operand ops[4];
5577 machine_mode mode = TYPE_MODE (vec_cond_type);
5578 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5579 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5580 rtx mask, rtx_op1, rtx_op2;
5582 if (icode == CODE_FOR_nothing)
5583 return 0;
5585 mask = expand_normal (op0);
5586 rtx_op1 = expand_normal (op1);
5587 rtx_op2 = expand_normal (op2);
5589 mask = force_reg (mask_mode, mask);
5590 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5592 create_output_operand (&ops[0], target, mode);
5593 create_input_operand (&ops[1], rtx_op1, mode);
5594 create_input_operand (&ops[2], rtx_op2, mode);
5595 create_input_operand (&ops[3], mask, mask_mode);
5596 expand_insn (icode, 4, ops);
5598 return ops[0].value;
5601 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5602 three operands. */
5605 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5606 rtx target)
5608 struct expand_operand ops[6];
5609 enum insn_code icode;
5610 rtx comparison, rtx_op1, rtx_op2;
5611 machine_mode mode = TYPE_MODE (vec_cond_type);
5612 machine_mode cmp_op_mode;
5613 bool unsignedp;
5614 tree op0a, op0b;
5615 enum tree_code tcode;
5617 if (COMPARISON_CLASS_P (op0))
5619 op0a = TREE_OPERAND (op0, 0);
5620 op0b = TREE_OPERAND (op0, 1);
5621 tcode = TREE_CODE (op0);
5623 else
5625 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5626 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5627 != CODE_FOR_nothing)
5628 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5629 op2, target);
5630 /* Fake op0 < 0. */
5631 else
5633 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5634 == MODE_VECTOR_INT);
5635 op0a = op0;
5636 op0b = build_zero_cst (TREE_TYPE (op0));
5637 tcode = LT_EXPR;
5640 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5641 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5644 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5645 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5647 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5648 if (icode == CODE_FOR_nothing)
5650 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5651 icode = get_vcond_eq_icode (mode, cmp_op_mode);
5652 if (icode == CODE_FOR_nothing)
5653 return 0;
5656 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5657 icode, 4);
5658 rtx_op1 = expand_normal (op1);
5659 rtx_op2 = expand_normal (op2);
5661 create_output_operand (&ops[0], target, mode);
5662 create_input_operand (&ops[1], rtx_op1, mode);
5663 create_input_operand (&ops[2], rtx_op2, mode);
5664 create_fixed_operand (&ops[3], comparison);
5665 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5666 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5667 expand_insn (icode, 6, ops);
5668 return ops[0].value;
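/* Illustrative usage (a hypothetical sketch):

     res = VEC_COND_EXPR <a < b, c, d>;

   goes through the six-operand vcond pattern built above: operands 0-2
   are the result and the two value vectors, operands 3-5 the comparison
   rtx and its two sides.  */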
5671 /* Generate insns for a vector comparison into a mask. */
5674 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5676 struct expand_operand ops[4];
5677 enum insn_code icode;
5678 rtx comparison;
5679 machine_mode mask_mode = TYPE_MODE (type);
5680 machine_mode vmode;
5681 bool unsignedp;
5682 tree op0a, op0b;
5683 enum tree_code tcode;
5685 op0a = TREE_OPERAND (exp, 0);
5686 op0b = TREE_OPERAND (exp, 1);
5687 tcode = TREE_CODE (exp);
5689 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5690 vmode = TYPE_MODE (TREE_TYPE (op0a));
5692 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5693 if (icode == CODE_FOR_nothing)
5695 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5696 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5697 if (icode == CODE_FOR_nothing)
5698 return 0;
5701 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5702 unsignedp, icode, 2);
5703 create_output_operand (&ops[0], target, mask_mode);
5704 create_fixed_operand (&ops[1], comparison);
5705 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5706 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5707 expand_insn (icode, 4, ops);
5708 return ops[0].value;
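/* Illustrative usage (a hypothetical sketch):

     mask = a < b;

   where MASK has a vector boolean type expands through the four-operand
   vec_cmp pattern above: the mask output, the comparison rtx, and the
   comparison's two operands.  */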
5711 /* Expand a highpart multiply. */
5714 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5715 rtx target, bool uns_p)
5717 struct expand_operand eops[3];
5718 enum insn_code icode;
5719 int method, i, nunits;
5720 machine_mode wmode;
5721 rtx m1, m2, perm;
5722 optab tab1, tab2;
5723 rtvec v;
5725 method = can_mult_highpart_p (mode, uns_p);
5726 switch (method)
5728 case 0:
5729 return NULL_RTX;
5730 case 1:
5731 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5732 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5733 OPTAB_LIB_WIDEN);
5734 case 2:
5735 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5736 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5737 break;
5738 case 3:
5739 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5740 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5741 if (BYTES_BIG_ENDIAN)
5742 std::swap (tab1, tab2);
5743 break;
5744 default:
5745 gcc_unreachable ();
5748 icode = optab_handler (tab1, mode);
5749 nunits = GET_MODE_NUNITS (mode);
5750 wmode = insn_data[icode].operand[0].mode;
5751 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5752 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5754 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5755 create_input_operand (&eops[1], op0, mode);
5756 create_input_operand (&eops[2], op1, mode);
5757 expand_insn (icode, 3, eops);
5758 m1 = gen_lowpart (mode, eops[0].value);
5760 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5761 create_input_operand (&eops[1], op0, mode);
5762 create_input_operand (&eops[2], op1, mode);
5763 expand_insn (optab_handler (tab2, mode), 3, eops);
5764 m2 = gen_lowpart (mode, eops[0].value);
5766 v = rtvec_alloc (nunits);
5767 if (method == 2)
5769 for (i = 0; i < nunits; ++i)
5770 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5771 + ((i & 1) ? nunits : 0));
5773 else
5775 for (i = 0; i < nunits; ++i)
5776 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5778 perm = gen_rtx_CONST_VECTOR (mode, v);
5780 return expand_vec_perm (mode, m1, m2, perm, target);
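/* Worked example (illustrative, with hypothetical values): for a
   little-endian V4SImode highpart multiply via method 2, M1 holds the
   even products and M2 the odd products, each double-width product laid
   out {lo, hi} in memory order.  The selector computed above is then
   {1, 5, 3, 7}, which picks {hi0, hi1, hi2, hi3}; for method 3 it is
   {1, 3, 5, 7}, picking the high halves of the lo/hi product vectors.  */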
5783 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5784 pattern. */
5786 static void
5787 find_cc_set (rtx x, const_rtx pat, void *data)
5789 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5790 && GET_CODE (pat) == SET)
5792 rtx *p_cc_reg = (rtx *) data;
5793 gcc_assert (!*p_cc_reg);
5794 *p_cc_reg = x;
5798 /* This is a helper function for the other atomic operations. This function
5799 emits a loop that contains SEQ that iterates until a compare-and-swap
5800 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5801 a set of instructions that takes a value from OLD_REG as an input and
5802 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5803 set to the current contents of MEM. After SEQ, a compare-and-swap will
5804 attempt to update MEM with NEW_REG. The function returns true when the
5805 loop was generated successfully. */
5807 static bool
5808 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5810 machine_mode mode = GET_MODE (mem);
5811 rtx_code_label *label;
5812 rtx cmp_reg, success, oldval;
5814 /* The loop we want to generate looks like
5816 cmp_reg = mem;
5817 label:
5818 old_reg = cmp_reg;
5819 seq;
5820 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5821 if (success)
5822 goto label;
5824 Note that we only do the plain load from memory once. Subsequent
5825 iterations use the value loaded by the compare-and-swap pattern. */
5827 label = gen_label_rtx ();
5828 cmp_reg = gen_reg_rtx (mode);
5830 emit_move_insn (cmp_reg, mem);
5831 emit_label (label);
5832 emit_move_insn (old_reg, cmp_reg);
5833 if (seq)
5834 emit_insn (seq);
5836 success = NULL_RTX;
5837 oldval = cmp_reg;
5838 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5839 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5840 MEMMODEL_RELAXED))
5841 return false;
5843 if (oldval != cmp_reg)
5844 emit_move_insn (cmp_reg, oldval);
5846 /* Mark this jump predicted not taken. */
5847 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5848 GET_MODE (success), 1, label,
5849 profile_probability::guessed_never ());
5850 return true;
5854 /* This function tries to emit an atomic_exchange instruction. VAL is written
5855 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5856 using TARGET if possible. */
5858 static rtx
5859 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5861 machine_mode mode = GET_MODE (mem);
5862 enum insn_code icode;
5864 /* If the target supports the exchange directly, great. */
5865 icode = direct_optab_handler (atomic_exchange_optab, mode);
5866 if (icode != CODE_FOR_nothing)
5868 struct expand_operand ops[4];
5870 create_output_operand (&ops[0], target, mode);
5871 create_fixed_operand (&ops[1], mem);
5872 create_input_operand (&ops[2], val, mode);
5873 create_integer_operand (&ops[3], model);
5874 if (maybe_expand_insn (icode, 4, ops))
5875 return ops[0].value;
5878 return NULL_RTX;
5881 /* This function tries to implement an atomic exchange operation using
5882 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5883 The previous contents of *MEM are returned, using TARGET if possible.
5884 Since this instruction is an acquire barrier only, stronger memory
5885 models may require additional barriers to be emitted. */
5887 static rtx
5888 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5889 enum memmodel model)
5891 machine_mode mode = GET_MODE (mem);
5892 enum insn_code icode;
5893 rtx_insn *last_insn = get_last_insn ();
5895 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5897 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5898 exists, and the memory model is stronger than acquire, add a release
5899 barrier before the instruction. */
5901 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5902 expand_mem_thread_fence (model);
5904 if (icode != CODE_FOR_nothing)
5906 struct expand_operand ops[3];
5907 create_output_operand (&ops[0], target, mode);
5908 create_fixed_operand (&ops[1], mem);
5909 create_input_operand (&ops[2], val, mode);
5910 if (maybe_expand_insn (icode, 3, ops))
5911 return ops[0].value;
5914 /* If an external test-and-set libcall is provided, use that instead of
5915 any external compare-and-swap that we might get from the compare-and-
5916 swap-loop expansion later. */
5917 if (!can_compare_and_swap_p (mode, false))
5919 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5920 if (libfunc != NULL)
5922 rtx addr;
5924 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5925 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5926 mode, 2, addr, ptr_mode,
5927 val, mode);
5931 /* If the test_and_set can't be emitted, eliminate any barrier that might
5932 have been emitted. */
5933 delete_insns_since (last_insn);
5934 return NULL_RTX;
5937 /* This function tries to implement an atomic exchange operation using a
5938 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5939 *MEM are returned, using TARGET if possible. No memory model is required
5940 since a compare_and_swap loop is seq-cst. */
5942 static rtx
5943 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5945 machine_mode mode = GET_MODE (mem);
5947 if (can_compare_and_swap_p (mode, true))
5949 if (!target || !register_operand (target, mode))
5950 target = gen_reg_rtx (mode);
5951 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5952 return target;
5955 return NULL_RTX;
5958 /* This function tries to implement an atomic test-and-set operation
5959 using the atomic_test_and_set instruction pattern. A boolean value
5960 is returned from the operation, using TARGET if possible. */
5962 static rtx
5963 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5965 machine_mode pat_bool_mode;
5966 struct expand_operand ops[3];
5968 if (!targetm.have_atomic_test_and_set ())
5969 return NULL_RTX;
5971 /* While we always get QImode from __atomic_test_and_set, we get
5972 other memory modes from __sync_lock_test_and_set. Note that we
5973 use no endian adjustment here. This matches the 4.6 behavior
5974 in the Sparc backend. */
5975 enum insn_code icode = targetm.code_for_atomic_test_and_set;
5976 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
5977 if (GET_MODE (mem) != QImode)
5978 mem = adjust_address_nv (mem, QImode, 0);
5980 pat_bool_mode = insn_data[icode].operand[0].mode;
5981 create_output_operand (&ops[0], target, pat_bool_mode);
5982 create_fixed_operand (&ops[1], mem);
5983 create_integer_operand (&ops[2], model);
5985 if (maybe_expand_insn (icode, 3, ops))
5986 return ops[0].value;
5987 return NULL_RTX;
5990 /* This function expands the legacy __sync_lock_test_and_set operation, which is
5991 generally an atomic exchange. Some limited targets only allow the
5992 constant 1 to be stored. This is an ACQUIRE operation.
5994 TARGET is an optional place to stick the return value.
5995 MEM is where VAL is stored. */
5998 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6000 rtx ret;
6002 /* Try an atomic_exchange first. */
6003 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6004 if (ret)
6005 return ret;
6007 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6008 MEMMODEL_SYNC_ACQUIRE);
6009 if (ret)
6010 return ret;
6012 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6013 if (ret)
6014 return ret;
6016 /* If there are no other options, try atomic_test_and_set if the value
6017 being stored is 1. */
6018 if (val == const1_rtx)
6019 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6021 return ret;
6024 /* This function expands the atomic test_and_set operation:
6025 atomically store a boolean TRUE into MEM and return the previous value.
6027 MEMMODEL is the memory model variant to use.
6028 TARGET is an optional place to stick the return value. */
6031 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6033 machine_mode mode = GET_MODE (mem);
6034 rtx ret, trueval, subtarget;
6036 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6037 if (ret)
6038 return ret;
6040 /* Be binary compatible with non-default settings of trueval, and different
6041 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6042 another only has atomic-exchange. */
6043 if (targetm.atomic_test_and_set_trueval == 1)
6045 trueval = const1_rtx;
6046 subtarget = target ? target : gen_reg_rtx (mode);
6048 else
6050 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6051 subtarget = gen_reg_rtx (mode);
6054 /* Try the atomic-exchange optab... */
6055 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6057 /* ... then an atomic-compare-and-swap loop ... */
6058 if (!ret)
6059 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6061 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6062 if (!ret)
6063 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6065 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6066 things with the value 1. Thus we try again without trueval. */
6067 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6068 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6070 /* Failing all else, assume a single threaded environment and simply
6071 perform the operation. */
6072 if (!ret)
6075 /* If the result is ignored, skip the move to target. */
6075 if (subtarget != const0_rtx)
6076 emit_move_insn (subtarget, mem);
6078 emit_move_insn (mem, trueval);
6079 ret = subtarget;
6082 /* Recall that we have to return a boolean value; rectify if trueval
6083 is not exactly one. */
6084 if (targetm.atomic_test_and_set_trueval != 1)
6085 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6087 return ret;
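/* Illustrative example (hypothetical target values): on a target whose
   test-and-set stores 0xff, i.e. targetm.atomic_test_and_set_trueval ==
   0xff, the code above exchanges 0xff into MEM and then reduces the
   previous contents to a 0/1 boolean with
   emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1).  */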
6090 /* This function expands the atomic exchange operation:
6091 atomically store VAL in MEM and return the previous value in MEM.
6093 MEMMODEL is the memory model variant to use.
6094 TARGET is an optional place to stick the return value. */
6097 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6099 machine_mode mode = GET_MODE (mem);
6100 rtx ret;
6102 /* If loads are not atomic for the required size and we are not called to
6103 provide a __sync builtin, do not do anything so that we stay consistent
6104 with atomic loads of the same size. */
6105 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6106 return NULL_RTX;
6108 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6110 /* Next try a compare-and-swap loop for the exchange. */
6111 if (!ret)
6112 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6114 return ret;
6117 /* This function expands the atomic compare exchange operation:
6119 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6120 *PTARGET_OVAL is an optional place to store the old value from memory.
6121 Both target parameters may be NULL or const0_rtx to indicate that we do
6122 not care about that return value. Both target parameters are updated on
6123 success to the actual location of the corresponding result.
6125 MEMMODEL is the memory model variant to use.
6127 The return value of the function is true for success. */
6129 bool
6130 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6131 rtx mem, rtx expected, rtx desired,
6132 bool is_weak, enum memmodel succ_model,
6133 enum memmodel fail_model)
6135 machine_mode mode = GET_MODE (mem);
6136 struct expand_operand ops[8];
6137 enum insn_code icode;
6138 rtx target_oval, target_bool = NULL_RTX;
6139 rtx libfunc;
6141 /* If loads are not atomic for the required size and we are not called to
6142 provide a __sync builtin, do not do anything so that we stay consistent
6143 with atomic loads of the same size. */
6144 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6145 return false;
6147 /* Load expected into a register for the compare and swap. */
6148 if (MEM_P (expected))
6149 expected = copy_to_reg (expected);
6151 /* Make sure we always have some place to put the return oldval.
6152 Further, make sure that place is distinct from the input expected,
6153 just in case we need that path down below. */
6154 if (ptarget_oval && *ptarget_oval == const0_rtx)
6155 ptarget_oval = NULL;
6157 if (ptarget_oval == NULL
6158 || (target_oval = *ptarget_oval) == NULL
6159 || reg_overlap_mentioned_p (expected, target_oval))
6160 target_oval = gen_reg_rtx (mode);
6162 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6163 if (icode != CODE_FOR_nothing)
6165 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6167 if (ptarget_bool && *ptarget_bool == const0_rtx)
6168 ptarget_bool = NULL;
6170 /* Make sure we always have a place for the bool operand. */
6171 if (ptarget_bool == NULL
6172 || (target_bool = *ptarget_bool) == NULL
6173 || GET_MODE (target_bool) != bool_mode)
6174 target_bool = gen_reg_rtx (bool_mode);
6176 /* Emit the compare_and_swap. */
6177 create_output_operand (&ops[0], target_bool, bool_mode);
6178 create_output_operand (&ops[1], target_oval, mode);
6179 create_fixed_operand (&ops[2], mem);
6180 create_input_operand (&ops[3], expected, mode);
6181 create_input_operand (&ops[4], desired, mode);
6182 create_integer_operand (&ops[5], is_weak);
6183 create_integer_operand (&ops[6], succ_model);
6184 create_integer_operand (&ops[7], fail_model);
6185 if (maybe_expand_insn (icode, 8, ops))
6187 /* Return success/failure. */
6188 target_bool = ops[0].value;
6189 target_oval = ops[1].value;
6190 goto success;
6194 /* Otherwise fall back to the original __sync_val_compare_and_swap
6195 which is always seq-cst. */
6196 icode = optab_handler (sync_compare_and_swap_optab, mode);
6197 if (icode != CODE_FOR_nothing)
6199 rtx cc_reg;
6201 create_output_operand (&ops[0], target_oval, mode);
6202 create_fixed_operand (&ops[1], mem);
6203 create_input_operand (&ops[2], expected, mode);
6204 create_input_operand (&ops[3], desired, mode);
6205 if (!maybe_expand_insn (icode, 4, ops))
6206 return false;
6208 target_oval = ops[0].value;
6210 /* If the caller isn't interested in the boolean return value,
6211 skip the computation of it. */
6212 if (ptarget_bool == NULL)
6213 goto success;
6215 /* Otherwise, work out if the compare-and-swap succeeded. */
6216 cc_reg = NULL_RTX;
6217 if (have_insn_for (COMPARE, CCmode))
6218 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6219 if (cc_reg)
6221 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6222 const0_rtx, VOIDmode, 0, 1);
6223 goto success;
6225 goto success_bool_from_val;
6228 /* Also check for library support for __sync_val_compare_and_swap. */
6229 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6230 if (libfunc != NULL)
6232 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6233 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6234 mode, 3, addr, ptr_mode,
6235 expected, mode, desired, mode);
6236 emit_move_insn (target_oval, target);
6238 /* Compute the boolean return value only if requested. */
6239 if (ptarget_bool)
6240 goto success_bool_from_val;
6241 else
6242 goto success;
6245 /* Failure. */
6246 return false;
6248 success_bool_from_val:
6249 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6250 expected, VOIDmode, 1, 1);
6251 success:
6252 /* Make sure that the oval output winds up where the caller asked. */
6253 if (ptarget_oval)
6254 *ptarget_oval = target_oval;
6255 if (ptarget_bool)
6256 *ptarget_bool = target_bool;
6257 return true;
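/* Illustrative caller sketch (hypothetical, for exposition only):

     rtx ok = NULL_RTX, oldval = NULL_RTX;
     if (expand_atomic_compare_and_swap (&ok, &oldval, mem, expected,
                                         desired, false, MEMMODEL_SEQ_CST,
                                         MEMMODEL_RELAXED))
       ... OK is a boolean rtx and OLDVAL the prior contents of MEM ...

   Passing const0_rtx through either pointer asks the expander to skip
   computing that result.  */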
6260 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
6262 static void
6263 expand_asm_memory_barrier (void)
6265 rtx asm_op, clob;
6267 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6268 rtvec_alloc (0), rtvec_alloc (0),
6269 rtvec_alloc (0), UNKNOWN_LOCATION);
6270 MEM_VOLATILE_P (asm_op) = 1;
6272 clob = gen_rtx_SCRATCH (VOIDmode);
6273 clob = gen_rtx_MEM (BLKmode, clob);
6274 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6276 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6279 /* This routine will either emit the mem_thread_fence pattern or issue a
6280 sync_synchronize to generate a fence for memory model MEMMODEL. */
6282 void
6283 expand_mem_thread_fence (enum memmodel model)
6285 if (is_mm_relaxed (model))
6286 return;
6287 if (targetm.have_mem_thread_fence ())
6289 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6290 expand_asm_memory_barrier ();
6292 else if (targetm.have_memory_barrier ())
6293 emit_insn (targetm.gen_memory_barrier ());
6294 else if (synchronize_libfunc != NULL_RTX)
6295 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
6296 else
6297 expand_asm_memory_barrier ();
6300 /* This routine will either emit the mem_signal_fence pattern or issue a
6301 sync_synchronize to generate a fence for memory model MEMMODEL. */
6303 void
6304 expand_mem_signal_fence (enum memmodel model)
6306 if (targetm.have_mem_signal_fence ())
6307 emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
6308 else if (!is_mm_relaxed (model))
6310 /* By default targets are coherent between a thread and the signal
6311 handler running on the same thread. Thus this really becomes a
6312 compiler barrier, in that stores must not be sunk past
6313 (or raised above) a given point. */
6314 expand_asm_memory_barrier ();
6318 /* This function expands the atomic load operation:
6319 return the atomically loaded value in MEM.
6321 MEMMODEL is the memory model variant to use.
6322 TARGET is an optional place to stick the return value. */
6325 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6327 machine_mode mode = GET_MODE (mem);
6328 enum insn_code icode;
6330 /* If the target supports the load directly, great. */
6331 icode = direct_optab_handler (atomic_load_optab, mode);
6332 if (icode != CODE_FOR_nothing)
6334 struct expand_operand ops[3];
6336 create_output_operand (&ops[0], target, mode);
6337 create_fixed_operand (&ops[1], mem);
6338 create_integer_operand (&ops[2], model);
6339 if (maybe_expand_insn (icode, 3, ops))
6340 return ops[0].value;
6343 /* If the size of the object is greater than word size on this target,
6344 then we assume that a load will not be atomic. We could try to
6345 emulate a load with a compare-and-swap operation, but the store that
6346 such an emulation could end up performing would be incorrect if this is
6347 a volatile atomic load or if it targets read-only-mapped memory. */
6348 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6349 /* If there is no atomic load pattern, fall back to the library call. */
6350 return NULL_RTX;
6352 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6353 if (!target || target == const0_rtx)
6354 target = gen_reg_rtx (mode);
6356 /* For SEQ_CST, emit a barrier before the load. */
6357 if (is_mm_seq_cst (model))
6358 expand_mem_thread_fence (model);
6360 emit_move_insn (target, mem);
6362 /* Emit the appropriate barrier after the load. */
6363 expand_mem_thread_fence (model);
6365 return target;
6368 /* This function expands the atomic store operation:
6369 Atomically store VAL in MEM.
6370 MEMMODEL is the memory model variant to use.
6371 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6372 This function returns const0_rtx if a pattern was emitted. */
6375 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6377 machine_mode mode = GET_MODE (mem);
6378 enum insn_code icode;
6379 struct expand_operand ops[3];
6381 /* If the target supports the store directly, great. */
6382 icode = direct_optab_handler (atomic_store_optab, mode);
6383 if (icode != CODE_FOR_nothing)
6385 create_fixed_operand (&ops[0], mem);
6386 create_input_operand (&ops[1], val, mode);
6387 create_integer_operand (&ops[2], model);
6388 if (maybe_expand_insn (icode, 3, ops))
6389 return const0_rtx;
6392 /* If using __sync_lock_release is a viable alternative, try it.
6393 Note that this will not be set to true if we are expanding a generic
6394 __atomic_store_n. */
6395 if (use_release)
6397 icode = direct_optab_handler (sync_lock_release_optab, mode);
6398 if (icode != CODE_FOR_nothing)
6400 create_fixed_operand (&ops[0], mem);
6401 create_input_operand (&ops[1], const0_rtx, mode);
6402 if (maybe_expand_insn (icode, 2, ops))
6404 /* lock_release is only a release barrier. */
6405 if (is_mm_seq_cst (model))
6406 expand_mem_thread_fence (model);
6407 return const0_rtx;
6412 /* If the size of the object is greater than word size on this target,
6413 a default store will not be atomic. */
6414 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6416 /* If loads are atomic or we are called to provide a __sync builtin,
6417 we can try an atomic_exchange and throw away the result. Otherwise,
6418 don't do anything so that we do not create an inconsistency between
6419 loads and stores. */
6420 if (can_atomic_load_p (mode) || is_mm_sync (model))
6422 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6423 if (!target)
6424 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6425 val);
6426 if (target)
6427 return const0_rtx;
6429 return NULL_RTX;
6432 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6433 expand_mem_thread_fence (model);
6435 emit_move_insn (mem, val);
6437 /* For SEQ_CST, also emit a barrier after the store. */
6438 if (is_mm_seq_cst (model))
6439 expand_mem_thread_fence (model);
6441 return const0_rtx;
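/* Illustrative example (a hypothetical sketch): a seq-cst __atomic_store_n
   on a target with no atomic_store pattern and a word-size-or-smaller MEM
   falls through to

     fence; MEM = VAL; fence;

   whereas an oversized MEM is attempted as an atomic exchange whose
   result is discarded.  */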
6445 /* Structure containing the pointers and values required to process the
6446 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6448 struct atomic_op_functions
6450 direct_optab mem_fetch_before;
6451 direct_optab mem_fetch_after;
6452 direct_optab mem_no_result;
6453 optab fetch_before;
6454 optab fetch_after;
6455 direct_optab no_result;
6456 enum rtx_code reverse_code;
6460 /* Fill in structure pointed to by OP with the various optab entries for an
6461 operation of type CODE. */
6463 static void
6464 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6466 gcc_assert (op != NULL);
6468 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6469 in the source code during compilation, and the optab entries are not
6470 computable until runtime. Fill in the values at runtime. */
6471 switch (code)
6473 case PLUS:
6474 op->mem_fetch_before = atomic_fetch_add_optab;
6475 op->mem_fetch_after = atomic_add_fetch_optab;
6476 op->mem_no_result = atomic_add_optab;
6477 op->fetch_before = sync_old_add_optab;
6478 op->fetch_after = sync_new_add_optab;
6479 op->no_result = sync_add_optab;
6480 op->reverse_code = MINUS;
6481 break;
6482 case MINUS:
6483 op->mem_fetch_before = atomic_fetch_sub_optab;
6484 op->mem_fetch_after = atomic_sub_fetch_optab;
6485 op->mem_no_result = atomic_sub_optab;
6486 op->fetch_before = sync_old_sub_optab;
6487 op->fetch_after = sync_new_sub_optab;
6488 op->no_result = sync_sub_optab;
6489 op->reverse_code = PLUS;
6490 break;
6491 case XOR:
6492 op->mem_fetch_before = atomic_fetch_xor_optab;
6493 op->mem_fetch_after = atomic_xor_fetch_optab;
6494 op->mem_no_result = atomic_xor_optab;
6495 op->fetch_before = sync_old_xor_optab;
6496 op->fetch_after = sync_new_xor_optab;
6497 op->no_result = sync_xor_optab;
6498 op->reverse_code = XOR;
6499 break;
6500 case AND:
6501 op->mem_fetch_before = atomic_fetch_and_optab;
6502 op->mem_fetch_after = atomic_and_fetch_optab;
6503 op->mem_no_result = atomic_and_optab;
6504 op->fetch_before = sync_old_and_optab;
6505 op->fetch_after = sync_new_and_optab;
6506 op->no_result = sync_and_optab;
6507 op->reverse_code = UNKNOWN;
6508 break;
6509 case IOR:
6510 op->mem_fetch_before = atomic_fetch_or_optab;
6511 op->mem_fetch_after = atomic_or_fetch_optab;
6512 op->mem_no_result = atomic_or_optab;
6513 op->fetch_before = sync_old_ior_optab;
6514 op->fetch_after = sync_new_ior_optab;
6515 op->no_result = sync_ior_optab;
6516 op->reverse_code = UNKNOWN;
6517 break;
6518 case NOT:
6519 op->mem_fetch_before = atomic_fetch_nand_optab;
6520 op->mem_fetch_after = atomic_nand_fetch_optab;
6521 op->mem_no_result = atomic_nand_optab;
6522 op->fetch_before = sync_old_nand_optab;
6523 op->fetch_after = sync_new_nand_optab;
6524 op->no_result = sync_nand_optab;
6525 op->reverse_code = UNKNOWN;
6526 break;
6527 default:
6528 gcc_unreachable ();
6532 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
6533 using memory order MODEL. If AFTER is true the operation needs to return
6534 the value of *MEM after the operation, otherwise the previous value.
6535 TARGET is an optional place to put the result. The result is unused if
6536 it is const0_rtx.
6537 Return the result if there is a better sequence, otherwise NULL_RTX. */
6539 static rtx
6540 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6541 enum memmodel model, bool after)
6543 /* If the value is prefetched, or not used, it may be possible to replace
6544 the sequence with a native exchange operation. */
6545 if (!after || target == const0_rtx)
6547 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6548 if (code == AND && val == const0_rtx)
6550 if (target == const0_rtx)
6551 target = gen_reg_rtx (GET_MODE (mem));
6552 return maybe_emit_atomic_exchange (target, mem, val, model);
6555 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6556 if (code == IOR && val == constm1_rtx)
6558 if (target == const0_rtx)
6559 target = gen_reg_rtx (GET_MODE (mem));
6560 return maybe_emit_atomic_exchange (target, mem, val, model);
6564 return NULL_RTX;
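/* For example (illustrative):

     __atomic_fetch_and (&x, 0, model);   with the result unused

   simply stores zero, so it is expanded as an exchange of zero into X;
   likewise fetch_or with -1 exchanges in an all-ones value.  */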
6567 /* Try to emit an instruction for a specific operation variation.
6568 OPTAB contains the OP functions.
6569 TARGET is an optional place to return the result. const0_rtx means unused.
6570 MEM is the memory location to operate on.
6571 VAL is the value to use in the operation.
6572 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6573 MODEL is the memory model, if used.
6574 AFTER is true if the returned result is the value after the operation. */
6576 static rtx
6577 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6578 rtx val, bool use_memmodel, enum memmodel model, bool after)
6580 machine_mode mode = GET_MODE (mem);
6581 struct expand_operand ops[4];
6582 enum insn_code icode;
6583 int op_counter = 0;
6584 int num_ops;
6586 /* Check to see if there is a result returned. */
6587 if (target == const0_rtx)
6589 if (use_memmodel)
6591 icode = direct_optab_handler (optab->mem_no_result, mode);
6592 create_integer_operand (&ops[2], model);
6593 num_ops = 3;
6595 else
6597 icode = direct_optab_handler (optab->no_result, mode);
6598 num_ops = 2;
6601 /* Otherwise, we need to generate a result. */
6602 else
6604 if (use_memmodel)
6606 icode = direct_optab_handler (after ? optab->mem_fetch_after
6607 : optab->mem_fetch_before, mode);
6608 create_integer_operand (&ops[3], model);
6609 num_ops = 4;
6611 else
6613 icode = optab_handler (after ? optab->fetch_after
6614 : optab->fetch_before, mode);
6615 num_ops = 3;
6617 create_output_operand (&ops[op_counter++], target, mode);
6619 if (icode == CODE_FOR_nothing)
6620 return NULL_RTX;
6622 create_fixed_operand (&ops[op_counter++], mem);
6623 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6624 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6626 if (maybe_expand_insn (icode, num_ops, ops))
6627 return (target == const0_rtx ? const0_rtx : ops[0].value);
6629 return NULL_RTX;
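/* For instance (illustrative): an __atomic fetch_add with a live result
   uses four operands -- output, MEM, VAL and the memory model -- while
   the old __sync form drops the model, and the no-result forms drop the
   output operand as well.  */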
6633 /* This function expands an atomic fetch_OP or OP_fetch operation:
6634 TARGET is an optional place to stick the return value. const0_rtx indicates
6635 the result is unused.
6636 atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6637 CODE is the operation being performed (OP)
6638 MEMMODEL is the memory model variant to use.
6639 AFTER is true to return the result of the operation (OP_fetch).
6640 AFTER is false to return the value before the operation (fetch_OP).
6642 This function will *only* generate instructions if there is a direct
6643 optab. No compare and swap loops or libcalls will be generated. */
6645 static rtx
6646 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6647 enum rtx_code code, enum memmodel model,
6648 bool after)
6650 machine_mode mode = GET_MODE (mem);
6651 struct atomic_op_functions optab;
6652 rtx result;
6653 bool unused_result = (target == const0_rtx);
6655 get_atomic_op_for_code (&optab, code);
6657 /* Check to see if there are any better instructions. */
6658 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6659 if (result)
6660 return result;
6662 /* Check for the case where the result isn't used and try those patterns. */
6663 if (unused_result)
6665 /* Try the memory model variant first. */
6666 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6667 if (result)
6668 return result;
6670 /* Next try the old style without a memory model. */
6671 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6672 if (result)
6673 return result;
6675 /* There is no no-result pattern, so try patterns with a result. */
6676 target = NULL_RTX;
6679 /* Try the __atomic version. */
6680 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6681 if (result)
6682 return result;
6684 /* Try the older __sync version. */
6685 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6686 if (result)
6687 return result;
6689 /* If the fetch value can be calculated from the other variation of fetch,
6690 try that operation. */
6691 if (after || unused_result || optab.reverse_code != UNKNOWN)
6693 /* Try the __atomic version, then the older __sync version. */
6694 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6695 if (!result)
6696 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6698 if (result)
6700 /* If the result isn't used, no need to do compensation code. */
6701 if (unused_result)
6702 return result;
6704 /* Issue compensation code: fetch_after == fetch_before OP val, and
6705 fetch_before == fetch_after REVERSE_OP val, e.g. add_fetch = fetch_add + VAL. */
6706 if (!after)
6707 code = optab.reverse_code;
6708 if (code == NOT)
6710 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6711 true, OPTAB_LIB_WIDEN);
6712 result = expand_simple_unop (mode, NOT, result, target, true);
6714 else
6715 result = expand_simple_binop (mode, code, result, val, target,
6716 true, OPTAB_LIB_WIDEN);
6717 return result;
6721 /* No direct opcode can be generated. */
6722 return NULL_RTX;
6727 /* This function expands an atomic fetch_OP or OP_fetch operation:
6728 TARGET is an optional place to stick the return value. const0_rtx indicates
6729 the result is unused.
6730 atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6731 CODE is the operation being performed (OP)
6732 MEMMODEL is the memory model variant to use.
6733 AFTER is true to return the result of the operation (OP_fetch).
6734 AFTER is false to return the value before the operation (fetch_OP). */
6736 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6737 enum memmodel model, bool after)
6739 machine_mode mode = GET_MODE (mem);
6740 rtx result;
6741 bool unused_result = (target == const0_rtx);
6743 /* If loads are not atomic for the required size and we are not called to
6744 provide a __sync builtin, do not do anything so that we stay consistent
6745 with atomic loads of the same size. */
6746 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6747 return NULL_RTX;
6749 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6750 after);
6752 if (result)
6753 return result;
6755 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6756 if (code == PLUS || code == MINUS)
6758 rtx tmp;
6759 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6761 start_sequence ();
6762 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6763 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6764 model, after);
6765 if (result)
6767 /* The reversed operation worked, so emit the insns and return. */
6768 tmp = get_insns ();
6769 end_sequence ();
6770 emit_insn (tmp);
6771 return result;
6774 /* The reversed operation did not work, so throw away the negation code and continue. */
6775 end_sequence ();
6778 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6779 if (!can_compare_and_swap_p (mode, false))
6781 rtx libfunc;
6782 bool fixup = false;
6783 enum rtx_code orig_code = code;
6784 struct atomic_op_functions optab;
6786 get_atomic_op_for_code (&optab, code);
6787 libfunc = optab_libfunc (after ? optab.fetch_after
6788 : optab.fetch_before, mode);
6789 if (libfunc == NULL
6790 && (after || unused_result || optab.reverse_code != UNKNOWN))
6792 fixup = true;
6793 if (!after)
6794 code = optab.reverse_code;
6795 libfunc = optab_libfunc (after ? optab.fetch_before
6796 : optab.fetch_after, mode);
6798 if (libfunc != NULL)
6800 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6801 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6802 2, addr, ptr_mode, val, mode);
6804 if (!unused_result && fixup)
6805 result = expand_simple_binop (mode, code, result, val, target,
6806 true, OPTAB_LIB_WIDEN);
6807 return result;
6810 /* We need the original code for any further attempts. */
6811 code = orig_code;
6814 /* If nothing else has succeeded, default to a compare and swap loop. */
6815 if (can_compare_and_swap_p (mode, true))
6817 rtx_insn *insn;
6818 rtx t0 = gen_reg_rtx (mode), t1;
6820 start_sequence ();
6822 /* If the result is used, get a register for it. */
6823 if (!unused_result)
6825 if (!target || !register_operand (target, mode))
6826 target = gen_reg_rtx (mode);
6827 /* If fetch_before, copy the value now. */
6828 if (!after)
6829 emit_move_insn (target, t0);
6831 else
6832 target = const0_rtx;
6834 t1 = t0;
6835 if (code == NOT)
6837 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6838 true, OPTAB_LIB_WIDEN);
6839 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6841 else
6842 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6843 OPTAB_LIB_WIDEN);
6845 /* For after, copy the value now. */
6846 if (!unused_result && after)
6847 emit_move_insn (target, t1);
6848 insn = get_insns ();
6849 end_sequence ();
6851 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6852 return target;
6855 return NULL_RTX;
6858 /* Return true if OPERAND is suitable for operand number OPNO of
6859 instruction ICODE. */
6861 bool
6862 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6864 return (!insn_data[(int) icode].operand[opno].predicate
6865 || (insn_data[(int) icode].operand[opno].predicate
6866 (operand, insn_data[(int) icode].operand[opno].mode)));
6869 /* TARGET is a target of a multiword operation that we are going to
6870 implement as a series of word-mode operations. Return true if
6871 TARGET is suitable for this purpose. */
6873 bool
6874 valid_multiword_target_p (rtx target)
6876 machine_mode mode;
6877 int i;
6879 mode = GET_MODE (target);
6880 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6881 if (!validate_subreg (word_mode, mode, target, i))
6882 return false;
6883 return true;
6886 /* Like maybe_legitimize_operand, but do not change the code of the
6887 current rtx value. */
6889 static bool
6890 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6891 struct expand_operand *op)
6893 /* See if the operand matches in its current form. */
6894 if (insn_operand_matches (icode, opno, op->value))
6895 return true;
6897 /* If the operand is a memory whose address has no side effects,
6898 try forcing the address into a non-virtual pseudo register.
6899 The check for side effects is important because copy_to_mode_reg
6900 cannot handle things like auto-modified addresses. */
6901 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6903 rtx addr, mem;
6905 mem = op->value;
6906 addr = XEXP (mem, 0);
6907 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6908 && !side_effects_p (addr))
6910 rtx_insn *last;
6911 machine_mode mode;
6913 last = get_last_insn ();
6914 mode = get_address_mode (mem);
6915 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6916 if (insn_operand_matches (icode, opno, mem))
6918 op->value = mem;
6919 return true;
6921 delete_insns_since (last);
6925 return false;
6928 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6929 on success, storing the new operand value back in OP. */
6931 static bool
6932 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6933 struct expand_operand *op)
6935 machine_mode mode, imode;
6936 bool old_volatile_ok, result;
6938 mode = op->mode;
6939 switch (op->type)
6941 case EXPAND_FIXED:
6942 old_volatile_ok = volatile_ok;
6943 volatile_ok = true;
6944 result = maybe_legitimize_operand_same_code (icode, opno, op);
6945 volatile_ok = old_volatile_ok;
6946 return result;
6948 case EXPAND_OUTPUT:
6949 gcc_assert (mode != VOIDmode);
6950 if (op->value
6951 && op->value != const0_rtx
6952 && GET_MODE (op->value) == mode
6953 && maybe_legitimize_operand_same_code (icode, opno, op))
6954 return true;
6956 op->value = gen_reg_rtx (mode);
6957 op->target = 0;
6958 break;
6960 case EXPAND_INPUT:
6961 input:
6962 gcc_assert (mode != VOIDmode);
6963 gcc_assert (GET_MODE (op->value) == VOIDmode
6964 || GET_MODE (op->value) == mode);
6965 if (maybe_legitimize_operand_same_code (icode, opno, op))
6966 return true;
6968 op->value = copy_to_mode_reg (mode, op->value);
6969 break;
6971 case EXPAND_CONVERT_TO:
6972 gcc_assert (mode != VOIDmode);
6973 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
6974 goto input;
6976 case EXPAND_CONVERT_FROM:
6977 if (GET_MODE (op->value) != VOIDmode)
6978 mode = GET_MODE (op->value);
6979 else
6980 /* The caller must tell us what mode this value has. */
6981 gcc_assert (mode != VOIDmode);
6983 imode = insn_data[(int) icode].operand[opno].mode;
6984 if (imode != VOIDmode && imode != mode)
6986 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
6987 mode = imode;
6989 goto input;
6991 case EXPAND_ADDRESS:
6992 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
6993 op->value);
6994 goto input;
6996 case EXPAND_INTEGER:
6997 mode = insn_data[(int) icode].operand[opno].mode;
6998 if (mode != VOIDmode && const_int_operand (op->value, mode))
6999 goto input;
7000 break;
7002 return insn_operand_matches (icode, opno, op->value);
7005 /* Make OP describe an input operand that should have the same value
7006 as VALUE, after any mode conversion that the target might request.
7007 TYPE is the type of VALUE. */
7009 void
7010 create_convert_operand_from_type (struct expand_operand *op,
7011 rtx value, tree type)
7013 create_convert_operand_from (op, value, TYPE_MODE (type),
7014 TYPE_UNSIGNED (type));
7017 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7018 of instruction ICODE. Return true on success, leaving the new operand
7019 values in the OPS themselves. Emit no code on failure. */
7021 bool
7022 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7023 unsigned int nops, struct expand_operand *ops)
7025 rtx_insn *last;
7026 unsigned int i;
7028 last = get_last_insn ();
7029 for (i = 0; i < nops; i++)
7030 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7032 delete_insns_since (last);
7033 return false;
7035 return true;
7038 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7039 as its operands. Return the instruction pattern on success,
7040 and emit any necessary set-up code. Return null and emit no
7041 code on failure. */
7043 rtx_insn *
7044 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7045 struct expand_operand *ops)
7047 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7048 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7049 return NULL;
7051 switch (nops)
7053 case 1:
7054 return GEN_FCN (icode) (ops[0].value);
7055 case 2:
7056 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7057 case 3:
7058 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7059 case 4:
7060 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7061 ops[3].value);
7062 case 5:
7063 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7064 ops[3].value, ops[4].value);
7065 case 6:
7066 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7067 ops[3].value, ops[4].value, ops[5].value);
7068 case 7:
7069 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7070 ops[3].value, ops[4].value, ops[5].value,
7071 ops[6].value);
7072 case 8:
7073 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7074 ops[3].value, ops[4].value, ops[5].value,
7075 ops[6].value, ops[7].value);
7076 case 9:
7077 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7078 ops[3].value, ops[4].value, ops[5].value,
7079 ops[6].value, ops[7].value, ops[8].value);
7081 gcc_unreachable ();
7084 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7085 as its operands. Return true on success and emit no code on failure. */
7087 bool
7088 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7089 struct expand_operand *ops)
7091 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7092 if (pat)
7094 emit_insn (pat);
7095 return true;
7097 return false;
7100 /* Like maybe_expand_insn, but for jumps. */
7102 bool
7103 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7104 struct expand_operand *ops)
7106 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7107 if (pat)
7109 emit_jump_insn (pat);
7110 return true;
7112 return false;
7115 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7116 as its operands. */
7118 void
7119 expand_insn (enum insn_code icode, unsigned int nops,
7120 struct expand_operand *ops)
7122 if (!maybe_expand_insn (icode, nops, ops))
7123 gcc_unreachable ();
7126 /* Like expand_insn, but for jumps. */
7128 void
7129 expand_jump_insn (enum insn_code icode, unsigned int nops,
7130 struct expand_operand *ops)
7132 if (!maybe_expand_jump_insn (icode, nops, ops))
7133 gcc_unreachable ();