/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);

/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
          && (rtx_equal_p (target, op0)
              || (op1 && rtx_equal_p (target, op1))))
        {
          /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
             over expanding it as temp = MEM op X, MEM = temp.  If the target
             supports MEM = MEM op X instructions, it is sometimes too hard
             to reconstruct that form later, especially if X is also a memory,
             and due to multiple occurrences of addresses the address might
             be forced into a register unnecessarily.
             Note that not emitting the REG_EQUAL note might inhibit
             CSE in some cases.  */
          set = single_set (last_insn);
          if (set
              && GET_CODE (SET_SRC (set)) == code
              && MEM_P (SET_DEST (set))
              && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
                  || (op1 && rtx_equal_p (SET_DEST (set),
                                          XEXP (SET_SRC (set), 1)))))
            return 1;
        }
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
        if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
          {
            note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
            if (GET_MODE_SIZE (GET_MODE (op0))
                > GET_MODE_SIZE (GET_MODE (target)))
              note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
                                         note, GET_MODE (op0));
            else
              note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
                                         note, GET_MODE (op0));
            break;
          }
        /* FALLTHRU */
      default:
        note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
        break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be the mode of
   OP0, but if that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
    return to_mode;

  return result;
}
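
/* For example, on a 32-bit target, a DImode widening multiply whose
   operands are (const_int 100) and an SImode register sees m0 ==
   VOIDmode, so the register's SImode is chosen as the from_mode.  The
   result is also capped at TO_MODE, so we never report a from_mode
   wider than the destination.  */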

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
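
/* As an illustration: widening a QImode operand to SImode for an AND
   with NO_EXTEND nonzero just wraps the operand in a paradoxical
   lowpart SUBREG, leaving bits 8-31 undefined.  That is safe because
   the caller only uses the low QImode part of the SImode result.  A
   right shift could not use this trick, since the undefined high bits
   would be shifted into the live part of the result.  */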

/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
                           rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
                                         TYPE_MODE (TREE_TYPE (ops->op2)),
                                         tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
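
/* As a concrete reading of the table above: a DOT_PROD_EXPR that
   multiplies two V8HImode vectors and accumulates into a V4SImode sum
   arrives here with nops == 3; OP0 and OP1 are the V8HImode inputs,
   WIDE_OP is the V4SImode accumulator, and WMODE is taken from the
   accumulator's type.  */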

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}
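
/* For example, a fused multiply-add can be expanded through this
   routine using fma_optab: expand_ternary_op (SFmode, fma_optab, op0,
   op1, op2, target, 0) yields an rtx computing op0 * op1 + op2 when
   the target provides a matching insn.  */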

/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
                                         mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

static rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  enum insn_code icode;
  rtvec vec;
  rtx ret;
  int i, n;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  n = GET_MODE_NUNITS (vmode);
  vec = rtvec_alloc (n);
  for (i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;

  if (CONSTANT_P (op))
    return gen_rtx_CONST_VECTOR (vmode, vec);

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = optab_handler (vec_init_optab, vmode);
  if (icode == CODE_FOR_nothing)
    return NULL;

  ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
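
/* E.g. broadcasting (const_int 7) into V4SImode returns a CONST_VECTOR
   of four 7s without emitting any code, while broadcasting a register
   value goes through the target's vec_init pattern and returns a fresh
   pseudo holding the filled vector.  */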

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
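
/* Worked example with BITS_PER_WORD == 32: for a DImode left shift by
   40, SUPERWORD_OP1 is 8 (the count minus 32), so INTO_TARGET becomes
   OUTOF_INPUT shifted left by 8 and OUTOF_TARGET is filled with zeros.
   For an arithmetic right shift, OUTOF_TARGET is instead filled with
   copies of the sign bit, computed as OUTOF_INPUT >> 31.  */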

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
                                            op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_wide_int_const
            (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
                                                op1_mode), op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
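
/* Worked example with BITS_PER_WORD == 32: for a DImode left shift by
   OP1 = 10, the code above computes
     INTO_TARGET  = (INTO_INPUT << 10) | (OUTOF_INPUT >> 22)
     OUTOF_TARGET = OUTOF_INPUT << 10.
   With a variable OP1 and SHIFT_MASK == 31, the reverse shift by
   32 - OP1 is split into a shift by 1 followed by a shift by OP1 ^ 31,
   which equals 31 - OP1 for counts in [0, 31], so a shift by the
   out-of-range count 32 is never emitted.  */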

/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                            cmp_code, cmp1, cmp2,
                                            outof_input, into_input,
                                            op1, superword_op1,
                                            outof_target, into_target,
                                            unsignedp, methods, shift_mask))
        return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
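
/* To summarize the strategy above for a variable DImode shift on a
   32-bit target: if the subword-versus-superword test folds at compile
   time, exactly one of the two helper routines is used.  Otherwise we
   prefer a branchless sequence that computes both candidate results
   and selects one with conditional moves, and fall back to an explicit
   compare-and-branch diamond only when conditional moves are
   unavailable or fail to expand.  */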

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
                                  _______________________
                                 [__op0_high_|__op0_low__]
                                  _______________________
        *                        [__op1_high_|__op1_low__]
        __________________________________________________
                                  _______________________
    (1)                          [__op0_low__*__op1_low__]
                     _______________________
    (2a)            [__op0_low__*__op1_high_]
                     _______________________
    (2b)            [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

   If both operands are positive then no adjustment is needed.

   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 0 or -1.  */
static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
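
/* Worked example of the signed-widening adjustment, using 8-bit words
   for brevity: the unsigned product of the low words 0x80 and 0x03
   should be 0x0180, but a signed widening multiply computes
   (-128) * 3 = -384 = 0xfe80.  Adding op1_low * 2**8 (0x0300) yields
   0x0180 again, as the comment before this function describes.  The
   code folds that correction into op0_high (and symmetrically into
   op1_high) before the cross products are formed.  */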

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
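
/* For example, expand_simple_binop (SImode, PLUS, x, y, NULL_RTX, 1,
   OPTAB_LIB_WIDEN) is equivalent to expand_binop (SImode, add_optab,
   x, y, NULL_RTX, 1, OPTAB_LIB_WIDEN); it is a convenient form for
   callers that have an rtx code in hand rather than an optab.  */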

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
                          int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
          > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
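
/* E.g. on a target where materializing a 32-bit immediate takes two
   instructions, an AND with (const_int 0x12345678) may be cheaper with
   the constant loaded into a pseudo once, which also lets CSE share
   that load across several uses.  */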

/* Helper function for expand_binop: handle the case where there
   is an insn that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (machine_mode mode, optab binoptab,
                       rtx op0, rtx op1,
                       rtx target, int unsignedp, enum optab_methods methods,
                       rtx_insn *last)
{
  machine_mode from_mode = widened_mode (mode, op0, op1);
  enum insn_code icode = find_widening_optab_handler (binoptab, mode,
                                                      from_mode, 1);
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
           ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[(int) icode].operand[0].mode;
      if (VECTOR_MODE_P (mode)
          && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
        {
          delete_insns_since (last);
          return NULL_RTX;
        }
    }
  else
    tmp_mode = mode;

  create_output_operand (&ops[0], target, tmp_mode);
  create_input_operand (&ops[1], xop0, mode0);
  create_input_operand (&ops[2], xop1, mode1);
  pat = maybe_gen_insn (icode, 3, ops);
  if (pat)
    {
      /* If PAT is composed of more than one insn, try to add an appropriate
         REG_EQUAL note to it.  If we can't because TEMP conflicts with an
         operand, call expand_binop again, this time without a target.  */
      if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
          && ! add_equal_note (pat, ops[0].value,
                               optab_to_code (binoptab),
                               ops[1].value, ops[2].value))
        {
          delete_insns_since (last);
          return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                               unsignedp, methods);
        }

      emit_insn (pat);
      return ops[0].value;
    }
  delete_insns_since (last);
  return NULL_RTX;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class mclass;
  machine_mode wider_mode;
  rtx libfunc;
  rtx temp;
  rtx_insn *entry_last = get_last_insn ();
  rtx_insn *last;

  mclass = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && CONST_INT_P (op1))
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }
  /* For shifts, a constant invalid op1 might have been expanded in a
     different mode than MODE.  As those are invalid, force them to a
     register to avoid further problems during expansion.  */
  else if (CONST_INT_P (op1)
           && shift_optab_p (binoptab)
           && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
    {
      op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
      op1 = force_reg (GET_MODE_INNER (mode), op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && find_widening_optab_handler (binoptab, mode,
                                      widened_mode (mode, op0, op1), 1)
         != CODE_FOR_nothing)
    {
      temp = expand_binop_directly (mode, binoptab, op0, op1, target,
                                    unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
        && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
       || (binoptab == rotr_optab
           && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
      && mclass == MODE_INT)
    {
      optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
      rtx newop1;
      unsigned int bits = GET_MODE_PRECISION (mode);

      if (CONST_INT_P (op1))
        newop1 = GEN_INT (bits - INTVAL (op1));
      else if (targetm.shift_truncation_mask (mode) == bits - 1)
        newop1 = negate_rtx (GET_MODE (op1), op1);
      else
        newop1 = expand_binop (GET_MODE (op1), sub_optab,
                               gen_int_mode (bits, GET_MODE (op1)), op1,
                               NULL_RTX, unsignedp, OPTAB_DIRECT);

      temp = expand_binop_directly (mode, otheroptab, op0, newop1,
                                    target, unsignedp, methods, last);
      if (temp)
        return temp;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
      && (widening_optab_handler ((unsignedp ? umul_widen_optab
                                             : smul_widen_optab),
                                  GET_MODE_2XWIDER_MODE (mode), mode)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* If this is a vector shift by a scalar, see if we can do a vector
     shift by a vector.  If so, broadcast the scalar into a vector.  */
  if (mclass == MODE_VECTOR_INT)
    {
      optab otheroptab = unknown_optab;

      if (binoptab == ashl_optab)
        otheroptab = vashl_optab;
      else if (binoptab == ashr_optab)
        otheroptab = vashr_optab;
      else if (binoptab == lshr_optab)
        otheroptab = vlshr_optab;
      else if (binoptab == rotl_optab)
        otheroptab = vrotl_optab;
      else if (binoptab == rotr_optab)
        otheroptab = vrotr_optab;

      if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
        {
          /* The scalar may have been extended to be too wide.  Truncate
             it back to the proper size to fit in the broadcast vector.  */
          machine_mode inner_mode = GET_MODE_INNER (mode);
          if (!CONST_INT_P (op1)
              && (GET_MODE_BITSIZE (inner_mode)
                  < GET_MODE_BITSIZE (GET_MODE (op1))))
            op1 = force_reg (inner_mode,
                             simplify_gen_unary (TRUNCATE, inner_mode, op1,
                                                 GET_MODE (op1)));
          rtx vop1 = expand_vector_broadcast (mode, op1);
          if (vop1)
            {
              temp = expand_binop_directly (mode, otheroptab, op0, vop1,
                                            target, unsignedp, methods, last);
              if (temp)
                return temp;
            }
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (mclass)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (find_widening_optab_handler ((unsignedp
                                                  ? umul_widen_optab
                                                  : smul_widen_optab),
                                                 GET_MODE_WIDER_MODE (wider_mode),
                                                 mode, 0)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && mclass == MODE_INT)
              {
                no_extend = 1;
                xop0 = avoid_expensive_constant (mode, binoptab, 0,
                                                 xop0, unsignedp);
                if (binoptab != ashl_optab)
                  xop1 = avoid_expensive_constant (mode, binoptab, 1,
                                                   xop1, unsignedp);
              }

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (mclass != MODE_INT
                    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))
    std::swap (op0, op1);

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0
          || target == op0
          || target == op1
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && mclass == MODE_INT
      && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && CONST_INT_P (op1))
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx_insn *insns;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0
              || target == op0
              || target == op1
              || !valid_multiword_target_p (target))
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              emit_insn (insns);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && mclass == MODE_INT
      && CONST_INT_P (op1)
      && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
      && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx_insn *insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0
          || target == op0
          || target == op1
          || !REG_P (target)
          || !valid_multiword_target_p (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          emit_insn (insns);
          return target;
        }
    }

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If
         STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1
         since it is the one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_clobber (xtarget);

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx_insn *temp = emit_move_insn (target, xtarget);

              set_dst_reg_note (temp, REG_EQUAL,
                                gen_rtx_fmt_ee (optab_to_code (binoptab),
                                                mode, copy_rtx (xop0),
                                                copy_rtx (xop1)),
                                target);
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }

  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;
      if (widening_optab_handler (umul_widen_optab, mode, word_mode)
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && widening_optab_handler (smul_widen_optab, mode, word_mode)
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_dst_reg_note (temp,
                                REG_EQUAL,
                                gen_rtx_fmt_ee (MULT, mode,
                                                copy_rtx (op0),
                                                copy_rtx (op1)),
                                target ? target : product);
            }
          return product;
        }
    }

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
  if (libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx_insn *insns;
      rtx op1x = op1;
      machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_optab_p (binoptab))
        {
          op1_mode = targetm.libgcc_shift_count_mode ();
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (op1_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      bool trapv = trapv_binoptab_p (binoptab);
      target = gen_reg_rtx (mode);
      emit_libcall_block_1 (insns, target, value,
                            trapv ? NULL_RTX
                            : gen_rtx_fmt_ee (optab_to_code (binoptab),
                                              mode, op0, op1), trapv);

      return target;
    }
1772 delete_insns_since (last);
1774 /* It can't be done in this mode. Can we do it in a wider mode? */
1776 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1777 || methods == OPTAB_MUST_WIDEN))
1779 /* Caller says, don't even try. */
1780 delete_insns_since (entry_last);
1781 return 0;
1784 /* Compute the value of METHODS to pass to recursive calls.
1785 Don't allow widening to be tried recursively. */
1787 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1789 /* Look for a wider mode of the same class for which it appears we can do
1790 the operation. */
1792 if (CLASS_HAS_WIDER_MODES_P (mclass))
1794 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1795 wider_mode != VOIDmode;
1796 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1798 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
1799 != CODE_FOR_nothing
1800 || (methods == OPTAB_LIB
1801 && optab_libfunc (binoptab, wider_mode)))
1803 rtx xop0 = op0, xop1 = op1;
1804 int no_extend = 0;
1806 /* For certain integer operations, we need not actually extend
1807 the narrow operands, as long as we will truncate
1808 the results to the same narrowness. */
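/* (The low N bits of the result then depend only on the low N bits of
   the operands, so whatever the extension leaves in the high bits is
   harmless.  Shift counts are the exception, which is why the second
   operand of a shift is still extended below.)  */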
1810 if ((binoptab == ior_optab || binoptab == and_optab
1811 || binoptab == xor_optab
1812 || binoptab == add_optab || binoptab == sub_optab
1813 || binoptab == smul_optab || binoptab == ashl_optab)
1814 && mclass == MODE_INT)
1815 no_extend = 1;
1817 xop0 = widen_operand (xop0, wider_mode, mode,
1818 unsignedp, no_extend);
1820 /* The second operand of a shift must always be extended. */
1821 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1822 no_extend && binoptab != ashl_optab);
1824 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1825 unsignedp, methods);
1826 if (temp)
1828 if (mclass != MODE_INT
1829 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1831 if (target == 0)
1832 target = gen_reg_rtx (mode);
1833 convert_move (target, temp, 0);
1834 return target;
1836 else
1837 return gen_lowpart (mode, temp);
1839 else
1840 delete_insns_since (last);
1845 delete_insns_since (entry_last);
1846 return 0;
1849 /* Expand a binary operator which has both signed and unsigned forms.
1850 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1851 signed operations.
1853 If we widen unsigned operands, we may use a signed wider operation instead
1854 of an unsigned wider operation, since the result would be the same. */
1856 rtx
1857 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1858 rtx op0, rtx op1, rtx target, int unsignedp,
1859 enum optab_methods methods)
1861 rtx temp;
1862 optab direct_optab = unsignedp ? uoptab : soptab;
1863 bool save_enable;
1865 /* Do it without widening, if possible. */
1866 temp = expand_binop (mode, direct_optab, op0, op1, target,
1867 unsignedp, OPTAB_DIRECT);
1868 if (temp || methods == OPTAB_DIRECT)
1869 return temp;
1871 /* Try widening to a signed int. Disable any direct use of any
1872 signed insn in the current mode. */
1873 save_enable = swap_optab_enable (soptab, mode, false);
1875 temp = expand_binop (mode, soptab, op0, op1, target,
1876 unsignedp, OPTAB_WIDEN);
1878 /* For unsigned operands, try widening to an unsigned int. */
1879 if (!temp && unsignedp)
1880 temp = expand_binop (mode, uoptab, op0, op1, target,
1881 unsignedp, OPTAB_WIDEN);
1882 if (temp || methods == OPTAB_WIDEN)
1883 goto egress;
1885 /* Use the right width libcall if that exists. */
1886 temp = expand_binop (mode, direct_optab, op0, op1, target,
1887 unsignedp, OPTAB_LIB);
1888 if (temp || methods == OPTAB_LIB)
1889 goto egress;
1891 /* Must widen and use a libcall; use either signed or unsigned. */
1892 temp = expand_binop (mode, soptab, op0, op1, target,
1893 unsignedp, methods);
1894 if (!temp && unsignedp)
1895 temp = expand_binop (mode, uoptab, op0, op1, target,
1896 unsignedp, methods);
1898 egress:
1899 /* Undo the fiddling above. */
1900 if (save_enable)
1901 swap_optab_enable (soptab, mode, true);
1902 return temp;
1905 /* Generate code to perform an operation specified by UNOPPTAB
1906 on operand OP0, with two results to TARG0 and TARG1.
1907 We assume that the order of the operands for the instruction
1908 is TARG0, TARG1, OP0.
1910 Either TARG0 or TARG1 may be zero, but what that means is that
1911 the result is not actually wanted. We will generate it into
1912 a dummy pseudo-reg and discard it. They may not both be zero.
1914 Returns 1 if this operation can be performed; 0 if not. */
1916 int
1917 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1918 int unsignedp)
1920 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1921 enum mode_class mclass;
1922 machine_mode wider_mode;
1923 rtx_insn *entry_last = get_last_insn ();
1924 rtx_insn *last;
1926 mclass = GET_MODE_CLASS (mode);
1928 if (!targ0)
1929 targ0 = gen_reg_rtx (mode);
1930 if (!targ1)
1931 targ1 = gen_reg_rtx (mode);
1933 /* Record where to go back to if we fail. */
1934 last = get_last_insn ();
1936 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1938 struct expand_operand ops[3];
1939 enum insn_code icode = optab_handler (unoptab, mode);
1941 create_fixed_operand (&ops[0], targ0);
1942 create_fixed_operand (&ops[1], targ1);
1943 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1944 if (maybe_expand_insn (icode, 3, ops))
1945 return 1;
1948 /* It can't be done in this mode. Can we do it in a wider mode? */
1950 if (CLASS_HAS_WIDER_MODES_P (mclass))
1952 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1953 wider_mode != VOIDmode;
1954 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1956 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1958 rtx t0 = gen_reg_rtx (wider_mode);
1959 rtx t1 = gen_reg_rtx (wider_mode);
1960 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1962 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1964 convert_move (targ0, t0, unsignedp);
1965 convert_move (targ1, t1, unsignedp);
1966 return 1;
1968 else
1969 delete_insns_since (last);
1974 delete_insns_since (entry_last);
1975 return 0;
1978 /* Generate code to perform an operation specified by BINOPTAB
1979 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1980 We assume that the order of the operands for the instruction
1981 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1982 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1984 Either TARG0 or TARG1 may be zero, but what that means is that
1985 the result is not actually wanted. We will generate it into
1986 a dummy pseudo-reg and discard it. They may not both be zero.
1988 Returns 1 if this operation can be performed; 0 if not. */
1990 int
1991 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1992 int unsignedp)
1994 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1995 enum mode_class mclass;
1996 machine_mode wider_mode;
1997 rtx_insn *entry_last = get_last_insn ();
1998 rtx_insn *last;
2000 mclass = GET_MODE_CLASS (mode);
2002 if (!targ0)
2003 targ0 = gen_reg_rtx (mode);
2004 if (!targ1)
2005 targ1 = gen_reg_rtx (mode);
2007 /* Record where to go back to if we fail. */
2008 last = get_last_insn ();
2010 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2012 struct expand_operand ops[4];
2013 enum insn_code icode = optab_handler (binoptab, mode);
2014 machine_mode mode0 = insn_data[icode].operand[1].mode;
2015 machine_mode mode1 = insn_data[icode].operand[2].mode;
2016 rtx xop0 = op0, xop1 = op1;
2018 /* If we are optimizing, force expensive constants into a register. */
2019 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2020 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2022 create_fixed_operand (&ops[0], targ0);
2023 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2024 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2025 create_fixed_operand (&ops[3], targ1);
2026 if (maybe_expand_insn (icode, 4, ops))
2027 return 1;
2028 delete_insns_since (last);
2031 /* It can't be done in this mode. Can we do it in a wider mode? */
2033 if (CLASS_HAS_WIDER_MODES_P (mclass))
2035 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2036 wider_mode != VOIDmode;
2037 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2039 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2041 rtx t0 = gen_reg_rtx (wider_mode);
2042 rtx t1 = gen_reg_rtx (wider_mode);
2043 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2044 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2046 if (expand_twoval_binop (binoptab, cop0, cop1,
2047 t0, t1, unsignedp))
2049 convert_move (targ0, t0, unsignedp);
2050 convert_move (targ1, t1, unsignedp);
2051 return 1;
2053 else
2054 delete_insns_since (last);
2059 delete_insns_since (entry_last);
2060 return 0;
2063 /* Expand the two-valued library call indicated by BINOPTAB, but
2064 preserve only one of the values. If TARG0 is non-NULL, the first
2065 value is placed into TARG0; otherwise the second value is placed
2066 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2067 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2068 This routine assumes that the value returned by the library call is
2069 in an integral mode twice as wide as the mode of OP0.
2070 Returns 1 if the call was successful. */
2072 bool
2073 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2074 rtx targ0, rtx targ1, enum rtx_code code)
2076 machine_mode mode;
2077 machine_mode libval_mode;
2078 rtx libval;
2079 rtx_insn *insns;
2080 rtx libfunc;
2082 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2083 gcc_assert (!targ0 != !targ1);
2085 mode = GET_MODE (op0);
2086 libfunc = optab_libfunc (binoptab, mode);
2087 if (!libfunc)
2088 return false;
2090 /* The value returned by the library function will have twice as
2091 many bits as the nominal MODE. */
2092 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2093 MODE_INT);
2094 start_sequence ();
2095 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2096 libval_mode, 2,
2097 op0, mode,
2098 op1, mode);
2099 /* Get the part of VAL containing the value that we want. */
2100 libval = simplify_gen_subreg (mode, libval, libval_mode,
2101 targ0 ? 0 : GET_MODE_SIZE (mode));
2102 insns = get_insns ();
2103 end_sequence ();
2104 /* Move the result into the desired location. */
2105 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2106 gen_rtx_fmt_ee (code, mode, op0, op1));
2108 return true;
2112 /* Wrapper around expand_unop which takes an rtx code to specify
2113 the operation to perform, not an optab pointer. All other
2114 arguments are the same. */
2115 rtx
2116 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2117 rtx target, int unsignedp)
2119 optab unop = code_to_optab (code);
2120 gcc_assert (unop);
2122 return expand_unop (mode, unop, op0, target, unsignedp);
2125 /* Try calculating
2126 (clz:narrow x)
2128 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2130 A similar operation can be used for clrsb. UNOPTAB says which operation
2131 we are trying to expand. */
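/* For instance, an 8-bit clz done via a 32-bit one (clzN denoting
   clz at N bits): clz8 (0x10) == clz32 (0x00000010) - (32 - 8)
   == 27 - 24 == 3.  */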
2132 static rtx
2133 widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
2135 enum mode_class mclass = GET_MODE_CLASS (mode);
2136 if (CLASS_HAS_WIDER_MODES_P (mclass))
2138 machine_mode wider_mode;
2139 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2140 wider_mode != VOIDmode;
2141 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2143 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2145 rtx xop0, temp;
2146 rtx_insn *last;
2148 last = get_last_insn ();
2150 if (target == 0)
2151 target = gen_reg_rtx (mode);
2152 xop0 = widen_operand (op0, wider_mode, mode,
2153 unoptab != clrsb_optab, false);
2154 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2155 unoptab != clrsb_optab);
2156 if (temp != 0)
2157 temp = expand_binop
2158 (wider_mode, sub_optab, temp,
2159 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2160 - GET_MODE_PRECISION (mode),
2161 wider_mode),
2162 target, true, OPTAB_DIRECT);
2163 if (temp == 0)
2164 delete_insns_since (last);
2166 return temp;
2170 return 0;
2173 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2174 quantities, choosing which based on whether the high word is nonzero. */
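/* I.e. for a value of 2*W bits, clz == clz (high word) when the high
   word is nonzero, and W + clz (low word) otherwise; __builtin_clz
   being undefined at zero, the all-zero input needs no special care.  */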
2175 static rtx
2176 expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
2178 rtx xop0 = force_reg (mode, op0);
2179 rtx subhi = gen_highpart (word_mode, xop0);
2180 rtx sublo = gen_lowpart (word_mode, xop0);
2181 rtx_code_label *hi0_label = gen_label_rtx ();
2182 rtx_code_label *after_label = gen_label_rtx ();
2183 rtx_insn *seq;
2184 rtx temp, result;
2186 /* If we were not given a target, use a word_mode register, not a
2187 'mode' register. The result will fit, and nobody is expecting
2188 anything bigger (the return type of __builtin_clz* is int). */
2189 if (!target)
2190 target = gen_reg_rtx (word_mode);
2192 /* In any case, write to a word_mode scratch in both branches of the
2193 conditional, so we can ensure there is a single move insn setting
2194 'target' to tag a REG_EQUAL note on. */
2195 result = gen_reg_rtx (word_mode);
2197 start_sequence ();
2199 /* If the high word is not equal to zero,
2200 then clz of the full value is clz of the high word. */
2201 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2202 word_mode, true, hi0_label);
2204 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2205 if (!temp)
2206 goto fail;
2208 if (temp != result)
2209 convert_move (result, temp, true);
2211 emit_jump_insn (targetm.gen_jump (after_label));
2212 emit_barrier ();
2214 /* Else clz of the full value is clz of the low word plus the number
2215 of bits in the high word. */
2216 emit_label (hi0_label);
2218 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2219 if (!temp)
2220 goto fail;
2221 temp = expand_binop (word_mode, add_optab, temp,
2222 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2223 result, true, OPTAB_DIRECT);
2224 if (!temp)
2225 goto fail;
2226 if (temp != result)
2227 convert_move (result, temp, true);
2229 emit_label (after_label);
2230 convert_move (target, result, true);
2232 seq = get_insns ();
2233 end_sequence ();
2235 add_equal_note (seq, target, CLZ, xop0, 0);
2236 emit_insn (seq);
2237 return target;
2239 fail:
2240 end_sequence ();
2241 return 0;
2244 /* Try calculating popcount of a double-word quantity as two popcount's of
2245 word-sized quantities and summing up the results. */
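/* The two halves cover disjoint bits, so e.g. with 32-bit words
   popcount64 (x) == popcount32 (low word) + popcount32 (high word).  */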
2246 static rtx
2247 expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
2249 rtx t0, t1, t;
2250 rtx_insn *seq;
2252 start_sequence ();
2254 t0 = expand_unop_direct (word_mode, popcount_optab,
2255 operand_subword_force (op0, 0, mode), NULL_RTX,
2256 true);
2257 t1 = expand_unop_direct (word_mode, popcount_optab,
2258 operand_subword_force (op0, 1, mode), NULL_RTX,
2259 true);
2260 if (!t0 || !t1)
2262 end_sequence ();
2263 return NULL_RTX;
2266 /* If we were not given a target, use a word_mode register, not a
2267 'mode' register. The result will fit, and nobody is expecting
2268 anything bigger (the return type of __builtin_popcount* is int). */
2269 if (!target)
2270 target = gen_reg_rtx (word_mode);
2272 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2274 seq = get_insns ();
2275 end_sequence ();
2277 add_equal_note (seq, t, POPCOUNT, op0, 0);
2278 emit_insn (seq);
2279 return t;
2282 /* Try calculating
2283 (parity:wide x)
2285 (parity:narrow (low (x) ^ high (x))) */
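/* Parity is the XOR of all bits and XOR is associative, so e.g. with
   32-bit words parity64 (x) == parity32 (low word ^ high word).  */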
2286 static rtx
2287 expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
2289 rtx t = expand_binop (word_mode, xor_optab,
2290 operand_subword_force (op0, 0, mode),
2291 operand_subword_force (op0, 1, mode),
2292 NULL_RTX, 0, OPTAB_DIRECT);
2293 return expand_unop (word_mode, parity_optab, t, target, true);
2296 /* Try calculating
2297 (bswap:narrow x)
2299 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
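/* E.g. a 16-bit bswap via a 32-bit one: bswap32 (0x00001234)
   == 0x34120000, and a logical shift right by 32 - 16 bits leaves
   0x3412, which is bswap16 (0x1234).  */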
2300 static rtx
2301 widen_bswap (machine_mode mode, rtx op0, rtx target)
2303 enum mode_class mclass = GET_MODE_CLASS (mode);
2304 machine_mode wider_mode;
2305 rtx x;
2306 rtx_insn *last;
2308 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2309 return NULL_RTX;
2311 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2312 wider_mode != VOIDmode;
2313 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2314 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2315 goto found;
2316 return NULL_RTX;
2318 found:
2319 last = get_last_insn ();
2321 x = widen_operand (op0, wider_mode, mode, true, true);
2322 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2324 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2325 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2326 if (x != 0)
2327 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2328 GET_MODE_BITSIZE (wider_mode)
2329 - GET_MODE_BITSIZE (mode),
2330 NULL_RTX, true);
2332 if (x != 0)
2334 if (target == 0)
2335 target = gen_reg_rtx (mode);
2336 emit_move_insn (target, gen_lowpart (mode, x));
2338 else
2339 delete_insns_since (last);
2341 return target;
2344 /* Try calculating bswap as two bswaps of two word-sized operands. */
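/* Byte-reversing a value of 2*W bits amounts to byte-reversing each
   W-bit half and exchanging the two halves.  */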
2346 static rtx
2347 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2349 rtx t0, t1;
2351 t1 = expand_unop (word_mode, bswap_optab,
2352 operand_subword_force (op, 0, mode), NULL_RTX, true);
2353 t0 = expand_unop (word_mode, bswap_optab,
2354 operand_subword_force (op, 1, mode), NULL_RTX, true);
2356 if (target == 0 || !valid_multiword_target_p (target))
2357 target = gen_reg_rtx (mode);
2358 if (REG_P (target))
2359 emit_clobber (target);
2360 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2361 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2363 return target;
2366 /* Try calculating (parity x) as (and (popcount x) 1), where
2367 popcount can also be done in a wider mode. */
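/* E.g. parity (0b1011) == popcount (0b1011) & 1 == 3 & 1 == 1.  */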
2368 static rtx
2369 expand_parity (machine_mode mode, rtx op0, rtx target)
2371 enum mode_class mclass = GET_MODE_CLASS (mode);
2372 if (CLASS_HAS_WIDER_MODES_P (mclass))
2374 machine_mode wider_mode;
2375 for (wider_mode = mode; wider_mode != VOIDmode;
2376 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2378 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2380 rtx xop0, temp;
2381 rtx_insn *last;
2383 last = get_last_insn ();
2385 if (target == 0)
2386 target = gen_reg_rtx (mode);
2387 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2388 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2389 true);
2390 if (temp != 0)
2391 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2392 target, true, OPTAB_DIRECT);
2393 if (temp == 0)
2394 delete_insns_since (last);
2396 return temp;
2400 return 0;
2403 /* Try calculating ctz(x) as K - clz(x & -x) ,
2404 where K is GET_MODE_PRECISION(mode) - 1.
2406 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2407 don't have to worry about what the hardware does in that case. (If
2408 the clz instruction produces the usual value at 0, which is K, the
2409 result of this code sequence will be -1; expand_ffs, below, relies
2410 on this. It might be nice to have it be K instead, for consistency
2411 with the (very few) processors that provide a ctz with a defined
2412 value, but that would take one more instruction, and it would be
2413 less convenient for expand_ffs anyway.) */
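/* Worked example in a 32-bit mode, where K == 31: for x == 8 we get
   x & -x == 8, clz (8) == 28, and 31 - 28 == 3 == ctz (8).  */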
2415 static rtx
2416 expand_ctz (machine_mode mode, rtx op0, rtx target)
2418 rtx_insn *seq;
2419 rtx temp;
2421 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2422 return 0;
2424 start_sequence ();
2426 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2427 if (temp)
2428 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2429 true, OPTAB_DIRECT);
2430 if (temp)
2431 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2432 if (temp)
2433 temp = expand_binop (mode, sub_optab,
2434 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2435 temp, target,
2436 true, OPTAB_DIRECT);
2437 if (temp == 0)
2439 end_sequence ();
2440 return 0;
2443 seq = get_insns ();
2444 end_sequence ();
2446 add_equal_note (seq, temp, CTZ, op0, 0);
2447 emit_insn (seq);
2448 return temp;
2452 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2453 else with the sequence used by expand_clz.
2455 The ffs builtin promises to return zero for a zero value and ctz/clz
2456 may have an undefined value in that case. If they do not give us a
2457 convenient value, we have to generate a test and branch. */
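/* The identity used below is ffs (x) == ctz (x) + 1 for x != 0, with
   ffs (0) == 0 supplied either by a conveniently defined ctz/clz
   value at zero or by an explicit test and branch.  */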
2458 static rtx
2459 expand_ffs (machine_mode mode, rtx op0, rtx target)
2461 HOST_WIDE_INT val = 0;
2462 bool defined_at_zero = false;
2463 rtx temp;
2464 rtx_insn *seq;
2466 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2468 start_sequence ();
2470 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2471 if (!temp)
2472 goto fail;
2474 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2476 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2478 start_sequence ();
2479 temp = expand_ctz (mode, op0, 0);
2480 if (!temp)
2481 goto fail;
2483 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2485 defined_at_zero = true;
2486 val = (GET_MODE_PRECISION (mode) - 1) - val;
2489 else
2490 return 0;
2492 if (defined_at_zero && val == -1)
2493 /* No correction needed at zero. */;
2494 else
2496 /* We don't try to do anything clever with the situation found
2497 on some processors (e.g. Alpha) where ctz(0:mode) ==
2498 bitsize(mode). If someone can think of a way to send N to -1
2499 and leave alone all values in the range 0..N-1 (where N is a
2500 power of two), cheaper than this test-and-branch, please add it.
2502 The test-and-branch is done after the operation itself, in case
2503 the operation sets condition codes that can be recycled for this.
2504 (This is true on i386, for instance.) */
2506 rtx_code_label *nonzero_label = gen_label_rtx ();
2507 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2508 mode, true, nonzero_label);
2510 convert_move (temp, GEN_INT (-1), false);
2511 emit_label (nonzero_label);
2514 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2515 to produce a value in the range 0..bitsize. */
2516 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2517 target, false, OPTAB_DIRECT);
2518 if (!temp)
2519 goto fail;
2521 seq = get_insns ();
2522 end_sequence ();
2524 add_equal_note (seq, temp, FFS, op0, 0);
2525 emit_insn (seq);
2526 return temp;
2528 fail:
2529 end_sequence ();
2530 return 0;
2533 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2534 conditions, VAL may already be a SUBREG against which we cannot generate
2535 a further SUBREG. In this case, we expect forcing the value into a
2536 register will work around the situation. */
2538 static rtx
2539 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2540 machine_mode imode)
2542 rtx ret;
2543 ret = lowpart_subreg (omode, val, imode);
2544 if (ret == NULL)
2546 val = force_reg (imode, val);
2547 ret = lowpart_subreg (omode, val, imode);
2548 gcc_assert (ret != NULL);
2550 return ret;
2553 /* Expand a floating point absolute value or negation operation via a
2554 logical operation on the sign bit. */
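/* E.g. for IEEE single precision, ABS is X & 0x7fffffff (clear the
   sign bit) and NEG is X ^ 0x80000000 (flip it).  */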
2556 static rtx
2557 expand_absneg_bit (enum rtx_code code, machine_mode mode,
2558 rtx op0, rtx target)
2560 const struct real_format *fmt;
2561 int bitpos, word, nwords, i;
2562 machine_mode imode;
2563 rtx temp;
2564 rtx_insn *insns;
2566 /* The format has to have a simple sign bit. */
2567 fmt = REAL_MODE_FORMAT (mode);
2568 if (fmt == NULL)
2569 return NULL_RTX;
2571 bitpos = fmt->signbit_rw;
2572 if (bitpos < 0)
2573 return NULL_RTX;
2575 /* Don't create negative zeros if the format doesn't support them. */
2576 if (code == NEG && !fmt->has_signed_zero)
2577 return NULL_RTX;
2579 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2581 imode = int_mode_for_mode (mode);
2582 if (imode == BLKmode)
2583 return NULL_RTX;
2584 word = 0;
2585 nwords = 1;
2587 else
2589 imode = word_mode;
2591 if (FLOAT_WORDS_BIG_ENDIAN)
2592 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2593 else
2594 word = bitpos / BITS_PER_WORD;
2595 bitpos = bitpos % BITS_PER_WORD;
2596 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2599 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2600 if (code == ABS)
2601 mask = ~mask;
2603 if (target == 0
2604 || target == op0
2605 || (nwords > 1 && !valid_multiword_target_p (target)))
2606 target = gen_reg_rtx (mode);
2608 if (nwords > 1)
2610 start_sequence ();
2612 for (i = 0; i < nwords; ++i)
2614 rtx targ_piece = operand_subword (target, i, 1, mode);
2615 rtx op0_piece = operand_subword_force (op0, i, mode);
2617 if (i == word)
2619 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2620 op0_piece,
2621 immed_wide_int_const (mask, imode),
2622 targ_piece, 1, OPTAB_LIB_WIDEN);
2623 if (temp != targ_piece)
2624 emit_move_insn (targ_piece, temp);
2626 else
2627 emit_move_insn (targ_piece, op0_piece);
2630 insns = get_insns ();
2631 end_sequence ();
2633 emit_insn (insns);
2635 else
2637 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2638 gen_lowpart (imode, op0),
2639 immed_wide_int_const (mask, imode),
2640 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2641 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2643 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2644 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2645 target);
2648 return target;
2651 /* As expand_unop, but will fail rather than attempt the operation in a
2652 different mode or with a libcall. */
2653 static rtx
2654 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2655 int unsignedp)
2657 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2659 struct expand_operand ops[2];
2660 enum insn_code icode = optab_handler (unoptab, mode);
2661 rtx_insn *last = get_last_insn ();
2662 rtx_insn *pat;
2664 create_output_operand (&ops[0], target, mode);
2665 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2666 pat = maybe_gen_insn (icode, 2, ops);
2667 if (pat)
2669 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2670 && ! add_equal_note (pat, ops[0].value,
2671 optab_to_code (unoptab),
2672 ops[1].value, NULL_RTX))
2674 delete_insns_since (last);
2675 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2678 emit_insn (pat);
2680 return ops[0].value;
2683 return 0;
2686 /* Generate code to perform an operation specified by UNOPTAB
2687 on operand OP0, with result having machine-mode MODE.
2689 UNSIGNEDP is for the case where we have to widen the operands
2690 to perform the operation. It says to use zero-extension.
2692 If TARGET is nonzero, the value
2693 is generated there, if it is convenient to do so.
2694 In all cases an rtx is returned for the locus of the value;
2695 this may or may not be TARGET. */
2697 rtx
2698 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2699 int unsignedp)
2701 enum mode_class mclass = GET_MODE_CLASS (mode);
2702 machine_mode wider_mode;
2703 rtx temp;
2704 rtx libfunc;
2706 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2707 if (temp)
2708 return temp;
2710 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2712 /* Widening (or narrowing) clz needs special treatment. */
2713 if (unoptab == clz_optab)
2715 temp = widen_leading (mode, op0, target, unoptab);
2716 if (temp)
2717 return temp;
2719 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2720 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2722 temp = expand_doubleword_clz (mode, op0, target);
2723 if (temp)
2724 return temp;
2727 goto try_libcall;
2730 if (unoptab == clrsb_optab)
2732 temp = widen_leading (mode, op0, target, unoptab);
2733 if (temp)
2734 return temp;
2735 goto try_libcall;
2738 if (unoptab == popcount_optab
2739 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2740 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2741 && optimize_insn_for_speed_p ())
2743 temp = expand_doubleword_popcount (mode, op0, target);
2744 if (temp)
2745 return temp;
2748 if (unoptab == parity_optab
2749 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2750 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2751 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2752 && optimize_insn_for_speed_p ())
2754 temp = expand_doubleword_parity (mode, op0, target);
2755 if (temp)
2756 return temp;
2759 /* Widening (or narrowing) bswap needs special treatment. */
2760 if (unoptab == bswap_optab)
2762 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2763 or ROTATERT. First try these directly; if this fails, then try the
2764 obvious pair of shifts with allowed widening, as this will probably
2765 always be more efficient than the other fallback methods. */
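/* Rotating 0x1234 by 8 bits in either direction yields 0x3412,
   which is exactly the byte swap.  */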
2766 if (mode == HImode)
2768 rtx_insn *last;
2769 rtx temp1, temp2;
2771 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2773 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2774 unsignedp, OPTAB_DIRECT);
2775 if (temp)
2776 return temp;
2779 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2781 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2782 unsignedp, OPTAB_DIRECT);
2783 if (temp)
2784 return temp;
2787 last = get_last_insn ();
2789 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2790 unsignedp, OPTAB_WIDEN);
2791 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2792 unsignedp, OPTAB_WIDEN);
2793 if (temp1 && temp2)
2795 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2796 unsignedp, OPTAB_WIDEN);
2797 if (temp)
2798 return temp;
2801 delete_insns_since (last);
2804 temp = widen_bswap (mode, op0, target);
2805 if (temp)
2806 return temp;
2808 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2809 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2811 temp = expand_doubleword_bswap (mode, op0, target);
2812 if (temp)
2813 return temp;
2816 goto try_libcall;
2819 if (CLASS_HAS_WIDER_MODES_P (mclass))
2820 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2821 wider_mode != VOIDmode;
2822 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2824 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2826 rtx xop0 = op0;
2827 rtx_insn *last = get_last_insn ();
2829 /* For certain operations, we need not actually extend
2830 the narrow operand, as long as we will truncate the
2831 results to the same narrowness. */
2833 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2834 (unoptab == neg_optab
2835 || unoptab == one_cmpl_optab)
2836 && mclass == MODE_INT);
2838 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2839 unsignedp);
2841 if (temp)
2843 if (mclass != MODE_INT
2844 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2846 if (target == 0)
2847 target = gen_reg_rtx (mode);
2848 convert_move (target, temp, 0);
2849 return target;
2851 else
2852 return gen_lowpart (mode, temp);
2854 else
2855 delete_insns_since (last);
2859 /* These can be done a word at a time. */
2860 if (unoptab == one_cmpl_optab
2861 && mclass == MODE_INT
2862 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2863 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2865 int i;
2866 rtx_insn *insns;
2868 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2869 target = gen_reg_rtx (mode);
2871 start_sequence ();
2873 /* Do the actual arithmetic. */
2874 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2876 rtx target_piece = operand_subword (target, i, 1, mode);
2877 rtx x = expand_unop (word_mode, unoptab,
2878 operand_subword_force (op0, i, mode),
2879 target_piece, unsignedp);
2881 if (target_piece != x)
2882 emit_move_insn (target_piece, x);
2885 insns = get_insns ();
2886 end_sequence ();
2888 emit_insn (insns);
2889 return target;
2892 if (optab_to_code (unoptab) == NEG)
2894 /* Try negating floating point values by flipping the sign bit. */
2895 if (SCALAR_FLOAT_MODE_P (mode))
2897 temp = expand_absneg_bit (NEG, mode, op0, target);
2898 if (temp)
2899 return temp;
2902 /* If there is no negation pattern, and we have no negative zero,
2903 try subtracting from zero. */
2904 if (!HONOR_SIGNED_ZEROS (mode))
2906 temp = expand_binop (mode, (unoptab == negv_optab
2907 ? subv_optab : sub_optab),
2908 CONST0_RTX (mode), op0, target,
2909 unsignedp, OPTAB_DIRECT);
2910 if (temp)
2911 return temp;
2915 /* Try calculating parity (x) as popcount (x) % 2. */
2916 if (unoptab == parity_optab)
2918 temp = expand_parity (mode, op0, target);
2919 if (temp)
2920 return temp;
2923 /* Try implementing ffs (x) in terms of clz (x). */
2924 if (unoptab == ffs_optab)
2926 temp = expand_ffs (mode, op0, target);
2927 if (temp)
2928 return temp;
2931 /* Try implementing ctz (x) in terms of clz (x). */
2932 if (unoptab == ctz_optab)
2934 temp = expand_ctz (mode, op0, target);
2935 if (temp)
2936 return temp;
2939 try_libcall:
2940 /* Now try a library call in this mode. */
2941 libfunc = optab_libfunc (unoptab, mode);
2942 if (libfunc)
2944 rtx_insn *insns;
2945 rtx value;
2946 rtx eq_value;
2947 machine_mode outmode = mode;
2949 /* All of these functions return small values. Thus we choose to
2950 have them return something that isn't a double-word. */
2951 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2952 || unoptab == clrsb_optab || unoptab == popcount_optab
2953 || unoptab == parity_optab)
2954 outmode
2955 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
2956 optab_libfunc (unoptab, mode)));
2958 start_sequence ();
2960 /* Pass 1 for NO_QUEUE so we don't lose any increments
2961 if the libcall is cse'd or moved. */
2962 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
2963 1, op0, mode);
2964 insns = get_insns ();
2965 end_sequence ();
2967 target = gen_reg_rtx (outmode);
2968 bool trapv = trapv_unoptab_p (unoptab);
2969 if (trapv)
2970 eq_value = NULL_RTX;
2971 else
2973 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2974 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
2975 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2976 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
2977 eq_value = simplify_gen_unary (ZERO_EXTEND,
2978 outmode, eq_value, mode);
2980 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
2982 return target;
2985 /* It can't be done in this mode. Can we do it in a wider mode? */
2987 if (CLASS_HAS_WIDER_MODES_P (mclass))
2989 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2990 wider_mode != VOIDmode;
2991 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2993 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
2994 || optab_libfunc (unoptab, wider_mode))
2996 rtx xop0 = op0;
2997 rtx_insn *last = get_last_insn ();
2999 /* For certain operations, we need not actually extend
3000 the narrow operand, as long as we will truncate the
3001 results to the same narrowness. */
3002 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3003 (unoptab == neg_optab
3004 || unoptab == one_cmpl_optab
3005 || unoptab == bswap_optab)
3006 && mclass == MODE_INT);
3008 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3009 unsignedp);
3011 /* If we are generating clz using wider mode, adjust the
3012 result. Similarly for clrsb. */
3013 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3014 && temp != 0)
3015 temp = expand_binop
3016 (wider_mode, sub_optab, temp,
3017 gen_int_mode (GET_MODE_PRECISION (wider_mode)
3018 - GET_MODE_PRECISION (mode),
3019 wider_mode),
3020 target, true, OPTAB_DIRECT);
3022 /* Likewise for bswap. */
3023 if (unoptab == bswap_optab && temp != 0)
3025 gcc_assert (GET_MODE_PRECISION (wider_mode)
3026 == GET_MODE_BITSIZE (wider_mode)
3027 && GET_MODE_PRECISION (mode)
3028 == GET_MODE_BITSIZE (mode));
3030 temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
3031 GET_MODE_BITSIZE (wider_mode)
3032 - GET_MODE_BITSIZE (mode),
3033 NULL_RTX, true);
3036 if (temp)
3038 if (mclass != MODE_INT)
3040 if (target == 0)
3041 target = gen_reg_rtx (mode);
3042 convert_move (target, temp, 0);
3043 return target;
3045 else
3046 return gen_lowpart (mode, temp);
3048 else
3049 delete_insns_since (last);
3054 /* One final attempt at implementing negation via subtraction,
3055 this time allowing widening of the operand. */
3056 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3058 rtx temp;
3059 temp = expand_binop (mode,
3060 unoptab == negv_optab ? subv_optab : sub_optab,
3061 CONST0_RTX (mode), op0,
3062 target, unsignedp, OPTAB_LIB_WIDEN);
3063 if (temp)
3064 return temp;
3067 return 0;
3070 /* Emit code to compute the absolute value of OP0, with result to
3071 TARGET if convenient. (TARGET may be 0.) The return value says
3072 where the result actually is to be found.
3074 MODE is the mode of the operand; the mode of the result is
3075 different but can be deduced from MODE. */
3079 rtx
3080 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3081 int result_unsignedp)
3083 rtx temp;
3085 if (GET_MODE_CLASS (mode) != MODE_INT
3086 || ! flag_trapv)
3087 result_unsignedp = 1;
3089 /* First try to do it with a special abs instruction. */
3090 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3091 op0, target, 0);
3092 if (temp != 0)
3093 return temp;
3095 /* For floating point modes, try clearing the sign bit. */
3096 if (SCALAR_FLOAT_MODE_P (mode))
3098 temp = expand_absneg_bit (ABS, mode, op0, target);
3099 if (temp)
3100 return temp;
3103 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3104 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3105 && !HONOR_SIGNED_ZEROS (mode))
3107 rtx_insn *last = get_last_insn ();
3109 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3110 op0, NULL_RTX, 0);
3111 if (temp != 0)
3112 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3113 OPTAB_WIDEN);
3115 if (temp != 0)
3116 return temp;
3118 delete_insns_since (last);
3121 /* If this machine has expensive jumps, we can do integer absolute
3122 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3123 where W is the width of MODE. */
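/* EXTENDED below is 0 for nonnegative X and -1 for negative X, so
   (X ^ EXTENDED) - EXTENDED is X itself when X >= 0 and ~X + 1 == -X
   otherwise; e.g. X == -5 gives (-5 ^ -1) - (-1) == 4 + 1 == 5.  */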
3125 if (GET_MODE_CLASS (mode) == MODE_INT
3126 && BRANCH_COST (optimize_insn_for_speed_p (),
3127 false) >= 2)
3129 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3130 GET_MODE_PRECISION (mode) - 1,
3131 NULL_RTX, 0);
3133 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3134 OPTAB_LIB_WIDEN);
3135 if (temp != 0)
3136 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3137 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3139 if (temp != 0)
3140 return temp;
3143 return NULL_RTX;
3146 rtx
3147 expand_abs (machine_mode mode, rtx op0, rtx target,
3148 int result_unsignedp, int safe)
3150 rtx temp;
3151 rtx_code_label *op1;
3153 if (GET_MODE_CLASS (mode) != MODE_INT
3154 || ! flag_trapv)
3155 result_unsignedp = 1;
3157 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3158 if (temp != 0)
3159 return temp;
3161 /* If that does not win, use conditional jump and negate. */
3163 /* It is safe to use the target if it is the same
3164 as the source, provided it is also a pseudo register. */
3165 if (op0 == target && REG_P (op0)
3166 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3167 safe = 1;
3169 op1 = gen_label_rtx ();
3170 if (target == 0 || ! safe
3171 || GET_MODE (target) != mode
3172 || (MEM_P (target) && MEM_VOLATILE_P (target))
3173 || (REG_P (target)
3174 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3175 target = gen_reg_rtx (mode);
3177 emit_move_insn (target, op0);
3178 NO_DEFER_POP;
3180 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3181 NULL_RTX, NULL, op1, -1);
3183 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3184 target, target, 0);
3185 if (op0 != target)
3186 emit_move_insn (target, op0);
3187 emit_label (op1);
3188 OK_DEFER_POP;
3189 return target;
3192 /* Emit code to compute the one's complement absolute value of OP0
3193 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3194 (TARGET may be NULL_RTX.) The return value says where the result
3195 actually is to be found.
3197 MODE is the mode of the operand; the mode of the result is
3198 different but can be deduced from MODE. */
3200 rtx
3201 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3203 rtx temp;
3205 /* Not applicable for floating point modes. */
3206 if (FLOAT_MODE_P (mode))
3207 return NULL_RTX;
3209 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3210 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3212 rtx_insn *last = get_last_insn ();
3214 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3215 if (temp != 0)
3216 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3217 OPTAB_WIDEN);
3219 if (temp != 0)
3220 return temp;
3222 delete_insns_since (last);
3225 /* If this machine has expensive jumps, we can do one's complement
3226 absolute value of X as (((signed) x >> (W-1)) ^ x). */
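/* As in expand_abs_nojump, (signed) X >> (W-1) is 0 or -1, so the XOR
   leaves nonnegative X alone and maps negative X to ~X; e.g. -5
   becomes 4.  */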
3228 if (GET_MODE_CLASS (mode) == MODE_INT
3229 && BRANCH_COST (optimize_insn_for_speed_p (),
3230 false) >= 2)
3232 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3233 GET_MODE_PRECISION (mode) - 1,
3234 NULL_RTX, 0);
3236 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3237 OPTAB_LIB_WIDEN);
3239 if (temp != 0)
3240 return temp;
3243 return NULL_RTX;
3246 /* A subroutine of expand_copysign, perform the copysign operation using the
3247 abs and neg primitives advertised to exist on the target. The assumption
3248 is that we have a split register file, and leaving op0 in fp registers,
3249 and not playing with subregs so much, will help the register allocator. */
3251 static rtx
3252 expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
3253 int bitpos, bool op0_is_abs)
3255 machine_mode imode;
3256 enum insn_code icode;
3257 rtx sign;
3258 rtx_code_label *label;
3260 if (target == op1)
3261 target = NULL_RTX;
3263 /* Check if the back end provides an insn that handles signbit for the
3264 argument's mode. */
3265 icode = optab_handler (signbit_optab, mode);
3266 if (icode != CODE_FOR_nothing)
3268 imode = insn_data[(int) icode].operand[0].mode;
3269 sign = gen_reg_rtx (imode);
3270 emit_unop_insn (icode, sign, op1, UNKNOWN);
3272 else
3274 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3276 imode = int_mode_for_mode (mode);
3277 if (imode == BLKmode)
3278 return NULL_RTX;
3279 op1 = gen_lowpart (imode, op1);
3281 else
3283 int word;
3285 imode = word_mode;
3286 if (FLOAT_WORDS_BIG_ENDIAN)
3287 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3288 else
3289 word = bitpos / BITS_PER_WORD;
3290 bitpos = bitpos % BITS_PER_WORD;
3291 op1 = operand_subword_force (op1, word, mode);
3294 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3295 sign = expand_binop (imode, and_optab, op1,
3296 immed_wide_int_const (mask, imode),
3297 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3300 if (!op0_is_abs)
3302 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3303 if (op0 == NULL)
3304 return NULL_RTX;
3305 target = op0;
3307 else
3309 if (target == NULL_RTX)
3310 target = copy_to_reg (op0);
3311 else
3312 emit_move_insn (target, op0);
3315 label = gen_label_rtx ();
3316 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3318 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3319 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3320 else
3321 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3322 if (op0 != target)
3323 emit_move_insn (target, op0);
3325 emit_label (label);
3327 return target;
3331 /* A subroutine of expand_copysign, perform the entire copysign operation
3332 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3333 is true if op0 is known to have its sign bit clear. */
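/* I.e. with MASK having only the sign bit set, the result is
   (OP0 & ~MASK) | (OP1 & MASK).  */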
3335 static rtx
3336 expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
3337 int bitpos, bool op0_is_abs)
3339 machine_mode imode;
3340 int word, nwords, i;
3341 rtx temp;
3342 rtx_insn *insns;
3344 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3346 imode = int_mode_for_mode (mode);
3347 if (imode == BLKmode)
3348 return NULL_RTX;
3349 word = 0;
3350 nwords = 1;
3352 else
3354 imode = word_mode;
3356 if (FLOAT_WORDS_BIG_ENDIAN)
3357 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3358 else
3359 word = bitpos / BITS_PER_WORD;
3360 bitpos = bitpos % BITS_PER_WORD;
3361 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3364 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3366 if (target == 0
3367 || target == op0
3368 || target == op1
3369 || (nwords > 1 && !valid_multiword_target_p (target)))
3370 target = gen_reg_rtx (mode);
3372 if (nwords > 1)
3374 start_sequence ();
3376 for (i = 0; i < nwords; ++i)
3378 rtx targ_piece = operand_subword (target, i, 1, mode);
3379 rtx op0_piece = operand_subword_force (op0, i, mode);
3381 if (i == word)
3383 if (!op0_is_abs)
3384 op0_piece
3385 = expand_binop (imode, and_optab, op0_piece,
3386 immed_wide_int_const (~mask, imode),
3387 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3388 op1 = expand_binop (imode, and_optab,
3389 operand_subword_force (op1, i, mode),
3390 immed_wide_int_const (mask, imode),
3391 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3393 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3394 targ_piece, 1, OPTAB_LIB_WIDEN);
3395 if (temp != targ_piece)
3396 emit_move_insn (targ_piece, temp);
3398 else
3399 emit_move_insn (targ_piece, op0_piece);
3402 insns = get_insns ();
3403 end_sequence ();
3405 emit_insn (insns);
3407 else
3409 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3410 immed_wide_int_const (mask, imode),
3411 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3413 op0 = gen_lowpart (imode, op0);
3414 if (!op0_is_abs)
3415 op0 = expand_binop (imode, and_optab, op0,
3416 immed_wide_int_const (~mask, imode),
3417 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3419 temp = expand_binop (imode, ior_optab, op0, op1,
3420 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3421 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3424 return target;
3427 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3428 scalar floating point mode. Return NULL if we do not know how to
3429 expand the operation inline. */
3431 rtx
3432 expand_copysign (rtx op0, rtx op1, rtx target)
3434 machine_mode mode = GET_MODE (op0);
3435 const struct real_format *fmt;
3436 bool op0_is_abs;
3437 rtx temp;
3439 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3440 gcc_assert (GET_MODE (op1) == mode);
3442 /* First try to do it with a special instruction. */
3443 temp = expand_binop (mode, copysign_optab, op0, op1,
3444 target, 0, OPTAB_DIRECT);
3445 if (temp)
3446 return temp;
3448 fmt = REAL_MODE_FORMAT (mode);
3449 if (fmt == NULL || !fmt->has_signed_zero)
3450 return NULL_RTX;
3452 op0_is_abs = false;
3453 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3455 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3456 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3457 op0_is_abs = true;
3460 if (fmt->signbit_ro >= 0
3461 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3462 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3463 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3465 temp = expand_copysign_absneg (mode, op0, op1, target,
3466 fmt->signbit_ro, op0_is_abs);
3467 if (temp)
3468 return temp;
3471 if (fmt->signbit_rw < 0)
3472 return NULL_RTX;
3473 return expand_copysign_bit (mode, op0, op1, target,
3474 fmt->signbit_rw, op0_is_abs);
3477 /* Generate an instruction whose insn-code is INSN_CODE,
3478 with two operands: an output TARGET and an input OP0.
3479 TARGET *must* be nonzero, and the output is always stored there.
3480 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3481 the value that is stored into TARGET.
3483 Return false if expansion failed. */
3485 bool
3486 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3487 enum rtx_code code)
3489 struct expand_operand ops[2];
3490 rtx_insn *pat;
3492 create_output_operand (&ops[0], target, GET_MODE (target));
3493 create_input_operand (&ops[1], op0, GET_MODE (op0));
3494 pat = maybe_gen_insn (icode, 2, ops);
3495 if (!pat)
3496 return false;
3498 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3499 && code != UNKNOWN)
3500 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3502 emit_insn (pat);
3504 if (ops[0].value != target)
3505 emit_move_insn (target, ops[0].value);
3506 return true;
3508 /* Generate an instruction whose insn-code is INSN_CODE,
3509 with two operands: an output TARGET and an input OP0.
3510 TARGET *must* be nonzero, and the output is always stored there.
3511 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3512 the value that is stored into TARGET. */
3514 void
3515 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3517 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3518 gcc_assert (ok);
3521 struct no_conflict_data
3523 rtx target;
3524 rtx_insn *first, *insn;
3525 bool must_stay;
3528 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3529 the currently examined clobber / store has to stay in the list of
3530 insns that constitute the actual libcall block. */
3531 static void
3532 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3534 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3536 /* If this insn directly contributes to setting the target, it must stay. */
3537 if (reg_overlap_mentioned_p (p->target, dest))
3538 p->must_stay = true;
3539 /* If we haven't committed to keeping any other insns in the list yet,
3540 there is nothing more to check. */
3541 else if (p->insn == p->first)
3542 return;
3543 /* If this insn sets / clobbers a register that feeds one of the insns
3544 already in the list, this insn has to stay too. */
3545 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3546 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3547 || reg_used_between_p (dest, p->first, p->insn)
3548 /* Likewise if this insn depends on a register set by a previous
3549 insn in the list, or if it sets a result (presumably a hard
3550 register) that is set or clobbered by a previous insn.
3551 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3552 SET_DEST perform the former check on the address, and the latter
3553 check on the MEM. */
3554 || (GET_CODE (set) == SET
3555 && (modified_in_p (SET_SRC (set), p->first)
3556 || modified_in_p (SET_DEST (set), p->first)
3557 || modified_between_p (SET_SRC (set), p->first, p->insn)
3558 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3559 p->must_stay = true;
3563 /* Emit code to make a call to a constant function or a library call.
3565 INSNS is a list containing all insns emitted in the call.
3566 These insns leave the result in RESULT. Our job is to copy RESULT
3567 to TARGET, which is logically equivalent to EQUIV.
3569 We first emit any insns that set a pseudo on the assumption that these are
3570 loading constants into registers; doing so allows them to be safely cse'ed
3571 between blocks. Then we emit all the other insns in the block, followed by
3572 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3573 note with an operand of EQUIV. */
3575 static void
3576 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3577 bool equiv_may_trap)
3579 rtx final_dest = target;
3580 rtx_insn *next, *last, *insn;
3582 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3583 into a MEM later. Protect the libcall block from this change. */
3584 if (! REG_P (target) || REG_USERVAR_P (target))
3585 target = gen_reg_rtx (GET_MODE (target));
3587 /* If we're using non-call exceptions, a libcall corresponding to an
3588 operation that may trap may also trap. */
3589 /* ??? See the comment in front of make_reg_eh_region_note. */
3590 if (cfun->can_throw_non_call_exceptions
3591 && (equiv_may_trap || may_trap_p (equiv)))
3593 for (insn = insns; insn; insn = NEXT_INSN (insn))
3594 if (CALL_P (insn))
3596 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3597 if (note)
3599 int lp_nr = INTVAL (XEXP (note, 0));
3600 if (lp_nr == 0 || lp_nr == INT_MIN)
3601 remove_note (insn, note);
3605 else
3607 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3608 reg note to indicate that this call cannot throw or execute a nonlocal
3609 goto (unless there is already a REG_EH_REGION note, in which case
3610 we update it). */
3611 for (insn = insns; insn; insn = NEXT_INSN (insn))
3612 if (CALL_P (insn))
3613 make_reg_eh_region_note_nothrow_nononlocal (insn);
3616 /* First emit all insns that set pseudos. Remove them from the list as
3617 we go. Avoid insns that set pseudos which were referenced in previous
3618 insns. These can be generated by move_by_pieces, for example,
3619 to update an address. Similarly, avoid insns that reference things
3620 set in previous insns. */
3622 for (insn = insns; insn; insn = next)
3624 rtx set = single_set (insn);
3626 next = NEXT_INSN (insn);
3628 if (set != 0 && REG_P (SET_DEST (set))
3629 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3631 struct no_conflict_data data;
3633 data.target = const0_rtx;
3634 data.first = insns;
3635 data.insn = insn;
3636 data.must_stay = 0;
3637 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3638 if (! data.must_stay)
3640 if (PREV_INSN (insn))
3641 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3642 else
3643 insns = next;
3645 if (next)
3646 SET_PREV_INSN (next) = PREV_INSN (insn);
3648 add_insn (insn);
3652 /* Some ports use a loop to copy large arguments onto the stack.
3653 Don't move anything outside such a loop. */
3654 if (LABEL_P (insn))
3655 break;
3658 /* Write the remaining insns followed by the final copy. */
3659 for (insn = insns; insn; insn = next)
3661 next = NEXT_INSN (insn);
3663 add_insn (insn);
3666 last = emit_move_insn (target, result);
3667 if (equiv)
3668 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3670 if (final_dest != target)
3671 emit_move_insn (final_dest, target);
3674 void
3675 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3677 emit_libcall_block_1 (safe_as_a <rtx_insn *> (insns),
3678 target, result, equiv, false);
3681 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3682 PURPOSE describes how this comparison will be used. CODE is the rtx
3683 comparison code we will be using.
3685 ??? Actually, CODE is slightly weaker than that. A target is still
3686 required to implement all of the normal bcc operations, but not
3687 required to implement all (or any) of the unordered bcc operations. */
3689 int
3690 can_compare_p (enum rtx_code code, machine_mode mode,
3691 enum can_compare_purpose purpose)
3693 rtx test;
3694 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3695 do
3697 enum insn_code icode;
3699 if (purpose == ccp_jump
3700 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3701 && insn_operand_matches (icode, 0, test))
3702 return 1;
3703 if (purpose == ccp_store_flag
3704 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3705 && insn_operand_matches (icode, 1, test))
3706 return 1;
3707 if (purpose == ccp_cmov
3708 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3709 return 1;
3711 mode = GET_MODE_WIDER_MODE (mode);
3712 PUT_MODE (test, mode);
3714 while (mode != VOIDmode);
3716 return 0;
3719 /* This function is called when we are going to emit a compare instruction that
3720 compares the values found in X and Y, using the rtl operator COMPARISON.
3722 If they have mode BLKmode, then SIZE specifies the size of both operands.
3724 UNSIGNEDP nonzero says that the operands are unsigned;
3725 this matters if they need to be widened (as given by METHODS).
3727 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3728 if we failed to produce one.
3730 *PMODE is the mode of the inputs (in case they are const_int).
3732 This function performs all the setup necessary so that the caller only has
3733 to emit a single comparison insn. This setup can involve doing a BLKmode
3734 comparison or emitting a library call to perform the comparison if no insn
3735 is available to handle it.
3736 The values which are passed in through pointers can be modified; the caller
3737 should perform the comparison on the modified values. Constant
3738 comparisons must have already been folded. */
3740 static void
3741 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3742 int unsignedp, enum optab_methods methods,
3743 rtx *ptest, machine_mode *pmode)
3745 machine_mode mode = *pmode;
3746 rtx libfunc, test;
3747 machine_mode cmp_mode;
3748 enum mode_class mclass;
3750 /* The other methods are not needed. */
3751 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3752 || methods == OPTAB_LIB_WIDEN);
3754 /* If we are optimizing, force expensive constants into a register. */
3755 if (CONSTANT_P (x) && optimize
3756 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3757 > COSTS_N_INSNS (1)))
3758 x = force_reg (mode, x);
3760 if (CONSTANT_P (y) && optimize
3761 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3762 > COSTS_N_INSNS (1)))
3763 y = force_reg (mode, y);
3765 #if HAVE_cc0
3766 /* Make sure we have a canonical comparison. The RTL
3767 documentation states that canonical comparisons are required only
3768 for targets which have cc0. */
3769 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3770 #endif
3772 /* Don't let both operands fail to indicate the mode. */
3773 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3774 x = force_reg (mode, x);
3775 if (mode == VOIDmode)
3776 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3778 /* Handle all BLKmode compares. */
3780 if (mode == BLKmode)
3782 machine_mode result_mode;
3783 enum insn_code cmp_code;
3784 rtx result;
3785 rtx opalign
3786 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3788 gcc_assert (size);
3790 /* Try to use a memory block compare insn - either cmpstr
3791 or cmpmem will do. */
3792 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3793 cmp_mode != VOIDmode;
3794 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3796 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3797 if (cmp_code == CODE_FOR_nothing)
3798 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3799 if (cmp_code == CODE_FOR_nothing)
3800 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3801 if (cmp_code == CODE_FOR_nothing)
3802 continue;
3804 /* Must make sure the size fits the insn's mode. */
3805 if ((CONST_INT_P (size)
3806 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3807 || (GET_MODE_BITSIZE (GET_MODE (size))
3808 > GET_MODE_BITSIZE (cmp_mode)))
3809 continue;
3811 result_mode = insn_data[cmp_code].operand[0].mode;
3812 result = gen_reg_rtx (result_mode);
3813 size = convert_to_mode (cmp_mode, size, 1);
3814 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3816 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3817 *pmode = result_mode;
3818 return;
3821 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3822 goto fail;
3824 /* Otherwise call a library function. */
3825 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3827 x = result;
3828 y = const0_rtx;
3829 mode = TYPE_MODE (integer_type_node);
3830 methods = OPTAB_LIB_WIDEN;
3831 unsignedp = false;
3834 /* Don't allow operands to the compare to trap, as that can put the
3835 compare and branch in different basic blocks. */
3836 if (cfun->can_throw_non_call_exceptions)
3838 if (may_trap_p (x))
3839 x = force_reg (mode, x);
3840 if (may_trap_p (y))
3841 y = force_reg (mode, y);
3844 if (GET_MODE_CLASS (mode) == MODE_CC)
3846 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3847 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3848 gcc_assert (icode != CODE_FOR_nothing
3849 && insn_operand_matches (icode, 0, test));
3850 *ptest = test;
3851 return;
3854 mclass = GET_MODE_CLASS (mode);
3855 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3856 cmp_mode = mode;
3859 enum insn_code icode;
3860 icode = optab_handler (cbranch_optab, cmp_mode);
3861 if (icode != CODE_FOR_nothing
3862 && insn_operand_matches (icode, 0, test))
3864 rtx_insn *last = get_last_insn ();
3865 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3866 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3867 if (op0 && op1
3868 && insn_operand_matches (icode, 1, op0)
3869 && insn_operand_matches (icode, 2, op1))
3871 XEXP (test, 0) = op0;
3872 XEXP (test, 1) = op1;
3873 *ptest = test;
3874 *pmode = cmp_mode;
3875 return;
3877 delete_insns_since (last);
3880 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3881 break;
3882 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
3884 while (cmp_mode != VOIDmode);
3886 if (methods != OPTAB_LIB_WIDEN)
3887 goto fail;
3889 if (!SCALAR_FLOAT_MODE_P (mode))
3891 rtx result;
3892 machine_mode ret_mode;
3894 /* Handle a libcall just for the mode we are using. */
3895 libfunc = optab_libfunc (cmp_optab, mode);
3896 gcc_assert (libfunc);
3898 /* If we want unsigned, and this mode has a distinct unsigned
3899 comparison routine, use that. */
3900 if (unsignedp)
3902 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3903 if (ulibfunc)
3904 libfunc = ulibfunc;
3907 ret_mode = targetm.libgcc_cmp_return_mode ();
3908 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3909 ret_mode, 2, x, mode, y, mode);
3911 /* There are two kinds of comparison routines. Biased routines
3912 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3913 of gcc expect that the comparison operation is equivalent
3914 to the modified comparison. For signed comparisons compare the
3915 result against 1 in the biased case, and zero in the unbiased
3916 case. For unsigned comparisons always compare against 1 after
3917 biasing the unbiased result by adding 1. This gives us a way to
3918 represent LTU.
3919 The comparisons in the fixed-point helper library are always
3920 biased. */
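/* Worked example (illustrative): a biased routine maps less/equal/greater
   to 0/1/2, so X < Y becomes (result < 1) and X == Y becomes (result == 1),
   which is why Y defaults to const1_rtx below.  An unbiased routine returns
   -1/0/1, so the signed tests compare against zero instead.  For unsigned
   unbiased results we first add 1, mapping -1/0/1 to 0/1/2, which makes LTU
   expressible as (result + 1) <u 1.  */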
3921 x = result;
3922 y = const1_rtx;
3924 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3926 if (unsignedp)
3927 x = plus_constant (ret_mode, result, 1);
3928 else
3929 y = const0_rtx;
3932 *pmode = ret_mode;
3933 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3934 ptest, pmode);
3936 else
3937 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3939 return;
3941 fail:
3942 *ptest = NULL_RTX;
3945 /* Before emitting an insn with code ICODE, make sure that X, which is going
3946 to be used for operand OPNUM of the insn, is converted from mode MODE to
3947 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3948 that it is accepted by the operand predicate. Return the new value. */
3951 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
3952 machine_mode wider_mode, int unsignedp)
3954 if (mode != wider_mode)
3955 x = convert_modes (wider_mode, mode, x, unsignedp);
3957 if (!insn_operand_matches (icode, opnum, x))
3959 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
3960 if (reload_completed)
3961 return NULL_RTX;
3962 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3963 return NULL_RTX;
3964 x = copy_to_mode_reg (op_mode, x);
3967 return x;
3970 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3971 we can do the branch. */
3973 static void
3974 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
3976 machine_mode optab_mode;
3977 enum mode_class mclass;
3978 enum insn_code icode;
3979 rtx_insn *insn;
3981 mclass = GET_MODE_CLASS (mode);
3982 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
3983 icode = optab_handler (cbranch_optab, optab_mode);
3985 gcc_assert (icode != CODE_FOR_nothing);
3986 gcc_assert (insn_operand_matches (icode, 0, test));
3987 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
3988 XEXP (test, 1), label));
3989 if (prob != -1
3990 && profile_status_for_fn (cfun) != PROFILE_ABSENT
3991 && insn
3992 && JUMP_P (insn)
3993 && any_condjump_p (insn)
3994 && !find_reg_note (insn, REG_BR_PROB, 0))
3995 add_int_reg_note (insn, REG_BR_PROB, prob);
3998 /* Generate code to compare X with Y so that the condition codes are
3999 set and to jump to LABEL if the condition is true. If X is a
4000 constant and Y is not a constant, then the comparison is swapped to
4001 ensure that the comparison RTL has the canonical form.
4003 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4004 need to be widened. UNSIGNEDP is also used to select the proper
4005 branch condition code.
4007 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4009 MODE is the mode of the inputs (in case they are const_int).
4011 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4012 It will be potentially converted into an unsigned variant based on
4013 UNSIGNEDP to select a proper jump instruction.
4015 PROB is the probability of jumping to LABEL. */
4017 void
4018 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4019 machine_mode mode, int unsignedp, rtx label,
4020 int prob)
4022 rtx op0 = x, op1 = y;
4023 rtx test;
4025 /* Swap operands and condition to ensure canonical RTL. */
4026 if (swap_commutative_operands_p (x, y)
4027 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4029 op0 = y, op1 = x;
4030 comparison = swap_condition (comparison);
4033 /* If OP0 is still a constant, then both X and Y must be constants
4034 or the opposite comparison is not supported. Force X into a register
4035 to create canonical RTL. */
4036 if (CONSTANT_P (op0))
4037 op0 = force_reg (mode, op0);
4039 if (unsignedp)
4040 comparison = unsigned_condition (comparison);
4042 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4043 &test, &mode);
4044 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
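/* Minimal usage sketch (hypothetical operands A, B and LABEL): to branch
   when A < B as unsigned SImode values,

     emit_cmp_and_jump_insns (a, b, LT, NULL_RTX, SImode, 1, label, -1);

   UNSIGNEDP == 1 turns LT into LTU internally, and PROB == -1 means that
   no REG_BR_PROB note is attached to the jump.  */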
4048 /* Emit a library call comparison between floating point X and Y.
4049 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4051 static void
4052 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4053 rtx *ptest, machine_mode *pmode)
4055 enum rtx_code swapped = swap_condition (comparison);
4056 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4057 machine_mode orig_mode = GET_MODE (x);
4058 machine_mode mode, cmp_mode;
4059 rtx true_rtx, false_rtx;
4060 rtx value, target, equiv;
4061 rtx_insn *insns;
4062 rtx libfunc = 0;
4063 bool reversed_p = false;
4064 cmp_mode = targetm.libgcc_cmp_return_mode ();
4066 for (mode = orig_mode;
4067 mode != VOIDmode;
4068 mode = GET_MODE_WIDER_MODE (mode))
4070 if (code_to_optab (comparison)
4071 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4072 break;
4074 if (code_to_optab (swapped)
4075 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4077 std::swap (x, y);
4078 comparison = swapped;
4079 break;
4082 if (code_to_optab (reversed)
4083 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4085 comparison = reversed;
4086 reversed_p = true;
4087 break;
4091 gcc_assert (mode != VOIDmode);
4093 if (mode != orig_mode)
4095 x = convert_to_mode (mode, x, 0);
4096 y = convert_to_mode (mode, y, 0);
4099 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4100 the RTL. This allows the RTL optimizers to delete the libcall if the
4101 condition can be determined at compile-time. */
4102 if (comparison == UNORDERED
4103 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4105 true_rtx = const_true_rtx;
4106 false_rtx = const0_rtx;
4108 else
4110 switch (comparison)
4112 case EQ:
4113 true_rtx = const0_rtx;
4114 false_rtx = const_true_rtx;
4115 break;
4117 case NE:
4118 true_rtx = const_true_rtx;
4119 false_rtx = const0_rtx;
4120 break;
4122 case GT:
4123 true_rtx = const1_rtx;
4124 false_rtx = const0_rtx;
4125 break;
4127 case GE:
4128 true_rtx = const0_rtx;
4129 false_rtx = constm1_rtx;
4130 break;
4132 case LT:
4133 true_rtx = constm1_rtx;
4134 false_rtx = const0_rtx;
4135 break;
4137 case LE:
4138 true_rtx = const0_rtx;
4139 false_rtx = const1_rtx;
4140 break;
4142 default:
4143 gcc_unreachable ();
4147 if (comparison == UNORDERED)
4149 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4150 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4151 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4152 temp, const_true_rtx, equiv);
4154 else
4156 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4157 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4158 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4159 equiv, true_rtx, false_rtx);
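/* For example (illustrative; the actual libfunc depends on the target's
   optab tables): for GT with a routine like libgcc's __gtsf2, which
   returns a value greater than zero iff x > y, the note built above is
   (if_then_else (gt x y) (const_int 1) (const_int 0)) and the test stored
   through PTEST below is (gt target (const_int 0)).  */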
4162 start_sequence ();
4163 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4164 cmp_mode, 2, x, mode, y, mode);
4165 insns = get_insns ();
4166 end_sequence ();
4168 target = gen_reg_rtx (cmp_mode);
4169 emit_libcall_block (insns, target, value, equiv);
4171 if (comparison == UNORDERED
4172 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4173 || reversed_p)
4174 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4175 else
4176 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4178 *pmode = cmp_mode;
4181 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4183 void
4184 emit_indirect_jump (rtx loc)
4186 if (!targetm.have_indirect_jump ())
4187 sorry ("indirect jumps are not available on this target");
4188 else
4190 struct expand_operand ops[1];
4191 create_address_operand (&ops[0], loc);
4192 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4193 emit_barrier ();
4198 /* Emit a conditional move instruction if the machine supports one for that
4199 condition and machine mode.
4201 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4202 the mode to use should they be constants. If it is VOIDmode, they cannot
4203 both be constants.
4205 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4206 should be stored there. MODE is the mode to use should they be constants.
4207 If it is VOIDmode, they cannot both be constants.
4209 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4210 is not supported. */
4213 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4214 machine_mode cmode, rtx op2, rtx op3,
4215 machine_mode mode, int unsignedp)
4217 rtx comparison;
4218 rtx_insn *last;
4219 enum insn_code icode;
4220 enum rtx_code reversed;
4222 /* If the two source operands are identical, that's just a move. */
4224 if (rtx_equal_p (op2, op3))
4226 if (!target)
4227 target = gen_reg_rtx (mode);
4229 emit_move_insn (target, op3);
4230 return target;
4233 /* If one operand is constant, make it the second one. Only do this
4234 if the other operand is not constant as well. */
4236 if (swap_commutative_operands_p (op0, op1))
4238 std::swap (op0, op1);
4239 code = swap_condition (code);
4242 /* get_condition will prefer to generate LT and GT even if the old
4243 comparison was against zero, so undo that canonicalization here since
4244 comparisons against zero are cheaper. */
4245 if (code == LT && op1 == const1_rtx)
4246 code = LE, op1 = const0_rtx;
4247 else if (code == GT && op1 == constm1_rtx)
4248 code = GE, op1 = const0_rtx;
4250 if (cmode == VOIDmode)
4251 cmode = GET_MODE (op0);
4253 if (swap_commutative_operands_p (op2, op3)
4254 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4255 != UNKNOWN))
4257 std::swap (op2, op3);
4258 code = reversed;
4261 if (mode == VOIDmode)
4262 mode = GET_MODE (op2);
4264 icode = direct_optab_handler (movcc_optab, mode);
4266 if (icode == CODE_FOR_nothing)
4267 return 0;
4269 if (!target)
4270 target = gen_reg_rtx (mode);
4272 code = unsignedp ? unsigned_condition (code) : code;
4273 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4275 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4276 return NULL and let the caller figure out how best to deal with this
4277 situation. */
4278 if (!COMPARISON_P (comparison))
4279 return NULL_RTX;
4281 saved_pending_stack_adjust save;
4282 save_pending_stack_adjust (&save);
4283 last = get_last_insn ();
4284 do_pending_stack_adjust ();
4285 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4286 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4287 &comparison, &cmode);
4288 if (comparison)
4290 struct expand_operand ops[4];
4292 create_output_operand (&ops[0], target, mode);
4293 create_fixed_operand (&ops[1], comparison);
4294 create_input_operand (&ops[2], op2, mode);
4295 create_input_operand (&ops[3], op3, mode);
4296 if (maybe_expand_insn (icode, 4, ops))
4298 if (ops[0].value != target)
4299 convert_move (target, ops[0].value, false);
4300 return target;
4303 delete_insns_since (last);
4304 restore_pending_stack_adjust (&save);
4305 return NULL_RTX;
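/* Usage sketch (hypothetical operands): to compute
   target = (a < b) ? c : d in SImode on a target providing a movsicc
   pattern:

     rtx r = emit_conditional_move (target, LT, a, b, SImode,
                                    c, d, SImode, 0);

   A NULL result simply means the caller must fall back to a
   compare-and-branch sequence.  */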
4309 /* Emit a conditional negate or bitwise complement using the
4310 negcc or notcc optabs if available. Return NULL_RTX if such operations
4311 are not available. Otherwise return the RTX holding the result.
4312 TARGET is the desired destination of the result. COND is the condition
4313 on which to negate. If COND is true, move into TARGET the negation
4314 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4315 CODE is either NEG or NOT. MODE is the machine mode in which the
4316 operation is performed. */
4319 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4320 machine_mode mode, rtx cond, rtx op1,
4321 rtx op2)
4323 optab op = unknown_optab;
4324 if (code == NEG)
4325 op = negcc_optab;
4326 else if (code == NOT)
4327 op = notcc_optab;
4328 else
4329 gcc_unreachable ();
4331 insn_code icode = direct_optab_handler (op, mode);
4333 if (icode == CODE_FOR_nothing)
4334 return NULL_RTX;
4336 if (!target)
4337 target = gen_reg_rtx (mode);
4339 rtx_insn *last = get_last_insn ();
4340 struct expand_operand ops[4];
4342 create_output_operand (&ops[0], target, mode);
4343 create_fixed_operand (&ops[1], cond);
4344 create_input_operand (&ops[2], op1, mode);
4345 create_input_operand (&ops[3], op2, mode);
4347 if (maybe_expand_insn (icode, 4, ops))
4349 if (ops[0].value != target)
4350 convert_move (target, ops[0].value, false);
4352 return target;
4354 delete_insns_since (last);
4355 return NULL_RTX;
4358 /* Emit a conditional addition instruction if the machine supports one for that
4359 condition and machine mode.
4361 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4362 the mode to use should they be constants. If it is VOIDmode, they cannot
4363 both be constants.
4365 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4366 should be stored there. MODE is the mode to use should they be constants.
4367 If it is VOIDmode, they cannot both be constants.
4369 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4370 is not supported. */
4373 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4374 machine_mode cmode, rtx op2, rtx op3,
4375 machine_mode mode, int unsignedp)
4377 rtx comparison;
4378 rtx_insn *last;
4379 enum insn_code icode;
4381 /* If one operand is constant, make it the second one. Only do this
4382 if the other operand is not constant as well. */
4384 if (swap_commutative_operands_p (op0, op1))
4386 std::swap (op0, op1);
4387 code = swap_condition (code);
4390 /* get_condition will prefer to generate LT and GT even if the old
4391 comparison was against zero, so undo that canonicalization here since
4392 comparisons against zero are cheaper. */
4393 if (code == LT && op1 == const1_rtx)
4394 code = LE, op1 = const0_rtx;
4395 else if (code == GT && op1 == constm1_rtx)
4396 code = GE, op1 = const0_rtx;
4398 if (cmode == VOIDmode)
4399 cmode = GET_MODE (op0);
4401 if (mode == VOIDmode)
4402 mode = GET_MODE (op2);
4404 icode = optab_handler (addcc_optab, mode);
4406 if (icode == CODE_FOR_nothing)
4407 return 0;
4409 if (!target)
4410 target = gen_reg_rtx (mode);
4412 code = unsignedp ? unsigned_condition (code) : code;
4413 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4415 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4416 return NULL and let the caller figure out how best to deal with this
4417 situation. */
4418 if (!COMPARISON_P (comparison))
4419 return NULL_RTX;
4421 do_pending_stack_adjust ();
4422 last = get_last_insn ();
4423 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4424 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4425 &comparison, &cmode);
4426 if (comparison)
4428 struct expand_operand ops[4];
4430 create_output_operand (&ops[0], target, mode);
4431 create_fixed_operand (&ops[1], comparison);
4432 create_input_operand (&ops[2], op2, mode);
4433 create_input_operand (&ops[3], op3, mode);
4434 if (maybe_expand_insn (icode, 4, ops))
4436 if (ops[0].value != target)
4437 convert_move (target, ops[0].value, false);
4438 return target;
4441 delete_insns_since (last);
4442 return NULL_RTX;
4445 /* These functions attempt to generate an insn body, rather than
4446 emitting the insn, but if the gen function already emits them, we
4447 make no attempt to turn them back into naked patterns. */
4449 /* Generate and return an insn body to add Y to X. */
4451 rtx_insn *
4452 gen_add2_insn (rtx x, rtx y)
4454 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4456 gcc_assert (insn_operand_matches (icode, 0, x));
4457 gcc_assert (insn_operand_matches (icode, 1, x));
4458 gcc_assert (insn_operand_matches (icode, 2, y));
4460 return GEN_FCN (icode) (x, x, y);
4463 /* Generate and return an insn body to add r1 and c,
4464 storing the result in r0. */
4466 rtx_insn *
4467 gen_add3_insn (rtx r0, rtx r1, rtx c)
4469 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4471 if (icode == CODE_FOR_nothing
4472 || !insn_operand_matches (icode, 0, r0)
4473 || !insn_operand_matches (icode, 1, r1)
4474 || !insn_operand_matches (icode, 2, c))
4475 return NULL;
4477 return GEN_FCN (icode) (r0, r1, c);
4481 have_add2_insn (rtx x, rtx y)
4483 enum insn_code icode;
4485 gcc_assert (GET_MODE (x) != VOIDmode);
4487 icode = optab_handler (add_optab, GET_MODE (x));
4489 if (icode == CODE_FOR_nothing)
4490 return 0;
4492 if (!insn_operand_matches (icode, 0, x)
4493 || !insn_operand_matches (icode, 1, x)
4494 || !insn_operand_matches (icode, 2, y))
4495 return 0;
4497 return 1;
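/* Illustrative pairing (not from the original code): callers guard the
   gen_ function with the corresponding have_ predicate, e.g.

     if (have_add2_insn (x, y))
       emit_insn (gen_add2_insn (x, y));

   gen_add2_insn asserts that the operands satisfy the insn predicates,
   whereas have_add2_insn merely reports whether they do.  */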
4500 /* Generate and return an insn body to add Y and Z, storing the result in X. */
4502 rtx_insn *
4503 gen_addptr3_insn (rtx x, rtx y, rtx z)
4505 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4507 gcc_assert (insn_operand_matches (icode, 0, x));
4508 gcc_assert (insn_operand_matches (icode, 1, y));
4509 gcc_assert (insn_operand_matches (icode, 2, z));
4511 return GEN_FCN (icode) (x, y, z);
4514 /* Return true if the target implements an addptr pattern and X, Y,
4515 and Z are valid for the pattern predicates. */
4518 have_addptr3_insn (rtx x, rtx y, rtx z)
4520 enum insn_code icode;
4522 gcc_assert (GET_MODE (x) != VOIDmode);
4524 icode = optab_handler (addptr3_optab, GET_MODE (x));
4526 if (icode == CODE_FOR_nothing)
4527 return 0;
4529 if (!insn_operand_matches (icode, 0, x)
4530 || !insn_operand_matches (icode, 1, y)
4531 || !insn_operand_matches (icode, 2, z))
4532 return 0;
4534 return 1;
4537 /* Generate and return an insn body to subtract Y from X. */
4539 rtx_insn *
4540 gen_sub2_insn (rtx x, rtx y)
4542 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4544 gcc_assert (insn_operand_matches (icode, 0, x));
4545 gcc_assert (insn_operand_matches (icode, 1, x));
4546 gcc_assert (insn_operand_matches (icode, 2, y));
4548 return GEN_FCN (icode) (x, x, y);
4551 /* Generate and return an insn body to subtract c from r1,
4552 storing the result in r0. */
4554 rtx_insn *
4555 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4557 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4559 if (icode == CODE_FOR_nothing
4560 || !insn_operand_matches (icode, 0, r0)
4561 || !insn_operand_matches (icode, 1, r1)
4562 || !insn_operand_matches (icode, 2, c))
4563 return NULL;
4565 return GEN_FCN (icode) (r0, r1, c);
4569 have_sub2_insn (rtx x, rtx y)
4571 enum insn_code icode;
4573 gcc_assert (GET_MODE (x) != VOIDmode);
4575 icode = optab_handler (sub_optab, GET_MODE (x));
4577 if (icode == CODE_FOR_nothing)
4578 return 0;
4580 if (!insn_operand_matches (icode, 0, x)
4581 || !insn_operand_matches (icode, 1, x)
4582 || !insn_operand_matches (icode, 2, y))
4583 return 0;
4585 return 1;
4588 /* Generate the body of an insn to extend Y (with mode MFROM)
4589 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4591 rtx_insn *
4592 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4593 machine_mode mfrom, int unsignedp)
4595 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4596 return GEN_FCN (icode) (x, y);
4599 /* Generate code to convert FROM to floating point
4600 and store in TO. FROM must be fixed point and not VOIDmode.
4601 UNSIGNEDP nonzero means regard FROM as unsigned.
4602 Normally this is done by correcting the final value
4603 if it is negative. */
4605 void
4606 expand_float (rtx to, rtx from, int unsignedp)
4608 enum insn_code icode;
4609 rtx target = to;
4610 machine_mode fmode, imode;
4611 bool can_do_signed = false;
4613 /* Crash now, because we won't be able to decide which mode to use. */
4614 gcc_assert (GET_MODE (from) != VOIDmode);
4616 /* Look for an insn to do the conversion. Do it in the specified
4617 modes if possible; otherwise convert either input, output or both to
4618 wider mode. If the integer mode is wider than the mode of FROM,
4619 we can do the conversion signed even if the input is unsigned. */
4621 for (fmode = GET_MODE (to); fmode != VOIDmode;
4622 fmode = GET_MODE_WIDER_MODE (fmode))
4623 for (imode = GET_MODE (from); imode != VOIDmode;
4624 imode = GET_MODE_WIDER_MODE (imode))
4626 int doing_unsigned = unsignedp;
4628 if (fmode != GET_MODE (to)
4629 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4630 continue;
4632 icode = can_float_p (fmode, imode, unsignedp);
4633 if (icode == CODE_FOR_nothing && unsignedp)
4635 enum insn_code scode = can_float_p (fmode, imode, 0);
4636 if (scode != CODE_FOR_nothing)
4637 can_do_signed = true;
4638 if (imode != GET_MODE (from))
4639 icode = scode, doing_unsigned = 0;
4642 if (icode != CODE_FOR_nothing)
4644 if (imode != GET_MODE (from))
4645 from = convert_to_mode (imode, from, unsignedp);
4647 if (fmode != GET_MODE (to))
4648 target = gen_reg_rtx (fmode);
4650 emit_unop_insn (icode, target, from,
4651 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4653 if (target != to)
4654 convert_move (to, target, 0);
4655 return;
4659 /* Unsigned integer, and no way to convert directly. Convert as signed,
4660 then unconditionally adjust the result. */
4661 if (unsignedp && can_do_signed)
4663 rtx_code_label *label = gen_label_rtx ();
4664 rtx temp;
4665 REAL_VALUE_TYPE offset;
4667 /* Look for a usable floating mode FMODE wider than the source and at
4668 least as wide as the target. Using FMODE will avoid rounding woes
4669 with unsigned values greater than the signed maximum value. */
4671 for (fmode = GET_MODE (to); fmode != VOIDmode;
4672 fmode = GET_MODE_WIDER_MODE (fmode))
4673 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4674 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4675 break;
4677 if (fmode == VOIDmode)
4679 /* There is no such mode. Pretend the target is wide enough. */
4680 fmode = GET_MODE (to);
4682 /* Avoid double-rounding when TO is narrower than FROM. */
4683 if ((significand_size (fmode) + 1)
4684 < GET_MODE_PRECISION (GET_MODE (from)))
4686 rtx temp1;
4687 rtx_code_label *neglabel = gen_label_rtx ();
4689 /* Don't use TARGET if it isn't a register, is a hard register,
4690 or is the wrong mode. */
4691 if (!REG_P (target)
4692 || REGNO (target) < FIRST_PSEUDO_REGISTER
4693 || GET_MODE (target) != fmode)
4694 target = gen_reg_rtx (fmode);
4696 imode = GET_MODE (from);
4697 do_pending_stack_adjust ();
4699 /* Test whether the sign bit is set. */
4700 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4701 0, neglabel);
4703 /* The sign bit is not set. Convert as signed. */
4704 expand_float (target, from, 0);
4705 emit_jump_insn (targetm.gen_jump (label));
4706 emit_barrier ();
4708 /* The sign bit is set.
4709 Convert to a usable (positive signed) value by shifting right
4710 one bit, while remembering if a nonzero bit was shifted
4711 out; i.e., compute (from & 1) | (from >> 1). */
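/* Worked example (illustrative): for the 64-bit input 2^64 - 1,
   (from & 1) | (from >> 1) gives 2^63 - 1, a valid positive signed value;
   it is converted below and then doubled.  Folding the shifted-out bit
   back into bit 0 is a round-to-odd step, so the conversion followed by
   the doubling rounds the true value only once.  */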
4713 emit_label (neglabel);
4714 temp = expand_binop (imode, and_optab, from, const1_rtx,
4715 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4716 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4717 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4718 OPTAB_LIB_WIDEN);
4719 expand_float (target, temp, 0);
4721 /* Multiply by 2 to undo the shift above. */
4722 temp = expand_binop (fmode, add_optab, target, target,
4723 target, 0, OPTAB_LIB_WIDEN);
4724 if (temp != target)
4725 emit_move_insn (target, temp);
4727 do_pending_stack_adjust ();
4728 emit_label (label);
4729 goto done;
4733 /* If we are about to do some arithmetic to correct for an
4734 unsigned operand, do it in a pseudo-register. */
4736 if (GET_MODE (to) != fmode
4737 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4738 target = gen_reg_rtx (fmode);
4740 /* Convert as signed integer to floating. */
4741 expand_float (target, from, 0);
4743 /* If FROM is negative (and therefore TO is negative),
4744 correct its value by 2**bitwidth. */
4746 do_pending_stack_adjust ();
4747 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4748 0, label);
4751 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
4752 temp = expand_binop (fmode, add_optab, target,
4753 const_double_from_real_value (offset, fmode),
4754 target, 0, OPTAB_LIB_WIDEN);
4755 if (temp != target)
4756 emit_move_insn (target, temp);
4758 do_pending_stack_adjust ();
4759 emit_label (label);
4760 goto done;
4763 /* No hardware instruction available; call a library routine. */
4765 rtx libfunc;
4766 rtx_insn *insns;
4767 rtx value;
4768 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4770 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
4771 from = convert_to_mode (SImode, from, unsignedp);
4773 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4774 gcc_assert (libfunc);
4776 start_sequence ();
4778 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4779 GET_MODE (to), 1, from,
4780 GET_MODE (from));
4781 insns = get_insns ();
4782 end_sequence ();
4784 emit_libcall_block (insns, target, value,
4785 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4786 GET_MODE (to), from));
4789 done:
4791 /* Copy result to requested destination
4792 if we have been computing in a temp location. */
4794 if (target != to)
4796 if (GET_MODE (target) == GET_MODE (to))
4797 emit_move_insn (to, target);
4798 else
4799 convert_move (to, target, 0);
4803 /* Generate code to convert FROM to fixed point and store in TO. FROM
4804 must be floating point. */
4806 void
4807 expand_fix (rtx to, rtx from, int unsignedp)
4809 enum insn_code icode;
4810 rtx target = to;
4811 machine_mode fmode, imode;
4812 bool must_trunc = false;
4814 /* We first try to find a pair of modes, one real and one integer, at
4815 least as wide as FROM and TO, respectively, in which we can open-code
4816 this conversion. If the integer mode is wider than the mode of TO,
4817 we can do the conversion either signed or unsigned. */
4819 for (fmode = GET_MODE (from); fmode != VOIDmode;
4820 fmode = GET_MODE_WIDER_MODE (fmode))
4821 for (imode = GET_MODE (to); imode != VOIDmode;
4822 imode = GET_MODE_WIDER_MODE (imode))
4824 int doing_unsigned = unsignedp;
4826 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4827 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4828 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4830 if (icode != CODE_FOR_nothing)
4832 rtx_insn *last = get_last_insn ();
4833 if (fmode != GET_MODE (from))
4834 from = convert_to_mode (fmode, from, 0);
4836 if (must_trunc)
4838 rtx temp = gen_reg_rtx (GET_MODE (from));
4839 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4840 temp, 0);
4843 if (imode != GET_MODE (to))
4844 target = gen_reg_rtx (imode);
4846 if (maybe_emit_unop_insn (icode, target, from,
4847 doing_unsigned ? UNSIGNED_FIX : FIX))
4849 if (target != to)
4850 convert_move (to, target, unsignedp);
4851 return;
4853 delete_insns_since (last);
4857 /* For an unsigned conversion, there is one more way to do it.
4858 If only a signed conversion is available, we generate code that compares
4859 the real value to the largest representable positive number. If it
4860 is smaller, the conversion is done normally. Otherwise, subtract
4861 one plus the highest signed number, convert, and add it back.
4863 We only need to check all real modes, since we know we didn't find
4864 anything with a wider integer mode.
4866 This code used to extend FP value into mode wider than the destination.
4867 This is needed for decimal float modes which cannot accurately
4868 represent one plus the highest signed number of the same size, but
4869 not for binary modes. Consider, for instance, conversion from SFmode
4870 into DImode.
4872 The hot path through the code deals with inputs smaller than 2^63
4873 and does just the conversion, so there are no bits to lose.
4875 In the other path we know the value is positive in the range 2^63..2^64-1
4876 inclusive (for any other input, overflow happens and the result is
4877 undefined). So we know that the most significant bit set in the mantissa
4878 corresponds to 2^63. The subtraction of 2^63 does not generate any
4879 rounding as it simply clears out that bit. The rest is trivial. */
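/* Worked example (illustrative, DImode destination, N == 64): for a DFmode
   input of 2^63 + 2048 (exactly representable), subtracting the limit 2^63
   gives 2048.0 with no rounding, the signed fix yields 2048, and the XOR
   with 1 << 63 below produces the expected unsigned result
   0x8000000000000800.  */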
4881 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4882 for (fmode = GET_MODE (from); fmode != VOIDmode;
4883 fmode = GET_MODE_WIDER_MODE (fmode))
4884 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
4885 && (!DECIMAL_FLOAT_MODE_P (fmode)
4886 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
4888 int bitsize;
4889 REAL_VALUE_TYPE offset;
4890 rtx limit;
4891 rtx_code_label *lab1, *lab2;
4892 rtx_insn *insn;
4894 bitsize = GET_MODE_PRECISION (GET_MODE (to));
4895 real_2expN (&offset, bitsize - 1, fmode);
4896 limit = const_double_from_real_value (offset, fmode);
4897 lab1 = gen_label_rtx ();
4898 lab2 = gen_label_rtx ();
4900 if (fmode != GET_MODE (from))
4901 from = convert_to_mode (fmode, from, 0);
4903 /* See if we need to do the subtraction. */
4904 do_pending_stack_adjust ();
4905 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4906 0, lab1);
4908 /* If not, do the signed "fix" and branch around fixup code. */
4909 expand_fix (to, from, 0);
4910 emit_jump_insn (targetm.gen_jump (lab2));
4911 emit_barrier ();
4913 /* Otherwise, subtract 2**(N-1), convert to signed number,
4914 then add 2**(N-1). Do the addition using XOR since this
4915 will often generate better code. */
4916 emit_label (lab1);
4917 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4918 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4919 expand_fix (to, target, 0);
4920 target = expand_binop (GET_MODE (to), xor_optab, to,
4921 gen_int_mode
4922 (HOST_WIDE_INT_1 << (bitsize - 1),
4923 GET_MODE (to)),
4924 to, 1, OPTAB_LIB_WIDEN);
4926 if (target != to)
4927 emit_move_insn (to, target);
4929 emit_label (lab2);
4931 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
4933 /* Make a place for a REG_NOTE and add it. */
4934 insn = emit_move_insn (to, to);
4935 set_dst_reg_note (insn, REG_EQUAL,
4936 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
4937 copy_rtx (from)),
4938 to);
4941 return;
4944 /* We can't do it with an insn, so use a library call. But first ensure
4945 that the mode of TO is at least as wide as SImode, since those are the
4946 only library calls we know about. */
4948 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
4950 target = gen_reg_rtx (SImode);
4952 expand_fix (target, from, unsignedp);
4954 else
4956 rtx_insn *insns;
4957 rtx value;
4958 rtx libfunc;
4960 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4961 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4962 gcc_assert (libfunc);
4964 start_sequence ();
4966 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4967 GET_MODE (to), 1, from,
4968 GET_MODE (from));
4969 insns = get_insns ();
4970 end_sequence ();
4972 emit_libcall_block (insns, target, value,
4973 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4974 GET_MODE (to), from));
4977 if (target != to)
4979 if (GET_MODE (to) == GET_MODE (target))
4980 emit_move_insn (to, target);
4981 else
4982 convert_move (to, target, 0);
4987 /* Promote integer arguments for a libcall if necessary.
4988 emit_library_call_value cannot do the promotion because it does not
4989 know if it should do a signed or unsigned promotion. This is because
4990 there are no tree types defined for libcalls. */
4992 static rtx
4993 prepare_libcall_arg (rtx arg, int uintp)
4995 machine_mode mode = GET_MODE (arg);
4996 machine_mode arg_mode;
4997 if (SCALAR_INT_MODE_P (mode))
4999 /* If we need to promote the integer function argument we need to do
5000 it here instead of inside emit_library_call_value because in
5001 emit_library_call_value we don't know if we should do a signed or
5002 unsigned promotion. */
5004 int unsigned_p = 0;
5005 arg_mode = promote_function_mode (NULL_TREE, mode,
5006 &unsigned_p, NULL_TREE, 0);
5007 if (arg_mode != mode)
5008 return convert_to_mode (arg_mode, arg, uintp);
5010 return arg;
5013 /* Generate code to convert FROM to TO, where at least one of them is fixed-point.
5014 If UINTP is true, either TO or FROM is an unsigned integer.
5015 If SATP is true, we need to saturate the result. */
5017 void
5018 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5020 machine_mode to_mode = GET_MODE (to);
5021 machine_mode from_mode = GET_MODE (from);
5022 convert_optab tab;
5023 enum rtx_code this_code;
5024 enum insn_code code;
5025 rtx_insn *insns;
5026 rtx value;
5027 rtx libfunc;
5029 if (to_mode == from_mode)
5031 emit_move_insn (to, from);
5032 return;
5035 if (uintp)
5037 tab = satp ? satfractuns_optab : fractuns_optab;
5038 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5040 else
5042 tab = satp ? satfract_optab : fract_optab;
5043 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5045 code = convert_optab_handler (tab, to_mode, from_mode);
5046 if (code != CODE_FOR_nothing)
5048 emit_unop_insn (code, to, from, this_code);
5049 return;
5052 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5053 gcc_assert (libfunc);
5055 from = prepare_libcall_arg (from, uintp);
5056 from_mode = GET_MODE (from);
5058 start_sequence ();
5059 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5060 1, from, from_mode);
5061 insns = get_insns ();
5062 end_sequence ();
5064 emit_libcall_block (insns, to, value,
5065 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5068 /* Generate code to convert FROM to fixed point and store in TO. FROM
5069 must be floating point, TO must be signed. Use the conversion optab
5070 TAB to do the conversion. */
5072 bool
5073 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5075 enum insn_code icode;
5076 rtx target = to;
5077 machine_mode fmode, imode;
5079 /* We first try to find a pair of modes, one real and one integer, at
5080 least as wide as FROM and TO, respectively, in which we can open-code
5081 this conversion. If the integer mode is wider than the mode of TO,
5082 we can do the conversion either signed or unsigned. */
5084 for (fmode = GET_MODE (from); fmode != VOIDmode;
5085 fmode = GET_MODE_WIDER_MODE (fmode))
5086 for (imode = GET_MODE (to); imode != VOIDmode;
5087 imode = GET_MODE_WIDER_MODE (imode))
5089 icode = convert_optab_handler (tab, imode, fmode);
5090 if (icode != CODE_FOR_nothing)
5092 rtx_insn *last = get_last_insn ();
5093 if (fmode != GET_MODE (from))
5094 from = convert_to_mode (fmode, from, 0);
5096 if (imode != GET_MODE (to))
5097 target = gen_reg_rtx (imode);
5099 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5101 delete_insns_since (last);
5102 continue;
5104 if (target != to)
5105 convert_move (to, target, 0);
5106 return true;
5110 return false;
5113 /* Report whether we have an instruction to perform the operation
5114 specified by CODE on operands of mode MODE. */
5116 have_insn_for (enum rtx_code code, machine_mode mode)
5118 return (code_to_optab (code)
5119 && (optab_handler (code_to_optab (code), mode)
5120 != CODE_FOR_nothing));
5123 /* Print information about the current contents of the optabs on
5124 STDERR. */
5126 DEBUG_FUNCTION void
5127 debug_optab_libfuncs (void)
5129 int i, j, k;
5131 /* Dump the arithmetic optabs. */
5132 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5133 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5135 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5136 if (l)
5138 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5139 fprintf (stderr, "%s\t%s:\t%s\n",
5140 GET_RTX_NAME (optab_to_code ((optab) i)),
5141 GET_MODE_NAME (j),
5142 XSTR (l, 0));
5146 /* Dump the conversion optabs. */
5147 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5148 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5149 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5151 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5152 (machine_mode) k);
5153 if (l)
5155 gcc_assert (GET_CODE (l) == SYMBOL_REF);
5156 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5157 GET_RTX_NAME (optab_to_code ((optab) i)),
5158 GET_MODE_NAME (j),
5159 GET_MODE_NAME (k),
5160 XSTR (l, 0));
5165 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5166 CODE. Return 0 on failure. */
5168 rtx_insn *
5169 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5171 machine_mode mode = GET_MODE (op1);
5172 enum insn_code icode;
5173 rtx_insn *insn;
5174 rtx trap_rtx;
5176 if (mode == VOIDmode)
5177 return 0;
5179 icode = optab_handler (ctrap_optab, mode);
5180 if (icode == CODE_FOR_nothing)
5181 return 0;
5183 /* Some targets only accept a zero trap code. */
5184 if (!insn_operand_matches (icode, 3, tcode))
5185 return 0;
5187 do_pending_stack_adjust ();
5188 start_sequence ();
5189 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5190 &trap_rtx, &mode);
5191 if (!trap_rtx)
5192 insn = NULL;
5193 else
5194 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5195 tcode);
5197 /* If that failed, then give up. */
5198 if (insn == 0)
5200 end_sequence ();
5201 return 0;
5204 emit_insn (insn);
5205 insn = get_insns ();
5206 end_sequence ();
5207 return insn;
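/* Usage sketch (hypothetical operands): to trap when a pointer is null,
   using the zero trap code, which is the most widely accepted:

     rtx_insn *seq = gen_cond_trap (EQ, ptr, const0_rtx, const0_rtx);
     if (seq)
       emit_insn (seq);

   A null result means the target has no usable ctrap pattern for this
   mode or trap code.  */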
5210 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5211 or unsigned operation code. */
5213 enum rtx_code
5214 get_rtx_code (enum tree_code tcode, bool unsignedp)
5216 enum rtx_code code;
5217 switch (tcode)
5219 case EQ_EXPR:
5220 code = EQ;
5221 break;
5222 case NE_EXPR:
5223 code = NE;
5224 break;
5225 case LT_EXPR:
5226 code = unsignedp ? LTU : LT;
5227 break;
5228 case LE_EXPR:
5229 code = unsignedp ? LEU : LE;
5230 break;
5231 case GT_EXPR:
5232 code = unsignedp ? GTU : GT;
5233 break;
5234 case GE_EXPR:
5235 code = unsignedp ? GEU : GE;
5236 break;
5238 case UNORDERED_EXPR:
5239 code = UNORDERED;
5240 break;
5241 case ORDERED_EXPR:
5242 code = ORDERED;
5243 break;
5244 case UNLT_EXPR:
5245 code = UNLT;
5246 break;
5247 case UNLE_EXPR:
5248 code = UNLE;
5249 break;
5250 case UNGT_EXPR:
5251 code = UNGT;
5252 break;
5253 case UNGE_EXPR:
5254 code = UNGE;
5255 break;
5256 case UNEQ_EXPR:
5257 code = UNEQ;
5258 break;
5259 case LTGT_EXPR:
5260 code = LTGT;
5261 break;
5263 case BIT_AND_EXPR:
5264 code = AND;
5265 break;
5267 case BIT_IOR_EXPR:
5268 code = IOR;
5269 break;
5271 default:
5272 gcc_unreachable ();
5274 return code;
5277 /* Return a comparison rtx for comparison code TCODE on T_OP0 and T_OP1,
5278 selecting signed or unsigned operators per UNSIGNEDP. OPNO is the index of
5279 the first comparison operand in insn ICODE; no compare insn is generated. */
5281 static rtx
5282 vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
5283 bool unsignedp, enum insn_code icode,
5284 unsigned int opno)
5286 struct expand_operand ops[2];
5287 rtx rtx_op0, rtx_op1;
5288 machine_mode m0, m1;
5289 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5291 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5293 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5294 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5295 cases, use the original mode. */
5296 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5297 EXPAND_STACK_PARM);
5298 m0 = GET_MODE (rtx_op0);
5299 if (m0 == VOIDmode)
5300 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5302 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5303 EXPAND_STACK_PARM);
5304 m1 = GET_MODE (rtx_op1);
5305 if (m1 == VOIDmode)
5306 m1 = TYPE_MODE (TREE_TYPE (t_op1));
5308 create_input_operand (&ops[0], rtx_op0, m0);
5309 create_input_operand (&ops[1], rtx_op1, m1);
5310 if (!maybe_legitimize_operands (icode, opno, 2, ops))
5311 gcc_unreachable ();
5312 return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
5315 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
5316 vec_perm operand, assuming the second operand is a constant vector of zeroes.
5317 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
5318 shift. */
5319 static rtx
5320 shift_amt_for_vec_perm_mask (rtx sel)
5322 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
5323 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
5325 if (GET_CODE (sel) != CONST_VECTOR)
5326 return NULL_RTX;
5328 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5329 if (first >= nelt)
5330 return NULL_RTX;
5331 for (i = 1; i < nelt; i++)
5333 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5334 unsigned int expected = i + first;
5335 /* Indices into the second vector are all equivalent. */
5336 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5337 return NULL_RTX;
5340 return GEN_INT (first * bitsize);
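/* For instance (illustrative V4SI case): with nelt == 4, the selector
   {1, 2, 3, 4} over a zero second operand describes a shift right by one
   32-bit element, so this returns GEN_INT (32).  Indices >= nelt all
   select zeros from the second vector, which is why they are treated as
   equivalent above.  */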
5343 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5345 static rtx
5346 expand_vec_perm_1 (enum insn_code icode, rtx target,
5347 rtx v0, rtx v1, rtx sel)
5349 machine_mode tmode = GET_MODE (target);
5350 machine_mode smode = GET_MODE (sel);
5351 struct expand_operand ops[4];
5353 create_output_operand (&ops[0], target, tmode);
5354 create_input_operand (&ops[3], sel, smode);
5356 /* Make an effort to preserve v0 == v1. The target expander is able to
5357 rely on this to determine if we're permuting a single input operand. */
5358 if (rtx_equal_p (v0, v1))
5360 if (!insn_operand_matches (icode, 1, v0))
5361 v0 = force_reg (tmode, v0);
5362 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5363 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5365 create_fixed_operand (&ops[1], v0);
5366 create_fixed_operand (&ops[2], v0);
5368 else
5370 create_input_operand (&ops[1], v0, tmode);
5371 create_input_operand (&ops[2], v1, tmode);
5374 if (maybe_expand_insn (icode, 4, ops))
5375 return ops[0].value;
5376 return NULL_RTX;
5379 /* Generate instructions for vec_perm optab given its mode
5380 and three operands. */
5383 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5385 enum insn_code icode;
5386 machine_mode qimode;
5387 unsigned int i, w, e, u;
5388 rtx tmp, sel_qi = NULL;
5389 rtvec vec;
5391 if (!target || GET_MODE (target) != mode)
5392 target = gen_reg_rtx (mode);
5394 w = GET_MODE_SIZE (mode);
5395 e = GET_MODE_NUNITS (mode);
5396 u = GET_MODE_UNIT_SIZE (mode);
5398 /* Set QIMODE to a different vector mode with byte elements.
5399 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5400 qimode = VOIDmode;
5401 if (GET_MODE_INNER (mode) != QImode)
5403 qimode = mode_for_vector (QImode, w);
5404 if (!VECTOR_MODE_P (qimode))
5405 qimode = VOIDmode;
5408 /* If the input is a constant, expand it specially. */
5409 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5410 if (GET_CODE (sel) == CONST_VECTOR)
5412 /* See if this can be handled with a vec_shr. We only do this if the
5413 second vector is all zeroes. */
5414 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5415 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5416 ? optab_handler (vec_shr_optab, qimode)
5417 : CODE_FOR_nothing);
5418 rtx shift_amt = NULL_RTX;
5419 if (v1 == CONST0_RTX (GET_MODE (v1))
5420 && (shift_code != CODE_FOR_nothing
5421 || shift_code_qi != CODE_FOR_nothing))
5423 shift_amt = shift_amt_for_vec_perm_mask (sel);
5424 if (shift_amt)
5426 struct expand_operand ops[3];
5427 if (shift_code != CODE_FOR_nothing)
5429 create_output_operand (&ops[0], target, mode);
5430 create_input_operand (&ops[1], v0, mode);
5431 create_convert_operand_from_type (&ops[2], shift_amt,
5432 sizetype);
5433 if (maybe_expand_insn (shift_code, 3, ops))
5434 return ops[0].value;
5436 if (shift_code_qi != CODE_FOR_nothing)
5438 tmp = gen_reg_rtx (qimode);
5439 create_output_operand (&ops[0], tmp, qimode);
5440 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5441 qimode);
5442 create_convert_operand_from_type (&ops[2], shift_amt,
5443 sizetype);
5444 if (maybe_expand_insn (shift_code_qi, 3, ops))
5445 return gen_lowpart (mode, ops[0].value);
5450 icode = direct_optab_handler (vec_perm_const_optab, mode);
5451 if (icode != CODE_FOR_nothing)
5453 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5454 if (tmp)
5455 return tmp;
5458 /* Fall back to a constant byte-based permutation. */
5459 if (qimode != VOIDmode)
5461 vec = rtvec_alloc (w);
5462 for (i = 0; i < e; ++i)
5464 unsigned int j, this_e;
5466 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5467 this_e &= 2 * e - 1;
5468 this_e *= u;
5470 for (j = 0; j < u; ++j)
5471 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5473 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5475 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5476 if (icode != CODE_FOR_nothing)
5478 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5479 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5480 gen_lowpart (qimode, v1), sel_qi);
5481 if (tmp)
5482 return gen_lowpart (mode, tmp);
5487 /* Otherwise expand as a fully variable permutation. */
5488 icode = direct_optab_handler (vec_perm_optab, mode);
5489 if (icode != CODE_FOR_nothing)
5491 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5492 if (tmp)
5493 return tmp;
5496 /* As a special case to aid several targets, lower the element-based
5497 permutation to a byte-based permutation and try again. */
5498 if (qimode == VOIDmode)
5499 return NULL_RTX;
5500 icode = direct_optab_handler (vec_perm_optab, qimode);
5501 if (icode == CODE_FOR_nothing)
5502 return NULL_RTX;
5504 if (sel_qi == NULL)
5506 /* Multiply each element by its byte size. */
5507 machine_mode selmode = GET_MODE (sel);
5508 if (u == 2)
5509 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5510 NULL, 0, OPTAB_DIRECT);
5511 else
5512 sel = expand_simple_binop (selmode, ASHIFT, sel,
5513 GEN_INT (exact_log2 (u)),
5514 NULL, 0, OPTAB_DIRECT);
5515 gcc_assert (sel != NULL);
5517 /* Broadcast the low byte of each element into each of its bytes. */
5518 vec = rtvec_alloc (w);
5519 for (i = 0; i < w; ++i)
5521 int this_e = i / u * u;
5522 if (BYTES_BIG_ENDIAN)
5523 this_e += u - 1;
5524 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5526 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5527 sel = gen_lowpart (qimode, sel);
5528 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5529 gcc_assert (sel != NULL);
5531 /* Add the byte offset to each byte element. */
5532 /* Note that the definition of the indices here is memory ordering,
5533 so there should be no difference between big and little endian. */
5534 vec = rtvec_alloc (w);
5535 for (i = 0; i < w; ++i)
5536 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5537 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5538 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5539 sel, 0, OPTAB_DIRECT);
5540 gcc_assert (sel_qi != NULL);
5543 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5544 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5545 gen_lowpart (qimode, v1), sel_qi);
5546 if (tmp)
5547 tmp = gen_lowpart (mode, tmp);
5548 return tmp;
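/* Byte-lowering sketch (illustrative little-endian V4HI case): the element
   selector {2, 3, 0, 1} with u == 2 becomes the QImode selector
   {4, 5, 6, 7, 0, 1, 2, 3}: each index is scaled by the element size,
   broadcast to every byte of its element, and then biased by the byte
   offset within the element -- exactly the three steps performed above.  */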
5551 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5552 three operands. */
5555 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5556 rtx target)
5558 struct expand_operand ops[4];
5559 machine_mode mode = TYPE_MODE (vec_cond_type);
5560 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5561 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5562 rtx mask, rtx_op1, rtx_op2;
5564 if (icode == CODE_FOR_nothing)
5565 return 0;
5567 mask = expand_normal (op0);
5568 rtx_op1 = expand_normal (op1);
5569 rtx_op2 = expand_normal (op2);
5571 mask = force_reg (mask_mode, mask);
5572 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5574 create_output_operand (&ops[0], target, mode);
5575 create_input_operand (&ops[1], rtx_op1, mode);
5576 create_input_operand (&ops[2], rtx_op2, mode);
5577 create_input_operand (&ops[3], mask, mask_mode);
5578 expand_insn (icode, 4, ops);
5580 return ops[0].value;
5583 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5584 three operands. */
5587 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5588 rtx target)
5590 struct expand_operand ops[6];
5591 enum insn_code icode;
5592 rtx comparison, rtx_op1, rtx_op2;
5593 machine_mode mode = TYPE_MODE (vec_cond_type);
5594 machine_mode cmp_op_mode;
5595 bool unsignedp;
5596 tree op0a, op0b;
5597 enum tree_code tcode;
5599 if (COMPARISON_CLASS_P (op0))
5601 op0a = TREE_OPERAND (op0, 0);
5602 op0b = TREE_OPERAND (op0, 1);
5603 tcode = TREE_CODE (op0);
5605 else
5607 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5608 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5609 != CODE_FOR_nothing)
5610 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5611 op2, target);
5612 /* Fake op0 < 0. */
5613 else
5615 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5616 == MODE_VECTOR_INT);
5617 op0a = op0;
5618 op0b = build_zero_cst (TREE_TYPE (op0));
5619 tcode = LT_EXPR;
5622 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5623 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5626 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5627 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5629 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5630 if (icode == CODE_FOR_nothing)
5631 return 0;
5633 comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 4);
5634 rtx_op1 = expand_normal (op1);
5635 rtx_op2 = expand_normal (op2);
5637 create_output_operand (&ops[0], target, mode);
5638 create_input_operand (&ops[1], rtx_op1, mode);
5639 create_input_operand (&ops[2], rtx_op2, mode);
5640 create_fixed_operand (&ops[3], comparison);
5641 create_fixed_operand (&ops[4], XEXP (comparison, 0));
5642 create_fixed_operand (&ops[5], XEXP (comparison, 1));
5643 expand_insn (icode, 6, ops);
5644 return ops[0].value;
5647 /* Generate insns for a vector comparison into a mask. */
5650 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5652 struct expand_operand ops[4];
5653 enum insn_code icode;
5654 rtx comparison;
5655 machine_mode mask_mode = TYPE_MODE (type);
5656 machine_mode vmode;
5657 bool unsignedp;
5658 tree op0a, op0b;
5659 enum tree_code tcode;
5661 op0a = TREE_OPERAND (exp, 0);
5662 op0b = TREE_OPERAND (exp, 1);
5663 tcode = TREE_CODE (exp);
5665 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5666 vmode = TYPE_MODE (TREE_TYPE (op0a));
5668 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5669 if (icode == CODE_FOR_nothing)
5670 return 0;
5672 comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 2);
5673 create_output_operand (&ops[0], target, mask_mode);
5674 create_fixed_operand (&ops[1], comparison);
5675 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5676 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5677 expand_insn (icode, 4, ops);
5678 return ops[0].value;
5681 /* Expand a highpart multiply. */
5683 rtx
5684 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5685 rtx target, bool uns_p)
5687 struct expand_operand eops[3];
5688 enum insn_code icode;
5689 int method, i, nunits;
5690 machine_mode wmode;
5691 rtx m1, m2, perm;
5692 optab tab1, tab2;
5693 rtvec v;
5695 method = can_mult_highpart_p (mode, uns_p);
5696 switch (method)
5698 case 0:
5699 return NULL_RTX;
5700 case 1:
5701 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5702 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5703 OPTAB_LIB_WIDEN);
5704 case 2:
5705 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5706 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5707 break;
5708 case 3:
5709 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5710 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5711 if (BYTES_BIG_ENDIAN)
5712 std::swap (tab1, tab2);
5713 break;
5714 default:
5715 gcc_unreachable ();
5718 icode = optab_handler (tab1, mode);
5719 nunits = GET_MODE_NUNITS (mode);
5720 wmode = insn_data[icode].operand[0].mode;
5721 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5722 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5724 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5725 create_input_operand (&eops[1], op0, mode);
5726 create_input_operand (&eops[2], op1, mode);
5727 expand_insn (icode, 3, eops);
5728 m1 = gen_lowpart (mode, eops[0].value);
5730 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5731 create_input_operand (&eops[1], op0, mode);
5732 create_input_operand (&eops[2], op1, mode);
5733 expand_insn (optab_handler (tab2, mode), 3, eops);
5734 m2 = gen_lowpart (mode, eops[0].value);
5736 v = rtvec_alloc (nunits);
5737 if (method == 2)
5739 for (i = 0; i < nunits; ++i)
5740 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5741 + ((i & 1) ? nunits : 0));
5743 else
5745 for (i = 0; i < nunits; ++i)
5746 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5748 perm = gen_rtx_CONST_VECTOR (mode, v);
5750 return expand_vec_perm (mode, m1, m2, perm, target);
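/* Worked example for the permutation above (little-endian V4SImode,
   method 2): the even/odd widening multiplies yield, viewed in MODE,

     m1 = { lo(a0*b0), hi(a0*b0), lo(a2*b2), hi(a2*b2) }
     m2 = { lo(a1*b1), hi(a1*b1), lo(a3*b3), hi(a3*b3) }

   The loop builds the selector { 1, 5, 3, 7 }, which picks exactly the
   high halves in element order:
   { hi(a0*b0), hi(a1*b1), hi(a2*b2), hi(a3*b3) }.  */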
5753 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5754 pattern. */
5756 static void
5757 find_cc_set (rtx x, const_rtx pat, void *data)
5759 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5760 && GET_CODE (pat) == SET)
5762 rtx *p_cc_reg = (rtx *) data;
5763 gcc_assert (!*p_cc_reg);
5764 *p_cc_reg = x;
5768 /* This is a helper function for the other atomic operations. This function
5769 emits a loop that contains SEQ that iterates until a compare-and-swap
5770 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5771 a set of instructions that takes a value from OLD_REG as an input and
5772 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5773 set to the current contents of MEM. After SEQ, a compare-and-swap will
5774 attempt to update MEM with NEW_REG. The function returns true when the
5775 loop was generated successfully. */
5777 static bool
5778 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5780 machine_mode mode = GET_MODE (mem);
5781 rtx_code_label *label;
5782 rtx cmp_reg, success, oldval;
5784 /* The loop we want to generate looks like
5786 cmp_reg = mem;
5787 label:
5788 old_reg = cmp_reg;
5789 seq;
5790 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5791 if (success)
5792 goto label;
5794 Note that we only do the plain load from memory once. Subsequent
5795 iterations use the value loaded by the compare-and-swap pattern. */
5797 label = gen_label_rtx ();
5798 cmp_reg = gen_reg_rtx (mode);
5800 emit_move_insn (cmp_reg, mem);
5801 emit_label (label);
5802 emit_move_insn (old_reg, cmp_reg);
5803 if (seq)
5804 emit_insn (seq);
5806 success = NULL_RTX;
5807 oldval = cmp_reg;
5808 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5809 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5810 MEMMODEL_RELAXED))
5811 return false;
5813 if (oldval != cmp_reg)
5814 emit_move_insn (cmp_reg, oldval);
5816 /* Mark this jump predicted not taken. */
5817 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5818 GET_MODE (success), 1, label, 0);
5819 return true;
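/* Usage sketch (mirroring the compare-and-swap fallback in
   expand_atomic_fetch_op below): SEQ is recorded with the normal
   sequence machinery, e.g. for an atomic add

     rtx t0 = gen_reg_rtx (mode), t1;
     start_sequence ();
     t1 = expand_simple_binop (mode, PLUS, t0, val, NULL_RTX,
                               true, OPTAB_LIB_WIDEN);
     rtx_insn *seq = get_insns ();
     end_sequence ();
     ok = expand_compare_and_swap_loop (mem, t0, t1, seq);

   where T0 plays the role of OLD_REG and T1 of NEW_REG.  */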
5823 /* This function tries to emit an atomic_exchange instruction. VAL is written
5824 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5825 using TARGET if possible. */
5827 static rtx
5828 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5830 machine_mode mode = GET_MODE (mem);
5831 enum insn_code icode;
5833 /* If the target supports the exchange directly, great. */
5834 icode = direct_optab_handler (atomic_exchange_optab, mode);
5835 if (icode != CODE_FOR_nothing)
5837 struct expand_operand ops[4];
5839 create_output_operand (&ops[0], target, mode);
5840 create_fixed_operand (&ops[1], mem);
5841 create_input_operand (&ops[2], val, mode);
5842 create_integer_operand (&ops[3], model);
5843 if (maybe_expand_insn (icode, 4, ops))
5844 return ops[0].value;
5847 return NULL_RTX;
5850 /* This function tries to implement an atomic exchange operation using
5851 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5852 The previous contents of *MEM are returned, using TARGET if possible.
5853 Since this instruction is an acquire barrier only, stronger memory
5854 models may require additional barriers to be emitted. */
5856 static rtx
5857 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
5858 enum memmodel model)
5860 machine_mode mode = GET_MODE (mem);
5861 enum insn_code icode;
5862 rtx_insn *last_insn = get_last_insn ();
5864 icode = optab_handler (sync_lock_test_and_set_optab, mode);
5866 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
5867 exists, and the memory model is stronger than acquire, add a release
5868 barrier before the instruction. */
5870 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
5871 expand_mem_thread_fence (model);
5873 if (icode != CODE_FOR_nothing)
5875 struct expand_operand ops[3];
5876 create_output_operand (&ops[0], target, mode);
5877 create_fixed_operand (&ops[1], mem);
5878 create_input_operand (&ops[2], val, mode);
5879 if (maybe_expand_insn (icode, 3, ops))
5880 return ops[0].value;
5883 /* If an external test-and-set libcall is provided, use that instead of
5884 any external compare-and-swap that we might get from the compare-and-
5885 swap-loop expansion later. */
5886 if (!can_compare_and_swap_p (mode, false))
5888 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
5889 if (libfunc != NULL)
5891 rtx addr;
5893 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
5894 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
5895 mode, 2, addr, ptr_mode,
5896 val, mode);
5900 /* If the test_and_set can't be emitted, eliminate any barrier that might
5901 have been emitted. */
5902 delete_insns_since (last_insn);
5903 return NULL_RTX;
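/* Barrier-placement example (illustrative): lowering
   __atomic_exchange_n (&x, v, __ATOMIC_SEQ_CST) through this routine
   yields

     fence                                  <- release half, emitted above
     old = sync_lock_test_and_set (&x, v)   <- acquire half, from the insn

   whereas acquire or relaxed models need no extra fence, since the
   instruction itself is an acquire barrier.  */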
5906 /* This function tries to implement an atomic exchange operation using a
5907 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5908 *MEM are returned, using TARGET if possible. No memory model is required
5909 since a compare_and_swap loop is seq-cst. */
5911 static rtx
5912 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5914 machine_mode mode = GET_MODE (mem);
5916 if (can_compare_and_swap_p (mode, true))
5918 if (!target || !register_operand (target, mode))
5919 target = gen_reg_rtx (mode);
5920 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5921 return target;
5924 return NULL_RTX;
5927 /* This function tries to implement an atomic test-and-set operation
5928 using the atomic_test_and_set instruction pattern. A boolean value
5929 is returned from the operation, using TARGET if possible. */
5931 static rtx
5932 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
5934 machine_mode pat_bool_mode;
5935 struct expand_operand ops[3];
5937 if (!targetm.have_atomic_test_and_set ())
5938 return NULL_RTX;
5940 /* While we always get QImode from __atomic_test_and_set, we get
5941 other memory modes from __sync_lock_test_and_set. Note that we
5942 use no endian adjustment here. This matches the GCC 4.6 behavior
5943 in the SPARC backend. */
5944 enum insn_code icode = targetm.code_for_atomic_test_and_set;
5945 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
5946 if (GET_MODE (mem) != QImode)
5947 mem = adjust_address_nv (mem, QImode, 0);
5949 pat_bool_mode = insn_data[icode].operand[0].mode;
5950 create_output_operand (&ops[0], target, pat_bool_mode);
5951 create_fixed_operand (&ops[1], mem);
5952 create_integer_operand (&ops[2], model);
5954 if (maybe_expand_insn (icode, 3, ops))
5955 return ops[0].value;
5956 return NULL_RTX;
5959 /* This function expands the legacy __sync_lock_test_and_set operation, which is
5960 generally an atomic exchange. Some limited targets only allow the
5961 constant 1 to be stored. This is an ACQUIRE operation.
5963 TARGET is an optional place to stick the return value.
5964 MEM is where VAL is stored. */
5966 rtx
5967 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
5969 rtx ret;
5971 /* Try an atomic_exchange first. */
5972 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
5973 if (ret)
5974 return ret;
5976 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
5977 MEMMODEL_SYNC_ACQUIRE);
5978 if (ret)
5979 return ret;
5981 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
5982 if (ret)
5983 return ret;
5985 /* If there are no other options, try atomic_test_and_set if the value
5986 being stored is 1. */
5987 if (val == const1_rtx)
5988 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
5990 return ret;
5993 /* This function expands the atomic test_and_set operation:
5994 atomically store a boolean TRUE into MEM and return the previous value.
5996 MEMMODEL is the memory model variant to use.
5997 TARGET is an optional place to stick the return value. */
5999 rtx
6000 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6002 machine_mode mode = GET_MODE (mem);
6003 rtx ret, trueval, subtarget;
6005 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6006 if (ret)
6007 return ret;
6009 /* Be binary compatible with non-default settings of trueval, and different
6010 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6011 another only has atomic-exchange. */
6012 if (targetm.atomic_test_and_set_trueval == 1)
6014 trueval = const1_rtx;
6015 subtarget = target ? target : gen_reg_rtx (mode);
6017 else
6019 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6020 subtarget = gen_reg_rtx (mode);
6023 /* Try the atomic-exchange optab... */
6024 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6026 /* ... then an atomic-compare-and-swap loop ... */
6027 if (!ret)
6028 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6030 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6031 if (!ret)
6032 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6034 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6035 things with the value 1. Thus we try again without trueval. */
6036 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6037 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6039 /* Failing all else, assume a single threaded environment and simply
6040 perform the operation. */
6041 if (!ret)
6044 /* If the result is ignored, skip the move to the target. */
6044 if (subtarget != const0_rtx)
6045 emit_move_insn (subtarget, mem);
6047 emit_move_insn (mem, trueval);
6048 ret = subtarget;
6051 /* Recall that we have to return a boolean value; rectify if trueval
6052 is not exactly one. */
6053 if (targetm.atomic_test_and_set_trueval != 1)
6054 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6056 return ret;
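/* Worked example of the rectification above (illustrative): on a target
   whose test-and-set writes 0xff (a SPARC ldstub style operation, i.e.
   atomic_test_and_set_trueval == 0xff), the fallbacks store
   gen_int_mode (0xff, mode), so the raw result is 0 or 0xff;
   emit_store_flag_force then reduces that to the 0/1 boolean the caller
   expects.  */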
6059 /* This function expands the atomic exchange operation:
6060 atomically store VAL in MEM and return the previous value in MEM.
6062 MEMMODEL is the memory model variant to use.
6063 TARGET is an optional place to stick the return value. */
6065 rtx
6066 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6068 rtx ret;
6070 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6072 /* Next try a compare-and-swap loop for the exchange. */
6073 if (!ret)
6074 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6076 return ret;
6079 /* This function expands the atomic compare exchange operation:
6081 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6082 *PTARGET_OVAL is an optional place to store the old value from memory.
6083 Both target parameters may be NULL or const0_rtx to indicate that we do
6084 not care about that return value. Both target parameters are updated on
6085 success to the actual location of the corresponding result.
6087 MEMMODEL is the memory model variant to use.
6089 The return value of the function is true for success. */
6091 bool
6092 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6093 rtx mem, rtx expected, rtx desired,
6094 bool is_weak, enum memmodel succ_model,
6095 enum memmodel fail_model)
6097 machine_mode mode = GET_MODE (mem);
6098 struct expand_operand ops[8];
6099 enum insn_code icode;
6100 rtx target_oval, target_bool = NULL_RTX;
6101 rtx libfunc;
6103 /* Load expected into a register for the compare and swap. */
6104 if (MEM_P (expected))
6105 expected = copy_to_reg (expected);
6107 /* Make sure we always have some place to put the return oldval.
6108 Further, make sure that place is distinct from the input expected,
6109 just in case we need that path down below. */
6110 if (ptarget_oval && *ptarget_oval == const0_rtx)
6111 ptarget_oval = NULL;
6113 if (ptarget_oval == NULL
6114 || (target_oval = *ptarget_oval) == NULL
6115 || reg_overlap_mentioned_p (expected, target_oval))
6116 target_oval = gen_reg_rtx (mode);
6118 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6119 if (icode != CODE_FOR_nothing)
6121 machine_mode bool_mode = insn_data[icode].operand[0].mode;
6123 if (ptarget_bool && *ptarget_bool == const0_rtx)
6124 ptarget_bool = NULL;
6126 /* Make sure we always have a place for the bool operand. */
6127 if (ptarget_bool == NULL
6128 || (target_bool = *ptarget_bool) == NULL
6129 || GET_MODE (target_bool) != bool_mode)
6130 target_bool = gen_reg_rtx (bool_mode);
6132 /* Emit the compare_and_swap. */
6133 create_output_operand (&ops[0], target_bool, bool_mode);
6134 create_output_operand (&ops[1], target_oval, mode);
6135 create_fixed_operand (&ops[2], mem);
6136 create_input_operand (&ops[3], expected, mode);
6137 create_input_operand (&ops[4], desired, mode);
6138 create_integer_operand (&ops[5], is_weak);
6139 create_integer_operand (&ops[6], succ_model);
6140 create_integer_operand (&ops[7], fail_model);
6141 if (maybe_expand_insn (icode, 8, ops))
6143 /* Return success/failure. */
6144 target_bool = ops[0].value;
6145 target_oval = ops[1].value;
6146 goto success;
6150 /* Otherwise fall back to the original __sync_val_compare_and_swap
6151 which is always seq-cst. */
6152 icode = optab_handler (sync_compare_and_swap_optab, mode);
6153 if (icode != CODE_FOR_nothing)
6155 rtx cc_reg;
6157 create_output_operand (&ops[0], target_oval, mode);
6158 create_fixed_operand (&ops[1], mem);
6159 create_input_operand (&ops[2], expected, mode);
6160 create_input_operand (&ops[3], desired, mode);
6161 if (!maybe_expand_insn (icode, 4, ops))
6162 return false;
6164 target_oval = ops[0].value;
6166 /* If the caller isn't interested in the boolean return value,
6167 skip the computation of it. */
6168 if (ptarget_bool == NULL)
6169 goto success;
6171 /* Otherwise, work out if the compare-and-swap succeeded. */
6172 cc_reg = NULL_RTX;
6173 if (have_insn_for (COMPARE, CCmode))
6174 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6175 if (cc_reg)
6177 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6178 const0_rtx, VOIDmode, 0, 1);
6179 goto success;
6181 goto success_bool_from_val;
6184 /* Also check for library support for __sync_val_compare_and_swap. */
6185 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6186 if (libfunc != NULL)
6188 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6189 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6190 mode, 3, addr, ptr_mode,
6191 expected, mode, desired, mode);
6192 emit_move_insn (target_oval, target);
6194 /* Compute the boolean return value only if requested. */
6195 if (ptarget_bool)
6196 goto success_bool_from_val;
6197 else
6198 goto success;
6201 /* Failure. */
6202 return false;
6204 success_bool_from_val:
6205 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6206 expected, VOIDmode, 1, 1);
6207 success:
6208 /* Make sure that the oval output winds up where the caller asked. */
6209 if (ptarget_oval)
6210 *ptarget_oval = target_oval;
6211 if (ptarget_bool)
6212 *ptarget_bool = target_bool;
6213 return true;
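/* Caller sketch (hypothetical): a strong seq-cst compare-exchange where
   both results are wanted could be expanded as

     rtx ok = NULL_RTX, oldval = NULL_RTX;
     if (expand_atomic_compare_and_swap (&ok, &oldval, mem, expected,
                                         desired, false, MEMMODEL_SEQ_CST,
                                         MEMMODEL_SEQ_CST))
       ... use OK (success flag) and OLDVAL (prior contents) ...

   Passing const0_rtx through either pointer instead marks that result as
   unused so a cheaper expansion can be chosen.  */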
6216 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
6218 static void
6219 expand_asm_memory_barrier (void)
6221 rtx asm_op, clob;
6223 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
6224 rtvec_alloc (0), rtvec_alloc (0),
6225 rtvec_alloc (0), UNKNOWN_LOCATION);
6226 MEM_VOLATILE_P (asm_op) = 1;
6228 clob = gen_rtx_SCRATCH (VOIDmode);
6229 clob = gen_rtx_MEM (BLKmode, clob);
6230 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6232 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6235 /* This routine will either emit the mem_thread_fence pattern or issue a
6236 sync_synchronize to generate a fence for memory model MEMMODEL. */
6238 void
6239 expand_mem_thread_fence (enum memmodel model)
6241 if (targetm.have_mem_thread_fence ())
6242 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6243 else if (!is_mm_relaxed (model))
6245 if (targetm.have_memory_barrier ())
6246 emit_insn (targetm.gen_memory_barrier ());
6247 else if (synchronize_libfunc != NULL_RTX)
6248 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
6249 else
6250 expand_asm_memory_barrier ();
6254 /* This routine will either emit the mem_signal_fence pattern or issue an
6255 asm-level compiler barrier for memory model MEMMODEL. */
6257 void
6258 expand_mem_signal_fence (enum memmodel model)
6260 if (targetm.have_mem_signal_fence ())
6261 emit_insn (targetm.gen_mem_signal_fence (GEN_INT (model)));
6262 else if (!is_mm_relaxed (model))
6264 /* By default targets are coherent between a thread and the signal
6265 handler running on the same thread. Thus this really becomes a
6266 compiler barrier, in that stores must not be sunk past
6267 (or raised above) a given point. */
6268 expand_asm_memory_barrier ();
6272 /* This function expands the atomic load operation:
6273 return the atomically loaded value in MEM.
6275 MEMMODEL is the memory model variant to use.
6276 TARGET is an optional place to stick the return value. */
6278 rtx
6279 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6281 machine_mode mode = GET_MODE (mem);
6282 enum insn_code icode;
6284 /* If the target supports the load directly, great. */
6285 icode = direct_optab_handler (atomic_load_optab, mode);
6286 if (icode != CODE_FOR_nothing)
6288 struct expand_operand ops[3];
6290 create_output_operand (&ops[0], target, mode);
6291 create_fixed_operand (&ops[1], mem);
6292 create_integer_operand (&ops[2], model);
6293 if (maybe_expand_insn (icode, 3, ops))
6294 return ops[0].value;
6297 /* If the size of the object is greater than word size on this target,
6298 then we assume that a load will not be atomic. */
6299 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6301 /* Issue val = compare_and_swap (mem, 0, 0).
6302 This may cause the occasional harmless store of 0 when the value is
6303 already 0, but the standard permits it. */
6304 if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
6305 const0_rtx, false, model, model))
6306 return target;
6307 else
6308 /* Otherwise there is no atomic load, leave the library call. */
6309 return NULL_RTX;
6312 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6313 if (!target || target == const0_rtx)
6314 target = gen_reg_rtx (mode);
6316 /* For SEQ_CST, emit a barrier before the load. */
6317 if (is_mm_seq_cst (model))
6318 expand_mem_thread_fence (model);
6320 emit_move_insn (target, mem);
6322 /* Emit the appropriate barrier after the load. */
6323 expand_mem_thread_fence (model);
6325 return target;
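/* Caller sketch (hypothetical):

     rtx v = expand_atomic_load (NULL_RTX, mem, MEMMODEL_ACQUIRE);
     if (v == NULL_RTX)
       ... emit the __atomic_load libcall instead ...

   A NULL return only happens for objects wider than a word with no usable
   compare-and-swap, where inline code cannot be atomic.  */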
6328 /* This function expands the atomic store operation:
6329 Atomically store VAL in MEM.
6330 MEMMODEL is the memory model variant to use.
6331 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6332 The function returns const0_rtx if a pattern was emitted. */
6334 rtx
6335 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6337 machine_mode mode = GET_MODE (mem);
6338 enum insn_code icode;
6339 struct expand_operand ops[3];
6341 /* If the target supports the store directly, great. */
6342 icode = direct_optab_handler (atomic_store_optab, mode);
6343 if (icode != CODE_FOR_nothing)
6345 create_fixed_operand (&ops[0], mem);
6346 create_input_operand (&ops[1], val, mode);
6347 create_integer_operand (&ops[2], model);
6348 if (maybe_expand_insn (icode, 3, ops))
6349 return const0_rtx;
6352 /* If using __sync_lock_release is a viable alternative, try it. */
6353 if (use_release)
6355 icode = direct_optab_handler (sync_lock_release_optab, mode);
6356 if (icode != CODE_FOR_nothing)
6358 create_fixed_operand (&ops[0], mem);
6359 create_input_operand (&ops[1], const0_rtx, mode);
6360 if (maybe_expand_insn (icode, 2, ops))
6362 /* lock_release is only a release barrier. */
6363 if (is_mm_seq_cst (model))
6364 expand_mem_thread_fence (model);
6365 return const0_rtx;
6370 /* If the size of the object is greater than word size on this target,
6371 a default store will not be atomic. Try an atomic exchange and throw away
6372 the result. If that doesn't work, don't do anything. */
6373 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
6375 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6376 if (!target)
6377 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
6378 if (target)
6379 return const0_rtx;
6380 else
6381 return NULL_RTX;
6384 /* Otherwise assume stores are atomic, and emit the proper barriers. */
6385 expand_mem_thread_fence (model);
6387 emit_move_insn (mem, val);
6389 /* For SEQ_CST, also emit a barrier after the store. */
6390 if (is_mm_seq_cst (model))
6391 expand_mem_thread_fence (model);
6393 return const0_rtx;
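/* Caller sketch (hypothetical); note the return convention: const0_rtx
   signals that a pattern was emitted, NULL_RTX that the caller must fall
   back to a libcall.

     if (expand_atomic_store (mem, val, MEMMODEL_RELEASE, false) == NULL_RTX)
       ... emit the __atomic_store libcall instead ...  */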
6397 /* Structure containing the pointers and values required to process the
6398 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6400 struct atomic_op_functions
6402 direct_optab mem_fetch_before;
6403 direct_optab mem_fetch_after;
6404 direct_optab mem_no_result;
6405 optab fetch_before;
6406 optab fetch_after;
6407 direct_optab no_result;
6408 enum rtx_code reverse_code;
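/* For PLUS, for example, the fields set up below correspond to the
   builtins as follows (illustrative):

     mem_fetch_before  atomic_fetch_add_optab   __atomic_fetch_add
     mem_fetch_after   atomic_add_fetch_optab   __atomic_add_fetch
     mem_no_result     atomic_add_optab         result unused
     fetch_before      sync_old_add_optab       __sync_fetch_and_add
     fetch_after       sync_new_add_optab       __sync_add_and_fetch
     no_result         sync_add_optab           result unused
     reverse_code      MINUS, used to derive one fetch variant from
                       the other.  */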
6412 /* Fill in structure pointed to by OP with the various optab entries for an
6413 operation of type CODE. */
6415 static void
6416 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6418 gcc_assert (op != NULL);
6420 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6421 in the source code during compilation, and the optab entries are not
6422 computable until runtime. Fill in the values at runtime. */
6423 switch (code)
6425 case PLUS:
6426 op->mem_fetch_before = atomic_fetch_add_optab;
6427 op->mem_fetch_after = atomic_add_fetch_optab;
6428 op->mem_no_result = atomic_add_optab;
6429 op->fetch_before = sync_old_add_optab;
6430 op->fetch_after = sync_new_add_optab;
6431 op->no_result = sync_add_optab;
6432 op->reverse_code = MINUS;
6433 break;
6434 case MINUS:
6435 op->mem_fetch_before = atomic_fetch_sub_optab;
6436 op->mem_fetch_after = atomic_sub_fetch_optab;
6437 op->mem_no_result = atomic_sub_optab;
6438 op->fetch_before = sync_old_sub_optab;
6439 op->fetch_after = sync_new_sub_optab;
6440 op->no_result = sync_sub_optab;
6441 op->reverse_code = PLUS;
6442 break;
6443 case XOR:
6444 op->mem_fetch_before = atomic_fetch_xor_optab;
6445 op->mem_fetch_after = atomic_xor_fetch_optab;
6446 op->mem_no_result = atomic_xor_optab;
6447 op->fetch_before = sync_old_xor_optab;
6448 op->fetch_after = sync_new_xor_optab;
6449 op->no_result = sync_xor_optab;
6450 op->reverse_code = XOR;
6451 break;
6452 case AND:
6453 op->mem_fetch_before = atomic_fetch_and_optab;
6454 op->mem_fetch_after = atomic_and_fetch_optab;
6455 op->mem_no_result = atomic_and_optab;
6456 op->fetch_before = sync_old_and_optab;
6457 op->fetch_after = sync_new_and_optab;
6458 op->no_result = sync_and_optab;
6459 op->reverse_code = UNKNOWN;
6460 break;
6461 case IOR:
6462 op->mem_fetch_before = atomic_fetch_or_optab;
6463 op->mem_fetch_after = atomic_or_fetch_optab;
6464 op->mem_no_result = atomic_or_optab;
6465 op->fetch_before = sync_old_ior_optab;
6466 op->fetch_after = sync_new_ior_optab;
6467 op->no_result = sync_ior_optab;
6468 op->reverse_code = UNKNOWN;
6469 break;
6470 case NOT:
6471 op->mem_fetch_before = atomic_fetch_nand_optab;
6472 op->mem_fetch_after = atomic_nand_fetch_optab;
6473 op->mem_no_result = atomic_nand_optab;
6474 op->fetch_before = sync_old_nand_optab;
6475 op->fetch_after = sync_new_nand_optab;
6476 op->no_result = sync_nand_optab;
6477 op->reverse_code = UNKNOWN;
6478 break;
6479 default:
6480 gcc_unreachable ();
6484 /* See if there is a better way to implement the operation "*MEM CODE VAL"
6485 using memory order MODEL. If AFTER is true the operation needs to return
6486 the value of *MEM after the operation, otherwise the previous value.
6487 TARGET is an optional place to place the result. The result is unused if
6488 it is const0_rtx.
6489 Return the result if there is a better sequence, otherwise NULL_RTX. */
6491 static rtx
6492 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6493 enum memmodel model, bool after)
6495 /* If the value is prefetched, or not used, it may be possible to replace
6496 the sequence with a native exchange operation. */
6497 if (!after || target == const0_rtx)
6499 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
6500 if (code == AND && val == const0_rtx)
6502 if (target == const0_rtx)
6503 target = gen_reg_rtx (GET_MODE (mem));
6504 return maybe_emit_atomic_exchange (target, mem, val, model);
6507 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
6508 if (code == IOR && val == constm1_rtx)
6510 if (target == const0_rtx)
6511 target = gen_reg_rtx (GET_MODE (mem));
6512 return maybe_emit_atomic_exchange (target, mem, val, model);
6516 return NULL_RTX;
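/* Both rewrites work because the stored value does not depend on the old
   contents: x AND 0 is always 0 and x IOR -1 is always -1.  So when the
   caller wants the pre-operation value (or no value at all), storing the
   constant and returning the old contents, i.e. an exchange, is exactly
   equivalent.  */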
6519 /* Try to emit an instruction for a specific operation variation.
6520 OPTAB contains the OP functions.
6521 TARGET is an optional place to return the result. const0_rtx means unused.
6522 MEM is the memory location to operate on.
6523 VAL is the value to use in the operation.
6524 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6525 MODEL is the memory model, if used.
6526 AFTER is true if the returned result is the value after the operation. */
6528 static rtx
6529 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6530 rtx val, bool use_memmodel, enum memmodel model, bool after)
6532 machine_mode mode = GET_MODE (mem);
6533 struct expand_operand ops[4];
6534 enum insn_code icode;
6535 int op_counter = 0;
6536 int num_ops;
6538 /* Check to see if there is a result returned. */
6539 if (target == const0_rtx)
6541 if (use_memmodel)
6543 icode = direct_optab_handler (optab->mem_no_result, mode);
6544 create_integer_operand (&ops[2], model);
6545 num_ops = 3;
6547 else
6549 icode = direct_optab_handler (optab->no_result, mode);
6550 num_ops = 2;
6553 /* Otherwise, we need to generate a result. */
6554 else
6556 if (use_memmodel)
6558 icode = direct_optab_handler (after ? optab->mem_fetch_after
6559 : optab->mem_fetch_before, mode);
6560 create_integer_operand (&ops[3], model);
6561 num_ops = 4;
6563 else
6565 icode = optab_handler (after ? optab->fetch_after
6566 : optab->fetch_before, mode);
6567 num_ops = 3;
6569 create_output_operand (&ops[op_counter++], target, mode);
6571 if (icode == CODE_FOR_nothing)
6572 return NULL_RTX;
6574 create_fixed_operand (&ops[op_counter++], mem);
6575 /* VAL may have been promoted to a wider mode. Shrink it if so. */
6576 create_convert_operand_to (&ops[op_counter++], val, mode, true);
6578 if (maybe_expand_insn (icode, num_ops, ops))
6579 return (target == const0_rtx ? const0_rtx : ops[0].value);
6581 return NULL_RTX;
6585 /* This function expands an atomic fetch_OP or OP_fetch operation:
6586 TARGET is an optional place to stick the return value. const0_rtx indicates
6587 the result is unused.
6588 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6589 CODE is the operation being performed (OP).
6590 MEMMODEL is the memory model variant to use.
6591 AFTER is true to return the result of the operation (OP_fetch).
6592 AFTER is false to return the value before the operation (fetch_OP).
6594 This function will *only* generate instructions if there is a direct
6595 optab. No compare and swap loops or libcalls will be generated. */
6597 static rtx
6598 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6599 enum rtx_code code, enum memmodel model,
6600 bool after)
6602 machine_mode mode = GET_MODE (mem);
6603 struct atomic_op_functions optab;
6604 rtx result;
6605 bool unused_result = (target == const0_rtx);
6607 get_atomic_op_for_code (&optab, code);
6609 /* Check to see if there are any better instructions. */
6610 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6611 if (result)
6612 return result;
6614 /* Check for the case where the result isn't used and try those patterns. */
6615 if (unused_result)
6617 /* Try the memory model variant first. */
6618 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6619 if (result)
6620 return result;
6622 /* Next try the old style without a memory model. */
6623 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6624 if (result)
6625 return result;
6627 /* There is no no-result pattern, so try patterns with a result. */
6628 target = NULL_RTX;
6631 /* Try the __atomic version. */
6632 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6633 if (result)
6634 return result;
6636 /* Try the older __sync version. */
6637 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6638 if (result)
6639 return result;
6641 /* If the fetch value can be calculated from the other variation of fetch,
6642 try that operation. */
6643 if (after || unused_result || optab.reverse_code != UNKNOWN)
6645 /* Try the __atomic version, then the older __sync version. */
6646 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6647 if (!result)
6648 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6650 if (result)
6652 /* If the result isn't used, no need to do compensation code. */
6653 if (unused_result)
6654 return result;
6656 /* Issue compensation code. fetch_after == fetch_before OP val.
6657 fetch_before == fetch_after REVERSE_OP val. */
6658 if (!after)
6659 code = optab.reverse_code;
6660 if (code == NOT)
6662 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6663 true, OPTAB_LIB_WIDEN);
6664 result = expand_simple_unop (mode, NOT, result, target, true);
6666 else
6667 result = expand_simple_binop (mode, code, result, val, target,
6668 true, OPTAB_LIB_WIDEN);
6669 return result;
6673 /* No direct opcode can be generated. */
6674 return NULL_RTX;
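/* Compensation example (illustrative): if the caller wants
   atomic_fetch_add (the value before) but only the add_fetch pattern
   exists, the code above emits the OP_fetch form and reconstructs the
   result with reverse_code:

     new = add_fetch (mem, val)    -- value after the operation
     old = new - val               -- value before, via MINUS

   NOT (nand) has no reverse operation (reverse_code == UNKNOWN); going
   from the before-value to the after-value instead recomputes
   ~(old & val) with the AND + NOT pair above.  */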
6679 /* This function expands an atomic fetch_OP or OP_fetch operation:
6680 TARGET is an optional place to stick the return value. const0_rtx indicates
6681 the result is unused.
6682 Atomically fetch MEM, perform the operation with VAL, and store the result back to MEM.
6683 CODE is the operation being performed (OP).
6684 MEMMODEL is the memory model variant to use.
6685 AFTER is true to return the result of the operation (OP_fetch).
6686 AFTER is false to return the value before the operation (fetch_OP). */
6687 rtx
6688 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6689 enum memmodel model, bool after)
6691 machine_mode mode = GET_MODE (mem);
6692 rtx result;
6693 bool unused_result = (target == const0_rtx);
6695 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6696 after);
6698 if (result)
6699 return result;
6701 /* Add/sub can be implemented by doing the reverse operation with -(val). */
6702 if (code == PLUS || code == MINUS)
6704 rtx tmp;
6705 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6707 start_sequence ();
6708 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6709 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6710 model, after);
6711 if (result)
6713 /* The reversed operation worked, so emit the insns and return. */
6714 tmp = get_insns ();
6715 end_sequence ();
6716 emit_insn (tmp);
6717 return result;
6720 /* The reversed operation failed, so throw away the negation code and continue. */
6721 end_sequence ();
6724 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
6725 if (!can_compare_and_swap_p (mode, false))
6727 rtx libfunc;
6728 bool fixup = false;
6729 enum rtx_code orig_code = code;
6730 struct atomic_op_functions optab;
6732 get_atomic_op_for_code (&optab, code);
6733 libfunc = optab_libfunc (after ? optab.fetch_after
6734 : optab.fetch_before, mode);
6735 if (libfunc == NULL
6736 && (after || unused_result || optab.reverse_code != UNKNOWN))
6738 fixup = true;
6739 if (!after)
6740 code = optab.reverse_code;
6741 libfunc = optab_libfunc (after ? optab.fetch_before
6742 : optab.fetch_after, mode);
6744 if (libfunc != NULL)
6746 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6747 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6748 2, addr, ptr_mode, val, mode);
6750 if (!unused_result && fixup)
6751 result = expand_simple_binop (mode, code, result, val, target,
6752 true, OPTAB_LIB_WIDEN);
6753 return result;
6756 /* We need the original code for any further attempts. */
6757 code = orig_code;
6760 /* If nothing else has succeeded, default to a compare and swap loop. */
6761 if (can_compare_and_swap_p (mode, true))
6763 rtx_insn *insn;
6764 rtx t0 = gen_reg_rtx (mode), t1;
6766 start_sequence ();
6768 /* If the result is used, get a register for it. */
6769 if (!unused_result)
6771 if (!target || !register_operand (target, mode))
6772 target = gen_reg_rtx (mode);
6773 /* If fetch_before, copy the value now. */
6774 if (!after)
6775 emit_move_insn (target, t0);
6777 else
6778 target = const0_rtx;
6780 t1 = t0;
6781 if (code == NOT)
6783 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
6784 true, OPTAB_LIB_WIDEN);
6785 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
6787 else
6788 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
6789 OPTAB_LIB_WIDEN);
6791 /* For after, copy the value now. */
6792 if (!unused_result && after)
6793 emit_move_insn (target, t1);
6794 insn = get_insns ();
6795 end_sequence ();
6797 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6798 return target;
6801 return NULL_RTX;
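/* Negation example for the PLUS/MINUS retry above (illustrative): a
   fetch_sub with no sub pattern can be emitted as

     expand_atomic_fetch_op_no_fallback (target, mem, -val, PLUS, ...)

   The NEG of VAL is built inside a pending sequence so it can be thrown
   away if the reversed expansion fails as well.  */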
6804 /* Return true if OPERAND is suitable for operand number OPNO of
6805 instruction ICODE. */
6807 bool
6808 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
6810 return (!insn_data[(int) icode].operand[opno].predicate
6811 || (insn_data[(int) icode].operand[opno].predicate
6812 (operand, insn_data[(int) icode].operand[opno].mode)));
6815 /* TARGET is a target of a multiword operation that we are going to
6816 implement as a series of word-mode operations. Return true if
6817 TARGET is suitable for this purpose. */
6819 bool
6820 valid_multiword_target_p (rtx target)
6822 machine_mode mode;
6823 int i;
6825 mode = GET_MODE (target);
6826 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6827 if (!validate_subreg (word_mode, mode, target, i))
6828 return false;
6829 return true;
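/* For example (illustrative), with 32-bit words a DImode TARGET is
   checked with validate_subreg at byte offsets 0 and 4; anything that
   cannot be decomposed into word_mode subregs that way is rejected.  */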
6832 /* Like maybe_legitimize_operand, but do not change the code of the
6833 current rtx value. */
6835 static bool
6836 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6837 struct expand_operand *op)
6839 /* See if the operand matches in its current form. */
6840 if (insn_operand_matches (icode, opno, op->value))
6841 return true;
6843 /* If the operand is a memory whose address has no side effects,
6844 try forcing the address into a non-virtual pseudo register.
6845 The check for side effects is important because copy_to_mode_reg
6846 cannot handle things like auto-modified addresses. */
6847 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6849 rtx addr, mem;
6851 mem = op->value;
6852 addr = XEXP (mem, 0);
6853 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6854 && !side_effects_p (addr))
6856 rtx_insn *last;
6857 machine_mode mode;
6859 last = get_last_insn ();
6860 mode = get_address_mode (mem);
6861 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6862 if (insn_operand_matches (icode, opno, mem))
6864 op->value = mem;
6865 return true;
6867 delete_insns_since (last);
6871 return false;
6874 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6875 on success, storing the new operand value back in OP. */
6877 static bool
6878 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6879 struct expand_operand *op)
6881 machine_mode mode, imode;
6882 bool old_volatile_ok, result;
6884 mode = op->mode;
6885 switch (op->type)
6887 case EXPAND_FIXED:
6888 old_volatile_ok = volatile_ok;
6889 volatile_ok = true;
6890 result = maybe_legitimize_operand_same_code (icode, opno, op);
6891 volatile_ok = old_volatile_ok;
6892 return result;
6894 case EXPAND_OUTPUT:
6895 gcc_assert (mode != VOIDmode);
6896 if (op->value
6897 && op->value != const0_rtx
6898 && GET_MODE (op->value) == mode
6899 && maybe_legitimize_operand_same_code (icode, opno, op))
6900 return true;
6902 op->value = gen_reg_rtx (mode);
6903 break;
6905 case EXPAND_INPUT:
6906 input:
6907 gcc_assert (mode != VOIDmode);
6908 gcc_assert (GET_MODE (op->value) == VOIDmode
6909 || GET_MODE (op->value) == mode);
6910 if (maybe_legitimize_operand_same_code (icode, opno, op))
6911 return true;
6913 op->value = copy_to_mode_reg (mode, op->value);
6914 break;
6916 case EXPAND_CONVERT_TO:
6917 gcc_assert (mode != VOIDmode);
6918 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
6919 goto input;
6921 case EXPAND_CONVERT_FROM:
6922 if (GET_MODE (op->value) != VOIDmode)
6923 mode = GET_MODE (op->value);
6924 else
6925 /* The caller must tell us what mode this value has. */
6926 gcc_assert (mode != VOIDmode);
6928 imode = insn_data[(int) icode].operand[opno].mode;
6929 if (imode != VOIDmode && imode != mode)
6931 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
6932 mode = imode;
6934 goto input;
6936 case EXPAND_ADDRESS:
6937 gcc_assert (mode != VOIDmode);
6938 op->value = convert_memory_address (mode, op->value);
6939 goto input;
6941 case EXPAND_INTEGER:
6942 mode = insn_data[(int) icode].operand[opno].mode;
6943 if (mode != VOIDmode && const_int_operand (op->value, mode))
6944 goto input;
6945 break;
6947 return insn_operand_matches (icode, opno, op->value);
6950 /* Make OP describe an input operand that should have the same value
6951 as VALUE, after any mode conversion that the target might request.
6952 TYPE is the type of VALUE. */
6954 void
6955 create_convert_operand_from_type (struct expand_operand *op,
6956 rtx value, tree type)
6958 create_convert_operand_from (op, value, TYPE_MODE (type),
6959 TYPE_UNSIGNED (type));
6962 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
6963 of instruction ICODE. Return true on success, leaving the new operand
6964 values in the OPS themselves. Emit no code on failure. */
6966 bool
6967 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
6968 unsigned int nops, struct expand_operand *ops)
6970 rtx_insn *last;
6971 unsigned int i;
6973 last = get_last_insn ();
6974 for (i = 0; i < nops; i++)
6975 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
6977 delete_insns_since (last);
6978 return false;
6980 return true;
6983 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
6984 as its operands. Return the instruction pattern on success,
6985 and emit any necessary set-up code. Return null and emit no
6986 code on failure. */
6988 rtx_insn *
6989 maybe_gen_insn (enum insn_code icode, unsigned int nops,
6990 struct expand_operand *ops)
6992 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
6993 if (!maybe_legitimize_operands (icode, 0, nops, ops))
6994 return NULL;
6996 switch (nops)
6998 case 1:
6999 return GEN_FCN (icode) (ops[0].value);
7000 case 2:
7001 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7002 case 3:
7003 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7004 case 4:
7005 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7006 ops[3].value);
7007 case 5:
7008 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7009 ops[3].value, ops[4].value);
7010 case 6:
7011 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7012 ops[3].value, ops[4].value, ops[5].value);
7013 case 7:
7014 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7015 ops[3].value, ops[4].value, ops[5].value,
7016 ops[6].value);
7017 case 8:
7018 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7019 ops[3].value, ops[4].value, ops[5].value,
7020 ops[6].value, ops[7].value);
7021 case 9:
7022 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7023 ops[3].value, ops[4].value, ops[5].value,
7024 ops[6].value, ops[7].value, ops[8].value);
7026 gcc_unreachable ();
7029 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7030 as its operands. Return true on success and emit no code on failure. */
7032 bool
7033 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7034 struct expand_operand *ops)
7036 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7037 if (pat)
7039 emit_insn (pat);
7040 return true;
7042 return false;
7045 /* Like maybe_expand_insn, but for jumps. */
7047 bool
7048 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7049 struct expand_operand *ops)
7051 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7052 if (pat)
7054 emit_jump_insn (pat);
7055 return true;
7057 return false;
7060 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7061 as its operands. */
7063 void
7064 expand_insn (enum insn_code icode, unsigned int nops,
7065 struct expand_operand *ops)
7067 if (!maybe_expand_insn (icode, nops, ops))
7068 gcc_unreachable ();
7071 /* Like expand_insn, but for jumps. */
7073 void
7074 expand_jump_insn (enum insn_code icode, unsigned int nops,
7075 struct expand_operand *ops)
7077 if (!maybe_expand_jump_insn (icode, nops, ops))
7078 gcc_unreachable ();