/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "langhooks.h"

struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif

static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx, scalar_int_mode, bool);
static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
                                     unsigned HOST_WIDE_INT,
                                     unsigned HOST_WIDE_INT,
                                     rtx, scalar_int_mode, bool);
static void store_split_bit_field (rtx, opt_scalar_int_mode,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx, scalar_int_mode, bool);
static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int, bool);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode,
                             rtx_code_label *);
static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);

/* Return a constant integer mask value of mode MODE with BITSIZE ones
   followed by BITPOS zeros, or the complement of that if COMPLEMENT.
   The mask is truncated if necessary to the width of mode MODE.  The
   mask is zero-extended if BITSIZE+BITPOS is too small for MODE.  */

static inline rtx
mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
{
  return immed_wide_int_const
    (wi::shifted_mask (bitpos, bitsize, complement,
                       GET_MODE_PRECISION (mode)), mode);
}
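
/* Illustrative example (editor's note, not from the upstream sources):
   with MODE == SImode, BITPOS == 4 and BITSIZE == 8 the mask is
   0x00000ff0; with COMPLEMENT set it is 0xfffff00f instead.  */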

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
  (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
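
/* Worked examples (editor's note, not from the upstream sources):
   x == 8 (1000b): 8 & 7 == 0, so the macro is true;
   x == 6 (0110b): 6 & 5 == 4, so the macro is false;
   x == 0: 0 & (0 - 1) == 0, so zero also passes, hence the name.  */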

struct init_expmed_rtl
{
  rtx reg;
  rtx plus;
  rtx neg;
  rtx mult;
  rtx sdiv;
  rtx udiv;
  rtx sdiv_32;
  rtx smod_32;
  rtx wide_mult;
  rtx wide_lshr;
  rtx wide_trunc;
  rtx shift;
  rtx shift_mult;
  rtx shift_add;
  rtx shift_sub0;
  rtx shift_sub1;
  rtx zext;
  rtx trunc;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
};

static void
init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode,
                      scalar_int_mode from_mode, bool speed)
{
  int to_size, from_size;
  rtx which;

  to_size = GET_MODE_PRECISION (to_mode);
  from_size = GET_MODE_PRECISION (from_mode);

  /* Most partial integers have a precision less than that of the "full"
     integer mode required for their storage.  If one does not, reduce
     its bit size by one for the comparison below.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (to_size))
    to_size --;
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
      && pow2p_hwi (from_size))
    from_size --;
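
  /* Illustrative example (editor's note, hypothetical mode, not from the
     upstream sources): if a partial-integer mode reported a precision of
     exactly 32 bits, the decrement makes it compare as narrower than a
     true 32-bit mode, so converting from the full mode to it is still
     costed as a truncation rather than an extension.  */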

  /* Assume cost of zero-extend and sign-extend is the same.  */
  which = (to_size < from_size ? all->trunc : all->zext);

  PUT_MODE (all->reg, from_mode);
  set_convert_cost (to_mode, from_mode, speed,
                    set_src_cost (which, to_mode, speed));
}

static void
init_expmed_one_mode (struct init_expmed_rtl *all,
                      machine_mode mode, int speed)
{
  int m, n, mode_bitsize;
  machine_mode mode_from;

  mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

  PUT_MODE (all->reg, mode);
  PUT_MODE (all->plus, mode);
  PUT_MODE (all->neg, mode);
  PUT_MODE (all->mult, mode);
  PUT_MODE (all->sdiv, mode);
  PUT_MODE (all->udiv, mode);
  PUT_MODE (all->sdiv_32, mode);
  PUT_MODE (all->smod_32, mode);
  PUT_MODE (all->wide_trunc, mode);
  PUT_MODE (all->shift, mode);
  PUT_MODE (all->shift_mult, mode);
  PUT_MODE (all->shift_add, mode);
  PUT_MODE (all->shift_sub0, mode);
  PUT_MODE (all->shift_sub1, mode);
  PUT_MODE (all->zext, mode);
  PUT_MODE (all->trunc, mode);

  set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
  set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
  set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
  set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
  set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));

  set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
                                     <= 2 * add_cost (speed, mode)));
  set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
                                     <= 4 * add_cost (speed, mode)));

  set_shift_cost (speed, mode, 0, 0);
  {
    int cost = add_cost (speed, mode);
    set_shiftadd_cost (speed, mode, 0, cost);
    set_shiftsub0_cost (speed, mode, 0, cost);
    set_shiftsub1_cost (speed, mode, 0, cost);
  }

  n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
  for (m = 1; m < n; m++)
    {
      XEXP (all->shift, 1) = all->cint[m];
      XEXP (all->shift_mult, 1) = all->pow2[m];

      set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
      set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
                                                       speed));
      set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
                                                        speed));
      set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
                                                        speed));
    }

  scalar_int_mode int_mode_to;
  if (is_a <scalar_int_mode> (mode, &int_mode_to))
    {
      for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
           mode_from = (machine_mode)(mode_from + 1))
        init_expmed_one_conv (all, int_mode_to,
                              as_a <scalar_int_mode> (mode_from), speed);

      scalar_int_mode wider_mode;
      if (GET_MODE_CLASS (int_mode_to) == MODE_INT
          && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
        {
          PUT_MODE (all->zext, wider_mode);
          PUT_MODE (all->wide_mult, wider_mode);
          PUT_MODE (all->wide_lshr, wider_mode);
          XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize);

          set_mul_widen_cost (speed, wider_mode,
                              set_src_cost (all->wide_mult, wider_mode, speed));
          set_mul_highpart_cost (speed, int_mode_to,
                                 set_src_cost (all->wide_trunc,
                                               int_mode_to, speed));
        }
    }
}

void
init_expmed (void)
{
  struct init_expmed_rtl all;
  machine_mode mode = QImode;
  int m, speed;

  memset (&all, 0, sizeof all);
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
      all.cint[m] = GEN_INT (m);
    }

  /* Avoid using hard regs in ways which may be unsupported.  */
  all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
  all.neg = gen_rtx_NEG (mode, all.reg);
  all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
  all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
  all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
  all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
  all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
  all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
  all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
  all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
  all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
  all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
  all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
  all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
  all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
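  /* Editor's note (not from the upstream sources): shift_mult stands for
     (x * 2^m), so shift_add models (x * 2^m) + y, shift_sub0 models
     (x * 2^m) - y, and shift_sub1 models y - (x * 2^m);
     init_expmed_one_mode plugs in the actual power of two for each M.  */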
  all.trunc = gen_rtx_TRUNCATE (mode, all.reg);

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      set_zero_cost (speed, set_src_cost (const0_rtx, mode, speed));

      for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
           mode = (machine_mode)(mode + 1))
        init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_PARTIAL_INT != VOIDmode)
        for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);

      if (MIN_MODE_VECTOR_INT != VOIDmode)
        for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
             mode = (machine_mode)(mode + 1))
          init_expmed_one_mode (&all, mode, speed);
    }

  if (alg_hash_used_p ())
    {
      struct alg_hash_entry *p = alg_hash_entry_ptr (0);
      memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
    }
  else
    set_alg_hash_used_p (true);
  default_rtl_profile ();

  ggc_free (all.trunc);
  ggc_free (all.shift_sub1);
  ggc_free (all.shift_sub0);
  ggc_free (all.shift_add);
  ggc_free (all.shift_mult);
  ggc_free (all.shift);
  ggc_free (all.wide_trunc);
  ggc_free (all.wide_lshr);
  ggc_free (all.wide_mult);
  ggc_free (all.zext);
  ggc_free (all.smod_32);
  ggc_free (all.sdiv_32);
  ggc_free (all.udiv);
  ggc_free (all.sdiv);
  ggc_free (all.mult);
  ggc_free (all.neg);
  ggc_free (all.plus);
  ggc_free (all.reg);
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Whether reverse storage order is supported on the target.  */
static int reverse_storage_order_supported = -1;

/* Check whether reverse storage order is supported on the target.  */

static void
check_reverse_storage_order_support (void)
{
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_storage_order_supported = 0;
      sorry ("reverse scalar storage order");
    }
  else
    reverse_storage_order_supported = 1;
}

/* Whether reverse FP storage order is supported on the target.  */
static int reverse_float_storage_order_supported = -1;

/* Check whether reverse FP storage order is supported on the target.  */

static void
check_reverse_float_storage_order_support (void)
{
  if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    {
      reverse_float_storage_order_supported = 0;
      sorry ("reverse floating-point scalar storage order");
    }
  else
    reverse_float_storage_order_supported = 1;
}

/* Return an rtx representing the value of X with reverse storage order.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
flip_storage_order (machine_mode mode, rtx x)
{
  scalar_int_mode int_mode;
  rtx result;

  if (mode == QImode)
    return x;

  if (COMPLEX_MODE_P (mode))
    {
      rtx real = read_complex_part (x, false);
      rtx imag = read_complex_part (x, true);

      real = flip_storage_order (GET_MODE_INNER (mode), real);
      imag = flip_storage_order (GET_MODE_INNER (mode), imag);

      return gen_rtx_CONCAT (mode, real, imag);
    }

  if (__builtin_expect (reverse_storage_order_supported < 0, 0))
    check_reverse_storage_order_support ();

  if (!is_a <scalar_int_mode> (mode, &int_mode))
    {
      if (FLOAT_MODE_P (mode)
          && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
        check_reverse_float_storage_order_support ();

      if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
        {
          sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
          return x;
        }
      x = gen_lowpart (int_mode, x);
    }

  result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
  if (result == 0)
    result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);

  if (int_mode != mode)
    result = gen_lowpart (mode, result);

  return result;
}

/* If MODE is set, adjust bitfield memory MEM so that it points to the
   first unit of mode MODE that contains a bitfield of size BITSIZE at
   bit position BITNUM.  If MODE is not set, return a BLKmode reference
   to every byte in the bitfield.  Set *NEW_BITNUM to the bit position
   of the field within the new memory.  */

static rtx
narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode,
                      unsigned HOST_WIDE_INT bitsize,
                      unsigned HOST_WIDE_INT bitnum,
                      unsigned HOST_WIDE_INT *new_bitnum)
{
  scalar_int_mode imode;
  if (mode.exists (&imode))
    {
      unsigned int unit = GET_MODE_BITSIZE (imode);
      *new_bitnum = bitnum % unit;
      HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
      return adjust_bitfield_address (mem, imode, offset);
    }
  else
    {
      *new_bitnum = bitnum % BITS_PER_UNIT;
      HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
      HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
                            / BITS_PER_UNIT);
      return adjust_bitfield_address_size (mem, BLKmode, offset, size);
    }
}
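
/* Illustrative example for the BLKmode case (editor's note, not from the
   upstream sources): BITSIZE == 10 and BITNUM == 37 give *NEW_BITNUM == 5,
   a byte offset of 4, and a size of (5 + 10 + 7) / 8 == 2 bytes.  */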

/* The caller wants to perform insertion or extraction PATTERN on a
   bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
   BITREGION_START and BITREGION_END are as for store_bit_field
   and FIELDMODE is the natural mode of the field.

   Search for a mode that is compatible with the memory access
   restrictions and (where applicable) with a register insertion or
   extraction.  Return the new memory on success, storing the adjusted
   bit position in *NEW_BITNUM.  Return null otherwise.  */

static rtx
adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                              rtx op0, HOST_WIDE_INT bitsize,
                              HOST_WIDE_INT bitnum,
                              unsigned HOST_WIDE_INT bitregion_start,
                              unsigned HOST_WIDE_INT bitregion_end,
                              machine_mode fieldmode,
                              unsigned HOST_WIDE_INT *new_bitnum)
{
  bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
                                bitregion_end, MEM_ALIGN (op0),
                                MEM_VOLATILE_P (op0));
  scalar_int_mode best_mode;
  if (iter.next_mode (&best_mode))
    {
      /* We can use a memory in BEST_MODE.  See whether this is true for
         any wider modes.  All other things being equal, we prefer to
         use the widest mode possible because it tends to expose more
         CSE opportunities.  */
      if (!iter.prefer_smaller_modes ())
        {
          /* Limit the search to the mode required by the corresponding
             register insertion or extraction instruction, if any.  */
          scalar_int_mode limit_mode = word_mode;
          extraction_insn insn;
          if (get_best_reg_extraction_insn (&insn, pattern,
                                            GET_MODE_BITSIZE (best_mode),
                                            fieldmode))
            limit_mode = insn.field_mode;

          scalar_int_mode wider_mode;
          while (iter.next_mode (&wider_mode)
                 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
            best_mode = wider_mode;
        }
      return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
                                   new_bitnum);
    }
  return NULL_RTX;
}

/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
   a structure of mode STRUCT_MODE represents a lowpart subreg.  The subreg
   offset is then BITNUM / BITS_PER_UNIT.  */

static bool
lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
                     unsigned HOST_WIDE_INT bitsize,
                     machine_mode struct_mode)
{
  unsigned HOST_WIDE_INT regsize = REGMODE_NATURAL_SIZE (struct_mode);
  if (BYTES_BIG_ENDIAN)
    return (bitnum % BITS_PER_UNIT == 0
            && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
                || (bitnum + bitsize) % (regsize * BITS_PER_UNIT) == 0));
  else
    return bitnum % (regsize * BITS_PER_UNIT) == 0;
}
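
/* Illustrative example (editor's note, not from the upstream sources):
   on a little-endian target with REGMODE_NATURAL_SIZE == 4, a field is
   a lowpart iff it starts at bit 0, 32, 64, ...; on big-endian it must
   instead end on such a boundary (or at the end of the structure).  */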

/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
   Return false if the access would touch memory outside the range
   BITREGION_START to BITREGION_END for conformance to the C++ memory
   model.  */

static bool
strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            scalar_int_mode fieldmode,
                            unsigned HOST_WIDE_INT bitregion_start,
                            unsigned HOST_WIDE_INT bitregion_end)
{
  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);

  /* -fstrict-volatile-bitfields must be enabled and we must have a
     volatile MEM.  */
  if (!MEM_P (op0)
      || !MEM_VOLATILE_P (op0)
      || flag_strict_volatile_bitfields <= 0)
    return false;

  /* The bit size must not be larger than the field mode, and
     the field mode must not be larger than a word.  */
  if (bitsize > modesize || modesize > BITS_PER_WORD)
    return false;

  /* Check for cases of unaligned fields that must be split.  */
  if (bitnum % modesize + bitsize > modesize)
    return false;

  /* The memory must be sufficiently aligned for a MODESIZE access.
     This condition guarantees that the memory access will not
     touch anything after the end of the structure.  */
  if (MEM_ALIGN (op0) < modesize)
    return false;

  /* Check for cases where the C++ memory model applies.  */
  if (bitregion_end != 0
      && (bitnum - bitnum % modesize < bitregion_start
          || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
    return false;

  return true;
}
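
/* Illustrative example for the memory-model check above (editor's note,
   not from the upstream sources): with MODESIZE == 32 and BITNUM == 40,
   the access covers bits 32..63 of the containing object.  With
   BITREGION_START <= 32 and BITREGION_END == 63 that stays inside the
   region; with BITREGION_END == 47 it would overrun, so we return false
   and force a narrower access.  */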

/* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum, machine_mode mode)
{
  return (MEM_P (op0)
          && bitnum % BITS_PER_UNIT == 0
          && bitsize == GET_MODE_BITSIZE (mode)
          && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
              || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
}

/* Try to use instruction INSV to store VALUE into a field of OP0.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
   BLKmode MEM.  VALUE_MODE is the mode of VALUE.  BITSIZE and BITNUM
   are as for store_bit_field.  */

static bool
store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
                            opt_scalar_int_mode op0_mode,
                            unsigned HOST_WIDE_INT bitsize,
                            unsigned HOST_WIDE_INT bitnum,
                            rtx value, scalar_int_mode value_mode)
{
  struct expand_operand ops[4];
  rtx value1;
  rtx xop0 = op0;
  rtx_insn *last = get_last_insn ();
  bool copy_back = false;

  scalar_int_mode op_mode = insv->field_mode;
  unsigned int unit = GET_MODE_BITSIZE (op_mode);
  if (bitsize == 0 || bitsize > unit)
    return false;

  if (MEM_P (xop0))
    /* Get a reference to the first byte of the field.  */
    xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
                                 &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in OP_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);
    }

  /* If the destination is a paradoxical subreg such that we need a
     truncate to the inner mode, perform the insertion on a temporary and
     truncate the result to the original destination.  Note that we can't
     just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
     X) 0)) is (reg:N X).  */
  if (GET_CODE (xop0) == SUBREG
      && REG_P (SUBREG_REG (xop0))
      && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                         op_mode))
    {
      rtx tem = gen_reg_rtx (op_mode);
      emit_move_insn (tem, xop0);
      xop0 = tem;
      copy_back = true;
    }

  /* There is a similar overflow check at the start of store_bit_field_1,
     but that only handles the case where the field lies completely
     outside the register.  The field can also lie partially in the
     register, in which case we need to adjust BITSIZE for the partial
     overflow.  Without this fix, pr48335-2.c is broken on big-endian
     targets that have a bit-insert instruction, such as arm and
     aarch64.  */
  if (bitsize + bitnum > unit && bitnum < unit)
    {
      warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
               "destination object, data truncated into %wu-bit",
               bitsize, unit - bitnum);
      bitsize = unit - bitnum;
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are inserting into.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;
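
  /* Illustrative example (editor's note, not from the upstream sources):
     with UNIT == 32, BITSIZE == 8 and the original BITNUM == 4, the insv
     pattern is handed bit position 32 - 8 - 4 == 20.  */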

  /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
  value1 = value;
  if (value_mode != op_mode)
    {
      if (GET_MODE_BITSIZE (value_mode) >= bitsize)
        {
          rtx tmp;
          /* Optimization: Don't bother really extending VALUE
             if it has all the bits we will actually use.  However,
             if we must narrow it, be sure we do it correctly.  */

          if (GET_MODE_SIZE (value_mode) < GET_MODE_SIZE (op_mode))
            {
              tmp = simplify_subreg (op_mode, value1, value_mode, 0);
              if (! tmp)
                tmp = simplify_gen_subreg (op_mode,
                                           force_reg (value_mode, value1),
                                           value_mode, 0);
            }
          else
            {
              tmp = gen_lowpart_if_possible (op_mode, value1);
              if (! tmp)
                tmp = gen_lowpart (op_mode, force_reg (value_mode, value1));
            }
          value1 = tmp;
        }
      else if (CONST_INT_P (value))
        value1 = gen_int_mode (INTVAL (value), op_mode);
      else
        /* Parse phase is supposed to make VALUE's data type
           match that of the component reference, which is a type
           at least as wide as the field; so VALUE should have
           a mode that corresponds to that type.  */
        gcc_assert (CONSTANT_P (value));
    }

  create_fixed_operand (&ops[0], xop0);
  create_integer_operand (&ops[1], bitsize);
  create_integer_operand (&ops[2], bitnum);
  create_input_operand (&ops[3], value1, op_mode);
  if (maybe_expand_insn (insv->icode, 4, ops))
    {
      if (copy_back)
        convert_move (op0, xop0, true);
      return true;
    }
  delete_insns_since (last);
  return false;
}

/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   machine_mode fieldmode,
                   rtx value, bool reverse, bool fallback_p)
{
  rtx op0 = str_rtx;
  rtx orig_value;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  machine_mode outermode = GET_MODE (op0);
  scalar_mode innermode = GET_MODE_INNER (outermode);
  if (VECTOR_MODE_P (outermode)
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
      && fieldmode == innermode
      && bitsize == GET_MODE_BITSIZE (innermode)
      && !(bitnum % GET_MODE_BITSIZE (innermode)))
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.  */
  if (!MEM_P (op0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
          || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
    {
      /* Use the subreg machinery either to narrow OP0 to the required
         words or to cope with mode punning between equal-sized modes.
         In the latter case, use subreg on the rhs side, not lhs.  */
      rtx sub;

      if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
        {
          sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
          if (sub)
            {
              if (reverse)
                sub = flip_storage_order (GET_MODE (op0), sub);
              emit_move_insn (op0, sub);
              return true;
            }
        }
      else
        {
          sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                     bitnum / BITS_PER_UNIT);
          if (sub)
            {
              if (reverse)
                value = flip_storage_order (fieldmode, value);
              emit_move_insn (sub, value);
              return true;
            }
        }
    }

  /* If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
    {
      op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
      if (reverse)
        value = flip_storage_order (fieldmode, value);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
  scalar_int_mode imode;
  if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
    {
      if (MEM_P (op0))
        op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
                                            0, MEM_SIZE (op0));
      else
        op0 = gen_lowpart (op0_mode.require (), op0);
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movstrict instruction.  */

  if (!MEM_P (op0)
      && !reverse
      && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = bitnum / BITS_PER_UNIT;
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx_insn *last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards
                                  ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
                                  - i - 1
                                  : i);
          unsigned int bit_offset = (backwards ^ reverse
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);
          unsigned HOST_WIDE_INT new_bitsize =
            MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);

          /* If the remaining chunk doesn't have full wordsize we have
             to make sure that for big-endian machines the higher order
             bits are used.  */
          if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
            value_word = simplify_expand_binop (word_mode, lshr_optab,
                                                value_word,
                                                GEN_INT (BITS_PER_WORD
                                                         - new_bitsize),
                                                NULL_RTX, true,
                                                OPTAB_LIB_WIDEN);

          if (!store_bit_field_1 (op0, new_bitsize,
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, reverse, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }
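
  /* Illustrative example for the loop above (editor's note, not from the
     upstream sources): with BITSIZE == 96 on a 32-bit-word target and
     backwards ^ reverse true, successive iterations use bit offsets
     MAX (96 - 32, 0) == 64, then 32, then 0, so the least significant
     word of the field really is transferred first.  */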

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  scalar_int_mode value_mode;
  if (GET_MODE (value) == VOIDmode)
    /* By this point we've dealt with values that are bigger than a word,
       so word_mode is a conservatively correct choice.  */
    value_mode = word_mode;
  else if (!is_a <scalar_int_mode> (GET_MODE (value), &value_mode))
    {
      value_mode = int_mode_for_mode (GET_MODE (value)).require ();
      value = gen_reg_rtx (value_mode);
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* If OP0 is a multi-word register, narrow it to the affected word.
     If the region spans two words, defer to store_split_bit_field.
     Don't do this if op0 is a single hard register wider than word
     such as a float or vector register.  */
  if (!MEM_P (op0)
      && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD
      && (!REG_P (op0)
          || !HARD_REGISTER_P (op0)
          || hard_regno_nregs (REGNO (op0), op0_mode.require ()) != 1))
    {
      if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
        {
          if (!fallback_p)
            return false;

          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return true;
        }
      op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
                                 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
      gcc_assert (op0);
      op0_mode = word_mode;
      bitnum %= BITS_PER_WORD;
    }
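
  /* Illustrative example for the narrowing above (editor's note, not
     from the upstream sources): for a DImode register on a 32-bit-word
     target, BITNUM == 40 selects the second word (byte offset
     40 / 32 * 4 == 4) and becomes 40 % 32 == 8 within that word.  */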

  /* From here on we can assume that the field to be stored in fits
     within a word.  If the destination is a register, it too fits
     in a word.  */

  extraction_insn insv;
  if (!MEM_P (op0)
      && !reverse
      && get_best_reg_extraction_insn (&insv, EP_insv,
                                       GET_MODE_BITSIZE (op0_mode.require ()),
                                       fieldmode)
      && store_bit_field_using_insv (&insv, op0, op0_mode,
                                     bitsize, bitnum, value, value_mode))
    return true;

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (MEM_P (op0) && !reverse)
    {
      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
                                        fieldmode)
          && store_bit_field_using_insv (&insv, op0, op0_mode,
                                         bitsize, bitnum, value, value_mode))
        return true;

      rtx_insn *last = get_last_insn ();

      /* Try loading part of OP0 into a register, inserting the bitfield
         into that, and then copying the result back to OP0.  */
      unsigned HOST_WIDE_INT bitpos;
      rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
                                               bitregion_start, bitregion_end,
                                               fieldmode, &bitpos);
      if (xop0)
        {
          rtx tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, bitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, reverse, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, op0_mode, bitsize, bitnum, bitregion_start,
                         bitregion_end, value, value_mode, reverse);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is the bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.

   If REVERSE is true, the store is to be done in reverse order.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 machine_mode fieldmode,
                 rtx value, bool reverse)
{
  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (fieldmode, &int_mode)
      && strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, int_mode,
                                     bitregion_start, bitregion_end))
    {
      /* Storing of a full word can be done with a simple store.
         We know here that the field can be accessed with one single
         instruction.  For targets that support unaligned memory,
         an unaligned access may be necessary.  */
      if (bitsize == GET_MODE_BITSIZE (int_mode))
        {
          str_rtx = adjust_bitfield_address (str_rtx, int_mode,
                                             bitnum / BITS_PER_UNIT);
          if (reverse)
            value = flip_storage_order (int_mode, value);
          gcc_assert (bitnum % BITS_PER_UNIT == 0);
          emit_move_insn (str_rtx, value);
        }
      else
        {
          rtx temp;

          str_rtx = narrow_bit_field_mem (str_rtx, int_mode, bitsize, bitnum,
                                          &bitnum);
          gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (int_mode));
          temp = copy_to_reg (str_rtx);
          if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0,
                                  int_mode, value, reverse, true))
            gcc_unreachable ();

          emit_move_insn (str_rtx, temp);
        }

      return;
    }

  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx) && bitregion_start > 0)
    {
      scalar_int_mode best_mode;
      machine_mode addr_mode = VOIDmode;
      HOST_WIDE_INT offset, size;

      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      if (get_best_mode (bitsize, bitnum,
                         bitregion_start, bitregion_end,
                         MEM_ALIGN (str_rtx), INT_MAX,
                         MEM_VOLATILE_P (str_rtx), &best_mode))
        addr_mode = best_mode;
      str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
                                              offset, size);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, reverse, true))
    gcc_unreachable ();
}

/* Use shifts and boolean operations to store VALUE into a bit field of
   width BITSIZE in OP0, starting at bit BITNUM.  If OP0_MODE is defined,
   it is the mode of OP0, otherwise OP0 is a BLKmode MEM.  VALUE_MODE is
   the mode of VALUE.

   If REVERSE is true, the store is to be done in reverse order.  */

static void
store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitnum,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  scalar_int_mode best_mode;
  if (MEM_P (op0))
    {
      unsigned int max_bitsize = BITS_PER_WORD;
      scalar_int_mode imode;
      if (op0_mode.exists (&imode) && GET_MODE_BITSIZE (imode) < max_bitsize)
        max_bitsize = GET_MODE_BITSIZE (imode);

      if (!get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
                          MEM_ALIGN (op0), max_bitsize, MEM_VOLATILE_P (op0),
                          &best_mode))
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, op0_mode, bitsize, bitnum,
                                 bitregion_start, bitregion_end,
                                 value, value_mode, reverse);
          return;
        }

      op0 = narrow_bit_field_mem (op0, best_mode, bitsize, bitnum, &bitnum);
    }
  else
    best_mode = op0_mode.require ();

  store_fixed_bit_field_1 (op0, best_mode, bitsize, bitnum,
                           value, value_mode, reverse);
}

/* Helper function for store_fixed_bit_field, stores
   the bit field always using MODE, which is the mode of OP0.  The other
   arguments are as for store_fixed_bit_field.  */

static void
store_fixed_bit_field_1 (rtx op0, scalar_int_mode mode,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitnum,
                         rtx value, scalar_int_mode value_mode, bool reverse)
{
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as f5 from gcc.dg/pr48335-2.c.  */

  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITNUM bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (CONST_INT_P (value))
    {
      unsigned HOST_WIDE_INT v = UINTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= (HOST_WIDE_INT_1U << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == (HOST_WIDE_INT_1U << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT
                   && v == HOST_WIDE_INT_M1U))
        all_one = 1;

      value = lshift_value (mode, v, bitnum);
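      /* Illustrative example (editor's note, not from the upstream
         sources): BITSIZE == 4, BITNUM == 8 and VALUE == 0x2a mask V
         down to 0xa, and the shift above yields the constant 0xa00.  */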
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (value_mode) != bitsize
                      && bitnum + bitsize != GET_MODE_BITSIZE (mode));

      if (value_mode != mode)
        value = convert_to_mode (mode, value, 1);

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitnum > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              bitnum, NULL_RTX, 1);
    }

  if (reverse)
    value = flip_storage_order (mode, value);

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
      if (reverse)
        mask = flip_storage_order (mode, mask);
      temp = expand_binop (mode, and_optab, temp, mask,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    {
      op0 = copy_rtx (op0);
      emit_move_insn (op0, temp);
    }
}

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store, which has mode VALUE_MODE.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
   a BLKmode MEM.

   If REVERSE is true, the store is to be done in reverse order.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos,
                       unsigned HOST_WIDE_INT bitregion_start,
                       unsigned HOST_WIDE_INT bitregion_end,
                       rtx value, scalar_int_mode value_mode, bool reverse)
{
  unsigned int unit, total_bits, bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If OP0 is a memory with a mode, then UNIT must not be larger than
     OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
     again, and we will mutually recurse forever.  */
  if (MEM_P (op0) && op0_mode.exists ())
    unit = MIN (unit, GET_MODE_BITSIZE (op0_mode.require ()));

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && !CONST_INT_P (value))
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode, force_reg (value_mode, value));
      value_mode = word_mode;
    }

  total_bits = GET_MODE_BITSIZE (value_mode);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;
      rtx part;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* When region of bytes we can touch is restricted, decrease
         UNIT close to the end of the region as needed.  If op0 is a REG
         or SUBREG of REG, don't do this, as there can't be data races
         on a register and we can expand shorter code in some cases.  */
      if (bitregion_end
          && unit > BITS_PER_UNIT
          && bitpos + bitsdone - thispos + unit > bitregion_end + 1
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
        {
          unit = unit / 2;
          continue;
        }

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
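
      /* Illustrative example (editor's note, not from the upstream
         sources): with UNIT == 32, BITPOS == 28 and BITSIZE == 8, the
         first iteration stores THISSIZE == 4 bits at THISPOS == 28 of
         word 0, and the second stores the remaining 4 bits at
         THISPOS == 0 of word 1.  */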

      if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is little-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            bitsize - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1, false);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (CONST_INT_P (value))
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & ((HOST_WIDE_INT_1 << thissize) - 1));
          /* Likewise, but the source is big-endian.  */
          else if (reverse)
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize,
                                            total_bits - bitsdone - thissize,
                                            NULL_RTX, 1, false);
          else
            part = extract_fixed_bit_field (word_mode, value, value_mode,
                                            thissize, bitsdone, NULL_RTX,
                                            1, false);
        }

      /* If OP0 is a register, then handle OFFSET here.  */
      rtx op0_piece = op0;
      opt_scalar_int_mode op0_piece_mode = op0_mode;
      if (SUBREG_P (op0) || REG_P (op0))
        {
          scalar_int_mode imode;
          if (op0_mode.exists (&imode)
              && GET_MODE_SIZE (imode) < UNITS_PER_WORD)
            {
              if (offset)
                op0_piece = const0_rtx;
            }
          else
            {
              op0_piece = operand_subword_force (op0,
                                                 offset * unit / BITS_PER_WORD,
                                                 GET_MODE (op0));
              op0_piece_mode = word_mode;
            }
          offset &= BITS_PER_WORD / unit - 1;
        }

      /* OFFSET is in UNITs, and UNIT is in bits.  If OP0_PIECE is
         const0_rtx, it is just an out-of-bounds access.  Ignore it.  */
      if (op0_piece != const0_rtx)
        store_fixed_bit_field (op0_piece, op0_piece_mode, thissize,
                               offset * unit + thispos, bitregion_start,
                               bitregion_end, part, word_mode, reverse);
      bitsdone += thissize;
    }
}

/* A subroutine of extract_bit_field_1 that converts return value X
   to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
   to extract_bit_field.  */

static rtx
convert_extracted_bit_field (rtx x, machine_mode mode,
                             machine_mode tmode, bool unsignedp)
{
  if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
    return x;

  /* If the x mode is not a scalar integral, first convert to the
     integer mode of that size and then access it as a floating-point
     value via a SUBREG.  */
  if (!SCALAR_INT_MODE_P (tmode))
    {
      scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
      x = convert_to_mode (int_mode, x, unsignedp);
      x = force_reg (int_mode, x);
      return gen_lowpart (tmode, x);
    }

  return convert_to_mode (tmode, x, unsignedp);
}

/* Try to use an ext(z)v pattern to extract a field from OP0.
   Return the extracted value on success, otherwise return null.
   EXTV describes the extraction instruction to use.  If OP0_MODE
   is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
   The other arguments are as for extract_bit_field.  */

static rtx
extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
                              opt_scalar_int_mode op0_mode,
                              unsigned HOST_WIDE_INT bitsize,
                              unsigned HOST_WIDE_INT bitnum,
                              int unsignedp, rtx target,
                              machine_mode mode, machine_mode tmode)
{
  struct expand_operand ops[4];
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  scalar_int_mode ext_mode = extv->field_mode;
  unsigned unit = GET_MODE_BITSIZE (ext_mode);

  if (bitsize == 0 || unit < bitsize)
    return NULL_RTX;

  if (MEM_P (op0))
    /* Get a reference to the first byte of the field.  */
    op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
                                &bitnum);
  else
    {
      /* Convert from counting within OP0 to counting in EXT_MODE.  */
      if (BYTES_BIG_ENDIAN)
        bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());

      /* If op0 is a register, we need it in EXT_MODE to make it
         acceptable to the format of ext(z)v.  */
      if (GET_CODE (op0) == SUBREG && op0_mode.require () != ext_mode)
        return NULL_RTX;
      if (REG_P (op0) && op0_mode.require () != ext_mode)
        op0 = gen_lowpart_SUBREG (ext_mode, op0);
    }

  /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
     "backwards" from the size of the unit we are extracting from.
     Otherwise, we count bits from the most significant on a
     BYTES/BITS_BIG_ENDIAN machine.  */

  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    bitnum = unit - bitsize - bitnum;

  if (target == 0)
    target = spec_target = gen_reg_rtx (tmode);

  if (GET_MODE (target) != ext_mode)
    {
      /* Don't use LHS paradoxical subreg if explicit truncation is needed
         between the mode of the extraction (word_mode) and the target
         mode.  Instead, create a temporary and use convert_move to set
         the target.  */
      if (REG_P (target)
          && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
        {
          target = gen_lowpart (ext_mode, target);
          if (partial_subreg_p (GET_MODE (spec_target), ext_mode))
            spec_target_subreg = target;
        }
      else
        target = gen_reg_rtx (ext_mode);
    }

  create_output_operand (&ops[0], target, ext_mode);
  create_fixed_operand (&ops[1], op0);
  create_integer_operand (&ops[2], bitsize);
  create_integer_operand (&ops[3], bitnum);
  if (maybe_expand_insn (extv->icode, 4, ops))
    {
      target = ops[0].value;
      if (target == spec_target)
        return target;
      if (target == spec_target_subreg)
        return spec_target;
      return convert_extracted_bit_field (target, mode, tmode, unsignedp);
    }
  return NULL_RTX;
}

/* A subroutine of extract_bit_field, with the same arguments.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   If FALLBACK_P is false, return NULL instead.  */

static rtx
extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                     machine_mode mode, machine_mode tmode,
                     bool reverse, bool fallback_p, rtx *alt_rtl)
{
  rtx op0 = str_rtx;
  machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      if (reverse)
        op0 = flip_storage_order (mode, op0);
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* First try to check for vector from vector extractions.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && VECTOR_MODE_P (tmode)
      && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode))
    {
      machine_mode new_mode = GET_MODE (op0);
      if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
        {
          scalar_mode inner_mode = GET_MODE_INNER (tmode);
          unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0))
                                 / GET_MODE_UNIT_BITSIZE (tmode));
          if (!mode_for_vector (inner_mode, nunits).exists (&new_mode)
              || !VECTOR_MODE_P (new_mode)
              || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
              || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
              || !targetm.vector_mode_supported_p (new_mode))
            new_mode = VOIDmode;
        }
      if (new_mode != VOIDmode
          && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
              != CODE_FOR_nothing)
          && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (tmode)
              == bitnum / GET_MODE_BITSIZE (tmode)))
        {
          struct expand_operand ops[3];
          machine_mode outermode = new_mode;
          machine_mode innermode = tmode;
          enum insn_code icode
            = convert_optab_handler (vec_extract_optab, outermode, innermode);
          unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);

          if (new_mode != GET_MODE (op0))
            op0 = gen_lowpart (new_mode, op0);
          create_output_operand (&ops[0], target, innermode);
          ops[0].target = 1;
          create_input_operand (&ops[1], op0, outermode);
          create_integer_operand (&ops[2], pos);
          if (maybe_expand_insn (icode, 3, ops))
            {
              if (alt_rtl && ops[0].target)
                *alt_rtl = target;
              target = ops[0].value;
              if (GET_MODE (target) != mode)
                return gen_lowpart (tmode, target);
              return target;
            }
        }
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      machine_mode new_mode;

      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
        new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
        new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
        new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
        new_mode = MIN_MODE_VECTOR_UACCUM;
      else
        new_mode = MIN_MODE_VECTOR_INT;

      FOR_EACH_MODE_FROM (new_mode, new_mode)
        if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
            && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode)
            && targetm.vector_mode_supported_p (new_mode))
          break;
      if (new_mode != VOIDmode)
        op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  machine_mode outermode = GET_MODE (op0);
  scalar_mode innermode = GET_MODE_INNER (outermode);
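  /* Editor's note on the "same element" test below (not from the
     upstream sources): with 32-bit elements, BITNUM == 40 and
     BITSIZE == 16 give (40 + 16 - 1) / 32 == 40 / 32, so the field is
     contained in element 1; BITNUM == 24 would straddle elements 0
     and 1, and the optab would not be used.  */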
1657 if (VECTOR_MODE_P (outermode)
1658 && !MEM_P (op0)
1659 && (convert_optab_handler (vec_extract_optab, outermode, innermode)
1660 != CODE_FOR_nothing)
1661 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (innermode)
1662 == bitnum / GET_MODE_BITSIZE (innermode)))
1664 struct expand_operand ops[3];
1665 enum insn_code icode
1666 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1667 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1669 create_output_operand (&ops[0], target, innermode);
1670 ops[0].target = 1;
1671 create_input_operand (&ops[1], op0, outermode);
1672 create_integer_operand (&ops[2], pos);
1673 if (maybe_expand_insn (icode, 3, ops))
1675 if (alt_rtl && ops[0].target)
1676 *alt_rtl = target;
1677 target = ops[0].value;
1678 if (GET_MODE (target) != mode)
1679 return gen_lowpart (tmode, target);
1680 return target;
1684 /* Make sure we are playing with integral modes. Pun with subregs
1685 if we aren't. */
1686 opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
1687 scalar_int_mode imode;
1688 if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
1690 if (MEM_P (op0))
1691 op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
1692 0, MEM_SIZE (op0));
1693 else if (op0_mode.exists (&imode))
1695 op0 = gen_lowpart (imode, op0);
1697 /* If we got a SUBREG, force it into a register since we
1698 aren't going to be able to do another SUBREG on it. */
1699 if (GET_CODE (op0) == SUBREG)
1700 op0 = force_reg (imode, op0);
1702 else
1704 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
1705 rtx mem = assign_stack_temp (GET_MODE (op0), size);
1706 emit_move_insn (mem, op0);
1707 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1711 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1712 If that's wrong, the solution is to test for it and set TARGET to 0
1713 if needed. */
1715 /* Get the mode of the field to use for atomic access or subreg
1716 conversion. */
1717 if (!SCALAR_INT_MODE_P (tmode)
1718 || !mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0).exists (&mode1))
1719 mode1 = mode;
1720 gcc_assert (mode1 != BLKmode);
1722 /* Extraction of a full MODE1 value can be done with a subreg as long
1723 as the least significant bit of the value is the least significant
1724 bit of either OP0 or a word of OP0. */
1725 if (!MEM_P (op0)
1726 && !reverse
1727 && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
1728 && bitsize == GET_MODE_BITSIZE (mode1)
1729 && TRULY_NOOP_TRUNCATION_MODES_P (mode1, op0_mode.require ()))
1731 rtx sub = simplify_gen_subreg (mode1, op0, op0_mode.require (),
1732 bitnum / BITS_PER_UNIT);
1733 if (sub)
1734 return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1737 /* Extraction of a full MODE1 value can be done with a load as long as
1738 the field is on a byte boundary and is sufficiently aligned. */
1739 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
1741 op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
1742 if (reverse)
1743 op0 = flip_storage_order (mode1, op0);
1744 return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1747 /* Handle fields bigger than a word. */
1749 if (bitsize > BITS_PER_WORD)
1751 /* Here we transfer the words of the field
1752 in the order least significant first.
1753 This is because the most significant word is the one which may
1754 be less than full. */
1756 const bool backwards = WORDS_BIG_ENDIAN;
1757 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1758 unsigned int i;
1759 rtx_insn *last;
1761 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1762 target = gen_reg_rtx (mode);
1764 /* In case we're about to clobber a base register or something
1765 (see gcc.c-torture/execute/20040625-1.c). */
1766 if (reg_mentioned_p (target, str_rtx))
1767 target = gen_reg_rtx (mode);
1769 /* Indicate for flow that the entire target reg is being set. */
1770 emit_clobber (target);
1772 last = get_last_insn ();
1773 for (i = 0; i < nwords; i++)
1775 /* If I is 0, use the low-order word in both field and target;
1776 if I is 1, use the next to lowest word; and so on. */
1777 /* Word number in TARGET to use. */
1778 unsigned int wordnum
1779 = (backwards
1780 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1781 : i);
1782 /* Offset from start of field in OP0. */
1783 unsigned int bit_offset = (backwards ^ reverse
1784 ? MAX ((int) bitsize - ((int) i + 1)
1785 * BITS_PER_WORD,
1787 : (int) i * BITS_PER_WORD);
1788 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1789 rtx result_part
1790 = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1791 bitsize - i * BITS_PER_WORD),
1792 bitnum + bit_offset, 1, target_part,
1793 mode, word_mode, reverse, fallback_p, NULL);
1795 gcc_assert (target_part);
1796 if (!result_part)
1798 delete_insns_since (last);
1799 return NULL;
1802 if (result_part != target_part)
1803 emit_move_insn (target_part, result_part);
1806 if (unsignedp)
1808 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1809 need to be zeroed out. */
1810 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1812 unsigned int i, total_words;
1814 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1815 for (i = nwords; i < total_words; i++)
1816 emit_move_insn
1817 (operand_subword (target,
1818 backwards ? total_words - i - 1 : i,
1819 1, VOIDmode),
1820 const0_rtx);
1822 return target;
1825 /* Signed bit field: sign-extend with two arithmetic shifts. */
1826 target = expand_shift (LSHIFT_EXPR, mode, target,
1827 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1828 return expand_shift (RSHIFT_EXPR, mode, target,
1829 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
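/* A minimal standalone sketch (not part of GCC) of the two-shift sign
   extension used above, assuming a 32-bit word; note that at the RTL
   level the left shift of a negative value is well defined, unlike in
   ISO C:

     int32_t
     sign_extend_field (int32_t x, unsigned int bitsize)
     {
       return (x << (32 - bitsize)) >> (32 - bitsize);
     }

   The left shift moves the field's msb into the word's msb; the
   arithmetic right shift then replicates it through the upper bits.  */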
1832 /* If OP0 is a multi-word register, narrow it to the affected word.
1833 If the region spans two words, defer to extract_split_bit_field. */
1834 if (!MEM_P (op0) && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD)
1836 if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1838 if (!fallback_p)
1839 return NULL_RTX;
1840 target = extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
1841 unsignedp, reverse);
1842 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1844 op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
1845 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1846 op0_mode = word_mode;
1847 bitnum %= BITS_PER_WORD;
1850 /* From here on we know the desired field is smaller than a word.
1851 If OP0 is a register, it too fits within a word. */
1852 enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1853 extraction_insn extv;
1854 if (!MEM_P (op0)
1855 && !reverse
1856 /* ??? We could limit the structure size to the part of OP0 that
1857 contains the field, with appropriate checks for endianness
1858 and TARGET_TRULY_NOOP_TRUNCATION. */
1859 && get_best_reg_extraction_insn (&extv, pattern,
1860 GET_MODE_BITSIZE (op0_mode.require ()),
1861 tmode))
1863 rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1864 bitsize, bitnum,
1865 unsignedp, target, mode,
1866 tmode);
1867 if (result)
1868 return result;
1871 /* If OP0 is a memory, try copying it to a register and seeing if a
1872 cheap register alternative is available. */
1873 if (MEM_P (op0) && !reverse)
1875 if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1876 tmode))
1878 rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
1879 bitsize, bitnum,
1880 unsignedp, target, mode,
1881 tmode);
1882 if (result)
1883 return result;
1886 rtx_insn *last = get_last_insn ();
1888 /* Try loading part of OP0 into a register and extracting the
1889 bitfield from that. */
1890 unsigned HOST_WIDE_INT bitpos;
1891 rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1892 0, 0, tmode, &bitpos);
1893 if (xop0)
1895 xop0 = copy_to_reg (xop0);
1896 rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1897 unsignedp, target,
1898 mode, tmode, reverse, false, NULL);
1899 if (result)
1900 return result;
1901 delete_insns_since (last);
1905 if (!fallback_p)
1906 return NULL;
1908 /* Find a correspondingly-sized integer field, so we can apply
1909 shifts and masks to it. */
1910 scalar_int_mode int_mode;
1911 if (!int_mode_for_mode (tmode).exists (&int_mode))
1912 /* If this fails, we should probably push op0 out to memory and then
1913 do a load. */
1914 int_mode = int_mode_for_mode (mode).require ();
1916 target = extract_fixed_bit_field (int_mode, op0, op0_mode, bitsize,
1917 bitnum, target, unsignedp, reverse);
1919 /* Complex values must be reversed piecewise, so we need to undo the global
1920 reversal, convert to the complex mode and reverse again. */
1921 if (reverse && COMPLEX_MODE_P (tmode))
1923 target = flip_storage_order (int_mode, target);
1924 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
1925 target = flip_storage_order (tmode, target);
1927 else
1928 target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
1930 return target;
1933 /* Generate code to extract a bit-field from STR_RTX
1934 containing BITSIZE bits, starting at BITNUM,
1935 and put it in TARGET if possible (if TARGET is nonzero).
1936 Regardless of TARGET, we return the rtx for where the value is placed.
1938 STR_RTX is the structure containing the field (a REG or MEM).
1939 UNSIGNEDP is nonzero if this is an unsigned bit field.
1940 MODE is the natural mode of the field value once extracted.
1941 TMODE is the mode the caller would like the value to have;
1942 but the value may be returned with type MODE instead.
1944 If REVERSE is true, the extraction is to be done in reverse order.
1946 If a TARGET is specified and we can store in it at no extra cost,
1947 we do so, and return TARGET.
1948 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1949 if they are equally easy. */
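/* For example, on a little-endian target, reading p->b from

     struct { unsigned int a : 3; unsigned int b : 5; } *p;

   expands to something like

     extract_bit_field (mem, 5, 3, 1, NULL_RTX, SImode, SImode,
                        false, NULL);

   i.e. a 5-bit unsigned field starting at bit 3; on most targets this
   ends up as the shift-and-mask sequence of extract_fixed_bit_field.
   (An illustrative sketch only; the exact arguments depend on the
   target and the surrounding expansion.)  */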
1952 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1953 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1954 machine_mode mode, machine_mode tmode, bool reverse,
1955 rtx *alt_rtl)
1957 machine_mode mode1;
1959 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1960 if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
1961 mode1 = GET_MODE (str_rtx);
1962 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1963 mode1 = GET_MODE (target);
1964 else
1965 mode1 = tmode;
1967 scalar_int_mode int_mode;
1968 if (is_a <scalar_int_mode> (mode1, &int_mode)
1969 && strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, int_mode, 0, 0))
1971 /* Extraction of a full INT_MODE value can be done with a simple load.
1972 We know here that the field can be accessed with a single
1973 instruction. For targets that support unaligned memory,
1974 an unaligned access may be necessary. */
1975 if (bitsize == GET_MODE_BITSIZE (int_mode))
1977 rtx result = adjust_bitfield_address (str_rtx, int_mode,
1978 bitnum / BITS_PER_UNIT);
1979 if (reverse)
1980 result = flip_storage_order (int_mode, result);
1981 gcc_assert (bitnum % BITS_PER_UNIT == 0);
1982 return convert_extracted_bit_field (result, mode, tmode, unsignedp);
1985 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, bitsize, bitnum,
1986 &bitnum);
1987 gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (int_mode));
1988 str_rtx = copy_to_reg (str_rtx);
1991 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1992 target, mode, tmode, reverse, true, alt_rtl);
1995 /* Use shifts and boolean operations to extract a field of BITSIZE bits
1996 from bit BITNUM of OP0. If OP0_MODE is defined, it is the mode of OP0,
1997 otherwise OP0 is a BLKmode MEM.
1999 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
2000 If REVERSE is true, the extraction is to be done in reverse order.
2002 If TARGET is nonzero, attempts to store the value there
2003 and return TARGET, but this is not guaranteed.
2004 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
2006 static rtx
2007 extract_fixed_bit_field (machine_mode tmode, rtx op0,
2008 opt_scalar_int_mode op0_mode,
2009 unsigned HOST_WIDE_INT bitsize,
2010 unsigned HOST_WIDE_INT bitnum, rtx target,
2011 int unsignedp, bool reverse)
2013 scalar_int_mode mode;
2014 if (MEM_P (op0))
2016 if (!get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
2017 BITS_PER_WORD, MEM_VOLATILE_P (op0), &mode))
2018 /* The only way this should occur is if the field spans word
2019 boundaries. */
2020 return extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
2021 unsignedp, reverse);
2023 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
2025 else
2026 mode = op0_mode.require ();
2028 return extract_fixed_bit_field_1 (tmode, op0, mode, bitsize, bitnum,
2029 target, unsignedp, reverse);
2032 /* Helper function for extract_fixed_bit_field; it always extracts
2033 the bit field using MODE, which is the mode of OP0.
2034 The other arguments are as for extract_fixed_bit_field. */
2036 static rtx
2037 extract_fixed_bit_field_1 (machine_mode tmode, rtx op0, scalar_int_mode mode,
2038 unsigned HOST_WIDE_INT bitsize,
2039 unsigned HOST_WIDE_INT bitnum, rtx target,
2040 int unsignedp, bool reverse)
2042 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
2043 for invalid input, such as extracting the equivalent of f5 from
2044 gcc.dg/pr48335-2.c. */
2046 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2047 /* BITNUM is the distance between our msb and that of OP0.
2048 Convert it to the distance from the lsb. */
2049 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
2051 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
2052 We have reduced the big-endian case to the little-endian case. */
2053 if (reverse)
2054 op0 = flip_storage_order (mode, op0);
2056 if (unsignedp)
2058 if (bitnum)
2060 /* If the field does not already start at the lsb,
2061 shift it so it does. */
2062 /* Maybe propagate the target for the shift. */
2063 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2064 if (tmode != mode)
2065 subtarget = 0;
2066 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
2068 /* Convert the value to the desired mode. TMODE must also be a
2069 scalar integer for this conversion to make sense, since we
2070 shouldn't reinterpret the bits. */
2071 scalar_int_mode new_mode = as_a <scalar_int_mode> (tmode);
2072 if (mode != new_mode)
2073 op0 = convert_to_mode (new_mode, op0, 1);
2075 /* Unless the msb of the field used to be the msb when we shifted,
2076 mask out the upper bits. */
2078 if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
2079 return expand_binop (new_mode, and_optab, op0,
2080 mask_rtx (new_mode, 0, bitsize, 0),
2081 target, 1, OPTAB_LIB_WIDEN);
2082 return op0;
2085 /* To extract a signed bit-field, first shift its msb to the msb of the word,
2086 then arithmetic-shift its lsb to the lsb of the word. */
2087 op0 = force_reg (mode, op0);
2089 /* Find the narrowest integer mode that contains the field. */
2091 opt_scalar_int_mode mode_iter;
2092 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2093 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize + bitnum)
2094 break;
2096 mode = mode_iter.require ();
2097 op0 = convert_to_mode (mode, op0, 0);
2099 if (mode != tmode)
2100 target = 0;
2102 if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
2104 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
2105 /* Maybe propagate the target for the shift. */
2106 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
2107 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
2110 return expand_shift (RSHIFT_EXPR, mode, op0,
2111 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
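/* A minimal standalone sketch (not part of GCC) of what the unsigned
   path above computes, assuming a 32-bit word with little-endian bit
   numbering:

     uint32_t
     extract_uns_field (uint32_t w, unsigned int bitnum,
                        unsigned int bitsize)
     {
       w >>= bitnum;      /* Bring the field's lsb down to bit 0.  */
       if (bitsize < 32)  /* Mask out the bits above the field.  */
         w &= ((uint32_t) 1 << bitsize) - 1;
       return w;
     }

   The signed path instead widens as needed and uses the two shifts
   emitted at the end of the function.  */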
2114 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
2115 VALUE << BITPOS. */
2117 static rtx
2118 lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
2119 int bitpos)
2121 return immed_wide_int_const (wi::lshift (value, bitpos), mode);
2124 /* Extract a bit field that is split across two words
2125 and return an RTX for the result.
2127 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2128 BITSIZE is the field width; BITPOS is the position of its first bit in the word.
2129 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
2130 If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
2131 a BLKmode MEM.
2133 If REVERSE is true, the extraction is to be done in reverse order. */
2135 static rtx
2136 extract_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
2137 unsigned HOST_WIDE_INT bitsize,
2138 unsigned HOST_WIDE_INT bitpos, int unsignedp,
2139 bool reverse)
2141 unsigned int unit;
2142 unsigned int bitsdone = 0;
2143 rtx result = NULL_RTX;
2144 int first = 1;
2146 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2147 much at a time. */
2148 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2149 unit = BITS_PER_WORD;
2150 else
2151 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2153 while (bitsdone < bitsize)
2155 unsigned HOST_WIDE_INT thissize;
2156 rtx part;
2157 unsigned HOST_WIDE_INT thispos;
2158 unsigned HOST_WIDE_INT offset;
2160 offset = (bitpos + bitsdone) / unit;
2161 thispos = (bitpos + bitsdone) % unit;
2163 /* THISSIZE must not overrun a word boundary. Otherwise,
2164 extract_fixed_bit_field will call us again, and we will mutually
2165 recurse forever. */
2166 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2167 thissize = MIN (thissize, unit - thispos);
2169 /* If OP0 is a register, then handle OFFSET here. */
2170 rtx op0_piece = op0;
2171 opt_scalar_int_mode op0_piece_mode = op0_mode;
2172 if (SUBREG_P (op0) || REG_P (op0))
2174 op0_piece = operand_subword_force (op0, offset, op0_mode.require ());
2175 op0_piece_mode = word_mode;
2176 offset = 0;
2179 /* Extract the parts in bit-counting order,
2180 whose meaning is determined by BYTES_BIG_ENDIAN.
2181 OFFSET is in UNITs, and UNIT is in bits. */
2182 part = extract_fixed_bit_field (word_mode, op0_piece, op0_piece_mode,
2183 thissize, offset * unit + thispos,
2184 0, 1, reverse);
2185 bitsdone += thissize;
2187 /* Shift this part into place for the result. */
2188 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
2190 if (bitsize != bitsdone)
2191 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2192 bitsize - bitsdone, 0, 1);
2194 else
2196 if (bitsdone != thissize)
2197 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2198 bitsdone - thissize, 0, 1);
2201 if (first)
2202 result = part;
2203 else
2204 /* Combine the parts with bitwise OR. This works
2205 because we extracted each part as an unsigned bit field. */
2206 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2207 OPTAB_LIB_WIDEN);
2209 first = 0;
2212 /* Unsigned bit field: we are done. */
2213 if (unsignedp)
2214 return result;
2215 /* Signed bit field: sign-extend with two arithmetic shifts. */
2216 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2217 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2218 return expand_shift (RSHIFT_EXPR, word_mode, result,
2219 BITS_PER_WORD - bitsize, NULL_RTX, 0);
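/* A minimal standalone sketch (not part of GCC) of the two-word case,
   assuming 32-bit little-endian words and a field starting at bit POS
   (0 < POS < 32) of W0 and spilling into W1:

     uint32_t
     extract_split (uint32_t w0, uint32_t w1, unsigned int pos,
                    unsigned int bitsize)
     {
       uint32_t lo = w0 >> pos;         /* Piece held in the first word.  */
       uint32_t hi = w1 << (32 - pos);  /* Piece held in the second word.  */
       uint32_t val = lo | hi;
       return bitsize < 32 ? val & (((uint32_t) 1 << bitsize) - 1) : val;
     }

   The loop above generalizes this to arbitrary alignments, word counts,
   endiannesses and storage orders.  */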
2222 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2223 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2224 MODE, fill the upper bits with zeros. Fail if the layout of either
2225 mode is unknown (as for CC modes) or if the extraction would involve
2226 unprofitable mode punning. Return the value on success, otherwise
2227 return null.
2229 This is different from gen_lowpart* in these respects:
2231 - the returned value must always be considered an rvalue
2233 - when MODE is wider than SRC_MODE, the extraction involves
2234 a zero extension
2236 - when MODE is smaller than SRC_MODE, the extraction involves
2237 a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).
2239 In other words, this routine performs a computation, whereas the
2240 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2241 operations. */
2244 extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
2246 scalar_int_mode int_mode, src_int_mode;
2248 if (mode == src_mode)
2249 return src;
2251 if (CONSTANT_P (src))
2253 /* simplify_gen_subreg can't be used here, because if simplify_subreg
2254 fails, it will happily create (subreg (symbol_ref)) or similar
2255 invalid SUBREGs. */
2256 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2257 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2258 if (ret)
2259 return ret;
2261 if (GET_MODE (src) == VOIDmode
2262 || !validate_subreg (mode, src_mode, src, byte))
2263 return NULL_RTX;
2265 src = force_reg (GET_MODE (src), src);
2266 return gen_rtx_SUBREG (mode, src, byte);
2269 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2270 return NULL_RTX;
2272 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2273 && targetm.modes_tieable_p (mode, src_mode))
2275 rtx x = gen_lowpart_common (mode, src);
2276 if (x)
2277 return x;
2280 if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
2281 || !int_mode_for_mode (mode).exists (&int_mode))
2282 return NULL_RTX;
2284 if (!targetm.modes_tieable_p (src_int_mode, src_mode))
2285 return NULL_RTX;
2286 if (!targetm.modes_tieable_p (int_mode, mode))
2287 return NULL_RTX;
2289 src = gen_lowpart (src_int_mode, src);
2290 src = convert_modes (int_mode, src_int_mode, src, true);
2291 src = gen_lowpart (mode, src);
2292 return src;
2295 /* Add INC into TARGET. */
2297 void
2298 expand_inc (rtx target, rtx inc)
2300 rtx value = expand_binop (GET_MODE (target), add_optab,
2301 target, inc,
2302 target, 0, OPTAB_LIB_WIDEN);
2303 if (value != target)
2304 emit_move_insn (target, value);
2307 /* Subtract DEC from TARGET. */
2309 void
2310 expand_dec (rtx target, rtx dec)
2312 rtx value = expand_binop (GET_MODE (target), sub_optab,
2313 target, dec,
2314 target, 0, OPTAB_LIB_WIDEN);
2315 if (value != target)
2316 emit_move_insn (target, value);
2319 /* Output a shift instruction for expression code CODE,
2320 with SHIFTED being the rtx for the value to shift,
2321 and AMOUNT the rtx for the amount to shift by.
2322 Store the result in the rtx TARGET, if that is convenient.
2323 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2324 Return the rtx for where the value is.
2325 If that cannot be done, abort the compilation unless MAY_FAIL is true,
2326 in which case 0 is returned. */
2328 static rtx
2329 expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
2330 rtx amount, rtx target, int unsignedp, bool may_fail = false)
2332 rtx op1, temp = 0;
2333 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2334 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2335 optab lshift_optab = ashl_optab;
2336 optab rshift_arith_optab = ashr_optab;
2337 optab rshift_uns_optab = lshr_optab;
2338 optab lrotate_optab = rotl_optab;
2339 optab rrotate_optab = rotr_optab;
2340 machine_mode op1_mode;
2341 scalar_mode scalar_mode = GET_MODE_INNER (mode);
2342 int attempt;
2343 bool speed = optimize_insn_for_speed_p ();
2345 op1 = amount;
2346 op1_mode = GET_MODE (op1);
2348 /* Determine whether the shift/rotate amount is a vector or a scalar. If the
2349 shift amount is a vector, use the vector/vector shift patterns. */
2350 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2352 lshift_optab = vashl_optab;
2353 rshift_arith_optab = vashr_optab;
2354 rshift_uns_optab = vlshr_optab;
2355 lrotate_optab = vrotl_optab;
2356 rrotate_optab = vrotr_optab;
2359 /* We previously detected shift counts computed by NEGATE_EXPR
2360 and shifted in the other direction, but that does not work
2361 on all machines. */
2363 if (SHIFT_COUNT_TRUNCATED)
2365 if (CONST_INT_P (op1)
2366 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2367 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2368 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2369 % GET_MODE_BITSIZE (scalar_mode));
2370 else if (GET_CODE (op1) == SUBREG
2371 && subreg_lowpart_p (op1)
2372 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2373 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2374 op1 = SUBREG_REG (op1);
2377 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
2378 prefer left rotation; if op1 is from bitsize / 2 + 1 to
2379 bitsize - 1, use the other direction of rotate with an amount of
2380 1 .. bitsize / 2 - 1 instead. */
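/* For example, in SImode a rotate left by 24 becomes a rotate right
   by 8, using the identity rotl32 (x, 24) == rotr32 (x, 8).  */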
2381 if (rotate
2382 && CONST_INT_P (op1)
2383 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2384 GET_MODE_BITSIZE (scalar_mode) - 1))
2386 op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2387 left = !left;
2388 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2391 /* Rotation of 16-bit values by 8 bits is effectively equivalent to a bswaphi.
2392 Note that this is not the case for bigger values. For instance a rotation
2393 of 0x01020304 by 16 bits gives 0x03040102 which is different from
2394 0x04030201 (bswapsi). */
2395 if (rotate
2396 && CONST_INT_P (op1)
2397 && INTVAL (op1) == BITS_PER_UNIT
2398 && GET_MODE_SIZE (scalar_mode) == 2
2399 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing)
2400 return expand_unop (HImode, bswap_optab, shifted, NULL_RTX,
2401 unsignedp);
2403 if (op1 == const0_rtx)
2404 return shifted;
2406 /* Check whether it's cheaper to implement a left shift by a constant
2407 bit count by a sequence of additions. */
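/* For example, with INTVAL (op1) == 2 the loop below emits the
   equivalent of this minimal sketch (not part of GCC):

     uint32_t shl2 (uint32_t x) { x += x; x += x; return x; }

   which pays off when two additions are cheaper than one shift.  */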
2408 if (code == LSHIFT_EXPR
2409 && CONST_INT_P (op1)
2410 && INTVAL (op1) > 0
2411 && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
2412 && INTVAL (op1) < MAX_BITS_PER_WORD
2413 && (shift_cost (speed, mode, INTVAL (op1))
2414 > INTVAL (op1) * add_cost (speed, mode))
2415 && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2417 int i;
2418 for (i = 0; i < INTVAL (op1); i++)
2420 temp = force_reg (mode, shifted);
2421 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2422 unsignedp, OPTAB_LIB_WIDEN);
2424 return shifted;
2427 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2429 enum optab_methods methods;
2431 if (attempt == 0)
2432 methods = OPTAB_DIRECT;
2433 else if (attempt == 1)
2434 methods = OPTAB_WIDEN;
2435 else
2436 methods = OPTAB_LIB_WIDEN;
2438 if (rotate)
2440 /* Widening does not work for rotation. */
2441 if (methods == OPTAB_WIDEN)
2442 continue;
2443 else if (methods == OPTAB_LIB_WIDEN)
2445 /* If we have been unable to open-code this by a rotation,
2446 do it as the IOR of two shifts. I.e., to rotate A
2447 by N bits, compute
2448 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2449 where C is the bitsize of A.
2451 It is theoretically possible that the target machine might
2452 not be able to perform either shift and hence we would
2453 be making two libcalls rather than just the one for the
2454 shift (similarly if IOR could not be done). We will allow
2455 this extremely unlikely lossage to avoid complicating the
2456 code below. */
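/* For instance, with C == 32 and a constant N == 8 this fallback is
   equivalent to the minimal sketch (not part of GCC)

     uint32_t rotl8 (uint32_t a)
     { return (a << 8) | (a >> ((-8) & 31)); }

   where (-8) & 31 == 24; for a variable N the AND also keeps the
   second shift amount within [0, C - 1].  */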
2458 rtx subtarget = target == shifted ? 0 : target;
2459 rtx new_amount, other_amount;
2460 rtx temp1;
2462 new_amount = op1;
2463 if (op1 == const0_rtx)
2464 return shifted;
2465 else if (CONST_INT_P (op1))
2466 other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode)
2467 - INTVAL (op1));
2468 else
2470 other_amount
2471 = simplify_gen_unary (NEG, GET_MODE (op1),
2472 op1, GET_MODE (op1));
2473 HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
2474 other_amount
2475 = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2476 gen_int_mode (mask, GET_MODE (op1)));
2479 shifted = force_reg (mode, shifted);
2481 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2482 mode, shifted, new_amount, 0, 1);
2483 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2484 mode, shifted, other_amount,
2485 subtarget, 1);
2486 return expand_binop (mode, ior_optab, temp, temp1, target,
2487 unsignedp, methods);
2490 temp = expand_binop (mode,
2491 left ? lrotate_optab : rrotate_optab,
2492 shifted, op1, target, unsignedp, methods);
2494 else if (unsignedp)
2495 temp = expand_binop (mode,
2496 left ? lshift_optab : rshift_uns_optab,
2497 shifted, op1, target, unsignedp, methods);
2499 /* Do arithmetic shifts.
2500 Also, if we are going to widen the operand, we can just as well
2501 use an arithmetic right-shift instead of a logical one. */
2502 if (temp == 0 && ! rotate
2503 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2505 enum optab_methods methods1 = methods;
2507 /* If trying to widen a log shift to an arithmetic shift,
2508 don't accept an arithmetic shift of the same size. */
2509 if (unsignedp)
2510 methods1 = OPTAB_MUST_WIDEN;
2512 /* Arithmetic shift */
2514 temp = expand_binop (mode,
2515 left ? lshift_optab : rshift_arith_optab,
2516 shifted, op1, target, unsignedp, methods1);
2519 /* We used to try extzv here for logical right shifts, but that was
2520 only useful for one machine, the VAX, and caused poor code
2521 generation there for lshrdi3, so the code was deleted and a
2522 define_expand for lshrsi3 was added to vax.md. */
2525 gcc_assert (temp != NULL_RTX || may_fail);
2526 return temp;
2529 /* Output a shift instruction for expression code CODE,
2530 with SHIFTED being the rtx for the value to shift,
2531 and AMOUNT the amount to shift by.
2532 Store the result in the rtx TARGET, if that is convenient.
2533 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2534 Return the rtx for where the value is. */
2537 expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2538 int amount, rtx target, int unsignedp)
2540 return expand_shift_1 (code, mode,
2541 shifted, GEN_INT (amount), target, unsignedp);
2544 /* Likewise, but return 0 if that cannot be done. */
2546 static rtx
2547 maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2548 int amount, rtx target, int unsignedp)
2550 return expand_shift_1 (code, mode,
2551 shifted, GEN_INT (amount), target, unsignedp, true);
2554 /* Output a shift instruction for expression code CODE,
2555 with SHIFTED being the rtx for the value to shift,
2556 and AMOUNT the tree for the amount to shift by.
2557 Store the result in the rtx TARGET, if that is convenient.
2558 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2559 Return the rtx for where the value is. */
2562 expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2563 tree amount, rtx target, int unsignedp)
2565 return expand_shift_1 (code, mode,
2566 shifted, expand_normal (amount), target, unsignedp);
2570 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2571 const struct mult_cost *, machine_mode mode);
2572 static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2573 const struct algorithm *, enum mult_variant);
2574 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2575 static rtx extract_high_half (scalar_int_mode, rtx);
2576 static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int);
2577 static rtx expmed_mult_highpart_optab (scalar_int_mode, rtx, rtx, rtx,
2578 int, int);
2579 /* Compute and return the best algorithm for multiplying by T.
2580 The algorithm must cost less than COST_LIMIT.
2581 If retval.cost >= COST_LIMIT, no algorithm was found and all
2582 other fields of the returned struct are undefined.
2583 MODE is the machine mode of the multiplication. */
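/* For example, T == 10 is synthesized as ((x << 2) + x) << 1: the
   trailing zero bit yields an alg_shift step down to T == 5, and
   5 == (1 << 2) + 1 yields an alg_add_t2_m step.  A minimal sketch
   (not part of GCC) of the resulting sequence:

     uint32_t mul10 (uint32_t x) { return ((x << 2) + x) << 1; }  */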
2585 static void
2586 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2587 const struct mult_cost *cost_limit, machine_mode mode)
2589 int m;
2590 struct algorithm *alg_in, *best_alg;
2591 struct mult_cost best_cost;
2592 struct mult_cost new_limit;
2593 int op_cost, op_latency;
2594 unsigned HOST_WIDE_INT orig_t = t;
2595 unsigned HOST_WIDE_INT q;
2596 int maxm, hash_index;
2597 bool cache_hit = false;
2598 enum alg_code cache_alg = alg_zero;
2599 bool speed = optimize_insn_for_speed_p ();
2600 scalar_int_mode imode;
2601 struct alg_hash_entry *entry_ptr;
2603 /* Indicate that no algorithm is yet found. If no algorithm
2604 is found, this value will be returned and indicate failure. */
2605 alg_out->cost.cost = cost_limit->cost + 1;
2606 alg_out->cost.latency = cost_limit->latency + 1;
2608 if (cost_limit->cost < 0
2609 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2610 return;
2612 /* Be prepared for vector modes. */
2613 imode = as_a <scalar_int_mode> (GET_MODE_INNER (mode));
2615 maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2617 /* Restrict the bits of "t" to the multiplication's mode. */
2618 t &= GET_MODE_MASK (imode);
2620 /* t == 1 can be done in zero cost. */
2621 if (t == 1)
2623 alg_out->ops = 1;
2624 alg_out->cost.cost = 0;
2625 alg_out->cost.latency = 0;
2626 alg_out->op[0] = alg_m;
2627 return;
2630 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2631 fail now. */
2632 if (t == 0)
2634 if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2635 return;
2636 else
2638 alg_out->ops = 1;
2639 alg_out->cost.cost = zero_cost (speed);
2640 alg_out->cost.latency = zero_cost (speed);
2641 alg_out->op[0] = alg_zero;
2642 return;
2646 /* We'll be needing a couple of extra algorithm structures now. */
2648 alg_in = XALLOCA (struct algorithm);
2649 best_alg = XALLOCA (struct algorithm);
2650 best_cost = *cost_limit;
2652 /* Compute the hash index. */
2653 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2655 /* See if we already know what to do for T. */
2656 entry_ptr = alg_hash_entry_ptr (hash_index);
2657 if (entry_ptr->t == t
2658 && entry_ptr->mode == mode
2659 && entry_ptr->speed == speed
2660 && entry_ptr->alg != alg_unknown)
2662 cache_alg = entry_ptr->alg;
2664 if (cache_alg == alg_impossible)
2666 /* The cache tells us that it's impossible to synthesize
2667 multiplication by T within entry_ptr->cost. */
2668 if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2669 /* COST_LIMIT is at least as restrictive as the one
2670 recorded in the hash table, in which case we have no
2671 hope of synthesizing a multiplication. Just
2672 return. */
2673 return;
2675 /* If we get here, COST_LIMIT is less restrictive than the
2676 one recorded in the hash table, so we may be able to
2677 synthesize a multiplication. Proceed as if we didn't
2678 have the cache entry. */
2680 else
2682 if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2683 /* The cached algorithm shows that this multiplication
2684 costs more than COST_LIMIT. Just return. This
2685 way, we don't clobber this cache entry with
2686 alg_impossible but retain useful information. */
2687 return;
2689 cache_hit = true;
2691 switch (cache_alg)
2693 case alg_shift:
2694 goto do_alg_shift;
2696 case alg_add_t_m2:
2697 case alg_sub_t_m2:
2698 goto do_alg_addsub_t_m2;
2700 case alg_add_factor:
2701 case alg_sub_factor:
2702 goto do_alg_addsub_factor;
2704 case alg_add_t2_m:
2705 goto do_alg_add_t2_m;
2707 case alg_sub_t2_m:
2708 goto do_alg_sub_t2_m;
2710 default:
2711 gcc_unreachable ();
2716 /* If we have a group of zero bits at the low-order part of T, try
2717 multiplying by the remaining bits and then doing a shift. */
2719 if ((t & 1) == 0)
2721 do_alg_shift:
2722 m = ctz_or_zero (t); /* m = number of low zero bits */
2723 if (m < maxm)
2725 q = t >> m;
2726 /* The function expand_shift will choose between a shift and
2727 a sequence of additions, so the observed cost is given as
2728 MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)). */
2729 op_cost = m * add_cost (speed, mode);
2730 if (shift_cost (speed, mode, m) < op_cost)
2731 op_cost = shift_cost (speed, mode, m);
2732 new_limit.cost = best_cost.cost - op_cost;
2733 new_limit.latency = best_cost.latency - op_cost;
2734 synth_mult (alg_in, q, &new_limit, mode);
2736 alg_in->cost.cost += op_cost;
2737 alg_in->cost.latency += op_cost;
2738 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2740 best_cost = alg_in->cost;
2741 std::swap (alg_in, best_alg);
2742 best_alg->log[best_alg->ops] = m;
2743 best_alg->op[best_alg->ops] = alg_shift;
2746 /* See if treating ORIG_T as a signed number yields a better
2747 sequence. Try this sequence only for a negative ORIG_T
2748 as it would be useless for a non-negative ORIG_T. */
2749 if ((HOST_WIDE_INT) orig_t < 0)
2751 /* Shift ORIG_T as follows because a right shift of a
2752 negative-valued signed type is implementation
2753 defined. */
2754 q = ~(~orig_t >> m);
2755 /* The function expand_shift will choose between a shift
2756 and a sequence of additions, so the observed cost is
2757 given as MIN (m * add_cost(speed, mode),
2758 shift_cost(speed, mode, m)). */
2759 op_cost = m * add_cost (speed, mode);
2760 if (shift_cost (speed, mode, m) < op_cost)
2761 op_cost = shift_cost (speed, mode, m);
2762 new_limit.cost = best_cost.cost - op_cost;
2763 new_limit.latency = best_cost.latency - op_cost;
2764 synth_mult (alg_in, q, &new_limit, mode);
2766 alg_in->cost.cost += op_cost;
2767 alg_in->cost.latency += op_cost;
2768 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2770 best_cost = alg_in->cost;
2771 std::swap (alg_in, best_alg);
2772 best_alg->log[best_alg->ops] = m;
2773 best_alg->op[best_alg->ops] = alg_shift;
2777 if (cache_hit)
2778 goto done;
2781 /* If we have an odd number, add or subtract one. */
2782 if ((t & 1) != 0)
2784 unsigned HOST_WIDE_INT w;
2786 do_alg_addsub_t_m2:
2787 for (w = 1; (w & t) != 0; w <<= 1)
2789 /* If T was -1, then W will be zero after the loop. This is another
2790 case where T ends with ...111. Handling this as (T + 1) with a
2791 subtract produces slightly better code and makes algorithm
2792 selection much faster than treating it like the ...0111 case
2793 below. */
2794 if (w == 0
2795 || (w > 2
2796 /* Reject the case where t is 3.
2797 Thus we prefer addition in that case. */
2798 && t != 3))
2800 /* T ends with ...111. Multiply by (T + 1) and subtract T. */
2802 op_cost = add_cost (speed, mode);
2803 new_limit.cost = best_cost.cost - op_cost;
2804 new_limit.latency = best_cost.latency - op_cost;
2805 synth_mult (alg_in, t + 1, &new_limit, mode);
2807 alg_in->cost.cost += op_cost;
2808 alg_in->cost.latency += op_cost;
2809 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2811 best_cost = alg_in->cost;
2812 std::swap (alg_in, best_alg);
2813 best_alg->log[best_alg->ops] = 0;
2814 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2817 else
2819 /* T ends with ...01 or ...011. Multiply by (T - 1) and add T. */
2821 op_cost = add_cost (speed, mode);
2822 new_limit.cost = best_cost.cost - op_cost;
2823 new_limit.latency = best_cost.latency - op_cost;
2824 synth_mult (alg_in, t - 1, &new_limit, mode);
2826 alg_in->cost.cost += op_cost;
2827 alg_in->cost.latency += op_cost;
2828 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2830 best_cost = alg_in->cost;
2831 std::swap (alg_in, best_alg);
2832 best_alg->log[best_alg->ops] = 0;
2833 best_alg->op[best_alg->ops] = alg_add_t_m2;
2837 /* We may be able to calculate a * -7, a * -15, a * -31, etc.
2838 quickly with a - a * n for some appropriate constant n. */
2839 m = exact_log2 (-orig_t + 1);
2840 if (m >= 0 && m < maxm)
2842 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2843 /* If the target has a cheap shift-and-subtract insn use
2844 that in preference to a shift insn followed by a sub insn.
2845 Assume that the shift-and-sub is "atomic" with a latency
2846 equal to its cost; otherwise assume that on superscalar
2847 hardware the shift may be executed concurrently with the
2848 earlier steps in the algorithm. */
2849 if (shiftsub1_cost (speed, mode, m) <= op_cost)
2851 op_cost = shiftsub1_cost (speed, mode, m);
2852 op_latency = op_cost;
2854 else
2855 op_latency = add_cost (speed, mode);
2857 new_limit.cost = best_cost.cost - op_cost;
2858 new_limit.latency = best_cost.latency - op_latency;
2859 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2860 &new_limit, mode);
2862 alg_in->cost.cost += op_cost;
2863 alg_in->cost.latency += op_latency;
2864 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2866 best_cost = alg_in->cost;
2867 std::swap (alg_in, best_alg);
2868 best_alg->log[best_alg->ops] = m;
2869 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2873 if (cache_hit)
2874 goto done;
2877 /* Look for factors of t of the form
2878 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2879 If we find such a factor, we can multiply by t using an algorithm that
2880 multiplies by q, shift the result by m and add/subtract it to itself.
2882 We search for large factors first and loop down, even if large factors
2883 are less probable than small ones; if we find a large factor we will find a
2884 good sequence quickly, and therefore be able to prune (by decreasing
2885 COST_LIMIT) the search. */
2887 do_alg_addsub_factor:
2888 for (m = floor_log2 (t - 1); m >= 2; m--)
2890 unsigned HOST_WIDE_INT d;
2892 d = (HOST_WIDE_INT_1U << m) + 1;
2893 if (t % d == 0 && t > d && m < maxm
2894 && (!cache_hit || cache_alg == alg_add_factor))
2896 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2897 if (shiftadd_cost (speed, mode, m) <= op_cost)
2898 op_cost = shiftadd_cost (speed, mode, m);
2900 op_latency = op_cost;
2903 new_limit.cost = best_cost.cost - op_cost;
2904 new_limit.latency = best_cost.latency - op_latency;
2905 synth_mult (alg_in, t / d, &new_limit, mode);
2907 alg_in->cost.cost += op_cost;
2908 alg_in->cost.latency += op_latency;
2909 if (alg_in->cost.latency < op_cost)
2910 alg_in->cost.latency = op_cost;
2911 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2913 best_cost = alg_in->cost;
2914 std::swap (alg_in, best_alg);
2915 best_alg->log[best_alg->ops] = m;
2916 best_alg->op[best_alg->ops] = alg_add_factor;
2918 /* Other factors will have been taken care of in the recursion. */
2919 break;
2922 d = (HOST_WIDE_INT_1U << m) - 1;
2923 if (t % d == 0 && t > d && m < maxm
2924 && (!cache_hit || cache_alg == alg_sub_factor))
2926 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2927 if (shiftsub0_cost (speed, mode, m) <= op_cost)
2928 op_cost = shiftsub0_cost (speed, mode, m);
2930 op_latency = op_cost;
2932 new_limit.cost = best_cost.cost - op_cost;
2933 new_limit.latency = best_cost.latency - op_latency;
2934 synth_mult (alg_in, t / d, &new_limit, mode);
2936 alg_in->cost.cost += op_cost;
2937 alg_in->cost.latency += op_latency;
2938 if (alg_in->cost.latency < op_cost)
2939 alg_in->cost.latency = op_cost;
2940 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2942 best_cost = alg_in->cost;
2943 std::swap (alg_in, best_alg);
2944 best_alg->log[best_alg->ops] = m;
2945 best_alg->op[best_alg->ops] = alg_sub_factor;
2947 break;
2950 if (cache_hit)
2951 goto done;
2953 /* Try shift-and-add (load effective address) instructions,
2954 i.e. do a*3, a*5, a*9. */
2955 if ((t & 1) != 0)
2957 do_alg_add_t2_m:
2958 q = t - 1;
2959 m = ctz_hwi (q);
2960 if (q && m < maxm)
2962 op_cost = shiftadd_cost (speed, mode, m);
2963 new_limit.cost = best_cost.cost - op_cost;
2964 new_limit.latency = best_cost.latency - op_cost;
2965 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2967 alg_in->cost.cost += op_cost;
2968 alg_in->cost.latency += op_cost;
2969 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2971 best_cost = alg_in->cost;
2972 std::swap (alg_in, best_alg);
2973 best_alg->log[best_alg->ops] = m;
2974 best_alg->op[best_alg->ops] = alg_add_t2_m;
2977 if (cache_hit)
2978 goto done;
2980 do_alg_sub_t2_m:
2981 q = t + 1;
2982 m = ctz_hwi (q);
2983 if (q && m < maxm)
2985 op_cost = shiftsub0_cost (speed, mode, m);
2986 new_limit.cost = best_cost.cost - op_cost;
2987 new_limit.latency = best_cost.latency - op_cost;
2988 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2990 alg_in->cost.cost += op_cost;
2991 alg_in->cost.latency += op_cost;
2992 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2994 best_cost = alg_in->cost;
2995 std::swap (alg_in, best_alg);
2996 best_alg->log[best_alg->ops] = m;
2997 best_alg->op[best_alg->ops] = alg_sub_t2_m;
3000 if (cache_hit)
3001 goto done;
3004 done:
3005 /* If best_cost has not decreased, we have not found any algorithm. */
3006 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
3008 /* We failed to find an algorithm. Record alg_impossible for
3009 this case (that is, <T, MODE, COST_LIMIT>) so that next time
3010 we are asked to find an algorithm for T within the same or
3011 lower COST_LIMIT, we can immediately return to the
3012 caller. */
3013 entry_ptr->t = t;
3014 entry_ptr->mode = mode;
3015 entry_ptr->speed = speed;
3016 entry_ptr->alg = alg_impossible;
3017 entry_ptr->cost = *cost_limit;
3018 return;
3021 /* Cache the result. */
3022 if (!cache_hit)
3024 entry_ptr->t = t;
3025 entry_ptr->mode = mode;
3026 entry_ptr->speed = speed;
3027 entry_ptr->alg = best_alg->op[best_alg->ops];
3028 entry_ptr->cost.cost = best_cost.cost;
3029 entry_ptr->cost.latency = best_cost.latency;
3032 /* If we are getting too long a sequence for `struct algorithm'
3033 to record, make this search fail. */
3034 if (best_alg->ops == MAX_BITS_PER_WORD)
3035 return;
3037 /* Copy the algorithm from temporary space to the space at alg_out.
3038 We avoid using structure assignment because the majority of
3039 best_alg is normally undefined, and this is a critical function. */
3040 alg_out->ops = best_alg->ops + 1;
3041 alg_out->cost = best_cost;
3042 memcpy (alg_out->op, best_alg->op,
3043 alg_out->ops * sizeof *alg_out->op);
3044 memcpy (alg_out->log, best_alg->log,
3045 alg_out->ops * sizeof *alg_out->log);
3048 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
3049 Try three variations:
3051 - a shift/add sequence based on VAL itself
3052 - a shift/add sequence based on -VAL, followed by a negation
3053 - a shift/add sequence based on VAL - 1, followed by an addition.
3055 Return true if the cheapest of these costs less than MULT_COST,
3056 describing the algorithm in *ALG and final fixup in *VARIANT. */
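/* For example, for VAL == -7 the negate_variant typically wins:
   synth_mult finds 7 == 8 - 1 cheaply and the final fixup negates,
   giving, as a minimal sketch (not part of GCC),

     int32_t mul_m7 (int32_t x) { return -((x << 3) - x); }  */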
3058 bool
3059 choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
3060 struct algorithm *alg, enum mult_variant *variant,
3061 int mult_cost)
3063 struct algorithm alg2;
3064 struct mult_cost limit;
3065 int op_cost;
3066 bool speed = optimize_insn_for_speed_p ();
3068 /* Fail quickly for impossible bounds. */
3069 if (mult_cost < 0)
3070 return false;
3072 /* Ensure that mult_cost provides a reasonable upper bound.
3073 Any constant multiplication can be performed with fewer
3074 than 2 * bits additions. */
3075 op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
3076 if (mult_cost > op_cost)
3077 mult_cost = op_cost;
3079 *variant = basic_variant;
3080 limit.cost = mult_cost;
3081 limit.latency = mult_cost;
3082 synth_mult (alg, val, &limit, mode);
3084 /* This works only if the inverted value actually fits in an
3085 `unsigned int'. */
3086 if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
3088 op_cost = neg_cost (speed, mode);
3089 if (MULT_COST_LESS (&alg->cost, mult_cost))
3091 limit.cost = alg->cost.cost - op_cost;
3092 limit.latency = alg->cost.latency - op_cost;
3094 else
3096 limit.cost = mult_cost - op_cost;
3097 limit.latency = mult_cost - op_cost;
3100 synth_mult (&alg2, -val, &limit, mode);
3101 alg2.cost.cost += op_cost;
3102 alg2.cost.latency += op_cost;
3103 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3104 *alg = alg2, *variant = negate_variant;
3107 /* This proves very useful for division-by-constant. */
3108 op_cost = add_cost (speed, mode);
3109 if (MULT_COST_LESS (&alg->cost, mult_cost))
3111 limit.cost = alg->cost.cost - op_cost;
3112 limit.latency = alg->cost.latency - op_cost;
3114 else
3116 limit.cost = mult_cost - op_cost;
3117 limit.latency = mult_cost - op_cost;
3120 synth_mult (&alg2, val - 1, &limit, mode);
3121 alg2.cost.cost += op_cost;
3122 alg2.cost.latency += op_cost;
3123 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
3124 *alg = alg2, *variant = add_variant;
3126 return MULT_COST_LESS (&alg->cost, mult_cost);
3129 /* A subroutine of expand_mult, used for constant multiplications.
3130 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
3131 convenient. Use the shift/add sequence described by ALG and apply
3132 the final fixup specified by VARIANT. */
3134 static rtx
3135 expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
3136 rtx target, const struct algorithm *alg,
3137 enum mult_variant variant)
3139 unsigned HOST_WIDE_INT val_so_far;
3140 rtx_insn *insn;
3141 rtx accum, tem;
3142 int opno;
3143 machine_mode nmode;
3145 /* Avoid referencing memory over and over and invalid sharing
3146 on SUBREGs. */
3147 op0 = force_reg (mode, op0);
3149 /* ACCUM starts out either as OP0 or as a zero, depending on
3150 the first operation. */
3152 if (alg->op[0] == alg_zero)
3154 accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
3155 val_so_far = 0;
3157 else if (alg->op[0] == alg_m)
3159 accum = copy_to_mode_reg (mode, op0);
3160 val_so_far = 1;
3162 else
3163 gcc_unreachable ();
3165 for (opno = 1; opno < alg->ops; opno++)
3167 int log = alg->log[opno];
3168 rtx shift_subtarget = optimize ? 0 : accum;
3169 rtx add_target
3170 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
3171 && !optimize)
3172 ? target : 0;
3173 rtx accum_target = optimize ? 0 : accum;
3174 rtx accum_inner;
3176 switch (alg->op[opno])
3178 case alg_shift:
3179 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3180 /* REG_EQUAL note will be attached to the following insn. */
3181 emit_move_insn (accum, tem);
3182 val_so_far <<= log;
3183 break;
3185 case alg_add_t_m2:
3186 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3187 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3188 add_target ? add_target : accum_target);
3189 val_so_far += HOST_WIDE_INT_1U << log;
3190 break;
3192 case alg_sub_t_m2:
3193 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3194 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3195 add_target ? add_target : accum_target);
3196 val_so_far -= HOST_WIDE_INT_1U << log;
3197 break;
3199 case alg_add_t2_m:
3200 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3201 log, shift_subtarget, 0);
3202 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3203 add_target ? add_target : accum_target);
3204 val_so_far = (val_so_far << log) + 1;
3205 break;
3207 case alg_sub_t2_m:
3208 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3209 log, shift_subtarget, 0);
3210 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3211 add_target ? add_target : accum_target);
3212 val_so_far = (val_so_far << log) - 1;
3213 break;
3215 case alg_add_factor:
3216 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3217 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3218 add_target ? add_target : accum_target);
3219 val_so_far += val_so_far << log;
3220 break;
3222 case alg_sub_factor:
3223 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3224 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3225 (add_target
3226 ? add_target : (optimize ? 0 : tem)));
3227 val_so_far = (val_so_far << log) - val_so_far;
3228 break;
3230 default:
3231 gcc_unreachable ();
3234 if (SCALAR_INT_MODE_P (mode))
3236 /* Write a REG_EQUAL note on the last insn so that we can cse
3237 multiplication sequences. Note that if ACCUM is a SUBREG,
3238 we've set the inner register and must properly indicate that. */
3239 tem = op0, nmode = mode;
3240 accum_inner = accum;
3241 if (GET_CODE (accum) == SUBREG)
3243 accum_inner = SUBREG_REG (accum);
3244 nmode = GET_MODE (accum_inner);
3245 tem = gen_lowpart (nmode, op0);
3248 insn = get_last_insn ();
3249 set_dst_reg_note (insn, REG_EQUAL,
3250 gen_rtx_MULT (nmode, tem,
3251 gen_int_mode (val_so_far, nmode)),
3252 accum_inner);
3256 if (variant == negate_variant)
3258 val_so_far = -val_so_far;
3259 accum = expand_unop (mode, neg_optab, accum, target, 0);
3261 else if (variant == add_variant)
3263 val_so_far = val_so_far + 1;
3264 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3267 /* Compare only the bits of val and val_so_far that are significant
3268 in the result mode, to avoid sign-/zero-extension confusion. */
3269 nmode = GET_MODE_INNER (mode);
3270 val &= GET_MODE_MASK (nmode);
3271 val_so_far &= GET_MODE_MASK (nmode);
3272 gcc_assert (val == (HOST_WIDE_INT) val_so_far);
3274 return accum;
3277 /* Perform a multiplication and return an rtx for the result.
3278 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3279 TARGET is a suggestion for where to store the result (an rtx).
3281 We check specially for a constant integer as OP1.
3282 If you want this check for OP0 as well, then before calling
3283 you should swap the two operands if OP0 would be constant. */
3286 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3287 int unsignedp)
3289 enum mult_variant variant;
3290 struct algorithm algorithm;
3291 rtx scalar_op1;
3292 int max_cost;
3293 bool speed = optimize_insn_for_speed_p ();
3294 bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3296 if (CONSTANT_P (op0))
3297 std::swap (op0, op1);
3299 /* For vectors, there are several simplifications that can be made if
3300 all elements of the vector constant are identical. */
3301 scalar_op1 = unwrap_const_vec_duplicate (op1);
3303 if (INTEGRAL_MODE_P (mode))
3305 rtx fake_reg;
3306 HOST_WIDE_INT coeff;
3307 bool is_neg;
3308 int mode_bitsize;
3310 if (op1 == CONST0_RTX (mode))
3311 return op1;
3312 if (op1 == CONST1_RTX (mode))
3313 return op0;
3314 if (op1 == CONSTM1_RTX (mode))
3315 return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3316 op0, target, 0);
3318 if (do_trapv)
3319 goto skip_synth;
3321 /* If MODE is an integer vector mode, check whether the backend supports
3322 vector lshift (by scalar or vector) at all. If not, we can't use
3323 a synthesized multiply. */
3324 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
3325 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
3326 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
3327 goto skip_synth;
3329 /* These are the operations that are potentially turned into
3330 a sequence of shifts and additions. */
3331 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3333 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3334 less than or equal in size to `unsigned int' this doesn't matter.
3335 If the mode is larger than `unsigned int', then synth_mult works
3336 only if the constant value exactly fits in an `unsigned int' without
3337 any truncation. This means that multiplying by negative values does
3338 not work; results are off by 2^32 on a 32-bit machine. */
3339 if (CONST_INT_P (scalar_op1))
3341 coeff = INTVAL (scalar_op1);
3342 is_neg = coeff < 0;
3344 #if TARGET_SUPPORTS_WIDE_INT
3345 else if (CONST_WIDE_INT_P (scalar_op1))
3346 #else
3347 else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3348 #endif
3350 int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
3351 /* Perfect power of 2 (other than 1, which is handled above). */
3352 if (shift > 0)
3353 return expand_shift (LSHIFT_EXPR, mode, op0,
3354 shift, target, unsignedp);
3355 else
3356 goto skip_synth;
3358 else
3359 goto skip_synth;
3361 /* We used to test optimize here, on the grounds that it's better to
3362 produce a smaller program when -O is not used. But this causes
3363 such a terrible slowdown sometimes that it seems better to always
3364 use synth_mult. */
3366 /* Special case powers of two. */
3367 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3368 && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3369 return expand_shift (LSHIFT_EXPR, mode, op0,
3370 floor_log2 (coeff), target, unsignedp);
3372 fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3374 /* Attempt to handle multiplication of DImode values by negative
3375 coefficients, by performing the multiplication by a positive
3376 multiplier and then negating the result. */
3377 if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3379 /* It's safe to use -coeff even for INT_MIN, as the
3380 result is interpreted as an unsigned coefficient.
3381 Exclude cost of op0 from max_cost to match the cost
3382 calculation of the synth_mult. */
3383 coeff = -(unsigned HOST_WIDE_INT) coeff;
3384 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
3385 mode, speed)
3386 - neg_cost (speed, mode));
3387 if (max_cost <= 0)
3388 goto skip_synth;
3390 /* Special case powers of two. */
3391 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3393 rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3394 floor_log2 (coeff), target, unsignedp);
3395 return expand_unop (mode, neg_optab, temp, target, 0);
3398 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3399 max_cost))
3401 rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3402 &algorithm, variant);
3403 return expand_unop (mode, neg_optab, temp, target, 0);
3405 goto skip_synth;
3408 /* Exclude cost of op0 from max_cost to match the cost
3409 calculation of the synth_mult. */
3410 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
3411 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3412 return expand_mult_const (mode, op0, coeff, target,
3413 &algorithm, variant);
3415 skip_synth:
3417 /* Expand x*2.0 as x+x. */
3418 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
3419 && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
3421 op0 = force_reg (GET_MODE (op0), op0);
3422 return expand_binop (mode, add_optab, op0, op0,
3423 target, unsignedp, OPTAB_LIB_WIDEN);
3426 /* This used to use umul_optab if unsigned, but for non-widening multiply
3427 there is no difference between signed and unsigned. */
3428 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3429 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3430 gcc_assert (op0);
3431 return op0;
3434 /* Return a cost estimate for multiplying a register by the given
3435 COEFFicient in the given MODE and SPEED. */
3438 mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3440 int max_cost;
3441 struct algorithm algorithm;
3442 enum mult_variant variant;
3444 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3445 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
3446 mode, speed);
3447 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3448 return algorithm.cost.cost;
3449 else
3450 return max_cost;
3453 /* Perform a widening multiplication and return an rtx for the result.
3454 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3455 TARGET is a suggestion for where to store the result (an rtx).
3456 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3457 or smul_widen_optab.
3459 We check specially for a constant integer as OP1, comparing the
3460 cost of a widening multiply against the cost of a sequence of shifts
3461 and adds. */
3464 expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3465 int unsignedp, optab this_optab)
3467 bool speed = optimize_insn_for_speed_p ();
3468 rtx cop1;
3470 if (CONST_INT_P (op1)
3471 && GET_MODE (op0) != VOIDmode
3472 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3473 this_optab == umul_widen_optab))
3474 && CONST_INT_P (cop1)
3475 && (INTVAL (cop1) >= 0
3476 || HWI_COMPUTABLE_MODE_P (mode)))
3478 HOST_WIDE_INT coeff = INTVAL (cop1);
3479 int max_cost;
3480 enum mult_variant variant;
3481 struct algorithm algorithm;
3483 if (coeff == 0)
3484 return CONST0_RTX (mode);
3486 /* Special case powers of two. */
3487 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3489 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3490 return expand_shift (LSHIFT_EXPR, mode, op0,
3491 floor_log2 (coeff), target, unsignedp);
3494 /* Exclude cost of op0 from max_cost to match the cost
3495 calculation of synth_mult. */
3496 max_cost = mul_widen_cost (speed, mode);
3497 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3498 max_cost))
3500 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3501 return expand_mult_const (mode, op0, coeff, target,
3502 &algorithm, variant);
3505 return expand_binop (mode, this_optab, op0, op1, target,
3506 unsignedp, OPTAB_LIB_WIDEN);
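/* Illustrative sketch, not part of expmed.c: the kind of shift-add
   sequence choose_mult_variant can prefer to an actual widening
   multiply insn.  Multiplying a zero-extended 32-bit value by 10
   becomes two shifts and an add in the wider mode.  */

#include <stdint.h>

static uint64_t
widening_mul10 (uint32_t x)
{
  uint64_t w = x;		/* Zero-extend first, as convert_to_mode
				   does above for the unsigned optab.  */
  return (w << 3) + (w << 1);	/* 8*x + 2*x == 10*x, exact in 64 bits.  */
}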
3509 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3510 replace division by D, and put the least significant N bits of the result
3511 in *MULTIPLIER_PTR and return the most significant bit.
3513 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
3514 needed precision is in PRECISION (should be <= N).
3516 PRECISION should be as small as possible so this function can choose the
3517 multiplier more freely.
3519 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
3520 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3522 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3523 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3525 unsigned HOST_WIDE_INT
3526 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3527 unsigned HOST_WIDE_INT *multiplier_ptr,
3528 int *post_shift_ptr, int *lgup_ptr)
3530 int lgup, post_shift;
3531 int pow, pow2;
3533 /* lgup = ceil(log2(divisor)); */
3534 lgup = ceil_log2 (d);
3536 gcc_assert (lgup <= n);
3538 pow = n + lgup;
3539 pow2 = n + lgup - precision;
3541 /* mlow = 2^(N + lgup)/d */
3542 wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3543 wide_int mlow = wi::udiv_trunc (val, d);
3545 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3546 val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3547 wide_int mhigh = wi::udiv_trunc (val, d);
3549 /* If precision == N, then mlow, mhigh exceed 2^N
3550 (but they do not exceed 2^(N+1)). */
3552 /* Reduce to lowest terms. */
3553 for (post_shift = lgup; post_shift > 0; post_shift--)
3555 unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3556 HOST_BITS_PER_WIDE_INT);
3557 unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3558 HOST_BITS_PER_WIDE_INT);
3559 if (ml_lo >= mh_lo)
3560 break;
3562 mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3563 mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3566 *post_shift_ptr = post_shift;
3567 *lgup_ptr = lgup;
3568 if (n < HOST_BITS_PER_WIDE_INT)
3570 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
3571 *multiplier_ptr = mhigh.to_uhwi () & mask;
3572 return mhigh.to_uhwi () >= mask;
3574 else
3576 *multiplier_ptr = mhigh.to_uhwi ();
3577 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
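/* Illustrative sketch, not part of expmed.c: a standalone model of the
   function above for N == 32, with unsigned __int128 standing in for
   wide_int and PRECISION == N.  The helper names are hypothetical.
   For d == 5 this yields mult == 0xCCCCCCCD, shift == 2 and a clear
   "extra" bit, so x / 5 == (uint32_t) (((uint64_t) x * mult) >> (32 + 2)).
   When the extra bit is set (e.g. d == 7), the caller needs the unsigned
   add-and-shift fixup used by the TRUNC_DIV code further below:
     t = (uint32_t) (((uint64_t) x * mult) >> 32);
     q = (t + ((x - t) >> 1)) >> (shift - 1);  */

#include <stdint.h>

static int
ceil_log2_32 (uint32_t d)
{
  int l = 0;
  while (((uint64_t) 1 << l) < d)
    l++;
  return l;
}

/* Return the 33rd multiplier bit; the low 32 bits go in *MULT and the
   final right shift count in *SHIFT.  Assumes d > 1.  */

static int
choose_multiplier_32 (uint32_t d, uint32_t *mult, int *shift)
{
  int lgup = ceil_log2_32 (d);
  int post_shift = lgup;
  /* mlow = 2^(32+lgup) / d; mhigh = (2^(32+lgup) + 2^lgup) / d.  */
  unsigned __int128 mlow = ((unsigned __int128) 1 << (32 + lgup)) / d;
  unsigned __int128 mhigh = ((((unsigned __int128) 1 << (32 + lgup))
			      + ((unsigned __int128) 1 << lgup)) / d);

  /* Reduce to lowest terms, as in the loop above.  */
  for (; post_shift > 0; post_shift--)
    {
      if ((uint64_t) (mlow >> 1) >= (uint64_t) (mhigh >> 1))
	break;
      mlow >>= 1;
      mhigh >>= 1;
    }

  *mult = (uint32_t) mhigh;
  *shift = post_shift;
  return (int) (mhigh >> 32);
}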
3581 /* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
3582 congruent to 1 (mod 2**N). */
3584 static unsigned HOST_WIDE_INT
3585 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3587 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3589 /* The algorithm notes that the choice y = x satisfies
3590 x*y == 1 mod 2^3, since x is assumed odd.
3591 Each iteration doubles the number of bits of significance in y. */
3593 unsigned HOST_WIDE_INT mask;
3594 unsigned HOST_WIDE_INT y = x;
3595 int nbit = 3;
3597 mask = (n == HOST_BITS_PER_WIDE_INT
3598 ? HOST_WIDE_INT_M1U
3599 : (HOST_WIDE_INT_1U << n) - 1);
3601 while (nbit < n)
3603 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3604 nbit *= 2;
3606 return y;
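/* Illustrative sketch, not part of expmed.c: the same iteration
   specialized to 64 bits with fixed-width host arithmetic.  Starting
   from 3 correct low-order bits, five steps reach 64.  */

#include <stdint.h>

static uint64_t
invert_mod2_64 (uint64_t x)		/* X must be odd.  */
{
  uint64_t y = x;			/* x*x == 1 (mod 8) for odd x.  */
  for (int bits = 3; bits < 64; bits *= 2)
    y = y * (2 - x * y);		/* Correct mod 2^(2*bits) now.  */
  return y;
}

/* Usage: exact division by an odd constant becomes one multiply.  If n
   is known to be a multiple of 9, then n / 9 == n * invert_mod2_64 (9)
   (mod 2^64), which is how the EXACT_DIV_EXPR case below avoids a
   division.  */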
3609 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
3610 in the wrong signedness flavor. ADJ_OPERAND is already the high half of
3611 the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3612 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
3613 become signed.
3615 The result is put in TARGET if that is convenient.
3617 MODE is the mode of operation. */
3620 expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0,
3621 rtx op1, rtx target, int unsignedp)
3623 rtx tem;
3624 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3626 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3627 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3628 tem = expand_and (mode, tem, op1, NULL_RTX);
3629 adj_operand
3630 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3631 adj_operand);
3633 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3634 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3635 tem = expand_and (mode, tem, op0, NULL_RTX);
3636 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3637 target);
3639 return target;
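/* Illustrative identity check, not part of expmed.c: the adjustment
   above relies on (x >> (bits-1)) & y == (x < 0 ? y : 0), giving
     uhigh (x, y) == shigh (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)
   modulo 2^bits.  The sketch assumes arithmetic right shift of
   negative values, which the expanded RTL can rely on here.  */

#include <stdint.h>
#include <assert.h>

static void
check_highpart_adjust (int32_t x, int32_t y)
{
  uint32_t uh = (uint32_t) (((uint64_t) (uint32_t) x
			     * (uint32_t) y) >> 32);
  uint32_t sh = (uint32_t) (((int64_t) x * y) >> 32);
  uint32_t adj = sh + ((uint32_t) (x >> 31) & (uint32_t) y)
		    + ((uint32_t) (y >> 31) & (uint32_t) x);
  assert (uh == adj);		/* Holds for all 32-bit x, y.  */
}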
3642 /* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3644 static rtx
3645 extract_high_half (scalar_int_mode mode, rtx op)
3647 if (mode == word_mode)
3648 return gen_highpart (mode, op);
3650 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3652 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3653 GET_MODE_BITSIZE (mode), 0, 1);
3654 return convert_modes (mode, wider_mode, op, 0);
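/* Illustrative one-liner, not part of expmed.c: in a twice-as-wide
   mode the high half is just a logical shift plus truncation, e.g.
   extracting the SImode high part of a DImode value.  */

#include <stdint.h>

static uint32_t
high_half_32 (uint64_t x)
{
  return (uint32_t) (x >> 32);	/* Mirrors the shift-then-convert above.  */
}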
3657 /* Like expmed_mult_highpart, but only consider using a multiplication
3658 optab. OP1 is an rtx for the constant operand. */
3660 static rtx
3661 expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1,
3662 rtx target, int unsignedp, int max_cost)
3664 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3665 optab moptab;
3666 rtx tem;
3667 int size;
3668 bool speed = optimize_insn_for_speed_p ();
3670 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3672 size = GET_MODE_BITSIZE (mode);
3674 /* Firstly, try using a multiplication insn that only generates the needed
3675 high part of the product, with the signedness given by UNSIGNEDP. */
3676 if (mul_highpart_cost (speed, mode) < max_cost)
3678 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3679 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3680 unsignedp, OPTAB_DIRECT);
3681 if (tem)
3682 return tem;
3685 /* Secondly, same as above, but use the signedness opposite of UNSIGNEDP.
3686 Need to adjust the result after the multiplication. */
3687 if (size - 1 < BITS_PER_WORD
3688 && (mul_highpart_cost (speed, mode)
3689 + 2 * shift_cost (speed, mode, size-1)
3690 + 4 * add_cost (speed, mode) < max_cost))
3692 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3693 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3694 unsignedp, OPTAB_DIRECT);
3695 if (tem)
3696 /* We used the wrong signedness. Adjust the result. */
3697 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3698 tem, unsignedp);
3701 /* Try widening multiplication. */
3702 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3703 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3704 && mul_widen_cost (speed, wider_mode) < max_cost)
3706 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3707 unsignedp, OPTAB_WIDEN);
3708 if (tem)
3709 return extract_high_half (mode, tem);
3712 /* Try widening the mode and performing a non-widening multiplication. */
3713 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3714 && size - 1 < BITS_PER_WORD
3715 && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3716 < max_cost))
3718 rtx_insn *insns;
3719 rtx wop0, wop1;
3721 /* We need to widen the operands, for example to ensure the
3722 constant multiplier is correctly sign or zero extended.
3723 Use a sequence to clean up any instructions emitted by
3724 the conversions if things don't work out. */
3725 start_sequence ();
3726 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3727 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3728 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3729 unsignedp, OPTAB_WIDEN);
3730 insns = get_insns ();
3731 end_sequence ();
3733 if (tem)
3735 emit_insn (insns);
3736 return extract_high_half (mode, tem);
3740 /* Try widening multiplication of opposite signedness, and adjust. */
3741 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3742 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3743 && size - 1 < BITS_PER_WORD
3744 && (mul_widen_cost (speed, wider_mode)
3745 + 2 * shift_cost (speed, mode, size-1)
3746 + 4 * add_cost (speed, mode) < max_cost))
3748 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3749 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3750 if (tem != 0)
3752 tem = extract_high_half (mode, tem);
3753 /* We used the wrong signedness. Adjust the result. */
3754 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3755 target, unsignedp);
3759 return 0;
3762 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3763 putting the high half of the result in TARGET if that is convenient,
3764 and return where the result is. If the operation cannot be performed,
3765 0 is returned.
3767 MODE is the mode of operation and result.
3769 UNSIGNEDP nonzero means unsigned multiply.
3771 MAX_COST is the total allowed cost for the expanded RTL. */
3773 static rtx
3774 expmed_mult_highpart (scalar_int_mode mode, rtx op0, rtx op1,
3775 rtx target, int unsignedp, int max_cost)
3777 unsigned HOST_WIDE_INT cnst1;
3778 int extra_cost;
3779 bool sign_adjust = false;
3780 enum mult_variant variant;
3781 struct algorithm alg;
3782 rtx tem;
3783 bool speed = optimize_insn_for_speed_p ();
3785 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3786 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3788 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3790 /* We can't optimize modes wider than BITS_PER_WORD.
3791 ??? We might be able to perform double-word arithmetic if
3792 mode == word_mode, however all the cost calculations in
3793 synth_mult etc. assume single-word operations. */
3794 scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3795 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3796 return expmed_mult_highpart_optab (mode, op0, op1, target,
3797 unsignedp, max_cost);
3799 extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3801 /* Check whether we try to multiply by a negative constant. */
3802 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3804 sign_adjust = true;
3805 extra_cost += add_cost (speed, mode);
3808 /* See whether shift/add multiplication is cheap enough. */
3809 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3810 max_cost - extra_cost))
3812 /* See whether the specialized multiplication optabs are
3813 cheaper than the shift/add version. */
3814 tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3815 alg.cost.cost + extra_cost);
3816 if (tem)
3817 return tem;
3819 tem = convert_to_mode (wider_mode, op0, unsignedp);
3820 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3821 tem = extract_high_half (mode, tem);
3823 /* Adjust result for signedness. */
3824 if (sign_adjust)
3825 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3827 return tem;
3829 return expmed_mult_highpart_optab (mode, op0, op1, target,
3830 unsignedp, max_cost);
3834 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3836 static rtx
3837 expand_smod_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
3839 rtx result, temp, shift;
3840 rtx_code_label *label;
3841 int logd;
3842 int prec = GET_MODE_PRECISION (mode);
3844 logd = floor_log2 (d);
3845 result = gen_reg_rtx (mode);
3847 /* Avoid conditional branches when they're expensive. */
3848 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3849 && optimize_insn_for_speed_p ())
3851 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3852 mode, 0, -1);
3853 if (signmask)
3855 HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
3856 signmask = force_reg (mode, signmask);
3857 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3859 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3860 which instruction sequence to use. If logical right shifts
3861 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3862 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3864 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3865 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3866 || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
3867 > COSTS_N_INSNS (2)))
3869 temp = expand_binop (mode, xor_optab, op0, signmask,
3870 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3871 temp = expand_binop (mode, sub_optab, temp, signmask,
3872 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3873 temp = expand_binop (mode, and_optab, temp,
3874 gen_int_mode (masklow, mode),
3875 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3876 temp = expand_binop (mode, xor_optab, temp, signmask,
3877 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3878 temp = expand_binop (mode, sub_optab, temp, signmask,
3879 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3881 else
3883 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3884 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3885 signmask = force_reg (mode, signmask);
3887 temp = expand_binop (mode, add_optab, op0, signmask,
3888 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3889 temp = expand_binop (mode, and_optab, temp,
3890 gen_int_mode (masklow, mode),
3891 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3892 temp = expand_binop (mode, sub_optab, temp, signmask,
3893 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3895 return temp;
3899 /* Mask contains the mode's signbit and the significant bits of the
3900 modulus. By including the signbit in the operation, many targets
3901 can avoid an explicit compare operation in the following comparison
3902 against zero. */
3903 wide_int mask = wi::mask (logd, false, prec);
3904 mask = wi::set_bit (mask, prec - 1);
3906 temp = expand_binop (mode, and_optab, op0,
3907 immed_wide_int_const (mask, mode),
3908 result, 1, OPTAB_LIB_WIDEN);
3909 if (temp != result)
3910 emit_move_insn (result, temp);
3912 label = gen_label_rtx ();
3913 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3915 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3916 0, OPTAB_LIB_WIDEN);
3918 mask = wi::mask (logd, true, prec);
3919 temp = expand_binop (mode, ior_optab, temp,
3920 immed_wide_int_const (mask, mode),
3921 result, 1, OPTAB_LIB_WIDEN);
3922 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3923 0, OPTAB_LIB_WIDEN);
3924 if (temp != result)
3925 emit_move_insn (result, temp);
3926 emit_label (label);
3927 return result;
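/* Illustrative sketch, not part of expmed.c: the LSHIFTRT variant of
   the branch-free sequence above, for x % 8 on a 32-bit int, assuming
   arithmetic right shift of negative values:
     signmask = x >> 31;                    -1 iff x < 0
     t = (unsigned) signmask >> (32 - 3);   d-1 == 7 iff x < 0
     r = ((x + t) & 7) - t;                 truncated x % 8
   E.g. x == -13 gives t == 7, (x + t) & 7 == 2 and r == -5.  */

#include <stdint.h>

static int32_t
smod8 (int32_t x)
{
  int32_t signmask = x >> 31;
  int32_t t = (int32_t) ((uint32_t) signmask >> (32 - 3));
  return ((x + t) & 7) - t;
}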
3930 /* Expand signed division of OP0 by a power of two D in mode MODE.
3931 This routine is only called for positive values of D. */
3933 static rtx
3934 expand_sdiv_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
3936 rtx temp;
3937 rtx_code_label *label;
3938 int logd;
3940 logd = floor_log2 (d);
3942 if (d == 2
3943 && BRANCH_COST (optimize_insn_for_speed_p (),
3944 false) >= 1)
3946 temp = gen_reg_rtx (mode);
3947 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3948 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3949 0, OPTAB_LIB_WIDEN);
3950 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3953 if (HAVE_conditional_move
3954 && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
3956 rtx temp2;
3958 start_sequence ();
3959 temp2 = copy_to_mode_reg (mode, op0);
3960 temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
3961 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3962 temp = force_reg (mode, temp);
3964 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3965 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3966 mode, temp, temp2, mode, 0);
3967 if (temp2)
3969 rtx_insn *seq = get_insns ();
3970 end_sequence ();
3971 emit_insn (seq);
3972 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3974 end_sequence ();
3977 if (BRANCH_COST (optimize_insn_for_speed_p (),
3978 false) >= 2)
3980 int ushift = GET_MODE_BITSIZE (mode) - logd;
3982 temp = gen_reg_rtx (mode);
3983 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3984 if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
3985 || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
3986 > COSTS_N_INSNS (1))
3987 temp = expand_binop (mode, and_optab, temp, gen_int_mode (d - 1, mode),
3988 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3989 else
3990 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3991 ushift, NULL_RTX, 1);
3992 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3993 0, OPTAB_LIB_WIDEN);
3994 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3997 label = gen_label_rtx ();
3998 temp = copy_to_mode_reg (mode, op0);
3999 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
4000 expand_inc (temp, gen_int_mode (d - 1, mode));
4001 emit_label (label);
4002 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
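/* Illustrative sketch, not part of expmed.c: the branch-free sequence
   built above, for x / 8 on a 32-bit int with arithmetic right shift.
   Adding d-1 to negative operands before the arithmetic shift turns
   the shift's floor rounding into C's truncation toward zero.  */

#include <stdint.h>

static int32_t
sdiv8 (int32_t x)
{
  int32_t t = (int32_t) ((uint32_t) (x >> 31) >> (32 - 3));
  return (x + t) >> 3;		/* E.g. -13/8: (-13 + 7) >> 3 == -1.  */
}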
4005 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
4006 if that is convenient, and returning where the result is.
4007 You may request either the quotient or the remainder as the result;
4008 specify REM_FLAG nonzero to get the remainder.
4010 CODE is the expression code for which kind of division this is;
4011 it controls how rounding is done. MODE is the machine mode to use.
4012 UNSIGNEDP nonzero means do unsigned division. */
4014 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
4015 and then correct it by or'ing in missing high bits
4016 if result of ANDI is nonzero.
4017 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
4018 This could optimize to a bfexts instruction.
4019 But C doesn't use these operations, so their optimizations are
4020 left for later. */
4021 /* ??? For modulo, we don't actually need the highpart of the first product,
4022 the low part will do nicely. And for small divisors, the second multiply
4023 can also be a low-part only multiply or even be completely left out.
4024 E.g. to calculate the remainder of a division by 3 with a 32 bit
4025 multiply, multiply with 0x55555556 and extract the upper two bits;
4026 the result is exact for inputs up to 0x1fffffff.
4027 The input range can be reduced by using cross-sum rules.
4028 For odd divisors >= 3, the following table gives right shift counts
4029 so that if a number is shifted by an integer multiple of the given
4030 amount, the remainder stays the same:
4031 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
4032 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
4033 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
4034 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
4035 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
4037 Cross-sum rules for even numbers can be derived by leaving as many bits
4038 to the right alone as the divisor has zeros to the right.
4039 E.g. if x is an unsigned 32 bit number:
4040 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
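/* Illustrative check, not part of expmed.c, of the remainder-by-3
   trick described just above: the top two bits of the low 32-bit half
   of x * 0x55555556 are x mod 3, exact for x up to 0x1fffffff.  */

#include <stdint.h>

static uint32_t
urem3 (uint32_t x)		/* Valid for x <= 0x1fffffff.  */
{
  /* 0x55555556 == (2^32 + 2) / 3, so the low half of the product
     carries the fractional part of x/3; its top two bits are 00, 01
     or 10 according to the remainder 0, 1 or 2.  */
  return (uint32_t) (x * UINT64_C (0x55555556)) >> 30;
}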
4044 expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
4045 rtx op0, rtx op1, rtx target, int unsignedp)
4047 machine_mode compute_mode;
4048 rtx tquotient;
4049 rtx quotient = 0, remainder = 0;
4050 rtx_insn *last;
4051 rtx_insn *insn;
4052 optab optab1, optab2;
4053 int op1_is_constant, op1_is_pow2 = 0;
4054 int max_cost, extra_cost;
4055 static HOST_WIDE_INT last_div_const = 0;
4056 bool speed = optimize_insn_for_speed_p ();
4058 op1_is_constant = CONST_INT_P (op1);
4059 if (op1_is_constant)
4061 wide_int ext_op1 = rtx_mode_t (op1, mode);
4062 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4063 || (! unsignedp
4064 && wi::popcount (wi::neg (ext_op1)) == 1));
4068 This is the structure of expand_divmod:
4070 First comes code to fix up the operands so we can perform the operations
4071 correctly and efficiently.
4073 Second comes a switch statement with code specific for each rounding mode.
4074 For some special operands this code emits all RTL for the desired
4075 operation, for other cases, it generates only a quotient and stores it in
4076 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
4077 to indicate that it has not done anything.
4079 Last comes code that finishes the operation. If QUOTIENT is set and
4080 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
4081 QUOTIENT is not set, it is computed using trunc rounding.
4083 We try to generate special code for division and remainder when OP1 is a
4084 constant. If |OP1| = 2**n we can use shifts and some other fast
4085 operations. For other values of OP1, we compute a carefully selected
4086 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
4087 by m.
4089 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
4090 half of the product. Different strategies for generating the product are
4091 implemented in expmed_mult_highpart.
4093 If what we actually want is the remainder, we generate that by another
4094 by-constant multiplication and a subtraction. */
4096 /* We shouldn't be called with OP1 == const1_rtx, but some of the
4097 code below will malfunction if we are, so check here and handle
4098 the special case if so. */
4099 if (op1 == const1_rtx)
4100 return rem_flag ? const0_rtx : op0;
4102 /* When dividing by -1, we could get an overflow.
4103 negv_optab can handle overflows. */
4104 if (! unsignedp && op1 == constm1_rtx)
4106 if (rem_flag)
4107 return const0_rtx;
4108 return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
4109 ? negv_optab : neg_optab, op0, target, 0);
4112 if (target
4113 /* Don't use the function value register as a target
4114 since we have to read it as well as write it,
4115 and function-inlining gets confused by this. */
4116 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
4117 /* Don't clobber an operand while doing a multi-step calculation. */
4118 || ((rem_flag || op1_is_constant)
4119 && (reg_mentioned_p (target, op0)
4120 || (MEM_P (op0) && MEM_P (target))))
4121 || reg_mentioned_p (target, op1)
4122 || (MEM_P (op1) && MEM_P (target))))
4123 target = 0;
4125 /* Get the mode in which to perform this computation. Normally it will
4126 be MODE, but sometimes we can't do the desired operation in MODE.
4127 If so, pick a wider mode in which we can do the operation. Convert
4128 to that mode at the start to avoid repeated conversions.
4130 First see what operations we need. These depend on the expression
4131 we are evaluating. (We assume that divxx3 insns exist under the
4132 same conditions that modxx3 insns and that these insns don't normally
4133 fail. If these assumptions are not correct, we may generate less
4134 efficient code in some cases.)
4136 Then see if we find a mode in which we can open-code that operation
4137 (either a division, modulus, or shift). Finally, check for the smallest
4138 mode for which we can do the operation with a library call. */
4140 /* We might want to refine this now that we have division-by-constant
4141 optimization. Since expmed_mult_highpart tries so many variants, it is
4142 not straightforward to generalize this. Maybe we should make an array
4143 of possible modes in init_expmed? Save this for GCC 2.7. */
4145 optab1 = (op1_is_pow2
4146 ? (unsignedp ? lshr_optab : ashr_optab)
4147 : (unsignedp ? udiv_optab : sdiv_optab));
4148 optab2 = (op1_is_pow2 ? optab1
4149 : (unsignedp ? udivmod_optab : sdivmod_optab));
4151 FOR_EACH_MODE_FROM (compute_mode, mode)
4152 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4153 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4154 break;
4156 if (compute_mode == VOIDmode)
4157 FOR_EACH_MODE_FROM (compute_mode, mode)
4158 if (optab_libfunc (optab1, compute_mode)
4159 || optab_libfunc (optab2, compute_mode))
4160 break;
4162 /* If we still couldn't find a mode, use MODE, but expand_binop will
4163 probably die. */
4164 if (compute_mode == VOIDmode)
4165 compute_mode = mode;
4167 if (target && GET_MODE (target) == compute_mode)
4168 tquotient = target;
4169 else
4170 tquotient = gen_reg_rtx (compute_mode);
4172 #if 0
4173 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4174 (mode), and thereby get better code when OP1 is a constant. Do that
4175 later. It will require going over all usages of SIZE below. */
4176 size = GET_MODE_BITSIZE (mode);
4177 #endif
4179 /* Only deduct something for a REM if the last divide done was
4180 for a different constant. Then set the constant of the last
4181 divide. */
4182 max_cost = (unsignedp
4183 ? udiv_cost (speed, compute_mode)
4184 : sdiv_cost (speed, compute_mode));
4185 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4186 && INTVAL (op1) == last_div_const))
4187 max_cost -= (mul_cost (speed, compute_mode)
4188 + add_cost (speed, compute_mode));
4190 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4192 /* Now convert to the best mode to use. */
4193 if (compute_mode != mode)
4195 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4196 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4198 /* convert_modes may have placed op1 into a register, so we
4199 must recompute the following. */
4200 op1_is_constant = CONST_INT_P (op1);
4201 if (op1_is_constant)
4203 wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
4204 op1_is_pow2 = (wi::popcount (ext_op1) == 1
4205 || (! unsignedp
4206 && wi::popcount (wi::neg (ext_op1)) == 1));
4208 else
4209 op1_is_pow2 = 0;
4212 /* If one of the operands is a volatile MEM, copy it into a register. */
4214 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4215 op0 = force_reg (compute_mode, op0);
4216 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4217 op1 = force_reg (compute_mode, op1);
4219 /* If we need the remainder or if OP1 is constant, we need to
4220 put OP0 in a register in case it has any queued subexpressions. */
4221 if (rem_flag || op1_is_constant)
4222 op0 = force_reg (compute_mode, op0);
4224 last = get_last_insn ();
4226 /* Promote floor rounding to trunc rounding for unsigned operations. */
4227 if (unsignedp)
4229 if (code == FLOOR_DIV_EXPR)
4230 code = TRUNC_DIV_EXPR;
4231 if (code == FLOOR_MOD_EXPR)
4232 code = TRUNC_MOD_EXPR;
4233 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4234 code = TRUNC_DIV_EXPR;
4237 if (op1 != const0_rtx)
4238 switch (code)
4240 case TRUNC_MOD_EXPR:
4241 case TRUNC_DIV_EXPR:
4242 if (op1_is_constant)
4244 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4245 int size = GET_MODE_BITSIZE (int_mode);
4246 if (unsignedp)
4248 unsigned HOST_WIDE_INT mh, ml;
4249 int pre_shift, post_shift;
4250 int dummy;
4251 wide_int wd = rtx_mode_t (op1, int_mode);
4252 unsigned HOST_WIDE_INT d = wd.to_uhwi ();
4254 if (wi::popcount (wd) == 1)
4256 pre_shift = floor_log2 (d);
4257 if (rem_flag)
4259 unsigned HOST_WIDE_INT mask
4260 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4261 remainder
4262 = expand_binop (int_mode, and_optab, op0,
4263 gen_int_mode (mask, int_mode),
4264 remainder, 1,
4265 OPTAB_LIB_WIDEN);
4266 if (remainder)
4267 return gen_lowpart (mode, remainder);
4269 quotient = expand_shift (RSHIFT_EXPR, int_mode, op0,
4270 pre_shift, tquotient, 1);
4272 else if (size <= HOST_BITS_PER_WIDE_INT)
4274 if (d >= (HOST_WIDE_INT_1U << (size - 1)))
4276 /* Most significant bit of divisor is set; emit an scc
4277 insn. */
4278 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4279 int_mode, 1, 1);
4281 else
4283 /* Find a suitable multiplier and right shift count
4284 instead of multiplying with D. */
4286 mh = choose_multiplier (d, size, size,
4287 &ml, &post_shift, &dummy);
4289 /* If the suggested multiplier is more than SIZE bits,
4290 we can do better for even divisors, using an
4291 initial right shift. */
4292 if (mh != 0 && (d & 1) == 0)
4294 pre_shift = ctz_or_zero (d);
4295 mh = choose_multiplier (d >> pre_shift, size,
4296 size - pre_shift,
4297 &ml, &post_shift, &dummy);
4298 gcc_assert (!mh);
4300 else
4301 pre_shift = 0;
4303 if (mh != 0)
4305 rtx t1, t2, t3, t4;
4307 if (post_shift - 1 >= BITS_PER_WORD)
4308 goto fail1;
4310 extra_cost
4311 = (shift_cost (speed, int_mode, post_shift - 1)
4312 + shift_cost (speed, int_mode, 1)
4313 + 2 * add_cost (speed, int_mode));
4314 t1 = expmed_mult_highpart
4315 (int_mode, op0, gen_int_mode (ml, int_mode),
4316 NULL_RTX, 1, max_cost - extra_cost);
4317 if (t1 == 0)
4318 goto fail1;
4319 t2 = force_operand (gen_rtx_MINUS (int_mode,
4320 op0, t1),
4321 NULL_RTX);
4322 t3 = expand_shift (RSHIFT_EXPR, int_mode,
4323 t2, 1, NULL_RTX, 1);
4324 t4 = force_operand (gen_rtx_PLUS (int_mode,
4325 t1, t3),
4326 NULL_RTX);
4327 quotient = expand_shift
4328 (RSHIFT_EXPR, int_mode, t4,
4329 post_shift - 1, tquotient, 1);
4331 else
4333 rtx t1, t2;
4335 if (pre_shift >= BITS_PER_WORD
4336 || post_shift >= BITS_PER_WORD)
4337 goto fail1;
4339 t1 = expand_shift
4340 (RSHIFT_EXPR, int_mode, op0,
4341 pre_shift, NULL_RTX, 1);
4342 extra_cost
4343 = (shift_cost (speed, int_mode, pre_shift)
4344 + shift_cost (speed, int_mode, post_shift));
4345 t2 = expmed_mult_highpart
4346 (int_mode, t1,
4347 gen_int_mode (ml, int_mode),
4348 NULL_RTX, 1, max_cost - extra_cost);
4349 if (t2 == 0)
4350 goto fail1;
4351 quotient = expand_shift
4352 (RSHIFT_EXPR, int_mode, t2,
4353 post_shift, tquotient, 1);
4357 else /* Too wide mode to use tricky code */
4358 break;
4360 insn = get_last_insn ();
4361 if (insn != last)
4362 set_dst_reg_note (insn, REG_EQUAL,
4363 gen_rtx_UDIV (int_mode, op0, op1),
4364 quotient);
4366 else /* TRUNC_DIV, signed */
4368 unsigned HOST_WIDE_INT ml;
4369 int lgup, post_shift;
4370 rtx mlr;
4371 HOST_WIDE_INT d = INTVAL (op1);
4372 unsigned HOST_WIDE_INT abs_d;
4374 /* Since d might be INT_MIN, we have to cast to
4375 unsigned HOST_WIDE_INT before negating to avoid
4376 undefined signed overflow. */
4377 abs_d = (d >= 0
4378 ? (unsigned HOST_WIDE_INT) d
4379 : - (unsigned HOST_WIDE_INT) d);
4381 /* n rem d = n rem -d */
4382 if (rem_flag && d < 0)
4384 d = abs_d;
4385 op1 = gen_int_mode (abs_d, int_mode);
4388 if (d == 1)
4389 quotient = op0;
4390 else if (d == -1)
4391 quotient = expand_unop (int_mode, neg_optab, op0,
4392 tquotient, 0);
4393 else if (size <= HOST_BITS_PER_WIDE_INT
4394 && abs_d == HOST_WIDE_INT_1U << (size - 1))
4396 /* This case is not handled correctly below. */
4397 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4398 int_mode, 1, 1);
4399 if (quotient == 0)
4400 goto fail1;
4402 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4403 && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
4404 && (rem_flag
4405 ? smod_pow2_cheap (speed, int_mode)
4406 : sdiv_pow2_cheap (speed, int_mode))
4407 /* We assume that cheap metric is true if the
4408 optab has an expander for this mode. */
4409 && ((optab_handler ((rem_flag ? smod_optab
4410 : sdiv_optab),
4411 int_mode)
4412 != CODE_FOR_nothing)
4413 || (optab_handler (sdivmod_optab, int_mode)
4414 != CODE_FOR_nothing)))
4416 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d)
4417 && (size <= HOST_BITS_PER_WIDE_INT
4418 || abs_d != (unsigned HOST_WIDE_INT) d))
4420 if (rem_flag)
4422 remainder = expand_smod_pow2 (int_mode, op0, d);
4423 if (remainder)
4424 return gen_lowpart (mode, remainder);
4427 if (sdiv_pow2_cheap (speed, int_mode)
4428 && ((optab_handler (sdiv_optab, int_mode)
4429 != CODE_FOR_nothing)
4430 || (optab_handler (sdivmod_optab, int_mode)
4431 != CODE_FOR_nothing)))
4432 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4433 int_mode, op0,
4434 gen_int_mode (abs_d,
4435 int_mode),
4436 NULL_RTX, 0);
4437 else
4438 quotient = expand_sdiv_pow2 (int_mode, op0, abs_d);
4440 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4441 negate the quotient. */
4442 if (d < 0)
4444 insn = get_last_insn ();
4445 if (insn != last
4446 && abs_d < (HOST_WIDE_INT_1U
4447 << (HOST_BITS_PER_WIDE_INT - 1)))
4448 set_dst_reg_note (insn, REG_EQUAL,
4449 gen_rtx_DIV (int_mode, op0,
4450 gen_int_mode
4451 (abs_d,
4452 int_mode)),
4453 quotient);
4455 quotient = expand_unop (int_mode, neg_optab,
4456 quotient, quotient, 0);
4459 else if (size <= HOST_BITS_PER_WIDE_INT)
4461 choose_multiplier (abs_d, size, size - 1,
4462 &ml, &post_shift, &lgup);
4463 if (ml < HOST_WIDE_INT_1U << (size - 1))
4465 rtx t1, t2, t3;
4467 if (post_shift >= BITS_PER_WORD
4468 || size - 1 >= BITS_PER_WORD)
4469 goto fail1;
4471 extra_cost = (shift_cost (speed, int_mode, post_shift)
4472 + shift_cost (speed, int_mode, size - 1)
4473 + add_cost (speed, int_mode));
4474 t1 = expmed_mult_highpart
4475 (int_mode, op0, gen_int_mode (ml, int_mode),
4476 NULL_RTX, 0, max_cost - extra_cost);
4477 if (t1 == 0)
4478 goto fail1;
4479 t2 = expand_shift
4480 (RSHIFT_EXPR, int_mode, t1,
4481 post_shift, NULL_RTX, 0);
4482 t3 = expand_shift
4483 (RSHIFT_EXPR, int_mode, op0,
4484 size - 1, NULL_RTX, 0);
4485 if (d < 0)
4486 quotient
4487 = force_operand (gen_rtx_MINUS (int_mode, t3, t2),
4488 tquotient);
4489 else
4490 quotient
4491 = force_operand (gen_rtx_MINUS (int_mode, t2, t3),
4492 tquotient);
4494 else
4496 rtx t1, t2, t3, t4;
4498 if (post_shift >= BITS_PER_WORD
4499 || size - 1 >= BITS_PER_WORD)
4500 goto fail1;
4502 ml |= HOST_WIDE_INT_M1U << (size - 1);
4503 mlr = gen_int_mode (ml, int_mode);
4504 extra_cost = (shift_cost (speed, int_mode, post_shift)
4505 + shift_cost (speed, int_mode, size - 1)
4506 + 2 * add_cost (speed, int_mode));
4507 t1 = expmed_mult_highpart (int_mode, op0, mlr,
4508 NULL_RTX, 0,
4509 max_cost - extra_cost);
4510 if (t1 == 0)
4511 goto fail1;
4512 t2 = force_operand (gen_rtx_PLUS (int_mode, t1, op0),
4513 NULL_RTX);
4514 t3 = expand_shift
4515 (RSHIFT_EXPR, int_mode, t2,
4516 post_shift, NULL_RTX, 0);
4517 t4 = expand_shift
4518 (RSHIFT_EXPR, int_mode, op0,
4519 size - 1, NULL_RTX, 0);
4520 if (d < 0)
4521 quotient
4522 = force_operand (gen_rtx_MINUS (int_mode, t4, t3),
4523 tquotient);
4524 else
4525 quotient
4526 = force_operand (gen_rtx_MINUS (int_mode, t3, t4),
4527 tquotient);
4530 else /* Too wide mode to use tricky code */
4531 break;
4533 insn = get_last_insn ();
4534 if (insn != last)
4535 set_dst_reg_note (insn, REG_EQUAL,
4536 gen_rtx_DIV (int_mode, op0, op1),
4537 quotient);
4539 break;
4541 fail1:
4542 delete_insns_since (last);
4543 break;
4545 case FLOOR_DIV_EXPR:
4546 case FLOOR_MOD_EXPR:
4547 /* We will come here only for signed operations. */
4548 if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
4550 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4551 int size = GET_MODE_BITSIZE (int_mode);
4552 unsigned HOST_WIDE_INT mh, ml;
4553 int pre_shift, lgup, post_shift;
4554 HOST_WIDE_INT d = INTVAL (op1);
4556 if (d > 0)
4558 /* We could just as easily deal with negative constants here,
4559 but it does not seem worth the trouble for GCC 2.6. */
4560 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4562 pre_shift = floor_log2 (d);
4563 if (rem_flag)
4565 unsigned HOST_WIDE_INT mask
4566 = (HOST_WIDE_INT_1U << pre_shift) - 1;
4567 remainder = expand_binop
4568 (int_mode, and_optab, op0,
4569 gen_int_mode (mask, int_mode),
4570 remainder, 0, OPTAB_LIB_WIDEN);
4571 if (remainder)
4572 return gen_lowpart (mode, remainder);
4574 quotient = expand_shift
4575 (RSHIFT_EXPR, int_mode, op0,
4576 pre_shift, tquotient, 0);
4578 else
4580 rtx t1, t2, t3, t4;
4582 mh = choose_multiplier (d, size, size - 1,
4583 &ml, &post_shift, &lgup);
4584 gcc_assert (!mh);
4586 if (post_shift < BITS_PER_WORD
4587 && size - 1 < BITS_PER_WORD)
4589 t1 = expand_shift
4590 (RSHIFT_EXPR, int_mode, op0,
4591 size - 1, NULL_RTX, 0);
4592 t2 = expand_binop (int_mode, xor_optab, op0, t1,
4593 NULL_RTX, 0, OPTAB_WIDEN);
4594 extra_cost = (shift_cost (speed, int_mode, post_shift)
4595 + shift_cost (speed, int_mode, size - 1)
4596 + 2 * add_cost (speed, int_mode));
4597 t3 = expmed_mult_highpart
4598 (int_mode, t2, gen_int_mode (ml, int_mode),
4599 NULL_RTX, 1, max_cost - extra_cost);
4600 if (t3 != 0)
4602 t4 = expand_shift
4603 (RSHIFT_EXPR, int_mode, t3,
4604 post_shift, NULL_RTX, 1);
4605 quotient = expand_binop (int_mode, xor_optab,
4606 t4, t1, tquotient, 0,
4607 OPTAB_WIDEN);
4612 else
4614 rtx nsign, t1, t2, t3, t4;
4615 t1 = force_operand (gen_rtx_PLUS (int_mode,
4616 op0, constm1_rtx), NULL_RTX);
4617 t2 = expand_binop (int_mode, ior_optab, op0, t1, NULL_RTX,
4618 0, OPTAB_WIDEN);
4619 nsign = expand_shift (RSHIFT_EXPR, int_mode, t2,
4620 size - 1, NULL_RTX, 0);
4621 t3 = force_operand (gen_rtx_MINUS (int_mode, t1, nsign),
4622 NULL_RTX);
4623 t4 = expand_divmod (0, TRUNC_DIV_EXPR, int_mode, t3, op1,
4624 NULL_RTX, 0);
4625 if (t4)
4627 rtx t5;
4628 t5 = expand_unop (int_mode, one_cmpl_optab, nsign,
4629 NULL_RTX, 0);
4630 quotient = force_operand (gen_rtx_PLUS (int_mode, t4, t5),
4631 tquotient);
4636 if (quotient != 0)
4637 break;
4638 delete_insns_since (last);
4640 /* Try using an instruction that produces both the quotient and
4641 remainder, using truncation. We can easily compensate the quotient
4642 or remainder to get floor rounding, once we have the remainder.
4643 Notice that we also compute the final remainder value here,
4644 and return the result right away. */
4645 if (target == 0 || GET_MODE (target) != compute_mode)
4646 target = gen_reg_rtx (compute_mode);
4648 if (rem_flag)
4650 remainder
4651 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4652 quotient = gen_reg_rtx (compute_mode);
4654 else
4656 quotient
4657 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4658 remainder = gen_reg_rtx (compute_mode);
4661 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4662 quotient, remainder, 0))
4664 /* This could be computed with a branch-less sequence.
4665 Save that for later. */
4666 rtx tem;
4667 rtx_code_label *label = gen_label_rtx ();
4668 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4669 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4670 NULL_RTX, 0, OPTAB_WIDEN);
4671 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4672 expand_dec (quotient, const1_rtx);
4673 expand_inc (remainder, op1);
4674 emit_label (label);
4675 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4678 /* No luck with division elimination or divmod. Have to do it
4679 by conditionally adjusting op0 *and* the result. */
4681 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4682 rtx adjusted_op0;
4683 rtx tem;
4685 quotient = gen_reg_rtx (compute_mode);
4686 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4687 label1 = gen_label_rtx ();
4688 label2 = gen_label_rtx ();
4689 label3 = gen_label_rtx ();
4690 label4 = gen_label_rtx ();
4691 label5 = gen_label_rtx ();
4692 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4693 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4694 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4695 quotient, 0, OPTAB_LIB_WIDEN);
4696 if (tem != quotient)
4697 emit_move_insn (quotient, tem);
4698 emit_jump_insn (targetm.gen_jump (label5));
4699 emit_barrier ();
4700 emit_label (label1);
4701 expand_inc (adjusted_op0, const1_rtx);
4702 emit_jump_insn (targetm.gen_jump (label4));
4703 emit_barrier ();
4704 emit_label (label2);
4705 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4706 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4707 quotient, 0, OPTAB_LIB_WIDEN);
4708 if (tem != quotient)
4709 emit_move_insn (quotient, tem);
4710 emit_jump_insn (targetm.gen_jump (label5));
4711 emit_barrier ();
4712 emit_label (label3);
4713 expand_dec (adjusted_op0, const1_rtx);
4714 emit_label (label4);
4715 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4716 quotient, 0, OPTAB_LIB_WIDEN);
4717 if (tem != quotient)
4718 emit_move_insn (quotient, tem);
4719 expand_dec (quotient, const1_rtx);
4720 emit_label (label5);
4722 break;
4724 case CEIL_DIV_EXPR:
4725 case CEIL_MOD_EXPR:
4726 if (unsignedp)
4728 if (op1_is_constant
4729 && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4730 && (HWI_COMPUTABLE_MODE_P (compute_mode)
4731 || INTVAL (op1) >= 0))
4733 scalar_int_mode int_mode
4734 = as_a <scalar_int_mode> (compute_mode);
4735 rtx t1, t2, t3;
4736 unsigned HOST_WIDE_INT d = INTVAL (op1);
4737 t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
4738 floor_log2 (d), tquotient, 1);
4739 t2 = expand_binop (int_mode, and_optab, op0,
4740 gen_int_mode (d - 1, int_mode),
4741 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4742 t3 = gen_reg_rtx (int_mode);
4743 t3 = emit_store_flag (t3, NE, t2, const0_rtx, int_mode, 1, 1);
4744 if (t3 == 0)
4746 rtx_code_label *lab;
4747 lab = gen_label_rtx ();
4748 do_cmp_and_jump (t2, const0_rtx, EQ, int_mode, lab);
4749 expand_inc (t1, const1_rtx);
4750 emit_label (lab);
4751 quotient = t1;
4753 else
4754 quotient = force_operand (gen_rtx_PLUS (int_mode, t1, t3),
4755 tquotient);
4756 break;
4759 /* Try using an instruction that produces both the quotient and
4760 remainder, using truncation. We can easily compensate the
4761 quotient or remainder to get ceiling rounding, once we have the
4762 remainder. Notice that we also compute the final remainder
4763 value here, and return the result right away. */
4764 if (target == 0 || GET_MODE (target) != compute_mode)
4765 target = gen_reg_rtx (compute_mode);
4767 if (rem_flag)
4769 remainder = (REG_P (target)
4770 ? target : gen_reg_rtx (compute_mode));
4771 quotient = gen_reg_rtx (compute_mode);
4773 else
4775 quotient = (REG_P (target)
4776 ? target : gen_reg_rtx (compute_mode));
4777 remainder = gen_reg_rtx (compute_mode);
4780 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4781 remainder, 1))
4783 /* This could be computed with a branch-less sequence.
4784 Save that for later. */
4785 rtx_code_label *label = gen_label_rtx ();
4786 do_cmp_and_jump (remainder, const0_rtx, EQ,
4787 compute_mode, label);
4788 expand_inc (quotient, const1_rtx);
4789 expand_dec (remainder, op1);
4790 emit_label (label);
4791 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4794 /* No luck with division elimination or divmod. Have to do it
4795 by conditionally adjusting op0 *and* the result. */
4797 rtx_code_label *label1, *label2;
4798 rtx adjusted_op0, tem;
4800 quotient = gen_reg_rtx (compute_mode);
4801 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4802 label1 = gen_label_rtx ();
4803 label2 = gen_label_rtx ();
4804 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4805 compute_mode, label1);
4806 emit_move_insn (quotient, const0_rtx);
4807 emit_jump_insn (targetm.gen_jump (label2));
4808 emit_barrier ();
4809 emit_label (label1);
4810 expand_dec (adjusted_op0, const1_rtx);
4811 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4812 quotient, 1, OPTAB_LIB_WIDEN);
4813 if (tem != quotient)
4814 emit_move_insn (quotient, tem);
4815 expand_inc (quotient, const1_rtx);
4816 emit_label (label2);
4819 else /* signed */
4821 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4822 && INTVAL (op1) >= 0)
4824 /* This is extremely similar to the code for the unsigned case
4825 above. For 2.7 we should merge these variants, but for
4826 2.6.1 I don't want to touch the code for unsigned since that
4827 gets used in C. The signed case will only be used by other
4828 languages (Ada). */
4830 rtx t1, t2, t3;
4831 unsigned HOST_WIDE_INT d = INTVAL (op1);
4832 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4833 floor_log2 (d), tquotient, 0);
4834 t2 = expand_binop (compute_mode, and_optab, op0,
4835 gen_int_mode (d - 1, compute_mode),
4836 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4837 t3 = gen_reg_rtx (compute_mode);
4838 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4839 compute_mode, 1, 1);
4840 if (t3 == 0)
4842 rtx_code_label *lab;
4843 lab = gen_label_rtx ();
4844 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4845 expand_inc (t1, const1_rtx);
4846 emit_label (lab);
4847 quotient = t1;
4849 else
4850 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4851 t1, t3),
4852 tquotient);
4853 break;
4856 /* Try using an instruction that produces both the quotient and
4857 remainder, using truncation. We can easily compensate the
4858 quotient or remainder to get ceiling rounding, once we have the
4859 remainder. Notice that we also compute the final remainder
4860 value here, and return the result right away. */
4861 if (target == 0 || GET_MODE (target) != compute_mode)
4862 target = gen_reg_rtx (compute_mode);
4863 if (rem_flag)
4865 remainder = (REG_P (target)
4866 ? target : gen_reg_rtx (compute_mode));
4867 quotient = gen_reg_rtx (compute_mode);
4869 else
4871 quotient = (REG_P (target)
4872 ? target : gen_reg_rtx (compute_mode));
4873 remainder = gen_reg_rtx (compute_mode);
4876 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4877 remainder, 0))
4879 /* This could be computed with a branch-less sequence.
4880 Save that for later. */
4881 rtx tem;
4882 rtx_code_label *label = gen_label_rtx ();
4883 do_cmp_and_jump (remainder, const0_rtx, EQ,
4884 compute_mode, label);
4885 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4886 NULL_RTX, 0, OPTAB_WIDEN);
4887 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4888 expand_inc (quotient, const1_rtx);
4889 expand_dec (remainder, op1);
4890 emit_label (label);
4891 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4894 /* No luck with division elimination or divmod. Have to do it
4895 by conditionally adjusting op0 *and* the result. */
4897 rtx_code_label *label1, *label2, *label3, *label4, *label5;
4898 rtx adjusted_op0;
4899 rtx tem;
4901 quotient = gen_reg_rtx (compute_mode);
4902 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4903 label1 = gen_label_rtx ();
4904 label2 = gen_label_rtx ();
4905 label3 = gen_label_rtx ();
4906 label4 = gen_label_rtx ();
4907 label5 = gen_label_rtx ();
4908 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4909 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4910 compute_mode, label1);
4911 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4912 quotient, 0, OPTAB_LIB_WIDEN);
4913 if (tem != quotient)
4914 emit_move_insn (quotient, tem);
4915 emit_jump_insn (targetm.gen_jump (label5));
4916 emit_barrier ();
4917 emit_label (label1);
4918 expand_dec (adjusted_op0, const1_rtx);
4919 emit_jump_insn (targetm.gen_jump (label4));
4920 emit_barrier ();
4921 emit_label (label2);
4922 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4923 compute_mode, label3);
4924 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4925 quotient, 0, OPTAB_LIB_WIDEN);
4926 if (tem != quotient)
4927 emit_move_insn (quotient, tem);
4928 emit_jump_insn (targetm.gen_jump (label5));
4929 emit_barrier ();
4930 emit_label (label3);
4931 expand_inc (adjusted_op0, const1_rtx);
4932 emit_label (label4);
4933 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4934 quotient, 0, OPTAB_LIB_WIDEN);
4935 if (tem != quotient)
4936 emit_move_insn (quotient, tem);
4937 expand_inc (quotient, const1_rtx);
4938 emit_label (label5);
4941 break;
4943 case EXACT_DIV_EXPR:
4944 if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
4946 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4947 int size = GET_MODE_BITSIZE (int_mode);
4948 HOST_WIDE_INT d = INTVAL (op1);
4949 unsigned HOST_WIDE_INT ml;
4950 int pre_shift;
4951 rtx t1;
4953 pre_shift = ctz_or_zero (d);
4954 ml = invert_mod2n (d >> pre_shift, size);
4955 t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
4956 pre_shift, NULL_RTX, unsignedp);
4957 quotient = expand_mult (int_mode, t1, gen_int_mode (ml, int_mode),
4958 NULL_RTX, 1);
4960 insn = get_last_insn ();
4961 set_dst_reg_note (insn, REG_EQUAL,
4962 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4963 int_mode, op0, op1),
4964 quotient);
4966 break;
4968 case ROUND_DIV_EXPR:
4969 case ROUND_MOD_EXPR:
4970 if (unsignedp)
4972 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4973 rtx tem;
4974 rtx_code_label *label;
4975 label = gen_label_rtx ();
4976 quotient = gen_reg_rtx (int_mode);
4977 remainder = gen_reg_rtx (int_mode);
4978 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4980 rtx tem;
4981 quotient = expand_binop (int_mode, udiv_optab, op0, op1,
4982 quotient, 1, OPTAB_LIB_WIDEN);
4983 tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 1);
4984 remainder = expand_binop (int_mode, sub_optab, op0, tem,
4985 remainder, 1, OPTAB_LIB_WIDEN);
4987 tem = plus_constant (int_mode, op1, -1);
4988 tem = expand_shift (RSHIFT_EXPR, int_mode, tem, 1, NULL_RTX, 1);
4989 do_cmp_and_jump (remainder, tem, LEU, int_mode, label);
4990 expand_inc (quotient, const1_rtx);
4991 expand_dec (remainder, op1);
4992 emit_label (label);
4994 else
4996 scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4997 int size = GET_MODE_BITSIZE (int_mode);
4998 rtx abs_rem, abs_op1, tem, mask;
4999 rtx_code_label *label;
5000 label = gen_label_rtx ();
5001 quotient = gen_reg_rtx (int_mode);
5002 remainder = gen_reg_rtx (int_mode);
5003 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
5005 rtx tem;
5006 quotient = expand_binop (int_mode, sdiv_optab, op0, op1,
5007 quotient, 0, OPTAB_LIB_WIDEN);
5008 tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 0);
5009 remainder = expand_binop (int_mode, sub_optab, op0, tem,
5010 remainder, 0, OPTAB_LIB_WIDEN);
5012 abs_rem = expand_abs (int_mode, remainder, NULL_RTX, 1, 0);
5013 abs_op1 = expand_abs (int_mode, op1, NULL_RTX, 1, 0);
5014 tem = expand_shift (LSHIFT_EXPR, int_mode, abs_rem,
5015 1, NULL_RTX, 1);
5016 do_cmp_and_jump (tem, abs_op1, LTU, int_mode, label);
5017 tem = expand_binop (int_mode, xor_optab, op0, op1,
5018 NULL_RTX, 0, OPTAB_WIDEN);
5019 mask = expand_shift (RSHIFT_EXPR, int_mode, tem,
5020 size - 1, NULL_RTX, 0);
5021 tem = expand_binop (int_mode, xor_optab, mask, const1_rtx,
5022 NULL_RTX, 0, OPTAB_WIDEN);
5023 tem = expand_binop (int_mode, sub_optab, tem, mask,
5024 NULL_RTX, 0, OPTAB_WIDEN);
5025 expand_inc (quotient, tem);
5026 tem = expand_binop (int_mode, xor_optab, mask, op1,
5027 NULL_RTX, 0, OPTAB_WIDEN);
5028 tem = expand_binop (int_mode, sub_optab, tem, mask,
5029 NULL_RTX, 0, OPTAB_WIDEN);
5030 expand_dec (remainder, tem);
5031 emit_label (label);
5033 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5035 default:
5036 gcc_unreachable ();
5039 if (quotient == 0)
5041 if (target && GET_MODE (target) != compute_mode)
5042 target = 0;
5044 if (rem_flag)
5046 /* Try to produce the remainder without producing the quotient.
5047 If we seem to have a divmod pattern that does not require widening,
5048 don't try widening here. We should really have a WIDEN argument
5049 to expand_twoval_binop, since what we'd really like to do here is
5050 1) try a mod insn in compute_mode
5051 2) try a divmod insn in compute_mode
5052 3) try a div insn in compute_mode and multiply-subtract to get
5053 remainder
5054 4) try the same things with widening allowed. */
5055 remainder
5056 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5057 op0, op1, target,
5058 unsignedp,
5059 ((optab_handler (optab2, compute_mode)
5060 != CODE_FOR_nothing)
5061 ? OPTAB_DIRECT : OPTAB_WIDEN));
5062 if (remainder == 0)
5064 /* No luck there. Can we do remainder and divide at once
5065 without a library call? */
5066 remainder = gen_reg_rtx (compute_mode);
5067 if (! expand_twoval_binop ((unsignedp
5068 ? udivmod_optab
5069 : sdivmod_optab),
5070 op0, op1,
5071 NULL_RTX, remainder, unsignedp))
5072 remainder = 0;
5075 if (remainder)
5076 return gen_lowpart (mode, remainder);
5079 /* Produce the quotient. Try a quotient insn, but not a library call.
5080 If we have a divmod in this mode, use it in preference to widening
5081 the div (for this test we assume it will not fail). Note that optab2
5082 is set to the one of the two optabs that the call below will use. */
5083 quotient
5084 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
5085 op0, op1, rem_flag ? NULL_RTX : target,
5086 unsignedp,
5087 ((optab_handler (optab2, compute_mode)
5088 != CODE_FOR_nothing)
5089 ? OPTAB_DIRECT : OPTAB_WIDEN));
5091 if (quotient == 0)
5093 /* No luck there. Try a quotient-and-remainder insn,
5094 keeping the quotient alone. */
5095 quotient = gen_reg_rtx (compute_mode);
5096 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
5097 op0, op1,
5098 quotient, NULL_RTX, unsignedp))
5100 quotient = 0;
5101 if (! rem_flag)
5102 /* Still no luck. If we are not computing the remainder,
5103 use a library call for the quotient. */
5104 quotient = sign_expand_binop (compute_mode,
5105 udiv_optab, sdiv_optab,
5106 op0, op1, target,
5107 unsignedp, OPTAB_LIB_WIDEN);
5112 if (rem_flag)
5114 if (target && GET_MODE (target) != compute_mode)
5115 target = 0;
5117 if (quotient == 0)
5119 /* No divide instruction either. Use library for remainder. */
5120 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5121 op0, op1, target,
5122 unsignedp, OPTAB_LIB_WIDEN);
5123 /* No remainder function. Try a quotient-and-remainder
5124 function, keeping the remainder. */
5125 if (!remainder)
5127 remainder = gen_reg_rtx (compute_mode);
5128 if (!expand_twoval_binop_libfunc
5129 (unsignedp ? udivmod_optab : sdivmod_optab,
5130 op0, op1,
5131 NULL_RTX, remainder,
5132 unsignedp ? UMOD : MOD))
5133 remainder = NULL_RTX;
5136 else
5138 /* We divided. Now finish doing X - Y * (X / Y). */
5139 remainder = expand_mult (compute_mode, quotient, op1,
5140 NULL_RTX, unsignedp);
5141 remainder = expand_binop (compute_mode, sub_optab, op0,
5142 remainder, target, unsignedp,
5143 OPTAB_LIB_WIDEN);
5147 return gen_lowpart (mode, rem_flag ? remainder : quotient);
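/* Illustrative sketch, not part of expmed.c: the compensation the
   FLOOR_DIV/FLOOR_MOD cases above apply to a truncating divmod.  When
   the remainder is nonzero and the operand signs differ, the truncated
   quotient is one too large for floor rounding.  */

static void
floor_divmod (int x, int y, int *q, int *r)
{
  *q = x / y;			/* Truncating division, as in C.  */
  *r = x % y;
  if (*r != 0 && (x < 0) != (y < 0))
    {
      *q -= 1;			/* expand_dec (quotient, const1_rtx)  */
      *r += y;			/* expand_inc (remainder, op1)  */
    }
}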
5150 /* Return a tree node with data type TYPE, describing the value of X.
5151 Usually this is a VAR_DECL, if there is no obvious better choice.
5152 X may be an expression, however we only support those expressions
5153 generated by loop.c. */
5155 tree
5156 make_tree (tree type, rtx x)
5158 tree t;
5160 switch (GET_CODE (x))
5162 case CONST_INT:
5163 case CONST_WIDE_INT:
5164 t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
5165 return t;
5167 case CONST_DOUBLE:
5168 STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
5169 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
5170 t = wide_int_to_tree (type,
5171 wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
5172 HOST_BITS_PER_WIDE_INT * 2));
5173 else
5174 t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));
5176 return t;
5178 case CONST_VECTOR:
5180 int units = CONST_VECTOR_NUNITS (x);
5181 tree itype = TREE_TYPE (type);
5182 int i;
5184 /* Build a tree with vector elements. */
5185 auto_vec<tree, 32> elts (units);
5186 for (i = 0; i < units; ++i)
5188 rtx elt = CONST_VECTOR_ELT (x, i);
5189 elts.quick_push (make_tree (itype, elt));
5192 return build_vector (type, elts);
5195 case PLUS:
5196 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5197 make_tree (type, XEXP (x, 1)));
5199 case MINUS:
5200 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5201 make_tree (type, XEXP (x, 1)));
5203 case NEG:
5204 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5206 case MULT:
5207 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5208 make_tree (type, XEXP (x, 1)));
5210 case ASHIFT:
5211 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5212 make_tree (type, XEXP (x, 1)));
5214 case LSHIFTRT:
5215 t = unsigned_type_for (type);
5216 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5217 make_tree (t, XEXP (x, 0)),
5218 make_tree (type, XEXP (x, 1))));
5220 case ASHIFTRT:
5221 t = signed_type_for (type);
5222 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5223 make_tree (t, XEXP (x, 0)),
5224 make_tree (type, XEXP (x, 1))));
5226 case DIV:
5227 if (TREE_CODE (type) != REAL_TYPE)
5228 t = signed_type_for (type);
5229 else
5230 t = type;
5232 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5233 make_tree (t, XEXP (x, 0)),
5234 make_tree (t, XEXP (x, 1))));
5235 case UDIV:
5236 t = unsigned_type_for (type);
5237 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5238 make_tree (t, XEXP (x, 0)),
5239 make_tree (t, XEXP (x, 1))));
5241 case SIGN_EXTEND:
5242 case ZERO_EXTEND:
5243 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5244 GET_CODE (x) == ZERO_EXTEND);
5245 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5247 case CONST:
5249 rtx op = XEXP (x, 0);
5250 if (GET_CODE (op) == VEC_DUPLICATE)
5252 tree elt_tree = make_tree (TREE_TYPE (type), XEXP (op, 0));
5253 return build_vector_from_val (type, elt_tree);
5255 return make_tree (type, op);
5258 case SYMBOL_REF:
5259 t = SYMBOL_REF_DECL (x);
5260 if (t)
5261 return fold_convert (type, build_fold_addr_expr (t));
5262 /* fall through. */
5264 default:
5265 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5267 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5268 address mode to pointer mode. */
5269 if (POINTER_TYPE_P (type))
5270 x = convert_memory_address_addr_space
5271 (SCALAR_INT_TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5273 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5274 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5275 t->decl_with_rtl.rtl = x;
5277 return t;
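
/* An illustrative (hypothetical) use of make_tree, not part of this
   file: fold an RTL addition of two pseudos back into a GENERIC tree.
   The register numbers are made up.  */
#if 0
static tree
make_tree_example (void)
{
  rtx sum = gen_rtx_PLUS (SImode,
			  gen_rtx_REG (SImode, 100),
			  gen_rtx_REG (SImode, 101));
  /* Each REG reaches the default case and becomes a VAR_DECL wrapping
     the rtx; the PLUS becomes a PLUS_EXPR via fold_build2.  */
  return make_tree (integer_type_node, sum);
}
#endif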
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
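
/* A typical call, sketched for illustration (not part of this file):
   mask a value down to its low byte.  With two VOIDmode constants the
   AND folds via simplify_binary_operation; otherwise and_optab is
   used, widening through a libcall if necessary.  */
#if 0
static rtx
mask_low_byte (rtx op0)
{
  return expand_and (SImode, op0, gen_int_mode (0xff, SImode), NULL_RTX);
}
#endif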
/* Helper function for emit_store_flag.  */

static rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
	     machine_mode mode, machine_mode compare_mode,
	     int unsignedp, rtx x, rtx y, int normalizep,
	     machine_mode target_mode)
{
  struct expand_operand ops[4];
  rtx op0, comparison, subtarget;
  rtx_insn *last;
  scalar_int_mode result_mode = targetm.cstore_mode (icode);
  scalar_int_mode int_target_mode;

  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    int_target_mode = result_mode;
  else
    int_target_mode = as_a <scalar_int_mode> (target_mode);
  if (!target)
    target = gen_reg_rtx (int_target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     INT_TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (result_mode))
    {
      convert_move (target, subtarget,
		    val_signbit_known_clear_p (result_mode,
					       STORE_FLAG_VALUE));
      op0 = target;
      result_mode = int_target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compile-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
			GET_MODE_BITSIZE (result_mode) - 1, subtarget,
			normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
	op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (int_target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
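
/* A worked instance of the normalization above, as a plain-C sketch
   (illustrative only): on a machine whose scc pattern produces 0/-1
   (STORE_FLAG_VALUE == -1) when the caller asked for 0/1
   (normalizep == 1), -normalizep == STORE_FLAG_VALUE holds and a
   single negation suffices.  */
#if 0
static int
normalize_neg_case (int raw)
{
  /* RAW is 0 or -1; the result is 0 or 1.  */
  return -raw;
}
#endif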
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
		   machine_mode mode, int unsignedp, int normalizep,
		   machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  machine_mode compare_mode;
  enum mode_class mclass;
  enum rtx_code scode;

  if (unsignedp)
    code = unsigned_condition (code);
  scode = swap_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      rtx tem;
      if ((code == EQ || code == NE)
	  && (op1 == const0_rtx || op1 == constm1_rtx))
	{
	  rtx op00, op01;

	  /* Do a logical OR or AND of the two words and compare the
	     result.  */
	  op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
	  op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
	  tem = expand_binop (word_mode,
			      op1 == const0_rtx ? ior_optab : and_optab,
			      op00, op01, NULL_RTX, unsignedp,
			      OPTAB_DIRECT);

	  if (tem != 0)
	    tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
				   unsignedp, normalizep);
	}
      else if ((code == LT || code == GE) && op1 == const0_rtx)
	{
	  rtx op0h;

	  /* If testing the sign bit, can just test on high word.  */
	  op0h = simplify_gen_subreg (word_mode, op0, int_mode,
				      subreg_highpart_offset (word_mode,
							      int_mode));
	  tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
				 unsignedp, normalizep);
	}
      else
	tem = NULL_RTX;

      if (tem)
	{
	  if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
	    return tem;
	  if (!target)
	    target = gen_reg_rtx (target_mode);

	  convert_move (target, tem,
			!val_signbit_known_set_p (word_mode,
						  (normalizep ? normalizep
						   : STORE_FLAG_VALUE)));
	  return target;
	}
    }

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && is_int_mode (mode, &int_mode)
      && (normalizep || STORE_FLAG_VALUE == 1
	  || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
    {
      scalar_int_mode int_target_mode;
      subtarget = target;

      if (!target)
	int_target_mode = int_mode;
      else
	{
	  /* If the result is to be wider than OP0, it is best to convert it
	     first.  If it is to be narrower, it is *incorrect* to convert it
	     first.  */
	  int_target_mode = as_a <scalar_int_mode> (target_mode);
	  if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (int_mode))
	    {
	      op0 = convert_modes (int_target_mode, int_mode, op0, 0);
	      int_mode = int_target_mode;
	    }
	}

      if (int_target_mode != int_mode)
	subtarget = 0;

      if (code == GE)
	op0 = expand_unop (int_mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, int_mode, op0,
			    GET_MODE_BITSIZE (int_mode) - 1,
			    subtarget, normalizep != -1);

      if (int_mode != int_target_mode)
	op0 = convert_modes (int_target_mode, int_mode, op0, 0);

      return op0;
    }

  mclass = GET_MODE_CLASS (mode);
  FOR_EACH_MODE_FROM (compare_mode, mode)
    {
      machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
	{
	  do_pending_stack_adjust ();
	  rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
				 unsignedp, op0, op1, normalizep, target_mode);
	  if (tem)
	    return tem;

	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      tem = emit_cstore (target, icode, scode, mode, compare_mode,
				 unsignedp, op1, op0, normalizep, target_mode);
	      if (tem)
		return tem;
	    }
	  break;
	}
    }

  return 0;
}
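
/* The double-word trick above, restated in plain C (illustrative only,
   assuming 32-bit words): a two-word value is zero iff the OR of its
   words is zero, and is -1 iff the AND of its words is all-ones, so a
   single-word comparison suffices.  */
#if 0
static int
dword_eq_zero (unsigned int lo, unsigned int hi)
{
  return (lo | hi) == 0;	/* the ior_optab path for op1 == 0 */
}

static int
dword_eq_minus_1 (unsigned int lo, unsigned int hi)
{
  return (lo & hi) == ~0U;	/* the and_optab path for op1 == -1 */
}
#endif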
/* Subroutine of emit_store_flag that handles cases in which the operands
   are scalar integers.  SUBTARGET is the target to use for temporary
   operations and TRUEVAL is the value to store when the condition is
   true.  All other arguments are as for emit_store_flag.  */

static rtx
emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0,
		     rtx op1, scalar_int_mode mode, int unsignedp,
		     int normalizep, rtx trueval)
{
  machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  rtx_insn *last = get_last_insn ();

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      rtx tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			      OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem != 0)
	return tem;

      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rtx_code rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
	    && code == NE
	    && GET_MODE_SIZE (mode) < UNITS_PER_WORD
	    && op1 == const0_rtx))
    {
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
		      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
	  && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
		       optimize_insn_for_speed_p ()) == 0)
	{
	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, add_optab, tem,
				gen_int_mode (normalizep, target_mode),
				target, 0, OPTAB_WIDEN);
	  if (tem != 0)
	    return tem;
	}
      else if (!want_add
	       && rtx_cost (trueval, mode, XOR, 1,
			    optimize_insn_for_speed_p ()) == 0)
	{
	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
				INTVAL (trueval) >= 0, OPTAB_WIDEN);
	  if (tem != 0)
	    return tem;
	}

      delete_insns_since (last);
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST (optimize_insn_for_speed_p (),
			   false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  rtx tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
				GET_MODE_BITSIZE (mode) - 1,
				subtarget, 0);
      if (tem)
	tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			    OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is nonzero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}

      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0
	  && (code == NE
	      || BRANCH_COST (optimize_insn_for_speed_p (),
			      false) > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }

  if (tem && normalizep)
    tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
			      GET_MODE_BITSIZE (mode) - 1,
			      subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
	;
      else if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
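
/* Plain-C sketches of the two sign-bit tricks above (illustrative
   only; they assume a 32-bit int with arithmetic right shifts and
   two's-complement wraparound, which is what the RTL expansions rely
   on).  */
#if 0
static int
le_zero (int a)
{
  /* A <= 0 iff (A | (A - 1)) has the sign bit set: for A == 0 the
     subtraction wraps to -1, and for A < 0 the sign bit of A itself
     survives the OR.  E.g. le_zero (1) computes 1 | 0 == 1.  */
  return (unsigned int) (a | (a - 1)) >> 31;
}

static int
gt_zero (int a)
{
  /* A > 0 iff (A >> 31) - A is negative: for positive A this is -A,
     for A == 0 it is 0, and for negative A it is -1 - A >= 0.  */
  return (unsigned int) ((a >> 31) - a) >> 31;
}
#endif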
/* Emit a store-flag instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
		 machine_mode mode, int unsignedp, int normalizep)
{
  machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, trueval;
  rtx_insn *last;

  /* If we compare constants, we shouldn't use a store-flag operation,
     but a constant load.  We can get there via the vanilla route that
     usually generates a compare-branch sequence, but will in this case
     fold the comparison to a constant, and thus elide the branch.  */
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return NULL_RTX;

  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
			   target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
	;
      else
	return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
	       && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
	  && (code == ORDERED || code == UNORDERED
	      || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	      || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
	{
	  int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
			  || (STORE_FLAG_VALUE == -1 && normalizep == 1));

	  /* For the reverse comparison, use either an addition or a XOR.  */
	  if (want_add
	      && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
			   optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	      if (tem)
		return expand_binop (target_mode, add_optab, tem,
				     gen_int_mode (normalizep, target_mode),
				     target, 0, OPTAB_WIDEN);
	    }
	  else if (!want_add
		   && rtx_cost (trueval, mode, XOR, 1,
				optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	      if (tem)
		return expand_binop (target_mode, xor_optab, tem, trueval,
				     target, INTVAL (trueval) >= 0,
				     OPTAB_WIDEN);
	    }
	}

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
	return 0;

      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall
	 through.  Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
	{
	  gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
	  return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
				    target_mode);
	}

      if (!HAVE_conditional_move)
	return 0;

      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
	 conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
			       normalizep, target_mode);
      if (tem == 0)
	return 0;

      if (and_them)
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     tem, const0_rtx, GET_MODE (tem), 0);
      else
	tem = emit_conditional_move (target, code, op0, op1, mode,
				     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }

  /* The remaining tricks only apply to integer comparisons.  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode))
    return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
				unsignedp, normalizep, trueval);

  return 0;
}
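
/* A hypothetical caller, sketched for illustration (not part of this
   file): materialize OP0 != 0 as a 0/1 value, accepting failure.  */
#if 0
static rtx
flag_nonzero (rtx op0)
{
  rtx t = emit_store_flag (NULL_RTX, NE, op0, const0_rtx, SImode,
			   /*unsignedp=*/0, /*normalizep=*/1);
  /* A null result means no branch-free sequence was found; the caller
     must fall back to compare-and-branch (or use the _force variant
     below).  */
  return t;
}
#endif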
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem;
  rtx_code_label *label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
			       NULL_RTX, NULL, label,
			       profile_probability::uninitialized ());
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
	rcode = reverse_condition_maybe_unordered (code);
      else
	rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
	  falseval = trueval;
	  trueval = const0_rtx;
	  code = rcode;
	}
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
			   label, profile_probability::uninitialized ());

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
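
/* Unlike emit_store_flag, the force variant never fails, so a
   hypothetical caller (sketch only, not part of this file) can rely
   on the result.  */
#if 0
static rtx
flag_lt_force (rtx op0, rtx op1)
{
  /* Produce 0/-1 (normalizep == -1); if no scc pattern applies, an
     explicit move/compare/jump/move sequence is emitted instead.  */
  return emit_store_flag_force (NULL_RTX, LT, op0, op1, SImode, 0, -1);
}
#endif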
/* Perform a possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
		 rtx_code_label *label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
			   NULL, label, profile_probability::uninitialized ());
}
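
/* Hypothetical use of the wrapper above (sketch only): jump to LABEL
   when X < Y as an unsigned comparison; passing LTU makes the wrapper
   derive unsignedp == 1 for do_compare_rtx_and_jump.  */
#if 0
static void
jump_if_below (rtx x, rtx y, rtx_code_label *label)
{
  do_cmp_and_jump (x, y, LTU, SImode, label);
}
#endif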