/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
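
/* For example, (8 & 7) == 0 and (0 & -1) == 0, so 8 and 0 satisfy the test,
   while (12 & 11) == 8, so 12, which is not a power of two, does not.  */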

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int sdiv_cost[NUM_MACHINE_MODES];
static int udiv_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;  rtunion reg_fld[2];
    struct rtx_def plus;  rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;  rtunion mult_fld1;
    struct rtx_def sdiv;  rtunion sdiv_fld1;
    struct rtx_def udiv;  rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;  rtunion sdiv_32_fld1;
    struct rtx_def smod_32;  rtunion smod_32_fld1;
    struct rtx_def wide_mult;  rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;  rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;  rtunion shift_fld1;
    struct rtx_def shift_mult;  rtunion shift_mult_fld1;
    struct rtx_def shift_add;  rtunion shift_add_fld1;
    struct rtx_def shift_sub;  rtunion shift_sub_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;

  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.sdiv, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.sdiv_32, mode);
      PUT_MODE (&all.smod_32, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);

      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);
      sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
      udiv_cost[mode] = rtx_cost (&all.udiv, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
                               <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
                               <= 4 * add_cost[mode]);

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all.zext, wider_mode);
          PUT_MODE (&all.wide_mult, wider_mode);
          PUT_MODE (&all.wide_lshr, wider_mode);
          XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

          mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
          mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
        }

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          XEXP (&all.shift, 1) = cint[m];
          XEXP (&all.shift_mult, 1) = pow2[m];

          shift_cost[mode][m] = rtx_cost (&all.shift, SET);
          shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
          shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
        }
    }
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return value;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          op0 = SUBREG_REG (op0);
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode));
        }
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      if (MEM_P (op0)
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || GET_MODE_SIZE (bestmode) < GET_MODE_SIZE (fieldmode)
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);

  return value;
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = op0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
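
/* For example, on a little-endian target with MODE == SImode, BITPOS == 8,
   BITSIZE == 4 and a constant VALUE of 0xA (illustrative values only), the
   sequence above computes op0 = (op0 & ~0x00000f00) | 0x00000a00: the four
   target bits are cleared with the complemented mask from mask_rtx, and the
   left-shifted value is then IORed in.  */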

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (MEM_P (op0))
            op0 = adjust_address (op0, mode1, offset);
          else
            {
              rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                             byte_offset);
              if (sub == NULL)
                goto no_subreg_mode_swap;
              op0 = sub;
            }
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && bitsize > 0
          && GET_MODE_BITSIZE (extzv_mode) >= bitsize
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (MEM_P (xop0))
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Make sure register is big enough for the whole field.  */
                  if (xoffset * BITS_PER_UNIT + unit
                      < offset * BITS_PER_UNIT + bitsize)
                    goto extzv_loses;

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0)
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && bitsize > 0
          && GET_MODE_BITSIZE (extv_mode) >= bitsize
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (MEM_P (xop0))
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Make sure register is big enough for the whole field.  */
                  if (xoffset * BITS_PER_UNIT + unit
                      < offset * BITS_PER_UNIT + bitsize)
                    goto extv_loses;

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0)
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is not a scalar integral, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (!SCALAR_INT_MODE_P (tmode))
        {
          enum machine_mode smode
            = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
          target = convert_to_mode (smode, target, unsignedp);
          target = force_reg (smode, target);
          return gen_lowpart (tmode, target);
        }

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}

/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
1674 static rtx
1675 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1676 unsigned HOST_WIDE_INT offset,
1677 unsigned HOST_WIDE_INT bitsize,
1678 unsigned HOST_WIDE_INT bitpos, rtx target,
1679 int unsignedp)
1681 unsigned int total_bits = BITS_PER_WORD;
1682 enum machine_mode mode;
1684 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1686 /* Special treatment for a bit field split across two registers. */
1687 if (bitsize + bitpos > BITS_PER_WORD)
1688 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1690 else
1692 /* Get the proper mode to use for this field. We want a mode that
1693 includes the entire field. If such a mode would be larger than
1694 a word, we won't be doing the extraction the normal way. */
1696 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1697 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1699 if (mode == VOIDmode)
1700 /* The only way this should occur is if the field spans word
1701 boundaries. */
1702 return extract_split_bit_field (op0, bitsize,
1703 bitpos + offset * BITS_PER_UNIT,
1704 unsignedp);
1706 total_bits = GET_MODE_BITSIZE (mode);
1708 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1709 be in the range 0 to total_bits-1, and put any excess bytes in
1710 OFFSET. */
1711 if (bitpos >= total_bits)
1713 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1714 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1715 * BITS_PER_UNIT);
1718 /* Get ref to an aligned byte, halfword, or word containing the field.
1719 Adjust BITPOS to be position within a word,
1720 and OFFSET to be the offset of that word.
1721 Then alter OP0 to refer to that word. */
1722 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1723 offset -= (offset % (total_bits / BITS_PER_UNIT));
1724 op0 = adjust_address (op0, mode, offset);
1727 mode = GET_MODE (op0);
1729 if (BYTES_BIG_ENDIAN)
1730 /* BITPOS is the distance between our msb and that of OP0.
1731 Convert it to the distance from the lsb. */
1732 bitpos = total_bits - bitsize - bitpos;
1734 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1735 We have reduced the big-endian case to the little-endian case. */
1737 if (unsignedp)
1739 if (bitpos)
1741 /* If the field does not already start at the lsb,
1742 shift it so it does. */
1743 tree amount = build_int_cst (NULL_TREE, bitpos);
1744 /* Maybe propagate the target for the shift. */
1745 /* But not if we will return it--could confuse integrate.c. */
1746 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1747 if (tmode != mode) subtarget = 0;
1748 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1750 /* Convert the value to the desired mode. */
1751 if (mode != tmode)
1752 op0 = convert_to_mode (tmode, op0, 1);
1754 /* Unless the msb of the field used to be the msb when we shifted,
1755 mask out the upper bits. */
1757 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1758 return expand_binop (GET_MODE (op0), and_optab, op0,
1759 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1760 target, 1, OPTAB_LIB_WIDEN);
1761 return op0;
1764 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1765 then arithmetic-shift its lsb to the lsb of the word. */
1766 op0 = force_reg (mode, op0);
1767 if (mode != tmode)
1768 target = 0;
1770 /* Find the narrowest integer mode that contains the field. */
1772 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1773 mode = GET_MODE_WIDER_MODE (mode))
1774 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1776 op0 = convert_to_mode (mode, op0, 0);
1777 break;
1780 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1782 tree amount
1783 = build_int_cst (NULL_TREE,
1784 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1785 /* Maybe propagate the target for the shift. */
1786 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1787 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1790 return expand_shift (RSHIFT_EXPR, mode, op0,
1791 build_int_cst (NULL_TREE,
1792 GET_MODE_BITSIZE (mode) - bitsize),
1793 target, 0);
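/* A minimal standalone sketch (not part of expmed.c, no GCC internals) of
   the shift-and-mask extraction performed above, assuming a 64-bit word,
   lsb-first bit numbering, two's-complement conversions and an arithmetic
   right shift for signed types.  The sketch_* helpers are hypothetical.  */

static unsigned long long
sketch_extract_unsigned (unsigned long long word, unsigned int bitpos,
                         unsigned int bitsize)
{
  /* Shift the field's lsb down to bit 0, then mask off everything above
     it, unless the field already reaches the word's msb.  */
  unsigned long long v = word >> bitpos;
  if (bitsize < 64)
    v &= (1ULL << bitsize) - 1;
  return v;
}

static long long
sketch_extract_signed (unsigned long long word, unsigned int bitpos,
                       unsigned int bitsize)
{
  /* Shift the field's msb up to the word's msb, then arithmetic-shift it
     back down so the sign bit is replicated.  */
  long long v = (long long) (word << (64 - bitsize - bitpos));
  return v >> (64 - bitsize);
}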
1796 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1797 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1798 complement of that if COMPLEMENT. The mask is truncated if
1799 necessary to the width of mode MODE. The mask is zero-extended if
1800 BITSIZE+BITPOS is too small for MODE. */
1802 static rtx
1803 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1805 HOST_WIDE_INT masklow, maskhigh;
1807 if (bitsize == 0)
1808 masklow = 0;
1809 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1810 masklow = (HOST_WIDE_INT) -1 << bitpos;
1811 else
1812 masklow = 0;
1814 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1815 masklow &= ((unsigned HOST_WIDE_INT) -1
1816 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1818 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1819 maskhigh = -1;
1820 else
1821 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1823 if (bitsize == 0)
1824 maskhigh = 0;
1825 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1826 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1827 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1828 else
1829 maskhigh = 0;
1831 if (complement)
1833 maskhigh = ~maskhigh;
1834 masklow = ~masklow;
1837 return immed_double_const (masklow, maskhigh, mode);
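/* A single-word analogue of mask_rtx, given as an illustrative sketch only
   (the real function also fills in the high word of a double-word
   constant).  Assumes a 64-bit host word; sketch_mask is hypothetical.  */

static unsigned long long
sketch_mask (unsigned int bitpos, unsigned int bitsize, int complement)
{
  unsigned long long mask;
  if (bitsize == 0)
    mask = 0;
  else if (bitsize >= 64)
    mask = ~0ULL << bitpos;                     /* Ones up to the msb.  */
  else
    mask = ((1ULL << bitsize) - 1) << bitpos;   /* BITSIZE ones, BITPOS zeros.  */
  return complement ? ~mask : mask;
}

/* For example, sketch_mask (4, 3, 0) == 0x70 and
   sketch_mask (0, 8, 1) == ~(unsigned long long) 0xff.  */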
1840 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1841 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1843 static rtx
1844 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1846 unsigned HOST_WIDE_INT v = INTVAL (value);
1847 HOST_WIDE_INT low, high;
1849 if (bitsize < HOST_BITS_PER_WIDE_INT)
1850 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1852 if (bitpos < HOST_BITS_PER_WIDE_INT)
1854 low = v << bitpos;
1855 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1857 else
1859 low = 0;
1860 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1863 return immed_double_const (low, high, mode);
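/* An illustrative single-word analogue of lshift_value (not part of the
   real code, which also produces the high word): truncate VALUE to BITSIZE
   bits, then shift it left by BITPOS.  */

static unsigned long long
sketch_lshift_value (unsigned long long value, unsigned int bitpos,
                     unsigned int bitsize)
{
  if (bitsize < 64)
    value &= (1ULL << bitsize) - 1;   /* Keep only the BITSIZE low bits.  */
  return value << bitpos;             /* Place them at BITPOS.  */
}

/* For example, sketch_lshift_value (0x1f, 4, 3) == 0x70, since 0x1f is
   first truncated to 0x7.  */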
1866 /* Extract a bit field from a memory by forcing the alignment of the
1867 memory. This is efficient only if the field spans at least 4 boundaries.
1869 OP0 is the MEM.
1870 BITSIZE is the field width; BITPOS is the position of the first bit.
1871 UNSIGNEDP is true if the result should be zero-extended. */
1873 static rtx
1874 extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1875 unsigned HOST_WIDE_INT bitpos,
1876 int unsignedp)
1878 enum machine_mode mode, dmode;
1879 unsigned int m_bitsize, m_size;
1880 unsigned int sign_shift_up, sign_shift_dn;
1881 rtx base, a1, a2, v1, v2, comb, shift, result, start;
1883 /* Choose a mode that will fit BITSIZE. */
1884 mode = smallest_mode_for_size (bitsize, MODE_INT);
1885 m_size = GET_MODE_SIZE (mode);
1886 m_bitsize = GET_MODE_BITSIZE (mode);
1888 /* Choose a mode twice as wide. Fail if no such mode exists. */
1889 dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
1890 if (dmode == BLKmode)
1891 return NULL;
1893 do_pending_stack_adjust ();
1894 start = get_last_insn ();
1896 /* At the end, we'll need an additional shift to deal with sign/zero
1897 extension. By default this will be a left+right shift of the
1898 appropriate size. But we may be able to eliminate one of them. */
1899 sign_shift_up = sign_shift_dn = m_bitsize - bitsize;
1901 if (STRICT_ALIGNMENT)
1903 base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
1904 bitpos %= BITS_PER_UNIT;
1906 /* We load two values to be concatenated. There's an edge condition
1907 that bears notice -- an aligned value at the end of a page can
1908 only load one value lest we segfault. So the two values we load
1909 are at "base & -size" and "(base + size - 1) & -size". If base
1910 is unaligned, the addresses will be aligned and sequential; if
1911 base is aligned, the addresses will both be equal to base. */
1913 a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
1914 GEN_INT (-(HOST_WIDE_INT)m_size),
1915 NULL, true, OPTAB_LIB_WIDEN);
1916 mark_reg_pointer (a1, m_bitsize);
1917 v1 = gen_rtx_MEM (mode, a1);
1918 set_mem_align (v1, m_bitsize);
1919 v1 = force_reg (mode, validize_mem (v1));
1921 a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
1922 a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
1923 GEN_INT (-(HOST_WIDE_INT)m_size),
1924 NULL, true, OPTAB_LIB_WIDEN);
1925 v2 = gen_rtx_MEM (mode, a2);
1926 set_mem_align (v2, m_bitsize);
1927 v2 = force_reg (mode, validize_mem (v2));
1929 /* Combine these two values into a double-word value. */
1930 if (m_bitsize == BITS_PER_WORD)
1932 comb = gen_reg_rtx (dmode);
1933 emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
1934 emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
1935 emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
1937 else
1939 if (BYTES_BIG_ENDIAN)
1940 comb = v1, v1 = v2, v2 = comb;
1941 v1 = convert_modes (dmode, mode, v1, true);
1942 if (v1 == NULL)
1943 goto fail;
1944 v2 = convert_modes (dmode, mode, v2, true);
1945 v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
1946 NULL, true, OPTAB_LIB_WIDEN);
1947 if (v2 == NULL)
1948 goto fail;
1949 comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
1950 true, OPTAB_LIB_WIDEN);
1951 if (comb == NULL)
1952 goto fail;
1955 shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
1956 NULL, true, OPTAB_LIB_WIDEN);
1957 shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);
1959 if (bitpos != 0)
1961 if (sign_shift_up <= bitpos)
1962 bitpos -= sign_shift_up, sign_shift_up = 0;
1963 shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
1964 NULL, true, OPTAB_LIB_WIDEN);
1967 else
1969 unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
1970 bitpos %= BITS_PER_UNIT;
1972 /* When strict alignment is not required, we can just load directly
1973 from memory without masking. If the remaining BITPOS offset is
1974 small enough, we may be able to do all operations in MODE as
1975 opposed to DMODE. */
1976 if (bitpos + bitsize <= m_bitsize)
1977 dmode = mode;
1978 comb = adjust_address (op0, dmode, offset);
1980 if (sign_shift_up <= bitpos)
1981 bitpos -= sign_shift_up, sign_shift_up = 0;
1982 shift = GEN_INT (bitpos);
1985 /* Shift down the double-word such that the requested value is at bit 0. */
1986 if (shift != const0_rtx)
1987 comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
1988 comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
1989 if (comb == NULL)
1990 goto fail;
1992 /* If the field exactly matches MODE, then all we need to do is return the
1993 lowpart. Otherwise, shift to get the sign bits set properly. */
1994 result = force_reg (mode, gen_lowpart (mode, comb));
1996 if (sign_shift_up)
1997 result = expand_simple_binop (mode, ASHIFT, result,
1998 GEN_INT (sign_shift_up),
1999 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2000 if (sign_shift_dn)
2001 result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
2002 result, GEN_INT (sign_shift_dn),
2003 NULL_RTX, 0, OPTAB_LIB_WIDEN);
2005 return result;
2007 fail:
2008 delete_insns_since (start);
2009 return NULL;
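/* An illustrative sketch (not part of expmed.c) of the address arithmetic
   used above.  For an aligned load unit of SIZE bytes, "base & -size" and
   "(base + size - 1) & -size" are the same address when BASE is already
   aligned, and two consecutive aligned addresses otherwise, so the field
   is always covered without touching any byte past its containing words.
   The helper and the flat unsigned addresses are assumptions made for the
   example.  */

static void
sketch_aligned_pair (unsigned long long base, unsigned long long size,
                     unsigned long long *a1, unsigned long long *a2)
{
  *a1 = base & -size;                  /* Aligned word containing BASE.  */
  *a2 = (base + size - 1) & -size;     /* Aligned word containing the last byte.  */
}

/* base = 0x1003, size = 4  ->  a1 = 0x1000, a2 = 0x1004 (two loads);
   base = 0x1000, size = 4  ->  a1 = a2 = 0x1000 (both loads hit one word).  */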
2012 /* Extract a bit field that is split across two words
2013 and return an RTX for the result.
2015 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
2016 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
2017 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
2019 static rtx
2020 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
2021 unsigned HOST_WIDE_INT bitpos, int unsignedp)
2023 unsigned int unit;
2024 unsigned int bitsdone = 0;
2025 rtx result = NULL_RTX;
2026 int first = 1;
2028 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
2029 much at a time. */
2030 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
2031 unit = BITS_PER_WORD;
2032 else
2034 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
2035 if (0 && bitsize / unit > 2)
2037 rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
2038 unsignedp);
2039 if (tmp)
2040 return tmp;
2044 while (bitsdone < bitsize)
2046 unsigned HOST_WIDE_INT thissize;
2047 rtx part, word;
2048 unsigned HOST_WIDE_INT thispos;
2049 unsigned HOST_WIDE_INT offset;
2051 offset = (bitpos + bitsdone) / unit;
2052 thispos = (bitpos + bitsdone) % unit;
2054 /* THISSIZE must not overrun a word boundary. Otherwise,
2055 extract_fixed_bit_field will call us again, and we will mutually
2056 recurse forever. */
2057 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
2058 thissize = MIN (thissize, unit - thispos);
2060 /* If OP0 is a register, then handle OFFSET here.
2062 When handling multiword bitfields, extract_bit_field may pass
2063 down a word_mode SUBREG of a larger REG for a bitfield that actually
2064 crosses a word boundary. Thus, for a SUBREG, we must find
2065 the current word starting from the base register. */
2066 if (GET_CODE (op0) == SUBREG)
2068 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
2069 word = operand_subword_force (SUBREG_REG (op0), word_offset,
2070 GET_MODE (SUBREG_REG (op0)));
2071 offset = 0;
2073 else if (REG_P (op0))
2075 word = operand_subword_force (op0, offset, GET_MODE (op0));
2076 offset = 0;
2078 else
2079 word = op0;
2081 /* Extract the parts in bit-counting order,
2082 whose meaning is determined by BYTES_PER_UNIT.
2083 OFFSET is in UNITs, and UNIT is in bits.
2084 extract_fixed_bit_field wants offset in bytes. */
2085 part = extract_fixed_bit_field (word_mode, word,
2086 offset * unit / BITS_PER_UNIT,
2087 thissize, thispos, 0, 1);
2088 bitsdone += thissize;
2090 /* Shift this part into place for the result. */
2091 if (BYTES_BIG_ENDIAN)
2093 if (bitsize != bitsdone)
2094 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2095 build_int_cst (NULL_TREE, bitsize - bitsdone),
2096 0, 1);
2098 else
2100 if (bitsdone != thissize)
2101 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2102 build_int_cst (NULL_TREE,
2103 bitsdone - thissize), 0, 1);
2106 if (first)
2107 result = part;
2108 else
2109 /* Combine the parts with bitwise or. This works
2110 because we extracted each part as an unsigned bit field. */
2111 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2112 OPTAB_LIB_WIDEN);
2114 first = 0;
2117 /* Unsigned bit field: we are done. */
2118 if (unsignedp)
2119 return result;
2120 /* Signed bit field: sign-extend with two arithmetic shifts. */
2121 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2122 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2123 NULL_RTX, 0);
2124 return expand_shift (RSHIFT_EXPR, word_mode, result,
2125 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2126 NULL_RTX, 0);
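/* An illustrative sketch (not part of expmed.c) of the split extraction
   above for the simplest case: a field of BITSIZE bits that starts at
   BITPOS in W0 and straddles into W1, with lsb-first bit numbering and
   64-bit words.  Assumes 0 < bitpos < 64, bitsize <= 64 and
   bitpos + bitsize > 64, so exactly two words are involved.  */

static unsigned long long
sketch_extract_split (unsigned long long w0, unsigned long long w1,
                      unsigned int bitpos, unsigned int bitsize)
{
  unsigned int size0 = 64 - bitpos;            /* Bits taken from W0.  */
  unsigned int size1 = bitsize - size0;        /* Bits taken from W1.  */
  unsigned long long part0 = w0 >> bitpos;     /* Top of W0, already zero-extended.  */
  unsigned long long part1 = w1 & ((1ULL << size1) - 1);
  /* OR the parts together; this is valid because each part was extracted
     as an unsigned bit field, exactly as in the loop above.  */
  return part0 | (part1 << size0);
}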
2129 /* Add INC into TARGET. */
2131 void
2132 expand_inc (rtx target, rtx inc)
2134 rtx value = expand_binop (GET_MODE (target), add_optab,
2135 target, inc,
2136 target, 0, OPTAB_LIB_WIDEN);
2137 if (value != target)
2138 emit_move_insn (target, value);
2141 /* Subtract DEC from TARGET. */
2143 void
2144 expand_dec (rtx target, rtx dec)
2146 rtx value = expand_binop (GET_MODE (target), sub_optab,
2147 target, dec,
2148 target, 0, OPTAB_LIB_WIDEN);
2149 if (value != target)
2150 emit_move_insn (target, value);
2153 /* Output a shift instruction for expression code CODE,
2154 with SHIFTED being the rtx for the value to shift,
2155 and AMOUNT the tree for the amount to shift by.
2156 Store the result in the rtx TARGET, if that is convenient.
2157 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2158 Return the rtx for where the value is. */
2161 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2162 tree amount, rtx target, int unsignedp)
2164 rtx op1, temp = 0;
2165 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2166 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2167 int try;
2169 /* We used to detect shift counts computed by NEGATE_EXPR
2170 and shift in the other direction, but that does not work
2171 on all machines.
2173 op1 = expand_normal (amount);
2175 if (SHIFT_COUNT_TRUNCATED)
2177 if (GET_CODE (op1) == CONST_INT
2178 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2179 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2180 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2181 % GET_MODE_BITSIZE (mode));
2182 else if (GET_CODE (op1) == SUBREG
2183 && subreg_lowpart_p (op1))
2184 op1 = SUBREG_REG (op1);
2187 if (op1 == const0_rtx)
2188 return shifted;
2190 /* Check whether it's cheaper to implement a left shift by a constant
2191 bit count by a sequence of additions. */
2192 if (code == LSHIFT_EXPR
2193 && GET_CODE (op1) == CONST_INT
2194 && INTVAL (op1) > 0
2195 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2196 && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
2198 int i;
2199 for (i = 0; i < INTVAL (op1); i++)
2201 temp = force_reg (mode, shifted);
2202 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2203 unsignedp, OPTAB_LIB_WIDEN);
2205 return shifted;
2208 for (try = 0; temp == 0 && try < 3; try++)
2210 enum optab_methods methods;
2212 if (try == 0)
2213 methods = OPTAB_DIRECT;
2214 else if (try == 1)
2215 methods = OPTAB_WIDEN;
2216 else
2217 methods = OPTAB_LIB_WIDEN;
2219 if (rotate)
2221 /* Widening does not work for rotation. */
2222 if (methods == OPTAB_WIDEN)
2223 continue;
2224 else if (methods == OPTAB_LIB_WIDEN)
2226 /* If we have been unable to open-code this by a rotation,
2227 do it as the IOR of two shifts. I.e., to rotate A
2228 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2229 where C is the bitsize of A.
2231 It is theoretically possible that the target machine might
2232 not be able to perform either shift and hence we would
2233 be making two libcalls rather than just the one for the
2234 shift (similarly if IOR could not be done). We will allow
2235 this extremely unlikely lossage to avoid complicating the
2236 code below. */
2238 rtx subtarget = target == shifted ? 0 : target;
2239 rtx temp1;
2240 tree type = TREE_TYPE (amount);
2241 tree new_amount = make_tree (type, op1);
2242 tree other_amount
2243 = fold_build2 (MINUS_EXPR, type,
2244 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2245 amount);
2247 shifted = force_reg (mode, shifted);
2249 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2250 mode, shifted, new_amount, 0, 1);
2251 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2252 mode, shifted, other_amount, subtarget, 1);
2253 return expand_binop (mode, ior_optab, temp, temp1, target,
2254 unsignedp, methods);
2257 temp = expand_binop (mode,
2258 left ? rotl_optab : rotr_optab,
2259 shifted, op1, target, unsignedp, methods);
2261 else if (unsignedp)
2262 temp = expand_binop (mode,
2263 left ? ashl_optab : lshr_optab,
2264 shifted, op1, target, unsignedp, methods);
2266 /* Do arithmetic shifts.
2267 Also, if we are going to widen the operand, we can just as well
2268 use an arithmetic right-shift instead of a logical one. */
2269 if (temp == 0 && ! rotate
2270 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2272 enum optab_methods methods1 = methods;
2274 /* If trying to widen a log shift to an arithmetic shift,
2275 don't accept an arithmetic shift of the same size. */
2276 if (unsignedp)
2277 methods1 = OPTAB_MUST_WIDEN;
2279 /* Arithmetic shift */
2281 temp = expand_binop (mode,
2282 left ? ashl_optab : ashr_optab,
2283 shifted, op1, target, unsignedp, methods1);
2286 /* We used to try extzv here for logical right shifts, but that was
2287 only useful for one machine, the VAX, and caused poor code
2288 generation there for lshrdi3, so the code was deleted and a
2289 define_expand for lshrsi3 was added to vax.md. */
2292 gcc_assert (temp);
2293 return temp;
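/* An illustrative sketch (not part of expmed.c) of the fallback described
   above: rotating A left by N is (A << N) | (A >> (C - N)), where C is the
   bit width of A.  The 64-bit width is an assumption of the example, and N
   is taken to be in the range 1 .. C-1 so neither shift count equals C.  */

static unsigned long long
sketch_rotate_left (unsigned long long a, unsigned int n)
{
  return (a << n) | (a >> (64 - n));
}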
2296 enum alg_code {
2297 alg_unknown,
2298 alg_zero,
2299 alg_m, alg_shift,
2300 alg_add_t_m2,
2301 alg_sub_t_m2,
2302 alg_add_factor,
2303 alg_sub_factor,
2304 alg_add_t2_m,
2305 alg_sub_t2_m,
2306 alg_impossible
2309 /* This structure holds the "cost" of a multiply sequence. The
2310 "cost" field holds the total rtx_cost of every operator in the
2311 synthetic multiplication sequence, hence cost(a op b) is defined
2312 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2313 The "latency" field holds the minimum possible latency of the
2314 synthetic multiply, on a hypothetical infinitely parallel CPU.
2315 This is the critical path, or the maximum height, of the expression
2316 tree which is the sum of rtx_costs on the most expensive path from
2317 any leaf to the root. Hence latency(a op b) is defined as zero for
2318 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
2320 struct mult_cost {
2321 short cost; /* Total rtx_cost of the multiplication sequence. */
2322 short latency; /* The latency of the multiplication sequence. */
2325 /* This macro is used to compare a pointer to a mult_cost against a
2326 single integer "rtx_cost" value. This is equivalent to the macro
2327 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2328 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2329 || ((X)->cost == (Y) && (X)->latency < (Y)))
2331 /* This macro is used to compare two pointers to mult_costs against
2332 each other. The macro returns true if X is cheaper than Y.
2333 Currently, the cheaper of two mult_costs is the one with the
2334 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2335 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2336 || ((X)->cost == (Y)->cost \
2337 && (X)->latency < (Y)->latency))
2339 /* This structure records a sequence of operations.
2340 `ops' is the number of operations recorded.
2341 `cost' is their total cost.
2342 The operations are stored in `op' and the corresponding
2343 logarithms of the integer coefficients in `log'.
2345 These are the operations:
2346 alg_zero total := 0;
2347 alg_m total := multiplicand;
2348 alg_shift total := total * coeff
2349 alg_add_t_m2 total := total + multiplicand * coeff;
2350 alg_sub_t_m2 total := total - multiplicand * coeff;
2351 alg_add_factor total := total * coeff + total;
2352 alg_sub_factor total := total * coeff - total;
2353 alg_add_t2_m total := total * coeff + multiplicand;
2354 alg_sub_t2_m total := total * coeff - multiplicand;
2356 The first operand must be either alg_zero or alg_m. */
2358 struct algorithm
2360 struct mult_cost cost;
2361 short ops;
2362 /* The sizes of the OP and LOG fields are not directly related to the
2363 word size, but the worst case arises when we have few
2364 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2365 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2366 in total wordsize operations. */
2367 enum alg_code op[MAX_BITS_PER_WORD];
2368 char log[MAX_BITS_PER_WORD];
2371 /* The entry for our multiplication cache/hash table. */
2372 struct alg_hash_entry {
2373 /* The number we are multiplying by. */
2374 unsigned int t;
2376 /* The mode in which we are multiplying something by T. */
2377 enum machine_mode mode;
2379 /* The best multiplication algorithm for t. */
2380 enum alg_code alg;
2382 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2383 Otherwise, the cost within which multiplication by T is
2384 impossible. */
2385 struct mult_cost cost;
2388 /* The number of cache/hash entries. */
2389 #define NUM_ALG_HASH_ENTRIES 307
2391 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2392 actually a hash table. If we have a collision, the older
2393 entry is kicked out. */
2394 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2396 /* Indicates the type of fixup needed after a constant multiplication.
2397 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2398 the result should be negated, and ADD_VARIANT means that the
2399 multiplicand should be added to the result. */
2400 enum mult_variant {basic_variant, negate_variant, add_variant};
2402 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2403 const struct mult_cost *, enum machine_mode mode);
2404 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2405 struct algorithm *, enum mult_variant *, int);
2406 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2407 const struct algorithm *, enum mult_variant);
2408 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2409 int, rtx *, int *, int *);
2410 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2411 static rtx extract_high_half (enum machine_mode, rtx);
2412 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2413 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2414 int, int);
2415 /* Compute and return the best algorithm for multiplying by T.
2416 The algorithm must cost less than COST_LIMIT.
2417 If retval.cost >= COST_LIMIT, no algorithm was found and all
2418 other fields of the returned struct are undefined.
2419 MODE is the machine mode of the multiplication. */
2421 static void
2422 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2423 const struct mult_cost *cost_limit, enum machine_mode mode)
2425 int m;
2426 struct algorithm *alg_in, *best_alg;
2427 struct mult_cost best_cost;
2428 struct mult_cost new_limit;
2429 int op_cost, op_latency;
2430 unsigned HOST_WIDE_INT q;
2431 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2432 int hash_index;
2433 bool cache_hit = false;
2434 enum alg_code cache_alg = alg_zero;
2436 /* Indicate that no algorithm is yet found. If no algorithm
2437 is found, this value will be returned and indicate failure. */
2438 alg_out->cost.cost = cost_limit->cost + 1;
2439 alg_out->cost.latency = cost_limit->latency + 1;
2441 if (cost_limit->cost < 0
2442 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2443 return;
2445 /* Restrict the bits of "t" to the multiplication's mode. */
2446 t &= GET_MODE_MASK (mode);
2448 /* t == 1 can be done in zero cost. */
2449 if (t == 1)
2451 alg_out->ops = 1;
2452 alg_out->cost.cost = 0;
2453 alg_out->cost.latency = 0;
2454 alg_out->op[0] = alg_m;
2455 return;
2458 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2459 fail now. */
2460 if (t == 0)
2462 if (MULT_COST_LESS (cost_limit, zero_cost))
2463 return;
2464 else
2466 alg_out->ops = 1;
2467 alg_out->cost.cost = zero_cost;
2468 alg_out->cost.latency = zero_cost;
2469 alg_out->op[0] = alg_zero;
2470 return;
2474 /* We'll be needing a couple extra algorithm structures now. */
2476 alg_in = alloca (sizeof (struct algorithm));
2477 best_alg = alloca (sizeof (struct algorithm));
2478 best_cost = *cost_limit;
2480 /* Compute the hash index. */
2481 hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;
2483 /* See if we already know what to do for T. */
2484 if (alg_hash[hash_index].t == t
2485 && alg_hash[hash_index].mode == mode
2486 && alg_hash[hash_index].alg != alg_unknown)
2488 cache_alg = alg_hash[hash_index].alg;
2490 if (cache_alg == alg_impossible)
2492 /* The cache tells us that it's impossible to synthesize
2493 multiplication by T within alg_hash[hash_index].cost. */
2494 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2495 /* COST_LIMIT is at least as restrictive as the one
2496 recorded in the hash table, in which case we have no
2497 hope of synthesizing a multiplication. Just
2498 return. */
2499 return;
2501 /* If we get here, COST_LIMIT is less restrictive than the
2502 one recorded in the hash table, so we may be able to
2503 synthesize a multiplication. Proceed as if we didn't
2504 have the cache entry. */
2506 else
2508 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2509 /* The cached algorithm shows that this multiplication
2510 requires more cost than COST_LIMIT. Just return. This
2511 way, we don't clobber this cache entry with
2512 alg_impossible but retain useful information. */
2513 return;
2515 cache_hit = true;
2517 switch (cache_alg)
2519 case alg_shift:
2520 goto do_alg_shift;
2522 case alg_add_t_m2:
2523 case alg_sub_t_m2:
2524 goto do_alg_addsub_t_m2;
2526 case alg_add_factor:
2527 case alg_sub_factor:
2528 goto do_alg_addsub_factor;
2530 case alg_add_t2_m:
2531 goto do_alg_add_t2_m;
2533 case alg_sub_t2_m:
2534 goto do_alg_sub_t2_m;
2536 default:
2537 gcc_unreachable ();
2542 /* If we have a group of zero bits at the low-order part of T, try
2543 multiplying by the remaining bits and then doing a shift. */
2545 if ((t & 1) == 0)
2547 do_alg_shift:
2548 m = floor_log2 (t & -t); /* m = number of low zero bits */
2549 if (m < maxm)
2551 q = t >> m;
2552 /* The function expand_shift will choose between a shift and
2553 a sequence of additions, so the observed cost is given as
2554 MIN (m * add_cost[mode], shift_cost[mode][m]). */
2555 op_cost = m * add_cost[mode];
2556 if (shift_cost[mode][m] < op_cost)
2557 op_cost = shift_cost[mode][m];
2558 new_limit.cost = best_cost.cost - op_cost;
2559 new_limit.latency = best_cost.latency - op_cost;
2560 synth_mult (alg_in, q, &new_limit, mode);
2562 alg_in->cost.cost += op_cost;
2563 alg_in->cost.latency += op_cost;
2564 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2566 struct algorithm *x;
2567 best_cost = alg_in->cost;
2568 x = alg_in, alg_in = best_alg, best_alg = x;
2569 best_alg->log[best_alg->ops] = m;
2570 best_alg->op[best_alg->ops] = alg_shift;
2573 if (cache_hit)
2574 goto done;
2577 /* If we have an odd number, add or subtract one. */
2578 if ((t & 1) != 0)
2580 unsigned HOST_WIDE_INT w;
2582 do_alg_addsub_t_m2:
2583 for (w = 1; (w & t) != 0; w <<= 1)
2585 /* If T was -1, then W will be zero after the loop. This is another
2586 case where T ends with ...111. Handling this by multiplying by (T + 1)
2587 and subtracting 1 produces slightly better code and makes algorithm
2588 selection much faster than treating it like the ...0111 case
2589 below. */
2590 if (w == 0
2591 || (w > 2
2592 /* Reject the case where t is 3.
2593 Thus we prefer addition in that case. */
2594 && t != 3))
2596 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2598 op_cost = add_cost[mode];
2599 new_limit.cost = best_cost.cost - op_cost;
2600 new_limit.latency = best_cost.latency - op_cost;
2601 synth_mult (alg_in, t + 1, &new_limit, mode);
2603 alg_in->cost.cost += op_cost;
2604 alg_in->cost.latency += op_cost;
2605 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2607 struct algorithm *x;
2608 best_cost = alg_in->cost;
2609 x = alg_in, alg_in = best_alg, best_alg = x;
2610 best_alg->log[best_alg->ops] = 0;
2611 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2614 else
2616 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2618 op_cost = add_cost[mode];
2619 new_limit.cost = best_cost.cost - op_cost;
2620 new_limit.latency = best_cost.latency - op_cost;
2621 synth_mult (alg_in, t - 1, &new_limit, mode);
2623 alg_in->cost.cost += op_cost;
2624 alg_in->cost.latency += op_cost;
2625 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2627 struct algorithm *x;
2628 best_cost = alg_in->cost;
2629 x = alg_in, alg_in = best_alg, best_alg = x;
2630 best_alg->log[best_alg->ops] = 0;
2631 best_alg->op[best_alg->ops] = alg_add_t_m2;
2634 if (cache_hit)
2635 goto done;
2638 /* Look for factors of t of the form
2639 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2640 If we find such a factor, we can multiply by t using an algorithm that
2641 multiplies by q, shift the result by m and add/subtract it to itself.
2643 We search for large factors first and loop down, even if large factors
2644 are less probable than small; if we find a large factor we will find a
2645 good sequence quickly, and therefore be able to prune (by decreasing
2646 COST_LIMIT) the search. */
2648 do_alg_addsub_factor:
2649 for (m = floor_log2 (t - 1); m >= 2; m--)
2651 unsigned HOST_WIDE_INT d;
2653 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2654 if (t % d == 0 && t > d && m < maxm
2655 && (!cache_hit || cache_alg == alg_add_factor))
2657 /* If the target has a cheap shift-and-add instruction use
2658 that in preference to a shift insn followed by an add insn.
2659 Assume that the shift-and-add is "atomic" with a latency
2660 equal to its cost, otherwise assume that on superscalar
2661 hardware the shift may be executed concurrently with the
2662 earlier steps in the algorithm. */
2663 op_cost = add_cost[mode] + shift_cost[mode][m];
2664 if (shiftadd_cost[mode][m] < op_cost)
2666 op_cost = shiftadd_cost[mode][m];
2667 op_latency = op_cost;
2669 else
2670 op_latency = add_cost[mode];
2672 new_limit.cost = best_cost.cost - op_cost;
2673 new_limit.latency = best_cost.latency - op_latency;
2674 synth_mult (alg_in, t / d, &new_limit, mode);
2676 alg_in->cost.cost += op_cost;
2677 alg_in->cost.latency += op_latency;
2678 if (alg_in->cost.latency < op_cost)
2679 alg_in->cost.latency = op_cost;
2680 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2682 struct algorithm *x;
2683 best_cost = alg_in->cost;
2684 x = alg_in, alg_in = best_alg, best_alg = x;
2685 best_alg->log[best_alg->ops] = m;
2686 best_alg->op[best_alg->ops] = alg_add_factor;
2688 /* Other factors will have been taken care of in the recursion. */
2689 break;
2692 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2693 if (t % d == 0 && t > d && m < maxm
2694 && (!cache_hit || cache_alg == alg_sub_factor))
2696 /* If the target has a cheap shift-and-subtract insn use
2697 that in preference to a shift insn followed by a sub insn.
2698 Assume that the shift-and-sub is "atomic" with a latency
2699 equal to its cost, otherwise assume that on superscalar
2700 hardware the shift may be executed concurrently with the
2701 earlier steps in the algorithm. */
2702 op_cost = add_cost[mode] + shift_cost[mode][m];
2703 if (shiftsub_cost[mode][m] < op_cost)
2705 op_cost = shiftsub_cost[mode][m];
2706 op_latency = op_cost;
2708 else
2709 op_latency = add_cost[mode];
2711 new_limit.cost = best_cost.cost - op_cost;
2712 new_limit.latency = best_cost.latency - op_latency;
2713 synth_mult (alg_in, t / d, &new_limit, mode);
2715 alg_in->cost.cost += op_cost;
2716 alg_in->cost.latency += op_latency;
2717 if (alg_in->cost.latency < op_cost)
2718 alg_in->cost.latency = op_cost;
2719 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2721 struct algorithm *x;
2722 best_cost = alg_in->cost;
2723 x = alg_in, alg_in = best_alg, best_alg = x;
2724 best_alg->log[best_alg->ops] = m;
2725 best_alg->op[best_alg->ops] = alg_sub_factor;
2727 break;
2730 if (cache_hit)
2731 goto done;
2733 /* Try shift-and-add (load effective address) instructions,
2734 i.e. do a*3, a*5, a*9. */
2735 if ((t & 1) != 0)
2737 do_alg_add_t2_m:
2738 q = t - 1;
2739 q = q & -q;
2740 m = exact_log2 (q);
2741 if (m >= 0 && m < maxm)
2743 op_cost = shiftadd_cost[mode][m];
2744 new_limit.cost = best_cost.cost - op_cost;
2745 new_limit.latency = best_cost.latency - op_cost;
2746 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2748 alg_in->cost.cost += op_cost;
2749 alg_in->cost.latency += op_cost;
2750 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2752 struct algorithm *x;
2753 best_cost = alg_in->cost;
2754 x = alg_in, alg_in = best_alg, best_alg = x;
2755 best_alg->log[best_alg->ops] = m;
2756 best_alg->op[best_alg->ops] = alg_add_t2_m;
2759 if (cache_hit)
2760 goto done;
2762 do_alg_sub_t2_m:
2763 q = t + 1;
2764 q = q & -q;
2765 m = exact_log2 (q);
2766 if (m >= 0 && m < maxm)
2768 op_cost = shiftsub_cost[mode][m];
2769 new_limit.cost = best_cost.cost - op_cost;
2770 new_limit.latency = best_cost.latency - op_cost;
2771 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2773 alg_in->cost.cost += op_cost;
2774 alg_in->cost.latency += op_cost;
2775 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2777 struct algorithm *x;
2778 best_cost = alg_in->cost;
2779 x = alg_in, alg_in = best_alg, best_alg = x;
2780 best_alg->log[best_alg->ops] = m;
2781 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2784 if (cache_hit)
2785 goto done;
2788 done:
2789 /* If best_cost has not decreased, we have not found any algorithm. */
2790 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2792 /* We failed to find an algorithm. Record alg_impossible for
2793 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2794 we are asked to find an algorithm for T within the same or
2795 lower COST_LIMIT, we can immediately return to the
2796 caller. */
2797 alg_hash[hash_index].t = t;
2798 alg_hash[hash_index].mode = mode;
2799 alg_hash[hash_index].alg = alg_impossible;
2800 alg_hash[hash_index].cost = *cost_limit;
2801 return;
2804 /* Cache the result. */
2805 if (!cache_hit)
2807 alg_hash[hash_index].t = t;
2808 alg_hash[hash_index].mode = mode;
2809 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2810 alg_hash[hash_index].cost.cost = best_cost.cost;
2811 alg_hash[hash_index].cost.latency = best_cost.latency;
2814 /* If the sequence is too long for `struct algorithm'
2815 to record, make this search fail.
2816 if (best_alg->ops == MAX_BITS_PER_WORD)
2817 return;
2819 /* Copy the algorithm from temporary space to the space at alg_out.
2820 We avoid using structure assignment because the majority of
2821 best_alg is normally undefined, and this is a critical function. */
2822 alg_out->ops = best_alg->ops + 1;
2823 alg_out->cost = best_cost;
2824 memcpy (alg_out->op, best_alg->op,
2825 alg_out->ops * sizeof *alg_out->op);
2826 memcpy (alg_out->log, best_alg->log,
2827 alg_out->ops * sizeof *alg_out->log);
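/* An illustrative sketch (not part of expmed.c) that interprets a recorded
   operation sequence with the semantics documented before `struct
   algorithm', using plain C arithmetic.  The sketch_op enum and the helper
   are hypothetical stand-ins for alg_code and the real machinery; costs
   and latencies are ignored entirely.  */

enum sketch_op { SK_ZERO, SK_M, SK_SHIFT, SK_ADD_T_M2, SK_SUB_T_M2,
                 SK_ADD_FACTOR, SK_SUB_FACTOR, SK_ADD_T2_M, SK_SUB_T2_M };

static unsigned long long
sketch_run_alg (const enum sketch_op *op, const unsigned char *log, int ops,
                unsigned long long x)
{
  unsigned long long total = (op[0] == SK_M ? x : 0);
  int i;

  for (i = 1; i < ops; i++)
    {
      unsigned long long c = 1ULL << log[i];    /* coeff = 2**log[i]  */
      switch (op[i])
        {
        case SK_SHIFT:      total = total * c;          break;
        case SK_ADD_T_M2:   total = total + x * c;      break;
        case SK_SUB_T_M2:   total = total - x * c;      break;
        case SK_ADD_FACTOR: total = total * c + total;  break;
        case SK_SUB_FACTOR: total = total * c - total;  break;
        case SK_ADD_T2_M:   total = total * c + x;      break;
        case SK_SUB_T2_M:   total = total * c - x;      break;
        default:                                        break;
        }
    }
  return total;
}

/* For example, t = 10 decomposes as { SK_M, SK_ADD_FACTOR (log 2),
   SK_SHIFT (log 1) }: total = x, then 4*x + x = 5*x, then 2*total = 10*x.  */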
2830 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2831 Try three variations:
2833 - a shift/add sequence based on VAL itself
2834 - a shift/add sequence based on -VAL, followed by a negation
2835 - a shift/add sequence based on VAL - 1, followed by an addition.
2837 Return true if the cheapest of these costs less than MULT_COST,
2838 describing the algorithm in *ALG and final fixup in *VARIANT. */
2840 static bool
2841 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2842 struct algorithm *alg, enum mult_variant *variant,
2843 int mult_cost)
2845 struct algorithm alg2;
2846 struct mult_cost limit;
2847 int op_cost;
2849 /* Fail quickly for impossible bounds. */
2850 if (mult_cost < 0)
2851 return false;
2853 /* Ensure that mult_cost provides a reasonable upper bound.
2854 Any constant multiplication can be performed with less
2855 than 2 * bits additions. */
2856 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
2857 if (mult_cost > op_cost)
2858 mult_cost = op_cost;
2860 *variant = basic_variant;
2861 limit.cost = mult_cost;
2862 limit.latency = mult_cost;
2863 synth_mult (alg, val, &limit, mode);
2865 /* This works only if the inverted value actually fits in an
2866 `unsigned int' */
2867 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2869 op_cost = neg_cost[mode];
2870 if (MULT_COST_LESS (&alg->cost, mult_cost))
2872 limit.cost = alg->cost.cost - op_cost;
2873 limit.latency = alg->cost.latency - op_cost;
2875 else
2877 limit.cost = mult_cost - op_cost;
2878 limit.latency = mult_cost - op_cost;
2881 synth_mult (&alg2, -val, &limit, mode);
2882 alg2.cost.cost += op_cost;
2883 alg2.cost.latency += op_cost;
2884 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2885 *alg = alg2, *variant = negate_variant;
2888 /* This proves very useful for division-by-constant. */
2889 op_cost = add_cost[mode];
2890 if (MULT_COST_LESS (&alg->cost, mult_cost))
2892 limit.cost = alg->cost.cost - op_cost;
2893 limit.latency = alg->cost.latency - op_cost;
2895 else
2897 limit.cost = mult_cost - op_cost;
2898 limit.latency = mult_cost - op_cost;
2901 synth_mult (&alg2, val - 1, &limit, mode);
2902 alg2.cost.cost += op_cost;
2903 alg2.cost.latency += op_cost;
2904 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2905 *alg = alg2, *variant = add_variant;
2907 return MULT_COST_LESS (&alg->cost, mult_cost);
2910 /* A subroutine of expand_mult, used for constant multiplications.
2911 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2912 convenient. Use the shift/add sequence described by ALG and apply
2913 the final fixup specified by VARIANT. */
2915 static rtx
2916 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2917 rtx target, const struct algorithm *alg,
2918 enum mult_variant variant)
2920 HOST_WIDE_INT val_so_far;
2921 rtx insn, accum, tem;
2922 int opno;
2923 enum machine_mode nmode;
2925 /* Avoid referencing memory over and over.
2926 For speed, but also for correctness when mem is volatile. */
2927 if (MEM_P (op0))
2928 op0 = force_reg (mode, op0);
2930 /* ACCUM starts out either as OP0 or as a zero, depending on
2931 the first operation. */
2933 if (alg->op[0] == alg_zero)
2935 accum = copy_to_mode_reg (mode, const0_rtx);
2936 val_so_far = 0;
2938 else if (alg->op[0] == alg_m)
2940 accum = copy_to_mode_reg (mode, op0);
2941 val_so_far = 1;
2943 else
2944 gcc_unreachable ();
2946 for (opno = 1; opno < alg->ops; opno++)
2948 int log = alg->log[opno];
2949 rtx shift_subtarget = optimize ? 0 : accum;
2950 rtx add_target
2951 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2952 && !optimize)
2953 ? target : 0;
2954 rtx accum_target = optimize ? 0 : accum;
2956 switch (alg->op[opno])
2958 case alg_shift:
2959 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2960 build_int_cst (NULL_TREE, log),
2961 NULL_RTX, 0);
2962 val_so_far <<= log;
2963 break;
2965 case alg_add_t_m2:
2966 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2967 build_int_cst (NULL_TREE, log),
2968 NULL_RTX, 0);
2969 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2970 add_target ? add_target : accum_target);
2971 val_so_far += (HOST_WIDE_INT) 1 << log;
2972 break;
2974 case alg_sub_t_m2:
2975 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2976 build_int_cst (NULL_TREE, log),
2977 NULL_RTX, 0);
2978 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2979 add_target ? add_target : accum_target);
2980 val_so_far -= (HOST_WIDE_INT) 1 << log;
2981 break;
2983 case alg_add_t2_m:
2984 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2985 build_int_cst (NULL_TREE, log),
2986 shift_subtarget,
2988 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2989 add_target ? add_target : accum_target);
2990 val_so_far = (val_so_far << log) + 1;
2991 break;
2993 case alg_sub_t2_m:
2994 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2995 build_int_cst (NULL_TREE, log),
2996 shift_subtarget, 0);
2997 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2998 add_target ? add_target : accum_target);
2999 val_so_far = (val_so_far << log) - 1;
3000 break;
3002 case alg_add_factor:
3003 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3004 build_int_cst (NULL_TREE, log),
3005 NULL_RTX, 0);
3006 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3007 add_target ? add_target : accum_target);
3008 val_so_far += val_so_far << log;
3009 break;
3011 case alg_sub_factor:
3012 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3013 build_int_cst (NULL_TREE, log),
3014 NULL_RTX, 0);
3015 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3016 (add_target
3017 ? add_target : (optimize ? 0 : tem)));
3018 val_so_far = (val_so_far << log) - val_so_far;
3019 break;
3021 default:
3022 gcc_unreachable ();
3025 /* Write a REG_EQUAL note on the last insn so that we can cse
3026 multiplication sequences. Note that if ACCUM is a SUBREG,
3027 we've set the inner register and must properly indicate
3028 that. */
3030 tem = op0, nmode = mode;
3031 if (GET_CODE (accum) == SUBREG)
3033 nmode = GET_MODE (SUBREG_REG (accum));
3034 tem = gen_lowpart (nmode, op0);
3037 insn = get_last_insn ();
3038 set_unique_reg_note (insn, REG_EQUAL,
3039 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
3042 if (variant == negate_variant)
3044 val_so_far = -val_so_far;
3045 accum = expand_unop (mode, neg_optab, accum, target, 0);
3047 else if (variant == add_variant)
3049 val_so_far = val_so_far + 1;
3050 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3053 /* Compare only the bits of val and val_so_far that are significant
3054 in the result mode, to avoid sign-/zero-extension confusion. */
3055 val &= GET_MODE_MASK (mode);
3056 val_so_far &= GET_MODE_MASK (mode);
3057 gcc_assert (val == val_so_far);
3059 return accum;
3062 /* Perform a multiplication and return an rtx for the result.
3063 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3064 TARGET is a suggestion for where to store the result (an rtx).
3066 We check specially for a constant integer as OP1.
3067 If you want this check for OP0 as well, then before calling
3068 you should swap the two operands if OP0 would be constant. */
3071 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3072 int unsignedp)
3074 enum mult_variant variant;
3075 struct algorithm algorithm;
3076 int max_cost;
3078 /* Handling const0_rtx here allows us to use zero as a rogue value for
3079 coeff below. */
3080 if (op1 == const0_rtx)
3081 return const0_rtx;
3082 if (op1 == const1_rtx)
3083 return op0;
3084 if (op1 == constm1_rtx)
3085 return expand_unop (mode,
3086 GET_MODE_CLASS (mode) == MODE_INT
3087 && !unsignedp && flag_trapv
3088 ? negv_optab : neg_optab,
3089 op0, target, 0);
3091 /* These are the operations that are potentially turned into a sequence
3092 of shifts and additions. */
3093 if (SCALAR_INT_MODE_P (mode)
3094 && (unsignedp || !flag_trapv))
3096 HOST_WIDE_INT coeff = 0;
3097 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3099 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3100 less than or equal in size to `unsigned int' this doesn't matter.
3101 If the mode is larger than `unsigned int', then synth_mult works
3102 only if the constant value exactly fits in an `unsigned int' without
3103 any truncation. This means that multiplying by negative values does
3104 not work; results are off by 2^32 on a 32 bit machine. */
3106 if (GET_CODE (op1) == CONST_INT)
3108 /* Attempt to handle multiplication of DImode values by negative
3109 coefficients, by performing the multiplication by a positive
3110 multiplier and then inverting the result. */
3111 if (INTVAL (op1) < 0
3112 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3114 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3115 result is interpreted as an unsigned coefficient.
3116 Exclude cost of op0 from max_cost to match the cost
3117 calculation of the synth_mult. */
3118 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
3119 - neg_cost[mode];
3120 if (max_cost > 0
3121 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3122 &variant, max_cost))
3124 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3125 NULL_RTX, &algorithm,
3126 variant);
3127 return expand_unop (mode, neg_optab, temp, target, 0);
3130 else coeff = INTVAL (op1);
3132 else if (GET_CODE (op1) == CONST_DOUBLE)
3134 /* If we are multiplying in DImode, it may still be a win
3135 to try to work with shifts and adds. */
3136 if (CONST_DOUBLE_HIGH (op1) == 0)
3137 coeff = CONST_DOUBLE_LOW (op1);
3138 else if (CONST_DOUBLE_LOW (op1) == 0
3139 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3141 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3142 + HOST_BITS_PER_WIDE_INT;
3143 return expand_shift (LSHIFT_EXPR, mode, op0,
3144 build_int_cst (NULL_TREE, shift),
3145 target, unsignedp);
3149 /* We used to test optimize here, on the grounds that it's better to
3150 produce a smaller program when -O is not used. But this causes
3151 such a terrible slowdown sometimes that it seems better to always
3152 use synth_mult. */
3153 if (coeff != 0)
3155 /* Special case powers of two. */
3156 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3157 return expand_shift (LSHIFT_EXPR, mode, op0,
3158 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3159 target, unsignedp);
3161 /* Exclude cost of op0 from max_cost to match the cost
3162 calculation of the synth_mult. */
3163 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
3164 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3165 max_cost))
3166 return expand_mult_const (mode, op0, coeff, target,
3167 &algorithm, variant);
3171 if (GET_CODE (op0) == CONST_DOUBLE)
3173 rtx temp = op0;
3174 op0 = op1;
3175 op1 = temp;
3178 /* Expand x*2.0 as x+x. */
3179 if (GET_CODE (op1) == CONST_DOUBLE
3180 && SCALAR_FLOAT_MODE_P (mode))
3182 REAL_VALUE_TYPE d;
3183 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3185 if (REAL_VALUES_EQUAL (d, dconst2))
3187 op0 = force_reg (GET_MODE (op0), op0);
3188 return expand_binop (mode, add_optab, op0, op0,
3189 target, unsignedp, OPTAB_LIB_WIDEN);
3193 /* This used to use umul_optab if unsigned, but for non-widening multiply
3194 there is no difference between signed and unsigned. */
3195 op0 = expand_binop (mode,
3196 ! unsignedp
3197 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3198 ? smulv_optab : smul_optab,
3199 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3200 gcc_assert (op0);
3201 return op0;
3204 /* Return the smallest n such that 2**n >= X. */
3207 ceil_log2 (unsigned HOST_WIDE_INT x)
3209 return floor_log2 (x - 1) + 1;
3212 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3213 replace division by D, and put the least significant N bits of the result
3214 in *MULTIPLIER_PTR and return the most significant bit.
3216 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3217 needed precision is in PRECISION (should be <= N).
3219 PRECISION should be as small as possible so this function can choose
3220 the multiplier more freely.
3222 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3223 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3225 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3226 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3228 static
3229 unsigned HOST_WIDE_INT
3230 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3231 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3233 HOST_WIDE_INT mhigh_hi, mlow_hi;
3234 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3235 int lgup, post_shift;
3236 int pow, pow2;
3237 unsigned HOST_WIDE_INT nl, dummy1;
3238 HOST_WIDE_INT nh, dummy2;
3240 /* lgup = ceil(log2(divisor)); */
3241 lgup = ceil_log2 (d);
3243 gcc_assert (lgup <= n);
3245 pow = n + lgup;
3246 pow2 = n + lgup - precision;
3248 /* We could handle this with some effort, but this case is much
3249 better handled directly with a scc insn, so rely on caller using
3250 that. */
3251 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3253 /* mlow = 2^(N + lgup)/d */
3254 if (pow >= HOST_BITS_PER_WIDE_INT)
3256 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3257 nl = 0;
3259 else
3261 nh = 0;
3262 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3264 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3265 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3267 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3268 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3269 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3270 else
3271 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3272 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3273 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3275 gcc_assert (!mhigh_hi || nh - d < d);
3276 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3277 /* Assert that mlow < mhigh. */
3278 gcc_assert (mlow_hi < mhigh_hi
3279 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3281 /* If precision == N, then mlow, mhigh exceed 2^N
3282 (but they do not exceed 2^(N+1)). */
3284 /* Reduce to lowest terms. */
3285 for (post_shift = lgup; post_shift > 0; post_shift--)
3287 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3288 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3289 if (ml_lo >= mh_lo)
3290 break;
3292 mlow_hi = 0;
3293 mlow_lo = ml_lo;
3294 mhigh_hi = 0;
3295 mhigh_lo = mh_lo;
3298 *post_shift_ptr = post_shift;
3299 *lgup_ptr = lgup;
3300 if (n < HOST_BITS_PER_WIDE_INT)
3302 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3303 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3304 return mhigh_lo >= mask;
3306 else
3308 *multiplier_ptr = GEN_INT (mhigh_lo);
3309 return mhigh_hi;
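/* An illustrative sketch (not part of expmed.c) of the multiply-and-shift
   identity this function makes possible, using the textbook 32-bit example
   d = 5.  The constants are not produced by choose_multiplier here; they
   are the standard values m = ceil(2^34 / 5) = 0xCCCCCCCD (an N + 1 = 33
   bit multiplier) with a post-shift of 2, so x / 5 == (x * m) >> (32 + 2)
   for every 32-bit unsigned x.  */

static unsigned int
sketch_udiv5 (unsigned int x)
{
  unsigned long long m = 0xCCCCCCCDULL;       /* ceil(2^34 / 5).  */
  return (unsigned int) ((x * m) >> 34);      /* High part, then post-shift.  */
}

/* sketch_udiv5 (42) == 8 and sketch_udiv5 (0xffffffffu) == 858993459.  */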
3313 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3314 congruent to 1 (mod 2**N). */
3316 static unsigned HOST_WIDE_INT
3317 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3319 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3321 /* The algorithm notes that the choice y = x satisfies
3322 x*y == 1 mod 2^3, since x is assumed odd.
3323 Each iteration doubles the number of bits of significance in y. */
3325 unsigned HOST_WIDE_INT mask;
3326 unsigned HOST_WIDE_INT y = x;
3327 int nbit = 3;
3329 mask = (n == HOST_BITS_PER_WIDE_INT
3330 ? ~(unsigned HOST_WIDE_INT) 0
3331 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3333 while (nbit < n)
3335 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3336 nbit *= 2;
3338 return y;
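/* An illustrative sketch (not part of expmed.c) of the same Newton
   iteration specialised to n = 64: for odd X the seed Y = X is already
   correct modulo 2^3, and every step doubles the number of correct low
   bits.  The 64-bit width and helper name are assumptions of the example;
   unsigned wraparound supplies the reduction modulo 2^64.  */

static unsigned long long
sketch_invert_mod2_64 (unsigned long long x)
{
  unsigned long long y = x;            /* Correct to 3 bits since X is odd.  */
  int nbit;

  for (nbit = 3; nbit < 64; nbit *= 2)
    y = y * (2 - x * y);               /* Now correct to 2*nbit bits.  */
  return y;                            /* x * y == 1 (mod 2^64).  */
}

/* For example, sketch_invert_mod2_64 (3) == 0xAAAAAAAAAAAAAAAB, and
   3 * 0xAAAAAAAAAAAAAAAB wraps around to 1.  */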
3341 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3342 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3343 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3344 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3345 become signed.
3347 The result is put in TARGET if that is convenient.
3349 MODE is the mode of operation. */
3352 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3353 rtx op1, rtx target, int unsignedp)
3355 rtx tem;
3356 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3358 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3359 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3360 NULL_RTX, 0);
3361 tem = expand_and (mode, tem, op1, NULL_RTX);
3362 adj_operand
3363 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3364 adj_operand);
3366 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3367 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3368 NULL_RTX, 0);
3369 tem = expand_and (mode, tem, op0, NULL_RTX);
3370 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3371 target);
3373 return target;
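/* An illustrative sketch (not part of expmed.c) of the adjustment above for
   32-bit operands when converting an unsigned high part into a signed one:
   subtract OP1 when OP0 is negative and OP0 when OP1 is negative, with all
   arithmetic taken modulo 2^32.  The converse direction adds instead of
   subtracting.  The helper and the fixed width are assumptions of the
   example.  */

static unsigned int
sketch_smul_high32 (int op0, int op1)
{
  unsigned int u0 = (unsigned int) op0, u1 = (unsigned int) op1;
  unsigned int uhigh = (unsigned int) (((unsigned long long) u0 * u1) >> 32);
  /* The code above forms (op0 >> 31) & op1 with an arithmetic shift;
     written here as a conditional.  */
  unsigned int adj0 = op0 < 0 ? u1 : 0;
  unsigned int adj1 = op1 < 0 ? u0 : 0;
  return uhigh - adj0 - adj1;          /* Low 32 bits of the signed high part.  */
}

/* For example, sketch_smul_high32 (-1, 2) == 0xffffffff, the high word
   of the 64-bit product -2.  */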
3376 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3378 static rtx
3379 extract_high_half (enum machine_mode mode, rtx op)
3381 enum machine_mode wider_mode;
3383 if (mode == word_mode)
3384 return gen_highpart (mode, op);
3386 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3388 wider_mode = GET_MODE_WIDER_MODE (mode);
3389 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3390 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3391 return convert_modes (mode, wider_mode, op, 0);
3394 /* Like expand_mult_highpart, but only consider using a multiplication
3395 optab. OP1 is an rtx for the constant operand. */
3397 static rtx
3398 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3399 rtx target, int unsignedp, int max_cost)
3401 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3402 enum machine_mode wider_mode;
3403 optab moptab;
3404 rtx tem;
3405 int size;
3407 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3409 wider_mode = GET_MODE_WIDER_MODE (mode);
3410 size = GET_MODE_BITSIZE (mode);
3412 /* Firstly, try using a multiplication insn that only generates the needed
3413 high part of the product, and in the sign flavor of unsignedp. */
3414 if (mul_highpart_cost[mode] < max_cost)
3416 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3417 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3418 unsignedp, OPTAB_DIRECT);
3419 if (tem)
3420 return tem;
3423 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3424 Need to adjust the result after the multiplication. */
3425 if (size - 1 < BITS_PER_WORD
3426 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
3427 + 4 * add_cost[mode] < max_cost))
3429 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3430 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3431 unsignedp, OPTAB_DIRECT);
3432 if (tem)
3433 /* We used the wrong signedness. Adjust the result. */
3434 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3435 tem, unsignedp);
3438 /* Try widening multiplication. */
3439 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3440 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3441 && mul_widen_cost[wider_mode] < max_cost)
3443 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3444 unsignedp, OPTAB_WIDEN);
3445 if (tem)
3446 return extract_high_half (mode, tem);
3449 /* Try widening the mode and perform a non-widening multiplication. */
3450 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3451 && size - 1 < BITS_PER_WORD
3452 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
3454 rtx insns, wop0, wop1;
3456 /* We need to widen the operands, for example to ensure the
3457 constant multiplier is correctly sign or zero extended.
3458 Use a sequence to clean up any instructions emitted by
3459 the conversions if things don't work out. */
3460 start_sequence ();
3461 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3462 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3463 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3464 unsignedp, OPTAB_WIDEN);
3465 insns = get_insns ();
3466 end_sequence ();
3468 if (tem)
3470 emit_insn (insns);
3471 return extract_high_half (mode, tem);
3475 /* Try widening multiplication of opposite signedness, and adjust. */
3476 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3477 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
3478 && size - 1 < BITS_PER_WORD
3479 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
3480 + 4 * add_cost[mode] < max_cost))
3482 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3483 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3484 if (tem != 0)
3486 tem = extract_high_half (mode, tem);
3487 /* We used the wrong signedness. Adjust the result. */
3488 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3489 target, unsignedp);
3493 return 0;
3496 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3497 putting the high half of the result in TARGET if that is convenient,
3498 and return where the result is. If the operation cannot be performed,
3499 0 is returned.
3501 MODE is the mode of operation and result.
3503 UNSIGNEDP nonzero means unsigned multiply.
3505 MAX_COST is the total allowed cost for the expanded RTL. */
3507 static rtx
3508 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3509 rtx target, int unsignedp, int max_cost)
3511 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3512 unsigned HOST_WIDE_INT cnst1;
3513 int extra_cost;
3514 bool sign_adjust = false;
3515 enum mult_variant variant;
3516 struct algorithm alg;
3517 rtx tem;
3519 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3520 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3521 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3523 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3525 /* We can't optimize modes wider than BITS_PER_WORD.
3526 ??? We might be able to perform double-word arithmetic if
3527 mode == word_mode, however all the cost calculations in
3528 synth_mult etc. assume single-word operations. */
3529 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3530 return expand_mult_highpart_optab (mode, op0, op1, target,
3531 unsignedp, max_cost);
3533 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3535 /* Check whether we try to multiply by a negative constant. */
3536 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3538 sign_adjust = true;
3539 extra_cost += add_cost[mode];
3542 /* See whether shift/add multiplication is cheap enough. */
3543 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3544 max_cost - extra_cost))
3546 /* See whether the specialized multiplication optabs are
3547 cheaper than the shift/add version. */
3548 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3549 alg.cost.cost + extra_cost);
3550 if (tem)
3551 return tem;
3553 tem = convert_to_mode (wider_mode, op0, unsignedp);
3554 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3555 tem = extract_high_half (mode, tem);
3557 /* Adjust result for signedness. */
3558 if (sign_adjust)
3559 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3561 return tem;
3563 return expand_mult_highpart_optab (mode, op0, op1, target,
3564 unsignedp, max_cost);
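/* A minimal standalone sketch (not GCC code) of what the routine above
   computes for 32-bit operands: the high half of the double-width
   product, the primitive used by the divide-by-constant expansions
   below.  Assumes "unsigned long long" is at least 64 bits wide.  */

static unsigned int
umul_highpart32 (unsigned int x, unsigned int y)
{
  /* Widen, multiply, and keep only the upper 32 bits of the product.  */
  return (unsigned int) (((unsigned long long) x * y) >> 32);
}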
3568 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3570 static rtx
3571 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3573 unsigned HOST_WIDE_INT masklow, maskhigh;
3574 rtx result, temp, shift, label;
3575 int logd;
3577 logd = floor_log2 (d);
3578 result = gen_reg_rtx (mode);
3580 /* Avoid conditional branches when they're expensive. */
3581 if (BRANCH_COST >= 2
3582 && !optimize_size)
3584 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3585 mode, 0, -1);
3586 if (signmask)
3588 signmask = force_reg (mode, signmask);
3589 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3590 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3592 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3593 which instruction sequence to use. If logical right shifts
3594 are expensive then use 2 XORs, 2 SUBs and an AND; otherwise
3595 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3597 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3598 if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
3599 || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
3601 temp = expand_binop (mode, xor_optab, op0, signmask,
3602 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3603 temp = expand_binop (mode, sub_optab, temp, signmask,
3604 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3605 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3606 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3607 temp = expand_binop (mode, xor_optab, temp, signmask,
3608 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3609 temp = expand_binop (mode, sub_optab, temp, signmask,
3610 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3612 else
3614 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3615 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3616 signmask = force_reg (mode, signmask);
3618 temp = expand_binop (mode, add_optab, op0, signmask,
3619 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3620 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3621 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3622 temp = expand_binop (mode, sub_optab, temp, signmask,
3623 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3625 return temp;
3629 /* Mask contains the mode's signbit and the significant bits of the
3630 modulus. By including the signbit in the operation, many targets
3631 can avoid an explicit compare operation in the following comparison
3632 against zero. */
3634 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3635 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3637 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3638 maskhigh = -1;
3640 else
3641 maskhigh = (HOST_WIDE_INT) -1
3642 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3644 temp = expand_binop (mode, and_optab, op0,
3645 immed_double_const (masklow, maskhigh, mode),
3646 result, 1, OPTAB_LIB_WIDEN);
3647 if (temp != result)
3648 emit_move_insn (result, temp);
3650 label = gen_label_rtx ();
3651 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3653 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3654 0, OPTAB_LIB_WIDEN);
3655 masklow = (HOST_WIDE_INT) -1 << logd;
3656 maskhigh = -1;
3657 temp = expand_binop (mode, ior_optab, temp,
3658 immed_double_const (masklow, maskhigh, mode),
3659 result, 1, OPTAB_LIB_WIDEN);
3660 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3661 0, OPTAB_LIB_WIDEN);
3662 if (temp != result)
3663 emit_move_insn (result, temp);
3664 emit_label (label);
3665 return result;
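/* A minimal standalone sketch (not GCC code) of the two branch-free
   sequences chosen above for signed "x % (1 << logd)", assuming a
   32-bit two's-complement int, arithmetic right shift of negative
   values, and 0 < logd < 31.  */

static int
smod_pow2_xor (int x, int logd)
{
  int mask = (1 << logd) - 1;
  int sign = x >> 31;		/* all ones if x < 0, else 0 */

  /* 2 XORs, 2 SUBs and an AND: fold the sign out, mask, fold it back.  */
  return ((((x ^ sign) - sign) & mask) ^ sign) - sign;
}

static int
smod_pow2_shift (int x, int logd)
{
  int mask = (1 << logd) - 1;
  /* A LSHIFTRT, an ADD, an AND and a SUB: bias is mask when x < 0.  */
  unsigned int bias = (unsigned int) (x >> 31) >> (32 - logd);

  return (int) (((unsigned int) x + bias) & mask) - (int) bias;
}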
3668 /* Expand signed division of OP0 by a power of two D in mode MODE.
3669 This routine is only called for positive values of D. */
3671 static rtx
3672 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3674 rtx temp, label;
3675 tree shift;
3676 int logd;
3678 logd = floor_log2 (d);
3679 shift = build_int_cst (NULL_TREE, logd);
3681 if (d == 2 && BRANCH_COST >= 1)
3683 temp = gen_reg_rtx (mode);
3684 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3685 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3686 0, OPTAB_LIB_WIDEN);
3687 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3690 #ifdef HAVE_conditional_move
3691 if (BRANCH_COST >= 2)
3693 rtx temp2;
3695 /* ??? emit_conditional_move forces a stack adjustment via
3696 compare_from_rtx, so if the sequence is discarded, it will
3697 be lost. Do it now instead. */
3698 do_pending_stack_adjust ();
3700 start_sequence ();
3701 temp2 = copy_to_mode_reg (mode, op0);
3702 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3703 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3704 temp = force_reg (mode, temp);
3706 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3707 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3708 mode, temp, temp2, mode, 0);
3709 if (temp2)
3711 rtx seq = get_insns ();
3712 end_sequence ();
3713 emit_insn (seq);
3714 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3716 end_sequence ();
3718 #endif
3720 if (BRANCH_COST >= 2)
3722 int ushift = GET_MODE_BITSIZE (mode) - logd;
3724 temp = gen_reg_rtx (mode);
3725 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3726 if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
3727 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3728 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3729 else
3730 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3731 build_int_cst (NULL_TREE, ushift),
3732 NULL_RTX, 1);
3733 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3734 0, OPTAB_LIB_WIDEN);
3735 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3738 label = gen_label_rtx ();
3739 temp = copy_to_mode_reg (mode, op0);
3740 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3741 expand_inc (temp, GEN_INT (d - 1));
3742 emit_label (label);
3743 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
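/* A minimal standalone sketch (not GCC code) of the branch-free signed
   division by a power of two built above: bias negative dividends by
   d - 1 so the arithmetic shift truncates toward zero.  Assumes a
   32-bit two's-complement int, arithmetic right shift of negative
   values, and 0 < logd < 31.  */

static int
sdiv_pow2 (int x, int logd)
{
  /* bias is (1 << logd) - 1 when x is negative, 0 otherwise.  */
  int bias = (int) ((unsigned int) (x >> 31) >> (32 - logd));

  return (x + bias) >> logd;
}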
3746 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3747 if that is convenient, and returning where the result is.
3748 You may request either the quotient or the remainder as the result;
3749 specify REM_FLAG nonzero to get the remainder.
3751 CODE is the expression code for which kind of division this is;
3752 it controls how rounding is done. MODE is the machine mode to use.
3753 UNSIGNEDP nonzero means do unsigned division. */
3755 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3756 and then correct it by or'ing in missing high bits
3757 if result of ANDI is nonzero.
3758 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3759 This could optimize to a bfexts instruction.
3760 But C doesn't use these operations, so their optimizations are
3761 left for later. */
3762 /* ??? For modulo, we don't actually need the highpart of the first product,
3763 the low part will do nicely. And for small divisors, the second multiply
3764 can also be a low-part only multiply or even be completely left out.
3765 E.g. to calculate the remainder of a division by 3 with a 32 bit
3766 multiply, multiply with 0x55555556 and extract the upper two bits;
3767 the result is exact for inputs up to 0x1fffffff.
3768 The input range can be reduced by using cross-sum rules.
3769 For odd divisors >= 3, the following table gives right shift counts
3770 so that if a number is shifted by an integer multiple of the given
3771 amount, the remainder stays the same:
3772 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3773 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3774 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3775 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3776 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3778 Cross-sum rules for even numbers can be derived by leaving as many bits
3779 to the right alone as the divisor has zeros to the right.
3780 E.g. if x is an unsigned 32 bit number:
3781 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28  */
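/* A minimal standalone sketch (not GCC code) of the remainder-by-3 trick
   mentioned above: a single 32-bit multiply by 0x55555556 (roughly
   2**32 / 3) whose top two bits encode the fractional part of x / 3,
   i.e. the remainder.  Exact for x up to 0x1fffffff, as stated.  */

static unsigned int
urem3 (unsigned int x)
{
  return (x * 0x55555556u) >> 30;
}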
3785 rtx expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3786 rtx op0, rtx op1, rtx target, int unsignedp)
3788 enum machine_mode compute_mode;
3789 rtx tquotient;
3790 rtx quotient = 0, remainder = 0;
3791 rtx last;
3792 int size;
3793 rtx insn, set;
3794 optab optab1, optab2;
3795 int op1_is_constant, op1_is_pow2 = 0;
3796 int max_cost, extra_cost;
3797 static HOST_WIDE_INT last_div_const = 0;
3798 static HOST_WIDE_INT ext_op1;
3800 op1_is_constant = GET_CODE (op1) == CONST_INT;
3801 if (op1_is_constant)
3803 ext_op1 = INTVAL (op1);
3804 if (unsignedp)
3805 ext_op1 &= GET_MODE_MASK (mode);
3806 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3807 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3811 /* This is the structure of expand_divmod:
3813 First comes code to fix up the operands so we can perform the operations
3814 correctly and efficiently.
3816 Second comes a switch statement with code specific for each rounding mode.
3817 For some special operands this code emits all RTL for the desired
3818 operation, for other cases, it generates only a quotient and stores it in
3819 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3820 to indicate that it has not done anything.
3822 Last comes code that finishes the operation. If QUOTIENT is set and
3823 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3824 QUOTIENT is not set, it is computed using trunc rounding.
3826 We try to generate special code for division and remainder when OP1 is a
3827 constant. If |OP1| = 2**n we can use shifts and some other fast
3828 operations. For other values of OP1, we compute a carefully selected
3829 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3830 by m.
3832 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3833 half of the product. Different strategies for generating the product are
3834 implemented in expand_mult_highpart.
3836 If what we actually want is the remainder, we generate that by another
3837 by-constant multiplication and a subtraction. */
3839 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3840 code below will malfunction if we are, so check here and handle
3841 the special case if so. */
3842 if (op1 == const1_rtx)
3843 return rem_flag ? const0_rtx : op0;
3845 /* When dividing by -1, we could get an overflow.
3846 negv_optab can handle overflows. */
3847 if (! unsignedp && op1 == constm1_rtx)
3849 if (rem_flag)
3850 return const0_rtx;
3851 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3852 ? negv_optab : neg_optab, op0, target, 0);
3855 if (target
3856 /* Don't use the function value register as a target
3857 since we have to read it as well as write it,
3858 and function-inlining gets confused by this. */
3859 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3860 /* Don't clobber an operand while doing a multi-step calculation. */
3861 || ((rem_flag || op1_is_constant)
3862 && (reg_mentioned_p (target, op0)
3863 || (MEM_P (op0) && MEM_P (target))))
3864 || reg_mentioned_p (target, op1)
3865 || (MEM_P (op1) && MEM_P (target))))
3866 target = 0;
3868 /* Get the mode in which to perform this computation. Normally it will
3869 be MODE, but sometimes we can't do the desired operation in MODE.
3870 If so, pick a wider mode in which we can do the operation. Convert
3871 to that mode at the start to avoid repeated conversions.
3873 First see what operations we need. These depend on the expression
3874 we are evaluating. (We assume that divxx3 insns exist under the
3875 same conditions that modxx3 insns do, and that these insns don't normally
3876 fail. If these assumptions are not correct, we may generate less
3877 efficient code in some cases.)
3879 Then see if we find a mode in which we can open-code that operation
3880 (either a division, modulus, or shift). Finally, check for the smallest
3881 mode for which we can do the operation with a library call. */
3883 /* We might want to refine this now that we have division-by-constant
3884 optimization. Since expand_mult_highpart tries so many variants, it is
3885 not straightforward to generalize this. Maybe we should make an array
3886 of possible modes in init_expmed? Save this for GCC 2.7. */
3888 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3889 ? (unsignedp ? lshr_optab : ashr_optab)
3890 : (unsignedp ? udiv_optab : sdiv_optab));
3891 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3892 ? optab1
3893 : (unsignedp ? udivmod_optab : sdivmod_optab));
3895 for (compute_mode = mode; compute_mode != VOIDmode;
3896 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3897 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3898 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3899 break;
3901 if (compute_mode == VOIDmode)
3902 for (compute_mode = mode; compute_mode != VOIDmode;
3903 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3904 if (optab1->handlers[compute_mode].libfunc
3905 || optab2->handlers[compute_mode].libfunc)
3906 break;
3908 /* If we still couldn't find a mode, use MODE, but expand_binop will
3909 probably die. */
3910 if (compute_mode == VOIDmode)
3911 compute_mode = mode;
3913 if (target && GET_MODE (target) == compute_mode)
3914 tquotient = target;
3915 else
3916 tquotient = gen_reg_rtx (compute_mode);
3918 size = GET_MODE_BITSIZE (compute_mode);
3919 #if 0
3920 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3921 (mode), and thereby get better code when OP1 is a constant. Do that
3922 later. It will require going over all usages of SIZE below. */
3923 size = GET_MODE_BITSIZE (mode);
3924 #endif
3926 /* Only deduct something for a REM if the last divide done was
3927 for a different constant. Then set the constant of the last
3928 divide. */
3929 max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
3930 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3931 && INTVAL (op1) == last_div_const))
3932 max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];
3934 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3936 /* Now convert to the best mode to use. */
3937 if (compute_mode != mode)
3939 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3940 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3942 /* convert_modes may have placed op1 into a register, so we
3943 must recompute the following. */
3944 op1_is_constant = GET_CODE (op1) == CONST_INT;
3945 op1_is_pow2 = (op1_is_constant
3946 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3947 || (! unsignedp
3948 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3951 /* If one of the operands is a volatile MEM, copy it into a register. */
3953 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3954 op0 = force_reg (compute_mode, op0);
3955 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3956 op1 = force_reg (compute_mode, op1);
3958 /* If we need the remainder or if OP1 is constant, we need to
3959 put OP0 in a register in case it has any queued subexpressions. */
3960 if (rem_flag || op1_is_constant)
3961 op0 = force_reg (compute_mode, op0);
3963 last = get_last_insn ();
3965 /* Promote floor rounding to trunc rounding for unsigned operations. */
3966 if (unsignedp)
3968 if (code == FLOOR_DIV_EXPR)
3969 code = TRUNC_DIV_EXPR;
3970 if (code == FLOOR_MOD_EXPR)
3971 code = TRUNC_MOD_EXPR;
3972 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3973 code = TRUNC_DIV_EXPR;
3976 if (op1 != const0_rtx)
3977 switch (code)
3979 case TRUNC_MOD_EXPR:
3980 case TRUNC_DIV_EXPR:
3981 if (op1_is_constant)
3983 if (unsignedp)
3985 unsigned HOST_WIDE_INT mh;
3986 int pre_shift, post_shift;
3987 int dummy;
3988 rtx ml;
3989 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3990 & GET_MODE_MASK (compute_mode));
3992 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3994 pre_shift = floor_log2 (d);
3995 if (rem_flag)
3997 remainder
3998 = expand_binop (compute_mode, and_optab, op0,
3999 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4000 remainder, 1,
4001 OPTAB_LIB_WIDEN);
4002 if (remainder)
4003 return gen_lowpart (mode, remainder);
4005 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4006 build_int_cst (NULL_TREE,
4007 pre_shift),
4008 tquotient, 1);
4010 else if (size <= HOST_BITS_PER_WIDE_INT)
4012 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4014 /* Most significant bit of divisor is set; emit an scc
4015 insn. */
4016 quotient = emit_store_flag (tquotient, GEU, op0, op1,
4017 compute_mode, 1, 1);
4018 if (quotient == 0)
4019 goto fail1;
4021 else
4023 /* Find a suitable multiplier and right shift count
4024 instead of multiplying with D. */
4026 mh = choose_multiplier (d, size, size,
4027 &ml, &post_shift, &dummy);
4029 /* If the suggested multiplier is more than SIZE bits,
4030 we can do better for even divisors, using an
4031 initial right shift. */
4032 if (mh != 0 && (d & 1) == 0)
4034 pre_shift = floor_log2 (d & -d);
4035 mh = choose_multiplier (d >> pre_shift, size,
4036 size - pre_shift,
4037 &ml, &post_shift, &dummy);
4038 gcc_assert (!mh);
4040 else
4041 pre_shift = 0;
4043 if (mh != 0)
4045 rtx t1, t2, t3, t4;
4047 if (post_shift - 1 >= BITS_PER_WORD)
4048 goto fail1;
4050 extra_cost
4051 = (shift_cost[compute_mode][post_shift - 1]
4052 + shift_cost[compute_mode][1]
4053 + 2 * add_cost[compute_mode]);
4054 t1 = expand_mult_highpart (compute_mode, op0, ml,
4055 NULL_RTX, 1,
4056 max_cost - extra_cost);
4057 if (t1 == 0)
4058 goto fail1;
4059 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4060 op0, t1),
4061 NULL_RTX);
4062 t3 = expand_shift
4063 (RSHIFT_EXPR, compute_mode, t2,
4064 build_int_cst (NULL_TREE, 1),
4065 NULL_RTX,1);
4066 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4067 t1, t3),
4068 NULL_RTX);
4069 quotient = expand_shift
4070 (RSHIFT_EXPR, compute_mode, t4,
4071 build_int_cst (NULL_TREE, post_shift - 1),
4072 tquotient, 1);
4074 else
4076 rtx t1, t2;
4078 if (pre_shift >= BITS_PER_WORD
4079 || post_shift >= BITS_PER_WORD)
4080 goto fail1;
4082 t1 = expand_shift
4083 (RSHIFT_EXPR, compute_mode, op0,
4084 build_int_cst (NULL_TREE, pre_shift),
4085 NULL_RTX, 1);
4086 extra_cost
4087 = (shift_cost[compute_mode][pre_shift]
4088 + shift_cost[compute_mode][post_shift]);
4089 t2 = expand_mult_highpart (compute_mode, t1, ml,
4090 NULL_RTX, 1,
4091 max_cost - extra_cost);
4092 if (t2 == 0)
4093 goto fail1;
4094 quotient = expand_shift
4095 (RSHIFT_EXPR, compute_mode, t2,
4096 build_int_cst (NULL_TREE, post_shift),
4097 tquotient, 1);
4101 else /* Too wide mode to use tricky code */
4102 break;
4104 insn = get_last_insn ();
4105 if (insn != last
4106 && (set = single_set (insn)) != 0
4107 && SET_DEST (set) == quotient)
4108 set_unique_reg_note (insn,
4109 REG_EQUAL,
4110 gen_rtx_UDIV (compute_mode, op0, op1));
4112 else /* TRUNC_DIV, signed */
4114 unsigned HOST_WIDE_INT ml;
4115 int lgup, post_shift;
4116 rtx mlr;
4117 HOST_WIDE_INT d = INTVAL (op1);
4118 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
4120 /* n rem d = n rem -d */
4121 if (rem_flag && d < 0)
4123 d = abs_d;
4124 op1 = gen_int_mode (abs_d, compute_mode);
4127 if (d == 1)
4128 quotient = op0;
4129 else if (d == -1)
4130 quotient = expand_unop (compute_mode, neg_optab, op0,
4131 tquotient, 0);
4132 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4134 /* This case is not handled correctly below. */
4135 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4136 compute_mode, 1, 1);
4137 if (quotient == 0)
4138 goto fail1;
4140 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4141 && (rem_flag ? smod_pow2_cheap[compute_mode]
4142 : sdiv_pow2_cheap[compute_mode])
4143 /* We assume the cheap metric is true if the
4144 optab has an expander for this mode. */
4145 && (((rem_flag ? smod_optab : sdiv_optab)
4146 ->handlers[compute_mode].insn_code
4147 != CODE_FOR_nothing)
4148 || (sdivmod_optab->handlers[compute_mode]
4149 .insn_code != CODE_FOR_nothing)))
4151 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4153 if (rem_flag)
4155 remainder = expand_smod_pow2 (compute_mode, op0, d);
4156 if (remainder)
4157 return gen_lowpart (mode, remainder);
4160 if (sdiv_pow2_cheap[compute_mode]
4161 && ((sdiv_optab->handlers[compute_mode].insn_code
4162 != CODE_FOR_nothing)
4163 || (sdivmod_optab->handlers[compute_mode].insn_code
4164 != CODE_FOR_nothing)))
4165 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4166 compute_mode, op0,
4167 gen_int_mode (abs_d,
4168 compute_mode),
4169 NULL_RTX, 0);
4170 else
4171 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4173 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4174 negate the quotient. */
4175 if (d < 0)
4177 insn = get_last_insn ();
4178 if (insn != last
4179 && (set = single_set (insn)) != 0
4180 && SET_DEST (set) == quotient
4181 && abs_d < ((unsigned HOST_WIDE_INT) 1
4182 << (HOST_BITS_PER_WIDE_INT - 1)))
4183 set_unique_reg_note (insn,
4184 REG_EQUAL,
4185 gen_rtx_DIV (compute_mode,
4186 op0,
4187 GEN_INT
4188 (trunc_int_for_mode
4189 (abs_d,
4190 compute_mode))));
4192 quotient = expand_unop (compute_mode, neg_optab,
4193 quotient, quotient, 0);
4196 else if (size <= HOST_BITS_PER_WIDE_INT)
4198 choose_multiplier (abs_d, size, size - 1,
4199 &mlr, &post_shift, &lgup);
4200 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4201 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4203 rtx t1, t2, t3;
4205 if (post_shift >= BITS_PER_WORD
4206 || size - 1 >= BITS_PER_WORD)
4207 goto fail1;
4209 extra_cost = (shift_cost[compute_mode][post_shift]
4210 + shift_cost[compute_mode][size - 1]
4211 + add_cost[compute_mode]);
4212 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4213 NULL_RTX, 0,
4214 max_cost - extra_cost);
4215 if (t1 == 0)
4216 goto fail1;
4217 t2 = expand_shift
4218 (RSHIFT_EXPR, compute_mode, t1,
4219 build_int_cst (NULL_TREE, post_shift),
4220 NULL_RTX, 0);
4221 t3 = expand_shift
4222 (RSHIFT_EXPR, compute_mode, op0,
4223 build_int_cst (NULL_TREE, size - 1),
4224 NULL_RTX, 0);
4225 if (d < 0)
4226 quotient
4227 = force_operand (gen_rtx_MINUS (compute_mode,
4228 t3, t2),
4229 tquotient);
4230 else
4231 quotient
4232 = force_operand (gen_rtx_MINUS (compute_mode,
4233 t2, t3),
4234 tquotient);
4236 else
4238 rtx t1, t2, t3, t4;
4240 if (post_shift >= BITS_PER_WORD
4241 || size - 1 >= BITS_PER_WORD)
4242 goto fail1;
4244 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4245 mlr = gen_int_mode (ml, compute_mode);
4246 extra_cost = (shift_cost[compute_mode][post_shift]
4247 + shift_cost[compute_mode][size - 1]
4248 + 2 * add_cost[compute_mode]);
4249 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4250 NULL_RTX, 0,
4251 max_cost - extra_cost);
4252 if (t1 == 0)
4253 goto fail1;
4254 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4255 t1, op0),
4256 NULL_RTX);
4257 t3 = expand_shift
4258 (RSHIFT_EXPR, compute_mode, t2,
4259 build_int_cst (NULL_TREE, post_shift),
4260 NULL_RTX, 0);
4261 t4 = expand_shift
4262 (RSHIFT_EXPR, compute_mode, op0,
4263 build_int_cst (NULL_TREE, size - 1),
4264 NULL_RTX, 0);
4265 if (d < 0)
4266 quotient
4267 = force_operand (gen_rtx_MINUS (compute_mode,
4268 t4, t3),
4269 tquotient);
4270 else
4271 quotient
4272 = force_operand (gen_rtx_MINUS (compute_mode,
4273 t3, t4),
4274 tquotient);
4277 else /* Too wide mode to use tricky code */
4278 break;
4280 insn = get_last_insn ();
4281 if (insn != last
4282 && (set = single_set (insn)) != 0
4283 && SET_DEST (set) == quotient)
4284 set_unique_reg_note (insn,
4285 REG_EQUAL,
4286 gen_rtx_DIV (compute_mode, op0, op1));
4288 break;
4290 fail1:
4291 delete_insns_since (last);
4292 break;
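/* A minimal standalone sketch (not GCC code) of the unsigned
   divide-by-constant sequence above for the case where the chosen
   multiplier needs SIZE + 1 bits (the "mh != 0" path), shown for
   x / 7 on 32-bit operands with the magic value 0x24924925 and
   post_shift 3 that choose_multiplier would select:

     unsigned int
     udiv7 (unsigned int x)
     {
       unsigned int t1 = ((unsigned long long) x * 0x24924925u) >> 32;
       unsigned int t2 = (x - t1) >> 1;
       return (t1 + t2) >> 2;
     }

   The final shift is post_shift - 1; t2 adds back half of the error
   lost when the 33-bit multiplier was truncated to 32 bits, exactly
   as the t1..t4 sequence above does.  */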
4294 case FLOOR_DIV_EXPR:
4295 case FLOOR_MOD_EXPR:
4296 /* We will come here only for signed operations. */
4297 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4299 unsigned HOST_WIDE_INT mh;
4300 int pre_shift, lgup, post_shift;
4301 HOST_WIDE_INT d = INTVAL (op1);
4302 rtx ml;
4304 if (d > 0)
4306 /* We could just as easily deal with negative constants here,
4307 but it does not seem worth the trouble for GCC 2.6. */
4308 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4310 pre_shift = floor_log2 (d);
4311 if (rem_flag)
4313 remainder = expand_binop (compute_mode, and_optab, op0,
4314 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4315 remainder, 0, OPTAB_LIB_WIDEN);
4316 if (remainder)
4317 return gen_lowpart (mode, remainder);
4319 quotient = expand_shift
4320 (RSHIFT_EXPR, compute_mode, op0,
4321 build_int_cst (NULL_TREE, pre_shift),
4322 tquotient, 0);
4324 else
4326 rtx t1, t2, t3, t4;
4328 mh = choose_multiplier (d, size, size - 1,
4329 &ml, &post_shift, &lgup);
4330 gcc_assert (!mh);
4332 if (post_shift < BITS_PER_WORD
4333 && size - 1 < BITS_PER_WORD)
4335 t1 = expand_shift
4336 (RSHIFT_EXPR, compute_mode, op0,
4337 build_int_cst (NULL_TREE, size - 1),
4338 NULL_RTX, 0);
4339 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4340 NULL_RTX, 0, OPTAB_WIDEN);
4341 extra_cost = (shift_cost[compute_mode][post_shift]
4342 + shift_cost[compute_mode][size - 1]
4343 + 2 * add_cost[compute_mode]);
4344 t3 = expand_mult_highpart (compute_mode, t2, ml,
4345 NULL_RTX, 1,
4346 max_cost - extra_cost);
4347 if (t3 != 0)
4349 t4 = expand_shift
4350 (RSHIFT_EXPR, compute_mode, t3,
4351 build_int_cst (NULL_TREE, post_shift),
4352 NULL_RTX, 1);
4353 quotient = expand_binop (compute_mode, xor_optab,
4354 t4, t1, tquotient, 0,
4355 OPTAB_WIDEN);
4360 else
4362 rtx nsign, t1, t2, t3, t4;
4363 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4364 op0, constm1_rtx), NULL_RTX);
4365 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4366 0, OPTAB_WIDEN);
4367 nsign = expand_shift
4368 (RSHIFT_EXPR, compute_mode, t2,
4369 build_int_cst (NULL_TREE, size - 1),
4370 NULL_RTX, 0);
4371 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4372 NULL_RTX);
4373 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4374 NULL_RTX, 0);
4375 if (t4)
4377 rtx t5;
4378 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4379 NULL_RTX, 0);
4380 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4381 t4, t5),
4382 tquotient);
4387 if (quotient != 0)
4388 break;
4389 delete_insns_since (last);
4391 /* Try using an instruction that produces both the quotient and
4392 remainder, using truncation. We can easily compensate the quotient
4393 or remainder to get floor rounding, once we have the remainder.
4394 Notice that we compute also the final remainder value here,
4395 and return the result right away. */
4396 if (target == 0 || GET_MODE (target) != compute_mode)
4397 target = gen_reg_rtx (compute_mode);
4399 if (rem_flag)
4401 remainder
4402 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4403 quotient = gen_reg_rtx (compute_mode);
4405 else
4407 quotient
4408 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4409 remainder = gen_reg_rtx (compute_mode);
4412 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4413 quotient, remainder, 0))
4415 /* This could be computed with a branch-less sequence.
4416 Save that for later. */
4417 rtx tem;
4418 rtx label = gen_label_rtx ();
4419 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4420 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4421 NULL_RTX, 0, OPTAB_WIDEN);
4422 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4423 expand_dec (quotient, const1_rtx);
4424 expand_inc (remainder, op1);
4425 emit_label (label);
4426 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4429 /* No luck with division elimination or divmod. Have to do it
4430 by conditionally adjusting op0 *and* the result. */
4432 rtx label1, label2, label3, label4, label5;
4433 rtx adjusted_op0;
4434 rtx tem;
4436 quotient = gen_reg_rtx (compute_mode);
4437 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4438 label1 = gen_label_rtx ();
4439 label2 = gen_label_rtx ();
4440 label3 = gen_label_rtx ();
4441 label4 = gen_label_rtx ();
4442 label5 = gen_label_rtx ();
4443 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4444 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4445 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4446 quotient, 0, OPTAB_LIB_WIDEN);
4447 if (tem != quotient)
4448 emit_move_insn (quotient, tem);
4449 emit_jump_insn (gen_jump (label5));
4450 emit_barrier ();
4451 emit_label (label1);
4452 expand_inc (adjusted_op0, const1_rtx);
4453 emit_jump_insn (gen_jump (label4));
4454 emit_barrier ();
4455 emit_label (label2);
4456 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4457 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4458 quotient, 0, OPTAB_LIB_WIDEN);
4459 if (tem != quotient)
4460 emit_move_insn (quotient, tem);
4461 emit_jump_insn (gen_jump (label5));
4462 emit_barrier ();
4463 emit_label (label3);
4464 expand_dec (adjusted_op0, const1_rtx);
4465 emit_label (label4);
4466 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4467 quotient, 0, OPTAB_LIB_WIDEN);
4468 if (tem != quotient)
4469 emit_move_insn (quotient, tem);
4470 expand_dec (quotient, const1_rtx);
4471 emit_label (label5);
4473 break;
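/* A minimal standalone sketch (not GCC code) of the floor-rounding
   fixup performed above once a truncating quotient and remainder are
   in hand: step the quotient down when the remainder is nonzero and
   the operands have opposite signs.

     int
     floor_div (int x, int y)
     {
       int q = x / y, r = x % y;
       if (r != 0 && (x ^ y) < 0)
         q--;
       return q;
     }

   The matching remainder adjustment is r += y, as in the divmod path
   above.  */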
4475 case CEIL_DIV_EXPR:
4476 case CEIL_MOD_EXPR:
4477 if (unsignedp)
4479 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4481 rtx t1, t2, t3;
4482 unsigned HOST_WIDE_INT d = INTVAL (op1);
4483 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4484 build_int_cst (NULL_TREE, floor_log2 (d)),
4485 tquotient, 1);
4486 t2 = expand_binop (compute_mode, and_optab, op0,
4487 GEN_INT (d - 1),
4488 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4489 t3 = gen_reg_rtx (compute_mode);
4490 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4491 compute_mode, 1, 1);
4492 if (t3 == 0)
4494 rtx lab;
4495 lab = gen_label_rtx ();
4496 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4497 expand_inc (t1, const1_rtx);
4498 emit_label (lab);
4499 quotient = t1;
4501 else
4502 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4503 t1, t3),
4504 tquotient);
4505 break;
4508 /* Try using an instruction that produces both the quotient and
4509 remainder, using truncation. We can easily compensate the
4510 quotient or remainder to get ceiling rounding, once we have the
4511 remainder. Notice that we compute also the final remainder
4512 value here, and return the result right away. */
4513 if (target == 0 || GET_MODE (target) != compute_mode)
4514 target = gen_reg_rtx (compute_mode);
4516 if (rem_flag)
4518 remainder = (REG_P (target)
4519 ? target : gen_reg_rtx (compute_mode));
4520 quotient = gen_reg_rtx (compute_mode);
4522 else
4524 quotient = (REG_P (target)
4525 ? target : gen_reg_rtx (compute_mode));
4526 remainder = gen_reg_rtx (compute_mode);
4529 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4530 remainder, 1))
4532 /* This could be computed with a branch-less sequence.
4533 Save that for later. */
4534 rtx label = gen_label_rtx ();
4535 do_cmp_and_jump (remainder, const0_rtx, EQ,
4536 compute_mode, label);
4537 expand_inc (quotient, const1_rtx);
4538 expand_dec (remainder, op1);
4539 emit_label (label);
4540 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4543 /* No luck with division elimination or divmod. Have to do it
4544 by conditionally adjusting op0 *and* the result. */
4546 rtx label1, label2;
4547 rtx adjusted_op0, tem;
4549 quotient = gen_reg_rtx (compute_mode);
4550 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4551 label1 = gen_label_rtx ();
4552 label2 = gen_label_rtx ();
4553 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4554 compute_mode, label1);
4555 emit_move_insn (quotient, const0_rtx);
4556 emit_jump_insn (gen_jump (label2));
4557 emit_barrier ();
4558 emit_label (label1);
4559 expand_dec (adjusted_op0, const1_rtx);
4560 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4561 quotient, 1, OPTAB_LIB_WIDEN);
4562 if (tem != quotient)
4563 emit_move_insn (quotient, tem);
4564 expand_inc (quotient, const1_rtx);
4565 emit_label (label2);
4568 else /* signed */
4570 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4571 && INTVAL (op1) >= 0)
4573 /* This is extremely similar to the code for the unsigned case
4574 above. For 2.7 we should merge these variants, but for
4575 2.6.1 I don't want to touch the code for unsigned since that
4576 gets used in C. The signed case will only be used by other
4577 languages (Ada). */
4579 rtx t1, t2, t3;
4580 unsigned HOST_WIDE_INT d = INTVAL (op1);
4581 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4582 build_int_cst (NULL_TREE, floor_log2 (d)),
4583 tquotient, 0);
4584 t2 = expand_binop (compute_mode, and_optab, op0,
4585 GEN_INT (d - 1),
4586 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4587 t3 = gen_reg_rtx (compute_mode);
4588 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4589 compute_mode, 1, 1);
4590 if (t3 == 0)
4592 rtx lab;
4593 lab = gen_label_rtx ();
4594 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4595 expand_inc (t1, const1_rtx);
4596 emit_label (lab);
4597 quotient = t1;
4599 else
4600 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4601 t1, t3),
4602 tquotient);
4603 break;
4606 /* Try using an instruction that produces both the quotient and
4607 remainder, using truncation. We can easily compensate the
4608 quotient or remainder to get ceiling rounding, once we have the
4609 remainder. Notice that we compute also the final remainder
4610 value here, and return the result right away. */
4611 if (target == 0 || GET_MODE (target) != compute_mode)
4612 target = gen_reg_rtx (compute_mode);
4613 if (rem_flag)
4615 remainder = (REG_P (target)
4616 ? target : gen_reg_rtx (compute_mode));
4617 quotient = gen_reg_rtx (compute_mode);
4619 else
4621 quotient = (REG_P (target)
4622 ? target : gen_reg_rtx (compute_mode));
4623 remainder = gen_reg_rtx (compute_mode);
4626 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4627 remainder, 0))
4629 /* This could be computed with a branch-less sequence.
4630 Save that for later. */
4631 rtx tem;
4632 rtx label = gen_label_rtx ();
4633 do_cmp_and_jump (remainder, const0_rtx, EQ,
4634 compute_mode, label);
4635 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4636 NULL_RTX, 0, OPTAB_WIDEN);
4637 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4638 expand_inc (quotient, const1_rtx);
4639 expand_dec (remainder, op1);
4640 emit_label (label);
4641 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4644 /* No luck with division elimination or divmod. Have to do it
4645 by conditionally adjusting op0 *and* the result. */
4647 rtx label1, label2, label3, label4, label5;
4648 rtx adjusted_op0;
4649 rtx tem;
4651 quotient = gen_reg_rtx (compute_mode);
4652 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4653 label1 = gen_label_rtx ();
4654 label2 = gen_label_rtx ();
4655 label3 = gen_label_rtx ();
4656 label4 = gen_label_rtx ();
4657 label5 = gen_label_rtx ();
4658 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4659 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4660 compute_mode, label1);
4661 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4662 quotient, 0, OPTAB_LIB_WIDEN);
4663 if (tem != quotient)
4664 emit_move_insn (quotient, tem);
4665 emit_jump_insn (gen_jump (label5));
4666 emit_barrier ();
4667 emit_label (label1);
4668 expand_dec (adjusted_op0, const1_rtx);
4669 emit_jump_insn (gen_jump (label4));
4670 emit_barrier ();
4671 emit_label (label2);
4672 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4673 compute_mode, label3);
4674 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4675 quotient, 0, OPTAB_LIB_WIDEN);
4676 if (tem != quotient)
4677 emit_move_insn (quotient, tem);
4678 emit_jump_insn (gen_jump (label5));
4679 emit_barrier ();
4680 emit_label (label3);
4681 expand_inc (adjusted_op0, const1_rtx);
4682 emit_label (label4);
4683 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4684 quotient, 0, OPTAB_LIB_WIDEN);
4685 if (tem != quotient)
4686 emit_move_insn (quotient, tem);
4687 expand_inc (quotient, const1_rtx);
4688 emit_label (label5);
4691 break;
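/* A minimal standalone sketch (not GCC code) of the ceiling-rounding
   fixups above.  Unsigned: a nonzero remainder always means the
   truncated quotient must be bumped.  Signed: bump only when the
   operands have the same sign.

     unsigned int
     ceil_udiv (unsigned int x, unsigned int y)
     {
       return x / y + (x % y != 0);
     }

     int
     ceil_sdiv (int x, int y)
     {
       int q = x / y, r = x % y;
       if (r != 0 && (x ^ y) >= 0)
         q++;
       return q;
     }
*/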
4693 case EXACT_DIV_EXPR:
4694 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4696 HOST_WIDE_INT d = INTVAL (op1);
4697 unsigned HOST_WIDE_INT ml;
4698 int pre_shift;
4699 rtx t1;
4701 pre_shift = floor_log2 (d & -d);
4702 ml = invert_mod2n (d >> pre_shift, size);
4703 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4704 build_int_cst (NULL_TREE, pre_shift),
4705 NULL_RTX, unsignedp);
4706 quotient = expand_mult (compute_mode, t1,
4707 gen_int_mode (ml, compute_mode),
4708 NULL_RTX, 1);
4710 insn = get_last_insn ();
4711 set_unique_reg_note (insn,
4712 REG_EQUAL,
4713 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4714 compute_mode,
4715 op0, op1));
4717 break;
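/* A minimal standalone sketch (not GCC code) of the EXACT_DIV_EXPR
   strategy above: when the division is known to be exact, multiplying
   by the multiplicative inverse of the (odd) divisor modulo 2**32
   yields the quotient with no shift or correction.  0xAAAAAAAB is the
   inverse of 3, i.e. what invert_mod2n would return for d == 3 on
   32-bit operands.

     unsigned int
     exact_div3 (unsigned int x)
     {
       return x * 0xAAAAAAABu;
     }

   This is valid only when x is an exact multiple of 3.  */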
4719 case ROUND_DIV_EXPR:
4720 case ROUND_MOD_EXPR:
4721 if (unsignedp)
4723 rtx tem;
4724 rtx label;
4725 label = gen_label_rtx ();
4726 quotient = gen_reg_rtx (compute_mode);
4727 remainder = gen_reg_rtx (compute_mode);
4728 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4730 rtx tem;
4731 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4732 quotient, 1, OPTAB_LIB_WIDEN);
4733 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4734 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4735 remainder, 1, OPTAB_LIB_WIDEN);
4737 tem = plus_constant (op1, -1);
4738 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4739 build_int_cst (NULL_TREE, 1),
4740 NULL_RTX, 1);
4741 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4742 expand_inc (quotient, const1_rtx);
4743 expand_dec (remainder, op1);
4744 emit_label (label);
4746 else
4748 rtx abs_rem, abs_op1, tem, mask;
4749 rtx label;
4750 label = gen_label_rtx ();
4751 quotient = gen_reg_rtx (compute_mode);
4752 remainder = gen_reg_rtx (compute_mode);
4753 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4755 rtx tem;
4756 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4757 quotient, 0, OPTAB_LIB_WIDEN);
4758 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4759 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4760 remainder, 0, OPTAB_LIB_WIDEN);
4762 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4763 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4764 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4765 build_int_cst (NULL_TREE, 1),
4766 NULL_RTX, 1);
4767 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4768 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4769 NULL_RTX, 0, OPTAB_WIDEN);
4770 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4771 build_int_cst (NULL_TREE, size - 1),
4772 NULL_RTX, 0);
4773 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4774 NULL_RTX, 0, OPTAB_WIDEN);
4775 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4776 NULL_RTX, 0, OPTAB_WIDEN);
4777 expand_inc (quotient, tem);
4778 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4779 NULL_RTX, 0, OPTAB_WIDEN);
4780 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4781 NULL_RTX, 0, OPTAB_WIDEN);
4782 expand_dec (remainder, tem);
4783 emit_label (label);
4785 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4787 default:
4788 gcc_unreachable ();
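/* A minimal standalone sketch (not GCC code) of the round-to-nearest
   fixup implemented above: after a truncating division, move the
   quotient one step away from zero whenever the remainder's magnitude
   is at least half the divisor's.  INT_MIN overflow corner cases are
   ignored here.

     int
     round_div (int x, int y)
     {
       int q = x / y, r = x % y;
       if (2 * (r < 0 ? -r : r) >= (y < 0 ? -y : y))
         q += (x ^ y) < 0 ? -1 : 1;
       return q;
     }
*/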
4791 if (quotient == 0)
4793 if (target && GET_MODE (target) != compute_mode)
4794 target = 0;
4796 if (rem_flag)
4798 /* Try to produce the remainder without producing the quotient.
4799 If we seem to have a divmod pattern that does not require widening,
4800 don't try widening here. We should really have a WIDEN argument
4801 to expand_twoval_binop, since what we'd really like to do here is
4802 1) try a mod insn in compute_mode
4803 2) try a divmod insn in compute_mode
4804 3) try a div insn in compute_mode and multiply-subtract to get
4805 remainder
4806 4) try the same things with widening allowed. */
4807 remainder
4808 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4809 op0, op1, target,
4810 unsignedp,
4811 ((optab2->handlers[compute_mode].insn_code
4812 != CODE_FOR_nothing)
4813 ? OPTAB_DIRECT : OPTAB_WIDEN));
4814 if (remainder == 0)
4816 /* No luck there. Can we do remainder and divide at once
4817 without a library call? */
4818 remainder = gen_reg_rtx (compute_mode);
4819 if (! expand_twoval_binop ((unsignedp
4820 ? udivmod_optab
4821 : sdivmod_optab),
4822 op0, op1,
4823 NULL_RTX, remainder, unsignedp))
4824 remainder = 0;
4827 if (remainder)
4828 return gen_lowpart (mode, remainder);
4831 /* Produce the quotient. Try a quotient insn, but not a library call.
4832 If we have a divmod in this mode, use it in preference to widening
4833 the div (for this test we assume it will not fail). Note that optab2
4834 is set to the one of the two optabs that the call below will use. */
4835 quotient
4836 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4837 op0, op1, rem_flag ? NULL_RTX : target,
4838 unsignedp,
4839 ((optab2->handlers[compute_mode].insn_code
4840 != CODE_FOR_nothing)
4841 ? OPTAB_DIRECT : OPTAB_WIDEN));
4843 if (quotient == 0)
4845 /* No luck there. Try a quotient-and-remainder insn,
4846 keeping the quotient alone. */
4847 quotient = gen_reg_rtx (compute_mode);
4848 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4849 op0, op1,
4850 quotient, NULL_RTX, unsignedp))
4852 quotient = 0;
4853 if (! rem_flag)
4854 /* Still no luck. If we are not computing the remainder,
4855 use a library call for the quotient. */
4856 quotient = sign_expand_binop (compute_mode,
4857 udiv_optab, sdiv_optab,
4858 op0, op1, target,
4859 unsignedp, OPTAB_LIB_WIDEN);
4864 if (rem_flag)
4866 if (target && GET_MODE (target) != compute_mode)
4867 target = 0;
4869 if (quotient == 0)
4871 /* No divide instruction either. Use library for remainder. */
4872 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4873 op0, op1, target,
4874 unsignedp, OPTAB_LIB_WIDEN);
4875 /* No remainder function. Try a quotient-and-remainder
4876 function, keeping the remainder. */
4877 if (!remainder)
4879 remainder = gen_reg_rtx (compute_mode);
4880 if (!expand_twoval_binop_libfunc
4881 (unsignedp ? udivmod_optab : sdivmod_optab,
4882 op0, op1,
4883 NULL_RTX, remainder,
4884 unsignedp ? UMOD : MOD))
4885 remainder = NULL_RTX;
4888 else
4890 /* We divided. Now finish doing X - Y * (X / Y). */
4891 remainder = expand_mult (compute_mode, quotient, op1,
4892 NULL_RTX, unsignedp);
4893 remainder = expand_binop (compute_mode, sub_optab, op0,
4894 remainder, target, unsignedp,
4895 OPTAB_LIB_WIDEN);
4899 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4902 /* Return a tree node with data type TYPE, describing the value of X.
4903 Usually this is a VAR_DECL, if there is no obvious better choice.
4904 X may be an expression, however we only support those expressions
4905 generated by loop.c. */
4907 tree
4908 make_tree (tree type, rtx x)
4910 tree t;
4912 switch (GET_CODE (x))
4914 case CONST_INT:
4916 HOST_WIDE_INT hi = 0;
4918 if (INTVAL (x) < 0
4919 && !(TYPE_UNSIGNED (type)
4920 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4921 < HOST_BITS_PER_WIDE_INT)))
4922 hi = -1;
4924 t = build_int_cst_wide (type, INTVAL (x), hi);
4926 return t;
4929 case CONST_DOUBLE:
4930 if (GET_MODE (x) == VOIDmode)
4931 t = build_int_cst_wide (type,
4932 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4933 else
4935 REAL_VALUE_TYPE d;
4937 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4938 t = build_real (type, d);
4941 return t;
4943 case CONST_VECTOR:
4945 int i, units;
4946 rtx elt;
4947 tree t = NULL_TREE;
4949 units = CONST_VECTOR_NUNITS (x);
4951 /* Build a tree with vector elements. */
4952 for (i = units - 1; i >= 0; --i)
4954 elt = CONST_VECTOR_ELT (x, i);
4955 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4958 return build_vector (type, t);
4961 case PLUS:
4962 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4963 make_tree (type, XEXP (x, 1)));
4965 case MINUS:
4966 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4967 make_tree (type, XEXP (x, 1)));
4969 case NEG:
4970 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4972 case MULT:
4973 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4974 make_tree (type, XEXP (x, 1)));
4976 case ASHIFT:
4977 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4978 make_tree (type, XEXP (x, 1)));
4980 case LSHIFTRT:
4981 t = lang_hooks.types.unsigned_type (type);
4982 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4983 make_tree (t, XEXP (x, 0)),
4984 make_tree (type, XEXP (x, 1))));
4986 case ASHIFTRT:
4987 t = lang_hooks.types.signed_type (type);
4988 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4989 make_tree (t, XEXP (x, 0)),
4990 make_tree (type, XEXP (x, 1))));
4992 case DIV:
4993 if (TREE_CODE (type) != REAL_TYPE)
4994 t = lang_hooks.types.signed_type (type);
4995 else
4996 t = type;
4998 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4999 make_tree (t, XEXP (x, 0)),
5000 make_tree (t, XEXP (x, 1))));
5001 case UDIV:
5002 t = lang_hooks.types.unsigned_type (type);
5003 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5004 make_tree (t, XEXP (x, 0)),
5005 make_tree (t, XEXP (x, 1))));
5007 case SIGN_EXTEND:
5008 case ZERO_EXTEND:
5009 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5010 GET_CODE (x) == ZERO_EXTEND);
5011 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5013 default:
5014 t = build_decl (VAR_DECL, NULL_TREE, type);
5016 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
5017 ptr_mode. So convert. */
5018 if (POINTER_TYPE_P (type))
5019 x = convert_memory_address (TYPE_MODE (type), x);
5021 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5022 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5023 t->decl_with_rtl.rtl = x;
5025 return t;
5029 /* Return an rtx representing the value of X * MULT + ADD.
5030 TARGET is a suggestion for where to store the result (an rtx).
5031 MODE is the machine mode for the computation.
5032 X and MULT must have mode MODE. ADD may have a different mode,
5033 and so can X (it defaults to the same as MODE).
5034 UNSIGNEDP is nonzero to do unsigned multiplication.
5035 This may emit insns. */
5038 rtx expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
5039 int unsignedp)
5041 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
5042 tree add_type = (GET_MODE (add) == VOIDmode
5043 ? type: lang_hooks.types.type_for_mode (GET_MODE (add),
5044 unsignedp));
5045 tree result = fold_build2 (PLUS_EXPR, type,
5046 fold_build2 (MULT_EXPR, type,
5047 make_tree (type, x),
5048 make_tree (type, mult)),
5049 make_tree (add_type, add));
5051 return expand_expr (result, target, VOIDmode, 0);
5054 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5055 and returning TARGET.
5057 If TARGET is 0, a pseudo-register or constant is returned. */
5060 rtx expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5062 rtx tem = 0;
5064 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5065 tem = simplify_binary_operation (AND, mode, op0, op1);
5066 if (tem == 0)
5067 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5069 if (target == 0)
5070 target = tem;
5071 else if (tem != target)
5072 emit_move_insn (target, tem);
5073 return target;
5076 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5077 and storing in TARGET. Normally return TARGET.
5078 Return 0 if that cannot be done.
5080 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5081 it is VOIDmode, they cannot both be CONST_INT.
5083 UNSIGNEDP is for the case where we have to widen the operands
5084 to perform the operation. It says to use zero-extension.
5086 NORMALIZEP is 1 if we should convert the result to be either zero
5087 or one. NORMALIZEP is -1 if we should convert the result to be
5088 either zero or -1. If NORMALIZEP is zero, the result will be left
5089 "raw" out of the scc insn. */
5092 rtx emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5093 enum machine_mode mode, int unsignedp, int normalizep)
5095 rtx subtarget;
5096 enum insn_code icode;
5097 enum machine_mode compare_mode;
5098 enum machine_mode target_mode = GET_MODE (target);
5099 rtx tem;
5100 rtx last = get_last_insn ();
5101 rtx pattern, comparison;
5103 if (unsignedp)
5104 code = unsigned_condition (code);
5106 /* If one operand is constant, make it the second one. Only do this
5107 if the other operand is not constant as well. */
5109 if (swap_commutative_operands_p (op0, op1))
5111 tem = op0;
5112 op0 = op1;
5113 op1 = tem;
5114 code = swap_condition (code);
5117 if (mode == VOIDmode)
5118 mode = GET_MODE (op0);
5120 /* For some comparisons with 1 and -1, we can convert this to
5121 comparisons with zero. This will often produce more opportunities for
5122 store-flag insns. */
5124 switch (code)
5126 case LT:
5127 if (op1 == const1_rtx)
5128 op1 = const0_rtx, code = LE;
5129 break;
5130 case LE:
5131 if (op1 == constm1_rtx)
5132 op1 = const0_rtx, code = LT;
5133 break;
5134 case GE:
5135 if (op1 == const1_rtx)
5136 op1 = const0_rtx, code = GT;
5137 break;
5138 case GT:
5139 if (op1 == constm1_rtx)
5140 op1 = const0_rtx, code = GE;
5141 break;
5142 case GEU:
5143 if (op1 == const1_rtx)
5144 op1 = const0_rtx, code = NE;
5145 break;
5146 case LTU:
5147 if (op1 == const1_rtx)
5148 op1 = const0_rtx, code = EQ;
5149 break;
5150 default:
5151 break;
5154 /* If we are comparing a double-word integer with zero or -1, we can
5155 convert the comparison into one involving a single word. */
5156 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5157 && GET_MODE_CLASS (mode) == MODE_INT
5158 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5160 if ((code == EQ || code == NE)
5161 && (op1 == const0_rtx || op1 == constm1_rtx))
5163 rtx op00, op01, op0both;
5165 /* Do a logical OR or AND of the two words and compare the result. */
5166 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5167 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5168 op0both = expand_binop (word_mode,
5169 op1 == const0_rtx ? ior_optab : and_optab,
5170 op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);
5172 if (op0both != 0)
5173 return emit_store_flag (target, code, op0both, op1, word_mode,
5174 unsignedp, normalizep);
5176 else if ((code == LT || code == GE) && op1 == const0_rtx)
5178 rtx op0h;
5180 /* If testing the sign bit, can just test on high word. */
5181 op0h = simplify_gen_subreg (word_mode, op0, mode,
5182 subreg_highpart_offset (word_mode, mode));
5183 return emit_store_flag (target, code, op0h, op1, word_mode,
5184 unsignedp, normalizep);
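/* A minimal standalone sketch (not GCC code) of the double-word
   reductions above, with a 64-bit value given as two 32-bit words:
   OR the words to test against zero, AND them to test against -1,
   and look only at the high word to test the sign.

     int dword_eq0 (unsigned int lo, unsigned int hi)
       { return (lo | hi) == 0; }
     int dword_eqm1 (unsigned int lo, unsigned int hi)
       { return (lo & hi) == 0xFFFFFFFFu; }
     int dword_lt0 (int hi)
       { return hi < 0; }
*/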
5188 /* From now on, we won't change CODE, so set ICODE now. */
5189 icode = setcc_gen_code[(int) code];
5191 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5192 complement of A (for GE) and shifting the sign bit to the low bit. */
5193 if (op1 == const0_rtx && (code == LT || code == GE)
5194 && GET_MODE_CLASS (mode) == MODE_INT
5195 && (normalizep || STORE_FLAG_VALUE == 1
5196 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5197 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5198 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
5200 subtarget = target;
5202 /* If the result is to be wider than OP0, it is best to convert it
5203 first. If it is to be narrower, it is *incorrect* to convert it
5204 first. */
5205 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5207 op0 = convert_modes (target_mode, mode, op0, 0);
5208 mode = target_mode;
5211 if (target_mode != mode)
5212 subtarget = 0;
5214 if (code == GE)
5215 op0 = expand_unop (mode, one_cmpl_optab, op0,
5216 ((STORE_FLAG_VALUE == 1 || normalizep)
5217 ? 0 : subtarget), 0);
5219 if (STORE_FLAG_VALUE == 1 || normalizep)
5220 /* If we are supposed to produce a 0/1 value, we want to do
5221 a logical shift from the sign bit to the low-order bit; for
5222 a -1/0 value, we do an arithmetic shift. */
5223 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5224 size_int (GET_MODE_BITSIZE (mode) - 1),
5225 subtarget, normalizep != -1);
5227 if (mode != target_mode)
5228 op0 = convert_modes (target_mode, mode, op0, 0);
5230 return op0;
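/* A minimal standalone sketch (not GCC code) of the idiom above for a
   32-bit int: "A < 0" is the sign bit shifted down to bit 0, and
   "A >= 0" is the same after a ones' complement.

     int lt0 (int a) { return (unsigned int) a >> 31; }
     int ge0 (int a) { return (unsigned int) ~a >> 31; }
*/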
5233 if (icode != CODE_FOR_nothing)
5235 insn_operand_predicate_fn pred;
5237 /* We think we may be able to do this with a scc insn. Emit the
5238 comparison and then the scc insn. */
5240 do_pending_stack_adjust ();
5241 last = get_last_insn ();
5243 comparison
5244 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5245 if (CONSTANT_P (comparison))
5247 switch (GET_CODE (comparison))
5249 case CONST_INT:
5250 if (comparison == const0_rtx)
5251 return const0_rtx;
5252 break;
5254 #ifdef FLOAT_STORE_FLAG_VALUE
5255 case CONST_DOUBLE:
5256 if (comparison == CONST0_RTX (GET_MODE (comparison)))
5257 return const0_rtx;
5258 break;
5259 #endif
5260 default:
5261 gcc_unreachable ();
5264 if (normalizep == 1)
5265 return const1_rtx;
5266 if (normalizep == -1)
5267 return constm1_rtx;
5268 return const_true_rtx;
5271 /* The code of COMPARISON may not match CODE if compare_from_rtx
5272 decided to swap its operands and reverse the original code.
5274 We know that compare_from_rtx returns either a CONST_INT or
5275 a new comparison code, so it is safe to just extract the
5276 code from COMPARISON. */
5277 code = GET_CODE (comparison);
5279 /* Get a reference to the target in the proper mode for this insn. */
5280 compare_mode = insn_data[(int) icode].operand[0].mode;
5281 subtarget = target;
5282 pred = insn_data[(int) icode].operand[0].predicate;
5283 if (optimize || ! (*pred) (subtarget, compare_mode))
5284 subtarget = gen_reg_rtx (compare_mode);
5286 pattern = GEN_FCN (icode) (subtarget);
5287 if (pattern)
5289 emit_insn (pattern);
5291 /* If we are converting to a wider mode, first convert to
5292 TARGET_MODE, then normalize. This produces better combining
5293 opportunities on machines that have a SIGN_EXTRACT when we are
5294 testing a single bit. This mostly benefits the 68k.
5296 If STORE_FLAG_VALUE does not have the sign bit set when
5297 interpreted in COMPARE_MODE, we can do this conversion as
5298 unsigned, which is usually more efficient. */
5299 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
5301 convert_move (target, subtarget,
5302 (GET_MODE_BITSIZE (compare_mode)
5303 <= HOST_BITS_PER_WIDE_INT)
5304 && 0 == (STORE_FLAG_VALUE
5305 & ((HOST_WIDE_INT) 1
5306 << (GET_MODE_BITSIZE (compare_mode) -1))));
5307 op0 = target;
5308 compare_mode = target_mode;
5310 else
5311 op0 = subtarget;
5313 /* If we want to keep subexpressions around, don't reuse our
5314 last target. */
5316 if (optimize)
5317 subtarget = 0;
5319 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
5320 we don't have to do anything. */
5321 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5323 /* STORE_FLAG_VALUE might be the most negative number, so write
5324 the comparison this way to avoid a compile-time warning. */
5325 else if (- normalizep == STORE_FLAG_VALUE)
5326 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
5328 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
5329 makes it hard to use a value of just the sign bit due to
5330 ANSI integer constant typing rules. */
5331 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
5332 && (STORE_FLAG_VALUE
5333 & ((HOST_WIDE_INT) 1
5334 << (GET_MODE_BITSIZE (compare_mode) - 1))))
5335 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
5336 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
5337 subtarget, normalizep == 1);
5338 else
5340 gcc_assert (STORE_FLAG_VALUE & 1);
5342 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
5343 if (normalizep == -1)
5344 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
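/* A concrete case of the normalization above: if the scc pattern stores
   STORE_FLAG_VALUE == 1 but a -1/0 result was requested (normalizep == -1),
   then -normalizep == STORE_FLAG_VALUE, and the neg_optab branch above
   turns the 0/1 flag into the required 0/-1 value with a single negation.  */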
5347 /* If we were converting to a smaller mode, do the
5348 conversion now. */
5349 if (target_mode != compare_mode)
5351 convert_move (target, op0, 0);
5352 return target;
5354 else
5355 return op0;
5359 delete_insns_since (last);
5361 /* If optimizing, use different pseudo registers for each insn, instead
5362 of reusing the same pseudo. This leads to better CSE, but slows
5363 down the compiler, since there are more pseudos. */
5364 subtarget = (!optimize
5365 && (target_mode == mode)) ? target : NULL_RTX;
5367 /* If we reached here, we can't do this with a scc insn. However, there
5368 are some comparisons that can be done directly. For example, if
5369 this is an equality comparison of integers, we can try to exclusive-or
5370 (or subtract) the two operands and use a recursive call to try the
5371 comparison with zero. Don't do any of these cases if branches are
5372 very cheap. */
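/* For example, to test a == b we can compute a ^ b and compare that
   against zero: 5 ^ 5 == 0, while 5 ^ 3 == 6 != 0, so EQ/NE against an
   arbitrary operand reduces to EQ/NE against const0_rtx.  */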
5374 if (BRANCH_COST > 0
5375 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
5376 && op1 != const0_rtx)
5378 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5379 OPTAB_WIDEN);
5381 if (tem == 0)
5382 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5383 OPTAB_WIDEN);
5384 if (tem != 0)
5385 tem = emit_store_flag (target, code, tem, const0_rtx,
5386 mode, unsignedp, normalizep);
5387 if (tem == 0)
5388 delete_insns_since (last);
5389 return tem;
5392 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5393 the constant zero. Reject all other comparisons at this point. Only
5394 do LE and GT if branches are expensive, since LE and GT are expensive on
5395 2-operand machines. */
5397 if (BRANCH_COST == 0
5398 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
5399 || (code != EQ && code != NE
5400 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
5401 return 0;
5403 /* See what we need to return. We can only return a 1, -1, or the
5404 sign bit. */
5406 if (normalizep == 0)
5408 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5409 normalizep = STORE_FLAG_VALUE;
5411 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5412 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5413 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5415 else
5416 return 0;
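/* For example, on a target whose scc patterns store the sign bit itself
   (STORE_FLAG_VALUE equal to 1 << (GET_MODE_BITSIZE (mode) - 1)),
   normalizep is left at 0 here: a result with the comparison encoded in
   the sign bit is one of the forms this code can produce directly.  */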
5419 /* Try to put the result of the comparison in the sign bit. Assume we can't
5420 do the necessary operation below. */
5422 tem = 0;
5424 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5425 the sign bit set. */
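/* Worked example in an 8-bit mode:
     A =  5:  A - 1 =  4,  A | (A - 1) = 0x05  (sign bit clear)
     A =  0:  A - 1 = -1,  A | (A - 1) = 0xff  (sign bit set)
     A = -3:  A - 1 = -4,  A | (A - 1) = 0xfd  (sign bit set)
   so the sign bit of the result is set exactly when A <= 0.  */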
5427 if (code == LE)
5429 /* This is destructive, so SUBTARGET can't be OP0. */
5430 if (rtx_equal_p (subtarget, op0))
5431 subtarget = 0;
5433 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5434 OPTAB_WIDEN);
5435 if (tem)
5436 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5437 OPTAB_WIDEN);
5440 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5441 number of bits in the mode of OP0, minus one. */
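/* Worked example in an 8-bit mode (BITS == 7, arithmetic shift):
     A =  5:  (A >> 7) - A = 0 - 5       = -5   (sign bit set)
     A =  0:  (A >> 7) - A = 0 - 0       =  0   (sign bit clear)
     A = -3:  (A >> 7) - A = (-1) - (-3) =  2   (sign bit clear)
   so the sign bit of the result is set exactly when A > 0.  */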
5443 if (code == GT)
5445 if (rtx_equal_p (subtarget, op0))
5446 subtarget = 0;
5448 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5449 size_int (GET_MODE_BITSIZE (mode) - 1),
5450 subtarget, 0);
5451 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5452 OPTAB_WIDEN);
5455 if (code == EQ || code == NE)
5457 /* For EQ or NE, one way to do the comparison is to apply an operation
5458 that converts the operand into a positive number if it is nonzero,
5459 or into zero if it was originally zero. Then, for EQ, we subtract 1 and
5460 for NE we negate. This puts the result in the sign bit. Then we
5461 normalize with a shift, if needed.
5463 Two operations that can do the above actions are ABS and FFS, so try
5464 them. If that doesn't work, and MODE is smaller than a full word,
5465 we can use zero-extension to the wider mode (an unsigned conversion)
5466 as the operation. */
5468 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5469 that is compensated by the subsequent overflow when subtracting
5470 one / negating. */
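/* Worked example with ABS in an 8-bit mode:
     A = 5:  abs(A) = 5;  EQ: 5 - 1 =  4 (sign clear),  NE: -5 (sign set)
     A = 0:  abs(A) = 0;  EQ: 0 - 1 = -1 (sign set),    NE:  0 (sign clear)
   so the sign bit of the result is set exactly when the comparison holds.
   For A == -128, the overflow in abs and in the subsequent subtraction or
   negation cancels, as the note above says.  */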
5472 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5473 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5474 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
5475 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5476 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5478 tem = convert_modes (word_mode, mode, op0, 1);
5479 mode = word_mode;
5482 if (tem != 0)
5484 if (code == EQ)
5485 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5486 0, OPTAB_WIDEN);
5487 else
5488 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5491 /* If we couldn't do it that way, for NE we can "or" the two's complement
5492 of the value with itself. For EQ, we take the one's complement of
5493 that "or", which is an extra insn, so we only handle EQ if branches
5494 are expensive. */
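/* Worked example in an 8-bit mode:
     A = 5:  -A | A = 0xfb | 0x05 = 0xff  (sign bit set   -> A != 0)
     A = 0:  -A | A = 0x00 | 0x00 = 0x00  (sign bit clear -> A == 0)
   and complementing that result flips the sign bit, giving the EQ form
   (sign bit set exactly when A == 0).  */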
5496 if (tem == 0 && (code == NE || BRANCH_COST > 1))
5498 if (rtx_equal_p (subtarget, op0))
5499 subtarget = 0;
5501 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5502 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5503 OPTAB_WIDEN);
5505 if (tem && code == EQ)
5506 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5510 if (tem && normalizep)
5511 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5512 size_int (GET_MODE_BITSIZE (mode) - 1),
5513 subtarget, normalizep == 1);
5515 if (tem)
5517 if (GET_MODE (tem) != target_mode)
5519 convert_move (target, tem, 0);
5520 tem = target;
5522 else if (!subtarget)
5524 emit_move_insn (target, tem);
5525 tem = target;
5528 else
5529 delete_insns_since (last);
5531 return tem;
5534 /* Like emit_store_flag, but always succeeds. */
5536 rtx
5537 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5538 enum machine_mode mode, int unsignedp, int normalizep)
5540 rtx tem, label;
5542 /* First see if emit_store_flag can do the job. */
5543 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5544 if (tem != 0)
5545 return tem;
5547 if (normalizep == 0)
5548 normalizep = 1;
5550 /* If this failed, we have to do this with set/compare/jump/set code. */
5552 if (!REG_P (target)
5553 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5554 target = gen_reg_rtx (GET_MODE (target));
5556 emit_move_insn (target, const1_rtx);
5557 label = gen_label_rtx ();
5558 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5559 NULL_RTX, label);
5561 emit_move_insn (target, const0_rtx);
5562 emit_label (label);
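/* In outline, the sequence emitted above behaves like
     target = 1;
     if (op0 <code> op1) goto label;
     target = 0;
   label:
   so TARGET ends up as 1 when the comparison holds and 0 otherwise.  */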
5564 return target;
5567 /* Perform possibly multi-word comparison and conditional jump to LABEL
5568 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5569 now a thin wrapper around do_compare_rtx_and_jump. */
5571 static void
5572 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5573 rtx label)
5575 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5576 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5577 NULL_RTX, NULL_RTX, label);