/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap[NUM_MACHINE_MODES];
static int smod_pow2_cheap[NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed (void)
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  rtx shift_pat, shiftadd_pat, shiftsub_pat;
  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int dummy;
  int m, n;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  zero_cost = rtx_cost (const0_rtx, 0);

  init_recog ();

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      add_cost[mode] = rtx_cost (gen_rtx_PLUS (mode, reg, reg), SET);
      neg_cost[mode] = rtx_cost (gen_rtx_NEG (mode, reg), SET);
      div_cost[mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);

      sdiv_pow2_cheap[mode]
        = (rtx_cost (gen_rtx_DIV (mode, reg, GEN_INT (32)), SET)
           <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode]
        = (rtx_cost (gen_rtx_MOD (mode, reg, GEN_INT (32)), SET)
           <= 2 * add_cost[mode]);

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }

      shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                           gen_rtx_ASHIFT (mode, reg,
                                                           const0_rtx)));

      shiftadd_insn
        = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                  gen_rtx_PLUS (mode,
                                                gen_rtx_MULT (mode,
                                                              reg,
                                                              const0_rtx),
                                                reg)));

      shiftsub_insn
        = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                  gen_rtx_MINUS (mode,
                                                 gen_rtx_MULT (mode,
                                                               reg,
                                                               const0_rtx),
                                                 reg)));

      shift_pat = PATTERN (shift_insn);
      shiftadd_pat = PATTERN (shiftadd_insn);
      shiftsub_pat = PATTERN (shiftsub_insn);

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          shift_cost[mode][m] = 32000;
          XEXP (SET_SRC (shift_pat), 1) = cint[m];
          if (recog (shift_pat, shift_insn, &dummy) >= 0)
            shift_cost[mode][m] = rtx_cost (SET_SRC (shift_pat), SET);

          shiftadd_cost[mode][m] = 32000;
          XEXP (XEXP (SET_SRC (shiftadd_pat), 0), 1) = pow2[m];
          if (recog (shiftadd_pat, shiftadd_insn, &dummy) >= 0)
            shiftadd_cost[mode][m] = rtx_cost (SET_SRC (shiftadd_pat), SET);

          shiftsub_cost[mode][m] = 32000;
          XEXP (XEXP (SET_SRC (shiftsub_pat), 0), 1) = pow2[m];
          if (recog (shiftsub_pat, shiftsub_insn, &dummy) >= 0)
            shiftsub_cost[mode][m] = rtx_cost (SET_SRC (shiftsub_pat), SET);
        }
    }

  end_sequence ();
}
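
/* Example: after initialization, shift_cost[SImode][5] holds the cost of
   an SImode shift left by 5 as recognized above (or the 32000 sentinel if
   no insn matched), and shiftadd_cost[SImode][5] the cost of
   (plus (mult reg 32) reg).  The multiply-synthesis code compares such
   entries against mul_cost[SImode] when deciding whether to replace a
   multiplication by a shift/add sequence.  (Illustrative note; assumes
   the target provides SImode.)  */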
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
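
/* Usage example: store_bit_field below calls
   mode_for_extraction (EP_insv, 3) to obtain the mode the insv pattern
   wants for its value operand (operand 3); per the comment above, a
   VOIDmode operand is reported as word_mode.  */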
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value, HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  /* Use vec_set patterns for storing parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      /* The position operand must be valid in MODE2, not MODE1.  */
      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }
  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (REG_P (op0))
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode),
                           total_size);
        }
      return value;
    }
  /* From here on we can assume that the field to be stored in fits
     within a single word (whatever mode that is), since it is shorter
     than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);
  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
                          ? word_mode : int_mode_for_mode (GET_MODE (value))),
                         value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;
      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
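
/* Illustrative usage (not from the original sources): storing the
   constant 5 into a 6-bit field that starts 3 bits into pseudo REG,
   with the size of the containing object unknown (-1):

        store_bit_field (reg, 6, 3, VOIDmode, GEN_INT (5), -1);

   VOIDmode here plays the role store_field uses to mark a bit field,
   as described in the multi-word case above.  */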
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);
      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }
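
  /* Worked example of the adjustment above, assuming BITS_PER_UNIT == 8
     and MODE == HImode (total_bits == 16): bitpos == 37 becomes
     offset += 4 and bitpos == 5, i.e. the field now starts 5 bits into
     an aligned halfword 4 bytes further along.  */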
  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
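
  /* For instance, with total_bits == 32, a 6-bit field whose msb-relative
     position was 5 gets bitpos == 32 - 6 - 5 == 21; it occupies bits
     21..26 counting from the lsb.  */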
  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
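
/* Illustrative walk-through of the loop above on a little-endian target:
   storing an 8-bit field at BITPOS 28 with UNIT == 32 takes two
   iterations.  The first stores the 4 low-order bits of VALUE at bit 28
   of word 0 (thissize == 32 - 28 == 4); the second stores the remaining
   4 bits at bit 0 of word 1.  */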
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode,
                   HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      /* The position operand must be valid in MODE2, not MODE1.  */
      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((GET_CODE (op0) != MEM
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (GET_CODE (op0) == MEM
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (REG_P (op0))
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
                   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */
  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
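
/* Illustrative usage (not from the original sources): fetching the 6-bit
   field at bit 3 of pseudo REG as an unsigned SImode value:

        x = extract_bit_field (reg, 6, 3, 1, NULL_RTX, SImode, SImode, -1);

   Passing UNSIGNEDP == 0 instead would sign-extend the field.  */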
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
1753 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1754 then arithmetic-shift its lsb to the lsb of the word. */
1755 op0 = force_reg (mode, op0);
1756 if (mode != tmode)
1757 target = 0;
1759 /* Find the narrowest integer mode that contains the field. */
1761 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1762 mode = GET_MODE_WIDER_MODE (mode))
1763 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1765 op0 = convert_to_mode (mode, op0, 0);
1766 break;
1769 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1771 tree amount
1772 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1773 /* Maybe propagate the target for the shift. */
1774 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1775 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1778 return expand_shift (RSHIFT_EXPR, mode, op0,
1779 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1780 target, 0);
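/* Editor's illustrative sketch, not part of this file: the unsigned and
   signed extraction paths above, modeled on 32-bit host integers.  It
   assumes two's complement and an arithmetic right shift of signed
   values; the helper names are invented for the example.  */
#if 0
#include <assert.h>
#include <stdint.h>

/* Unsigned field: shift to the lsb, then mask off the upper bits
   unless the field already reached the msb.  */
static uint32_t
extract_u (uint32_t word, unsigned bitpos, unsigned bitsize)
{
  uint32_t field = word >> bitpos;
  if (bitsize < 32)
    field &= ((uint32_t) 1 << bitsize) - 1;
  return field;
}

/* Signed field: shift the field's msb up to bit 31, then
   arithmetic-shift back down, which sign-extends it.  */
static int32_t
extract_s (uint32_t word, unsigned bitpos, unsigned bitsize)
{
  int32_t t = (int32_t) (word << (32 - bitsize - bitpos));
  return t >> (32 - bitsize);
}

int
main (void)
{
  uint32_t w = 0xABCD1234;
  assert (extract_u (w, 8, 8) == 0x12);
  assert (extract_s (w, 16, 4) == -3);	/* the 0xD nibble */
  return 0;
}
#endif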
1783 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1784 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1785 complement of that if COMPLEMENT. The mask is truncated if
1786 necessary to the width of mode MODE. The mask is zero-extended if
1787 BITSIZE+BITPOS is too small for MODE. */
1789 static rtx
1790 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1792 HOST_WIDE_INT masklow, maskhigh;
1794 if (bitsize == 0)
1795 masklow = 0;
1796 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1797 masklow = (HOST_WIDE_INT) -1 << bitpos;
1798 else
1799 masklow = 0;
1801 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1802 masklow &= ((unsigned HOST_WIDE_INT) -1
1803 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1805 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1806 maskhigh = -1;
1807 else
1808 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1810 if (bitsize == 0)
1811 maskhigh = 0;
1812 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1813 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1814 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1815 else
1816 maskhigh = 0;
1818 if (complement)
1820 maskhigh = ~maskhigh;
1821 masklow = ~masklow;
1824 return immed_double_const (masklow, maskhigh, mode);
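/* Editor's sketch (not GCC code): mask_rtx on a single 32-bit host word,
   assuming 0 <= BITPOS < 32 so no shift is undefined; the two-word case
   above follows the same pattern with a separate high half.  */
#if 0
#include <assert.h>
#include <stdint.h>

/* BITSIZE ones followed by BITPOS zeros, truncated to 32 bits,
   complemented if COMPLEMENT is nonzero.  */
static uint32_t
mask32 (int bitpos, int bitsize, int complement)
{
  uint32_t ones = bitsize == 0 ? 0
		  : bitsize >= 32 ? ~(uint32_t) 0
		  : ((uint32_t) 1 << bitsize) - 1;
  uint32_t mask = ones << bitpos;	/* excess bits shift out */
  return complement ? ~mask : mask;
}

int
main (void)
{
  assert (mask32 (4, 8, 0) == 0x00000FF0);
  assert (mask32 (4, 8, 1) == 0xFFFFF00F);
  return 0;
}
#endif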
1827 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1828 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1830 static rtx
1831 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1833 unsigned HOST_WIDE_INT v = INTVAL (value);
1834 HOST_WIDE_INT low, high;
1836 if (bitsize < HOST_BITS_PER_WIDE_INT)
1837 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1839 if (bitpos < HOST_BITS_PER_WIDE_INT)
1841 low = v << bitpos;
1842 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1844 else
1846 low = 0;
1847 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1850 return immed_double_const (low, high, mode);
1853 /* Extract a bit field that is split across two words
1854 and return an RTX for the result.
1856 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1857 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1858 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1860 static rtx
1861 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1862 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1864 unsigned int unit;
1865 unsigned int bitsdone = 0;
1866 rtx result = NULL_RTX;
1867 int first = 1;
1869 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1870 much at a time. */
1871 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1872 unit = BITS_PER_WORD;
1873 else
1874 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1876 while (bitsdone < bitsize)
1878 unsigned HOST_WIDE_INT thissize;
1879 rtx part, word;
1880 unsigned HOST_WIDE_INT thispos;
1881 unsigned HOST_WIDE_INT offset;
1883 offset = (bitpos + bitsdone) / unit;
1884 thispos = (bitpos + bitsdone) % unit;
1886 /* THISSIZE must not overrun a word boundary. Otherwise,
1887 extract_fixed_bit_field will call us again, and we will mutually
1888 recurse forever. */
1889 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1890 thissize = MIN (thissize, unit - thispos);
1892 /* If OP0 is a register, then handle OFFSET here.
1894 When handling multiword bitfields, extract_bit_field may pass
1895 down a word_mode SUBREG of a larger REG for a bitfield that actually
1896 crosses a word boundary. Thus, for a SUBREG, we must find
1897 the current word starting from the base register. */
1898 if (GET_CODE (op0) == SUBREG)
1900 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1901 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1902 GET_MODE (SUBREG_REG (op0)));
1903 offset = 0;
1905 else if (REG_P (op0))
1907 word = operand_subword_force (op0, offset, GET_MODE (op0));
1908 offset = 0;
1910 else
1911 word = op0;
1913 /* Extract the parts in bit-counting order,
1914 whose meaning is determined by BITS_PER_UNIT.
1915 OFFSET is in UNITs, and UNIT is in bits.
1916 extract_fixed_bit_field wants offset in bytes. */
1917 part = extract_fixed_bit_field (word_mode, word,
1918 offset * unit / BITS_PER_UNIT,
1919 thissize, thispos, 0, 1);
1920 bitsdone += thissize;
1922 /* Shift this part into place for the result. */
1923 if (BYTES_BIG_ENDIAN)
1925 if (bitsize != bitsdone)
1926 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1927 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1929 else
1931 if (bitsdone != thissize)
1932 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1933 build_int_2 (bitsdone - thissize, 0), 0, 1);
1936 if (first)
1937 result = part;
1938 else
1939 /* Combine the parts with bitwise or. This works
1940 because we extracted each part as an unsigned bit field. */
1941 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1942 OPTAB_LIB_WIDEN);
1944 first = 0;
1947 /* Unsigned bit field: we are done. */
1948 if (unsignedp)
1949 return result;
1950 /* Signed bit field: sign-extend with two arithmetic shifts. */
1951 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1952 build_int_2 (BITS_PER_WORD - bitsize, 0),
1953 NULL_RTX, 0);
1954 return expand_shift (RSHIFT_EXPR, word_mode, result,
1955 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
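/* Editor's sketch (not GCC code): the split-field scheme above on a
   little-endian pair of 32-bit host words, assuming the field really
   does cross the boundary (bitpos < 32 < bitpos + bitsize).  Each part
   is fetched unsigned and OR'd into place, exactly as in the loop.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
extract_split_u (uint32_t w0, uint32_t w1, unsigned bitpos, unsigned bitsize)
{
  unsigned lo_bits = 32 - bitpos;		/* bits taken from w0 */
  uint32_t lo = w0 >> bitpos;			/* first part, at the lsb */
  uint32_t hi = w1 & (((uint32_t) 1 << (bitsize - lo_bits)) - 1);
  return lo | (hi << lo_bits);
}

int
main (void)
{
  /* An 8-bit field at bit 28: 4 bits from w0, 4 bits from w1.  */
  assert (extract_split_u (0x50000000, 0x0000000A, 28, 8) == 0xA5);
  return 0;
}
#endif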
1958 /* Add INC into TARGET. */
1960 void
1961 expand_inc (rtx target, rtx inc)
1963 rtx value = expand_binop (GET_MODE (target), add_optab,
1964 target, inc,
1965 target, 0, OPTAB_LIB_WIDEN);
1966 if (value != target)
1967 emit_move_insn (target, value);
1970 /* Subtract DEC from TARGET. */
1972 void
1973 expand_dec (rtx target, rtx dec)
1975 rtx value = expand_binop (GET_MODE (target), sub_optab,
1976 target, dec,
1977 target, 0, OPTAB_LIB_WIDEN);
1978 if (value != target)
1979 emit_move_insn (target, value);
1982 /* Output a shift instruction for expression code CODE,
1983 with SHIFTED being the rtx for the value to shift,
1984 and AMOUNT the tree for the amount to shift by.
1985 Store the result in the rtx TARGET, if that is convenient.
1986 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1987 Return the rtx for where the value is. */
1989 rtx
1990 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
1991 tree amount, rtx target, int unsignedp)
1993 rtx op1, temp = 0;
1994 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1995 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1996 int try;
1998 /* Previously detected shift-counts computed by NEGATE_EXPR
1999 and shifted in the other direction; but that does not work
2000 on all machines. */
2002 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
2004 if (SHIFT_COUNT_TRUNCATED)
2006 if (GET_CODE (op1) == CONST_INT
2007 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2008 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2009 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2010 % GET_MODE_BITSIZE (mode));
2011 else if (GET_CODE (op1) == SUBREG
2012 && subreg_lowpart_p (op1))
2013 op1 = SUBREG_REG (op1);
2016 if (op1 == const0_rtx)
2017 return shifted;
2019 for (try = 0; temp == 0 && try < 3; try++)
2021 enum optab_methods methods;
2023 if (try == 0)
2024 methods = OPTAB_DIRECT;
2025 else if (try == 1)
2026 methods = OPTAB_WIDEN;
2027 else
2028 methods = OPTAB_LIB_WIDEN;
2030 if (rotate)
2032 /* Widening does not work for rotation. */
2033 if (methods == OPTAB_WIDEN)
2034 continue;
2035 else if (methods == OPTAB_LIB_WIDEN)
2037 /* If we have been unable to open-code this by a rotation,
2038 do it as the IOR of two shifts. I.e., to rotate A
2039 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2040 where C is the bitsize of A.
2042 It is theoretically possible that the target machine might
2043 not be able to perform either shift and hence we would
2044 be making two libcalls rather than just the one for the
2045 shift (similarly if IOR could not be done). We will allow
2046 this extremely unlikely lossage to avoid complicating the
2047 code below. */
2049 rtx subtarget = target == shifted ? 0 : target;
2050 rtx temp1;
2051 tree type = TREE_TYPE (amount);
2052 tree new_amount = make_tree (type, op1);
2053 tree other_amount
2054 = fold (build (MINUS_EXPR, type,
2055 convert (type,
2056 build_int_2 (GET_MODE_BITSIZE (mode),
2057 0)),
2058 amount));
2060 shifted = force_reg (mode, shifted);
2062 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2063 mode, shifted, new_amount, subtarget, 1);
2064 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2065 mode, shifted, other_amount, 0, 1);
2066 return expand_binop (mode, ior_optab, temp, temp1, target,
2067 unsignedp, methods);
2070 temp = expand_binop (mode,
2071 left ? rotl_optab : rotr_optab,
2072 shifted, op1, target, unsignedp, methods);
2074 /* If we don't have the rotate, but we are rotating by a constant
2075 that is in range, try a rotate in the opposite direction. */
2077 if (temp == 0 && GET_CODE (op1) == CONST_INT
2078 && INTVAL (op1) > 0
2079 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2080 temp = expand_binop (mode,
2081 left ? rotr_optab : rotl_optab,
2082 shifted,
2083 GEN_INT (GET_MODE_BITSIZE (mode)
2084 - INTVAL (op1)),
2085 target, unsignedp, methods);
2087 else if (unsignedp)
2088 temp = expand_binop (mode,
2089 left ? ashl_optab : lshr_optab,
2090 shifted, op1, target, unsignedp, methods);
2092 /* Do arithmetic shifts.
2093 Also, if we are going to widen the operand, we can just as well
2094 use an arithmetic right-shift instead of a logical one. */
2095 if (temp == 0 && ! rotate
2096 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2098 enum optab_methods methods1 = methods;
2100 /* If trying to widen a log shift to an arithmetic shift,
2101 don't accept an arithmetic shift of the same size. */
2102 if (unsignedp)
2103 methods1 = OPTAB_MUST_WIDEN;
2105 /* Arithmetic shift */
2107 temp = expand_binop (mode,
2108 left ? ashl_optab : ashr_optab,
2109 shifted, op1, target, unsignedp, methods1);
2112 /* We used to try extzv here for logical right shifts, but that was
2113 only useful for one machine, the VAX, and caused poor code
2114 generation there for lshrdi3, so the code was deleted and a
2115 define_expand for lshrsi3 was added to vax.md. */
2118 if (temp == 0)
2119 abort ();
2120 return temp;
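/* Editor's sketch (not GCC code): the rotate-as-IOR-of-two-shifts
   fallback above, on a 32-bit host word.  The count is reduced mod 32
   and the zero case is special-cased, since a 32-bit shift would be
   undefined in C.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
rotl32 (uint32_t a, unsigned n)
{
  n &= 31;
  return n ? (a << n) | (a >> (32 - n)) : a;
}

int
main (void)
{
  assert (rotl32 (0x80000001, 1) == 0x00000003);
  assert (rotl32 (0x12345678, 8) == 0x34567812);
  return 0;
}
#endif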
2123 enum alg_code { alg_zero, alg_m, alg_shift,
2124 alg_add_t_m2, alg_sub_t_m2,
2125 alg_add_factor, alg_sub_factor,
2126 alg_add_t2_m, alg_sub_t2_m,
2127 alg_add, alg_subtract, alg_factor, alg_shiftop };
2129 /* This structure records a sequence of operations.
2130 `ops' is the number of operations recorded.
2131 `cost' is their total cost.
2132 The operations are stored in `op' and the corresponding
2133 logarithms of the integer coefficients in `log'.
2135 These are the operations:
2136 alg_zero total := 0;
2137 alg_m total := multiplicand;
2138 alg_shift total := total * coeff;
2139 alg_add_t_m2 total := total + multiplicand * coeff;
2140 alg_sub_t_m2 total := total - multiplicand * coeff;
2141 alg_add_factor total := total * coeff + total;
2142 alg_sub_factor total := total * coeff - total;
2143 alg_add_t2_m total := total * coeff + multiplicand;
2144 alg_sub_t2_m total := total * coeff - multiplicand;
2146 The first operand must be either alg_zero or alg_m. */
2148 struct algorithm
2150 short cost;
2151 short ops;
2152 /* The sizes of the OP and LOG fields are not directly related to the
2153 word size, but the worst-case algorithm arises when we have few
2154 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2155 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2156 in total wordsize operations. */
2157 enum alg_code op[MAX_BITS_PER_WORD];
2158 char log[MAX_BITS_PER_WORD];
2161 /* Indicates the type of fixup needed after a constant multiplication.
2162 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2163 the result should be negated, and ADD_VARIANT means that the
2164 multiplicand should be added to the result. */
2165 enum mult_variant {basic_variant, negate_variant, add_variant};
2167 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2168 int, enum machine_mode mode);
2169 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2170 struct algorithm *, enum mult_variant *, int);
2171 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2172 const struct algorithm *, enum mult_variant);
2173 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2174 int, unsigned HOST_WIDE_INT *,
2175 int *, int *);
2176 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2177 static rtx extract_high_half (enum machine_mode, rtx);
2178 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2179 int, int);
2180 /* Compute and return the best algorithm for multiplying by T.
2181 The algorithm must cost less than COST_LIMIT.
2182 If retval.cost >= COST_LIMIT, no algorithm was found and all
2183 other fields of the returned struct are undefined.
2184 MODE is the machine mode of the multiplication. */
2186 static void
2187 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2188 int cost_limit, enum machine_mode mode)
2190 int m;
2191 struct algorithm *alg_in, *best_alg;
2192 int cost;
2193 unsigned HOST_WIDE_INT q;
2194 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2196 /* Indicate that no algorithm is yet found. If no algorithm
2197 is found, this value will be returned and indicate failure. */
2198 alg_out->cost = cost_limit;
2200 if (cost_limit <= 0)
2201 return;
2203 /* Restrict the bits of "t" to the multiplication's mode. */
2204 t &= GET_MODE_MASK (mode);
2206 /* t == 1 can be done in zero cost. */
2207 if (t == 1)
2209 alg_out->ops = 1;
2210 alg_out->cost = 0;
2211 alg_out->op[0] = alg_m;
2212 return;
2215 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2216 fail now. */
2217 if (t == 0)
2219 if (zero_cost >= cost_limit)
2220 return;
2221 else
2223 alg_out->ops = 1;
2224 alg_out->cost = zero_cost;
2225 alg_out->op[0] = alg_zero;
2226 return;
2230 /* We'll be needing a couple extra algorithm structures now. */
2232 alg_in = alloca (sizeof (struct algorithm));
2233 best_alg = alloca (sizeof (struct algorithm));
2235 /* If we have a group of zero bits at the low-order part of T, try
2236 multiplying by the remaining bits and then doing a shift. */
2238 if ((t & 1) == 0)
2240 m = floor_log2 (t & -t); /* m = number of low zero bits */
2241 if (m < maxm)
2243 q = t >> m;
2244 cost = shift_cost[mode][m];
2245 synth_mult (alg_in, q, cost_limit - cost, mode);
2247 cost += alg_in->cost;
2248 if (cost < cost_limit)
2250 struct algorithm *x;
2251 x = alg_in, alg_in = best_alg, best_alg = x;
2252 best_alg->log[best_alg->ops] = m;
2253 best_alg->op[best_alg->ops] = alg_shift;
2254 cost_limit = cost;
2259 /* If we have an odd number, add or subtract one. */
2260 if ((t & 1) != 0)
2262 unsigned HOST_WIDE_INT w;
2264 for (w = 1; (w & t) != 0; w <<= 1)
2266 /* If T was -1, then W will be zero after the loop. This is another
2267 case where T ends with ...111. Handling this with (T + 1) and
2268 subtracting 1 produces slightly better code and results in algorithm
2269 selection much faster than treating it like the ...0111 case
2270 below. */
2271 if (w == 0
2272 || (w > 2
2273 /* Reject the case where t is 3.
2274 Thus we prefer addition in that case. */
2275 && t != 3))
2277 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2279 cost = add_cost[mode];
2280 synth_mult (alg_in, t + 1, cost_limit - cost, mode);
2282 cost += alg_in->cost;
2283 if (cost < cost_limit)
2285 struct algorithm *x;
2286 x = alg_in, alg_in = best_alg, best_alg = x;
2287 best_alg->log[best_alg->ops] = 0;
2288 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2289 cost_limit = cost;
2292 else
2294 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2296 cost = add_cost[mode];
2297 synth_mult (alg_in, t - 1, cost_limit - cost, mode);
2299 cost += alg_in->cost;
2300 if (cost < cost_limit)
2302 struct algorithm *x;
2303 x = alg_in, alg_in = best_alg, best_alg = x;
2304 best_alg->log[best_alg->ops] = 0;
2305 best_alg->op[best_alg->ops] = alg_add_t_m2;
2306 cost_limit = cost;
2311 /* Look for factors of t of the form
2312 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2313 If we find such a factor, we can multiply by t using an algorithm that
2314 multiplies by q, shift the result by m and add/subtract it to itself.
2316 We search for large factors first and loop down, even if large factors
2317 are less probable than small; if we find a large factor we will find a
2318 good sequence quickly, and therefore be able to prune (by decreasing
2319 COST_LIMIT) the search. */
2321 for (m = floor_log2 (t - 1); m >= 2; m--)
2323 unsigned HOST_WIDE_INT d;
2325 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2326 if (t % d == 0 && t > d && m < maxm)
2328 cost = add_cost[mode] + shift_cost[mode][m];
2329 if (shiftadd_cost[mode][m] < cost)
2330 cost = shiftadd_cost[mode][m];
2331 synth_mult (alg_in, t / d, cost_limit - cost, mode);
2333 cost += alg_in->cost;
2334 if (cost < cost_limit)
2336 struct algorithm *x;
2337 x = alg_in, alg_in = best_alg, best_alg = x;
2338 best_alg->log[best_alg->ops] = m;
2339 best_alg->op[best_alg->ops] = alg_add_factor;
2340 cost_limit = cost;
2342 /* Other factors will have been taken care of in the recursion. */
2343 break;
2346 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2347 if (t % d == 0 && t > d && m < maxm)
2349 cost = add_cost[mode] + shift_cost[mode][m];
2350 if (shiftsub_cost[mode][m] < cost)
2351 cost = shiftsub_cost[mode][m];
2352 synth_mult (alg_in, t / d, cost_limit - cost, mode);
2354 cost += alg_in->cost;
2355 if (cost < cost_limit)
2357 struct algorithm *x;
2358 x = alg_in, alg_in = best_alg, best_alg = x;
2359 best_alg->log[best_alg->ops] = m;
2360 best_alg->op[best_alg->ops] = alg_sub_factor;
2361 cost_limit = cost;
2363 break;
2367 /* Try shift-and-add (load effective address) instructions,
2368 i.e. do a*3, a*5, a*9. */
2369 if ((t & 1) != 0)
2371 q = t - 1;
2372 q = q & -q;
2373 m = exact_log2 (q);
2374 if (m >= 0 && m < maxm)
2376 cost = shiftadd_cost[mode][m];
2377 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost, mode);
2379 cost += alg_in->cost;
2380 if (cost < cost_limit)
2382 struct algorithm *x;
2383 x = alg_in, alg_in = best_alg, best_alg = x;
2384 best_alg->log[best_alg->ops] = m;
2385 best_alg->op[best_alg->ops] = alg_add_t2_m;
2386 cost_limit = cost;
2390 q = t + 1;
2391 q = q & -q;
2392 m = exact_log2 (q);
2393 if (m >= 0 && m < maxm)
2395 cost = shiftsub_cost[mode][m];
2396 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost, mode);
2398 cost += alg_in->cost;
2399 if (cost < cost_limit)
2401 struct algorithm *x;
2402 x = alg_in, alg_in = best_alg, best_alg = x;
2403 best_alg->log[best_alg->ops] = m;
2404 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2405 cost_limit = cost;
2410 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2411 we have not found any algorithm. */
2412 if (cost_limit == alg_out->cost)
2413 return;
2415 /* If we are getting a too long sequence for `struct algorithm'
2416 to record, make this search fail. */
2417 if (best_alg->ops == MAX_BITS_PER_WORD)
2418 return;
2420 /* Copy the algorithm from temporary space to the space at alg_out.
2421 We avoid using structure assignment because the majority of
2422 best_alg is normally undefined, and this is a critical function. */
2423 alg_out->ops = best_alg->ops + 1;
2424 alg_out->cost = cost_limit;
2425 memcpy (alg_out->op, best_alg->op,
2426 alg_out->ops * sizeof *alg_out->op);
2427 memcpy (alg_out->log, best_alg->log,
2428 alg_out->ops * sizeof *alg_out->log);
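/* Editor's sketch (not GCC code): one sequence expressible with the
   operations synth_mult searches over, for t = 53 (binary 110101):
   alg_m, then alg_add_t2_m with logs 1, 2, 2, so the accumulator runs
   1 -> 3 -> 13 -> 53 in three shift-add steps instead of a multiply.
   Whether synth_mult actually picks this depends on the cost tables.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
times53 (uint32_t x)
{
  uint32_t total = x;		/* alg_m */
  total = (total << 1) + x;	/* alg_add_t2_m, log 1:  3x */
  total = (total << 2) + x;	/* alg_add_t2_m, log 2: 13x */
  total = (total << 2) + x;	/* alg_add_t2_m, log 2: 53x */
  return total;
}

int
main (void)
{
  assert (times53 (7) == 371);
  return 0;
}
#endif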
2431 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2432 Try three variations:
2434 - a shift/add sequence based on VAL itself
2435 - a shift/add sequence based on -VAL, followed by a negation
2436 - a shift/add sequence based on VAL - 1, followed by an addition.
2438 Return true if the cheapest of these costs less than MULT_COST,
2439 describing the algorithm in *ALG and final fixup in *VARIANT. */
2441 static bool
2442 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2443 struct algorithm *alg, enum mult_variant *variant,
2444 int mult_cost)
2446 struct algorithm alg2;
2448 *variant = basic_variant;
2449 synth_mult (alg, val, mult_cost, mode);
2451 /* This works only if the inverted value actually fits in an
2452 `unsigned int'. */
2453 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2455 synth_mult (&alg2, -val, MIN (alg->cost, mult_cost) - neg_cost[mode],
2456 mode);
2457 alg2.cost += neg_cost[mode];
2458 if (alg2.cost < alg->cost)
2459 *alg = alg2, *variant = negate_variant;
2462 /* This proves very useful for division-by-constant. */
2463 synth_mult (&alg2, val - 1, MIN (alg->cost, mult_cost) - add_cost[mode],
2464 mode);
2465 alg2.cost += add_cost[mode];
2466 if (alg2.cost < alg->cost)
2467 *alg = alg2, *variant = add_variant;
2469 return alg->cost < mult_cost;
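/* Editor's sketch (not GCC code): the shape of the add_variant fixup,
   shown for VAL = 6 purely as an illustration (the cost tables decide
   whether this beats a direct sequence).  VAL - 1 = 5 is synthesized
   as (x << 2) + x and the multiplicand is added once at the end; the
   negate_variant is analogous with -VAL and a final negation.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
times6_add_variant (uint32_t x)
{
  uint32_t t = (x << 2) + x;	/* the VAL - 1 = 5 sequence */
  return t + x;			/* add_variant fixup: 6x */
}

int
main (void)
{
  assert (times6_add_variant (9) == 54);
  return 0;
}
#endif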
2472 /* A subroutine of expand_mult, used for constant multiplications.
2473 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2474 convenient. Use the shift/add sequence described by ALG and apply
2475 the final fixup specified by VARIANT. */
2477 static rtx
2478 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2479 rtx target, const struct algorithm *alg,
2480 enum mult_variant variant)
2482 HOST_WIDE_INT val_so_far;
2483 rtx insn, accum, tem;
2484 int opno;
2485 enum machine_mode nmode;
2487 /* op0 must be register to make mult_cost match the precomputed
2488 shiftadd_cost array. */
2489 op0 = protect_from_queue (op0, 0);
2491 /* Avoid referencing memory over and over.
2492 For speed, but also for correctness when mem is volatile. */
2493 if (GET_CODE (op0) == MEM)
2494 op0 = force_reg (mode, op0);
2496 /* ACCUM starts out either as OP0 or as a zero, depending on
2497 the first operation. */
2499 if (alg->op[0] == alg_zero)
2501 accum = copy_to_mode_reg (mode, const0_rtx);
2502 val_so_far = 0;
2504 else if (alg->op[0] == alg_m)
2506 accum = copy_to_mode_reg (mode, op0);
2507 val_so_far = 1;
2509 else
2510 abort ();
2512 for (opno = 1; opno < alg->ops; opno++)
2514 int log = alg->log[opno];
2515 int preserve = preserve_subexpressions_p ();
2516 rtx shift_subtarget = preserve ? 0 : accum;
2517 rtx add_target
2518 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2519 && ! preserve)
2520 ? target : 0;
2521 rtx accum_target = preserve ? 0 : accum;
2523 switch (alg->op[opno])
2525 case alg_shift:
2526 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2527 build_int_2 (log, 0), NULL_RTX, 0);
2528 val_so_far <<= log;
2529 break;
2531 case alg_add_t_m2:
2532 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2533 build_int_2 (log, 0), NULL_RTX, 0);
2534 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2535 add_target ? add_target : accum_target);
2536 val_so_far += (HOST_WIDE_INT) 1 << log;
2537 break;
2539 case alg_sub_t_m2:
2540 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2541 build_int_2 (log, 0), NULL_RTX, 0);
2542 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2543 add_target ? add_target : accum_target);
2544 val_so_far -= (HOST_WIDE_INT) 1 << log;
2545 break;
2547 case alg_add_t2_m:
2548 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2549 build_int_2 (log, 0), shift_subtarget,
2551 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2552 add_target ? add_target : accum_target);
2553 val_so_far = (val_so_far << log) + 1;
2554 break;
2556 case alg_sub_t2_m:
2557 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2558 build_int_2 (log, 0), shift_subtarget, 0);
2559 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2560 add_target ? add_target : accum_target);
2561 val_so_far = (val_so_far << log) - 1;
2562 break;
2564 case alg_add_factor:
2565 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2566 build_int_2 (log, 0), NULL_RTX, 0);
2567 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2568 add_target ? add_target : accum_target);
2569 val_so_far += val_so_far << log;
2570 break;
2572 case alg_sub_factor:
2573 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2574 build_int_2 (log, 0), NULL_RTX, 0);
2575 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2576 (add_target ? add_target
2577 : preserve ? 0 : tem));
2578 val_so_far = (val_so_far << log) - val_so_far;
2579 break;
2581 default:
2582 abort ();
2585 /* Write a REG_EQUAL note on the last insn so that we can cse
2586 multiplication sequences. Note that if ACCUM is a SUBREG,
2587 we've set the inner register and must properly indicate
2588 that. */
2590 tem = op0, nmode = mode;
2591 if (GET_CODE (accum) == SUBREG)
2593 nmode = GET_MODE (SUBREG_REG (accum));
2594 tem = gen_lowpart (nmode, op0);
2597 insn = get_last_insn ();
2598 set_unique_reg_note (insn, REG_EQUAL,
2599 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
2602 if (variant == negate_variant)
2604 val_so_far = -val_so_far;
2605 accum = expand_unop (mode, neg_optab, accum, target, 0);
2607 else if (variant == add_variant)
2609 val_so_far = val_so_far + 1;
2610 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2613 /* Compare only the bits of val and val_so_far that are significant
2614 in the result mode, to avoid sign-/zero-extension confusion. */
2615 val &= GET_MODE_MASK (mode);
2616 val_so_far &= GET_MODE_MASK (mode);
2617 if (val != val_so_far)
2618 abort ();
2620 return accum;
2623 /* Perform a multiplication and return an rtx for the result.
2624 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2625 TARGET is a suggestion for where to store the result (an rtx).
2627 We check specially for a constant integer as OP1.
2628 If you want this check for OP0 as well, then before calling
2629 you should swap the two operands if OP0 would be constant. */
2631 rtx
2632 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2633 int unsignedp)
2635 rtx const_op1 = op1;
2636 enum mult_variant variant;
2637 struct algorithm algorithm;
2639 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2640 less than or equal in size to `unsigned int' this doesn't matter.
2641 If the mode is larger than `unsigned int', then synth_mult works only
2642 if the constant value exactly fits in an `unsigned int' without any
2643 truncation. This means that multiplying by negative values does
2644 not work; results are off by 2^32 on a 32 bit machine. */
2646 /* If we are multiplying in DImode, it may still be a win
2647 to try to work with shifts and adds. */
2648 if (GET_CODE (op1) == CONST_DOUBLE
2649 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2650 && HOST_BITS_PER_INT >= BITS_PER_WORD
2651 && CONST_DOUBLE_HIGH (op1) == 0)
2652 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2653 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2654 && GET_CODE (op1) == CONST_INT
2655 && INTVAL (op1) < 0)
2656 const_op1 = 0;
2658 /* We used to test optimize here, on the grounds that it's better to
2659 produce a smaller program when -O is not used.
2660 But this causes such a terrible slowdown sometimes
2661 that it seems better to use synth_mult always. */
2663 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2664 && (unsignedp || !flag_trapv))
2666 int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2668 if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant,
2669 mult_cost))
2670 return expand_mult_const (mode, op0, INTVAL (const_op1), target,
2671 &algorithm, variant);
2674 if (GET_CODE (op0) == CONST_DOUBLE)
2676 rtx temp = op0;
2677 op0 = op1;
2678 op1 = temp;
2681 /* Expand x*2.0 as x+x. */
2682 if (GET_CODE (op1) == CONST_DOUBLE
2683 && GET_MODE_CLASS (mode) == MODE_FLOAT)
2685 REAL_VALUE_TYPE d;
2686 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
2688 if (REAL_VALUES_EQUAL (d, dconst2))
2690 op0 = force_reg (GET_MODE (op0), op0);
2691 return expand_binop (mode, add_optab, op0, op0,
2692 target, unsignedp, OPTAB_LIB_WIDEN);
2696 /* This used to use umul_optab if unsigned, but for non-widening multiply
2697 there is no difference between signed and unsigned. */
2698 op0 = expand_binop (mode,
2699 ! unsignedp
2700 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2701 ? smulv_optab : smul_optab,
2702 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2703 if (op0 == 0)
2704 abort ();
2705 return op0;
2708 /* Return the smallest n such that 2**n >= X. */
2710 int
2711 ceil_log2 (unsigned HOST_WIDE_INT x)
2713 return floor_log2 (x - 1) + 1;
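/* Editor's sketch (not GCC code): ceil_log2 checked against a naive
   floor_log2 on host integers; floor_log2 (0) is -1, so x == 1 maps
   to 0 as required.  */
#if 0
#include <assert.h>

static int
floor_log2_host (unsigned long x)
{
  int n = -1;
  while (x != 0)
    n++, x >>= 1;
  return n;
}

int
main (void)
{
  assert (floor_log2_host (7 - 1) + 1 == 3);	/* ceil_log2 (7) */
  assert (floor_log2_host (8 - 1) + 1 == 3);	/* ceil_log2 (8) */
  assert (floor_log2_host (9 - 1) + 1 == 4);	/* ceil_log2 (9) */
  return 0;
}
#endif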
2716 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2717 replace division by D, and put the least significant N bits of the result
2718 in *MULTIPLIER_PTR and return the most significant bit.
2720 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2721 needed precision is in PRECISION (should be <= N).
2723 PRECISION should be as small as possible so this function can choose
2724 the multiplier more freely.
2726 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2727 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2729 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2730 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2732 static
2733 unsigned HOST_WIDE_INT
2734 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
2735 unsigned HOST_WIDE_INT *multiplier_ptr,
2736 int *post_shift_ptr, int *lgup_ptr)
2738 HOST_WIDE_INT mhigh_hi, mlow_hi;
2739 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2740 int lgup, post_shift;
2741 int pow, pow2;
2742 unsigned HOST_WIDE_INT nl, dummy1;
2743 HOST_WIDE_INT nh, dummy2;
2745 /* lgup = ceil(log2(divisor)); */
2746 lgup = ceil_log2 (d);
2748 if (lgup > n)
2749 abort ();
2751 pow = n + lgup;
2752 pow2 = n + lgup - precision;
2754 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2756 /* We could handle this with some effort, but this case is much better
2757 handled directly with a scc insn, so rely on caller using that. */
2758 abort ();
2761 /* mlow = 2^(N + lgup)/d */
2762 if (pow >= HOST_BITS_PER_WIDE_INT)
2764 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2765 nl = 0;
2767 else
2769 nh = 0;
2770 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2772 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2773 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2775 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2776 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2777 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2778 else
2779 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2780 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2781 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2783 if (mhigh_hi && nh - d >= d)
2784 abort ();
2785 if (mhigh_hi > 1 || mlow_hi > 1)
2786 abort ();
2787 /* Assert that mlow < mhigh. */
2788 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2789 abort ();
2791 /* If precision == N, then mlow, mhigh exceed 2^N
2792 (but they do not exceed 2^(N+1)). */
2794 /* Reduce to lowest terms. */
2795 for (post_shift = lgup; post_shift > 0; post_shift--)
2797 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2798 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2799 if (ml_lo >= mh_lo)
2800 break;
2802 mlow_hi = 0;
2803 mlow_lo = ml_lo;
2804 mhigh_hi = 0;
2805 mhigh_lo = mh_lo;
2808 *post_shift_ptr = post_shift;
2809 *lgup_ptr = lgup;
2810 if (n < HOST_BITS_PER_WIDE_INT)
2812 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2813 *multiplier_ptr = mhigh_lo & mask;
2814 return mhigh_lo >= mask;
2816 else
2818 *multiplier_ptr = mhigh_lo;
2819 return mhigh_hi;
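/* Editor's sketch (not GCC code): the numbers choose_multiplier
   produces for d = 7 with N = precision = 32, worked by hand from the
   formulas above: the N + 1 bit multiplier is 0x124924925, so the
   function returns a set msb with ml = 0x24924925 and post_shift = 3.
   The quotient then needs the add-and-halve fixup that expand_divmod
   emits for the mh != 0 case.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
udiv7 (uint32_t x)
{
  /* t1 = high half of x * ml.  */
  uint32_t t1 = (uint32_t) (((uint64_t) x * 0x24924925u) >> 32);
  /* t1 + ((x - t1) >> 1) recovers the 33rd multiplier bit;
     then shift by post_shift - 1 == 2.  */
  return (t1 + ((x - t1) >> 1)) >> 2;
}

int
main (void)
{
  assert (udiv7 (0) == 0);
  assert (udiv7 (6) == 0);
  assert (udiv7 (7) == 1);
  assert (udiv7 (0xFFFFFFFFu) == 0xFFFFFFFFu / 7);
  return 0;
}
#endif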
2823 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2824 congruent to 1 (mod 2**N). */
2826 static unsigned HOST_WIDE_INT
2827 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
2829 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2831 /* The algorithm notes that the choice y = x satisfies
2832 x*y == 1 mod 2^3, since x is assumed odd.
2833 Each iteration doubles the number of bits of significance in y. */
2835 unsigned HOST_WIDE_INT mask;
2836 unsigned HOST_WIDE_INT y = x;
2837 int nbit = 3;
2839 mask = (n == HOST_BITS_PER_WIDE_INT
2840 ? ~(unsigned HOST_WIDE_INT) 0
2841 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2843 while (nbit < n)
2845 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2846 nbit *= 2;
2848 return y;
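/* Editor's sketch (not GCC code): the same Newton iteration on 32-bit
   host integers, and the use it is put to: for odd D, exact division
   D | X collapses to one multiply, X / D == X * invert (D) mod 2^32.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
invert_mod2_32 (uint32_t x)
{
  uint32_t y = x;		/* correct to 3 bits, since x is odd */
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);	/* doubles the correct bits; mod 2^32 */
  return y;
}

int
main (void)
{
  uint32_t inv7 = invert_mod2_32 (7);
  assert (inv7 == 0xB6DB6DB7u);
  assert (7u * inv7 == 1);
  assert (161u * inv7 == 23);	/* exact division: 161 / 7 */
  return 0;
}
#endif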
2851 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2852 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2853 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2854 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2855 become signed.
2857 The result is put in TARGET if that is convenient.
2859 MODE is the mode of operation. */
2861 rtx
2862 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
2863 rtx op1, rtx target, int unsignedp)
2865 rtx tem;
2866 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2868 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2869 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2870 NULL_RTX, 0);
2871 tem = expand_and (mode, tem, op1, NULL_RTX);
2872 adj_operand
2873 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2874 adj_operand);
2876 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2877 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2878 NULL_RTX, 0);
2879 tem = expand_and (mode, tem, op0, NULL_RTX);
2880 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2881 target);
2883 return target;
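/* Editor's sketch (not GCC code): the identity behind the adjustment,
   on 32-bit host integers with a 64-bit reference.  The signed and
   unsigned high halves of a product differ by the operands gated by
   each other's sign mask, which is what the two shift/and/add-or-sub
   rounds above compute.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
smulhi_from_umulhi (uint32_t x, uint32_t y)
{
  uint32_t uhi = (uint32_t) (((uint64_t) x * y) >> 32);
  uint32_t mx = (uint32_t) ((int32_t) x >> 31);	/* -1 if x < 0, else 0 */
  uint32_t my = (uint32_t) ((int32_t) y >> 31);	/* -1 if y < 0, else 0 */
  return uhi - (mx & y) - (my & x);
}

int
main (void)
{
  int32_t x = -50000, y = 300000;
  int64_t ref = (int64_t) x * y;
  assert ((int32_t) smulhi_from_umulhi ((uint32_t) x, (uint32_t) y)
	  == (int32_t) (ref >> 32));
  return 0;
}
#endif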
2886 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
2888 static rtx
2889 extract_high_half (enum machine_mode mode, rtx op)
2891 enum machine_mode wider_mode;
2893 if (mode == word_mode)
2894 return gen_highpart (mode, op);
2896 wider_mode = GET_MODE_WIDER_MODE (mode);
2897 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
2898 build_int_2 (GET_MODE_BITSIZE (mode), 0), 0, 1);
2899 return convert_modes (mode, wider_mode, op, 0);
2902 /* Like expand_mult_highpart, but only consider using a multiplication
2903 optab. OP1 is an rtx for the constant operand. */
2905 static rtx
2906 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
2907 rtx target, int unsignedp, int max_cost)
2909 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
2910 enum machine_mode wider_mode;
2911 optab moptab;
2912 rtx tem;
2913 int size;
2915 wider_mode = GET_MODE_WIDER_MODE (mode);
2916 size = GET_MODE_BITSIZE (mode);
2918 /* Firstly, try using a multiplication insn that only generates the needed
2919 high part of the product, and in the sign flavor of unsignedp. */
2920 if (mul_highpart_cost[mode] < max_cost)
2922 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2923 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
2924 unsignedp, OPTAB_DIRECT);
2925 if (tem)
2926 return tem;
2929 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2930 Need to adjust the result after the multiplication. */
2931 if (size - 1 < BITS_PER_WORD
2932 && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
2933 + 4 * add_cost[mode] < max_cost))
2935 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2936 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
2937 unsignedp, OPTAB_DIRECT);
2938 if (tem)
2939 /* We used the wrong signedness. Adjust the result. */
2940 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
2941 tem, unsignedp);
2944 /* Try widening multiplication. */
2945 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2946 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
2947 && mul_widen_cost[wider_mode] < max_cost)
2949 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
2950 unsignedp, OPTAB_WIDEN);
2951 if (tem)
2952 return extract_high_half (mode, tem);
2955 /* Try widening the mode and perform a non-widening multiplication. */
2956 moptab = smul_optab;
2957 if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
2958 && size - 1 < BITS_PER_WORD
2959 && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
2961 tem = expand_binop (wider_mode, moptab, op0, op1, 0,
2962 unsignedp, OPTAB_WIDEN);
2963 if (tem)
2964 return extract_high_half (mode, tem);
2967 /* Try widening multiplication of opposite signedness, and adjust. */
2968 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2969 if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
2970 && size - 1 < BITS_PER_WORD
2971 && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
2972 + 4 * add_cost[mode] < max_cost))
2974 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
2975 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2976 if (tem != 0)
2978 tem = extract_high_half (mode, tem);
2979 /* We used the wrong signedness. Adjust the result. */
2980 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
2981 target, unsignedp);
2985 return 0;
2988 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2989 in TARGET if that is convenient, and return where the result is. If the
2990 operation can not be performed, 0 is returned.
2992 MODE is the mode of operation and result.
2994 UNSIGNEDP nonzero means unsigned multiply.
2996 MAX_COST is the total allowed cost for the expanded RTL. */
2998 rtx
2999 expand_mult_highpart (enum machine_mode mode, rtx op0,
3000 unsigned HOST_WIDE_INT cnst1, rtx target,
3001 int unsignedp, int max_cost)
3003 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3004 int extra_cost;
3005 bool sign_adjust = false;
3006 enum mult_variant variant;
3007 struct algorithm alg;
3008 rtx op1, tem;
3010 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3011 if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3012 abort ();
3014 op1 = gen_int_mode (cnst1, wider_mode);
3015 cnst1 &= GET_MODE_MASK (mode);
3017 /* We can't optimize modes wider than BITS_PER_WORD.
3018 ??? We might be able to perform double-word arithmetic if
3019 mode == word_mode, however all the cost calculations in
3020 synth_mult etc. assume single-word operations. */
3021 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3022 return expand_mult_highpart_optab (mode, op0, op1, target,
3023 unsignedp, max_cost);
3025 extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];
3027 /* Check whether we try to multiply by a negative constant. */
3028 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3030 sign_adjust = true;
3031 extra_cost += add_cost[mode];
3034 /* See whether shift/add multiplication is cheap enough. */
3035 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3036 max_cost - extra_cost))
3038 /* See whether the specialized multiplication optabs are
3039 cheaper than the shift/add version. */
3040 tem = expand_mult_highpart_optab (mode, op0, op1, target,
3041 unsignedp, alg.cost + extra_cost);
3042 if (tem)
3043 return tem;
3045 tem = convert_to_mode (wider_mode, op0, unsignedp);
3046 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3047 tem = extract_high_half (mode, tem);
3049 /* Adjust result for signedness. */
3050 if (sign_adjust)
3051 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3053 return tem;
3055 return expand_mult_highpart_optab (mode, op0, op1, target,
3056 unsignedp, max_cost);
3059 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3060 if that is convenient, and returning where the result is.
3061 You may request either the quotient or the remainder as the result;
3062 specify REM_FLAG nonzero to get the remainder.
3064 CODE is the expression code for which kind of division this is;
3065 it controls how rounding is done. MODE is the machine mode to use.
3066 UNSIGNEDP nonzero means do unsigned division. */
3068 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3069 and then correct it by or'ing in missing high bits
3070 if result of ANDI is nonzero.
3071 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3072 This could optimize to a bfexts instruction.
3073 But C doesn't use these operations, so their optimizations are
3074 left for later. */
3075 /* ??? For modulo, we don't actually need the highpart of the first product,
3076 the low part will do nicely. And for small divisors, the second multiply
3077 can also be a low-part only multiply or even be completely left out.
3078 E.g. to calculate the remainder of a division by 3 with a 32 bit
3079 multiply, multiply with 0x55555556 and extract the upper two bits;
3080 the result is exact for inputs up to 0x1fffffff.
3081 The input range can be reduced by using cross-sum rules.
3082 For odd divisors >= 3, the following table gives right shift counts
3083 so that if a number is shifted by an integer multiple of the given
3084 amount, the remainder stays the same:
3085 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3086 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3087 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3088 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3089 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3091 Cross-sum rules for even numbers can be derived by leaving as many bits
3092 to the right alone as the divisor has zeros to the right.
3093 E.g. if x is an unsigned 32 bit number:
3094 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
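/* Editor's sketch (not GCC code): the mod-3 trick described above, on
   host integers.  0x55555556 is ceil (2^32 / 3); the top two bits of
   the truncated 32-bit product give the remainder, exact for inputs
   up to 0x1fffffff as stated.  */
#if 0
#include <assert.h>
#include <stdint.h>

static unsigned
mod3 (uint32_t x)
{
  return (uint32_t) (x * 0x55555556u) >> 30;
}

int
main (void)
{
  uint32_t x;
  for (x = 0; x <= 100000; x++)
    assert (mod3 (x) == x % 3);
  assert (mod3 (0x1fffffffu) == 0x1fffffffu % 3);
  return 0;
}
#endif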
3097 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
3099 rtx
3100 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3101 rtx op0, rtx op1, rtx target, int unsignedp)
3103 enum machine_mode compute_mode;
3104 rtx tquotient;
3105 rtx quotient = 0, remainder = 0;
3106 rtx last;
3107 int size;
3108 rtx insn, set;
3109 optab optab1, optab2;
3110 int op1_is_constant, op1_is_pow2 = 0;
3111 int max_cost, extra_cost;
3112 static HOST_WIDE_INT last_div_const = 0;
3113 static HOST_WIDE_INT ext_op1;
3115 op1_is_constant = GET_CODE (op1) == CONST_INT;
3116 if (op1_is_constant)
3118 ext_op1 = INTVAL (op1);
3119 if (unsignedp)
3120 ext_op1 &= GET_MODE_MASK (mode);
3121 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3122 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3125 /*
3126 This is the structure of expand_divmod:
3128 First comes code to fix up the operands so we can perform the operations
3129 correctly and efficiently.
3131 Second comes a switch statement with code specific for each rounding mode.
3132 For some special operands this code emits all RTL for the desired
3133 operation, for other cases, it generates only a quotient and stores it in
3134 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3135 to indicate that it has not done anything.
3137 Last comes code that finishes the operation. If QUOTIENT is set and
3138 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3139 QUOTIENT is not set, it is computed using trunc rounding.
3141 We try to generate special code for division and remainder when OP1 is a
3142 constant. If |OP1| = 2**n we can use shifts and some other fast
3143 operations. For other values of OP1, we compute a carefully selected
3144 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3145 by m.
3147 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3148 half of the product. Different strategies for generating the product are
3149 implemented in expand_mult_highpart.
3151 If what we actually want is the remainder, we generate that by another
3152 by-constant multiplication and a subtraction. */
3154 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3155 code below will malfunction if we are, so check here and handle
3156 the special case if so. */
3157 if (op1 == const1_rtx)
3158 return rem_flag ? const0_rtx : op0;
3160 /* When dividing by -1, we could get an overflow.
3161 negv_optab can handle overflows. */
3162 if (! unsignedp && op1 == constm1_rtx)
3164 if (rem_flag)
3165 return const0_rtx;
3166 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3167 ? negv_optab : neg_optab, op0, target, 0);
3170 if (target
3171 /* Don't use the function value register as a target
3172 since we have to read it as well as write it,
3173 and function-inlining gets confused by this. */
3174 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3175 /* Don't clobber an operand while doing a multi-step calculation. */
3176 || ((rem_flag || op1_is_constant)
3177 && (reg_mentioned_p (target, op0)
3178 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3179 || reg_mentioned_p (target, op1)
3180 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3181 target = 0;
3183 /* Get the mode in which to perform this computation. Normally it will
3184 be MODE, but sometimes we can't do the desired operation in MODE.
3185 If so, pick a wider mode in which we can do the operation. Convert
3186 to that mode at the start to avoid repeated conversions.
3188 First see what operations we need. These depend on the expression
3189 we are evaluating. (We assume that divxx3 insns exist under the
3190 same conditions that modxx3 insns and that these insns don't normally
3191 fail. If these assumptions are not correct, we may generate less
3192 efficient code in some cases.)
3194 Then see if we find a mode in which we can open-code that operation
3195 (either a division, modulus, or shift). Finally, check for the smallest
3196 mode for which we can do the operation with a library call. */
3198 /* We might want to refine this now that we have division-by-constant
3199 optimization. Since expand_mult_highpart tries so many variants, it is
3200 not straightforward to generalize this. Maybe we should make an array
3201 of possible modes in init_expmed? Save this for GCC 2.7. */
3203 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3204 ? (unsignedp ? lshr_optab : ashr_optab)
3205 : (unsignedp ? udiv_optab : sdiv_optab));
3206 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3207 ? optab1
3208 : (unsignedp ? udivmod_optab : sdivmod_optab));
3210 for (compute_mode = mode; compute_mode != VOIDmode;
3211 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3212 if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
3213 || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
3214 break;
3216 if (compute_mode == VOIDmode)
3217 for (compute_mode = mode; compute_mode != VOIDmode;
3218 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3219 if (optab1->handlers[compute_mode].libfunc
3220 || optab2->handlers[compute_mode].libfunc)
3221 break;
3223 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3224 in expand_binop. */
3225 if (compute_mode == VOIDmode)
3226 compute_mode = mode;
3228 if (target && GET_MODE (target) == compute_mode)
3229 tquotient = target;
3230 else
3231 tquotient = gen_reg_rtx (compute_mode);
3233 size = GET_MODE_BITSIZE (compute_mode);
3234 #if 0
3235 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3236 (mode), and thereby get better code when OP1 is a constant. Do that
3237 later. It will require going over all usages of SIZE below. */
3238 size = GET_MODE_BITSIZE (mode);
3239 #endif
3241 /* Only deduct something for a REM if the last divide done was
3242 for a different constant. Then set the constant of the last
3243 divide. */
3244 max_cost = div_cost[compute_mode]
3245 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3246 && INTVAL (op1) == last_div_const)
3247 ? mul_cost[compute_mode] + add_cost[compute_mode]
3248 : 0);
3250 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3252 /* Now convert to the best mode to use. */
3253 if (compute_mode != mode)
3255 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3256 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3258 /* convert_modes may have placed op1 into a register, so we
3259 must recompute the following. */
3260 op1_is_constant = GET_CODE (op1) == CONST_INT;
3261 op1_is_pow2 = (op1_is_constant
3262 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3263 || (! unsignedp
3264 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3267 /* If one of the operands is a volatile MEM, copy it into a register. */
3269 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3270 op0 = force_reg (compute_mode, op0);
3271 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3272 op1 = force_reg (compute_mode, op1);
3274 /* If we need the remainder or if OP1 is constant, we need to
3275 put OP0 in a register in case it has any queued subexpressions. */
3276 if (rem_flag || op1_is_constant)
3277 op0 = force_reg (compute_mode, op0);
3279 last = get_last_insn ();
3281 /* Promote floor rounding to trunc rounding for unsigned operations. */
3282 if (unsignedp)
3284 if (code == FLOOR_DIV_EXPR)
3285 code = TRUNC_DIV_EXPR;
3286 if (code == FLOOR_MOD_EXPR)
3287 code = TRUNC_MOD_EXPR;
3288 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3289 code = TRUNC_DIV_EXPR;
3292 if (op1 != const0_rtx)
3293 switch (code)
3295 case TRUNC_MOD_EXPR:
3296 case TRUNC_DIV_EXPR:
3297 if (op1_is_constant)
3299 if (unsignedp)
3301 unsigned HOST_WIDE_INT mh, ml;
3302 int pre_shift, post_shift;
3303 int dummy;
3304 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3305 & GET_MODE_MASK (compute_mode));
3307 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3309 pre_shift = floor_log2 (d);
3310 if (rem_flag)
3312 remainder
3313 = expand_binop (compute_mode, and_optab, op0,
3314 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3315 remainder, 1,
3316 OPTAB_LIB_WIDEN);
3317 if (remainder)
3318 return gen_lowpart (mode, remainder);
3320 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3321 build_int_2 (pre_shift, 0),
3322 tquotient, 1);
3324 else if (size <= HOST_BITS_PER_WIDE_INT)
3326 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3328 /* Most significant bit of divisor is set; emit an scc
3329 insn. */
3330 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3331 compute_mode, 1, 1);
3332 if (quotient == 0)
3333 goto fail1;
3335 else
3337 /* Find a suitable multiplier and right shift count
3338 instead of multiplying with D. */
3340 mh = choose_multiplier (d, size, size,
3341 &ml, &post_shift, &dummy);
3343 /* If the suggested multiplier is more than SIZE bits,
3344 we can do better for even divisors, using an
3345 initial right shift. */
3346 if (mh != 0 && (d & 1) == 0)
3348 pre_shift = floor_log2 (d & -d);
3349 mh = choose_multiplier (d >> pre_shift, size,
3350 size - pre_shift,
3351 &ml, &post_shift, &dummy);
3352 if (mh)
3353 abort ();
3355 else
3356 pre_shift = 0;
3358 if (mh != 0)
3360 rtx t1, t2, t3, t4;
3362 if (post_shift - 1 >= BITS_PER_WORD)
3363 goto fail1;
3365 extra_cost
3366 = (shift_cost[compute_mode][post_shift - 1]
3367 + shift_cost[compute_mode][1]
3368 + 2 * add_cost[compute_mode]);
3369 t1 = expand_mult_highpart (compute_mode, op0, ml,
3370 NULL_RTX, 1,
3371 max_cost - extra_cost);
3372 if (t1 == 0)
3373 goto fail1;
3374 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3375 op0, t1),
3376 NULL_RTX);
3377 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3378 build_int_2 (1, 0), NULL_RTX,1);
3379 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3380 t1, t3),
3381 NULL_RTX);
3382 quotient
3383 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3384 build_int_2 (post_shift - 1, 0),
3385 tquotient, 1);
3387 else
3389 rtx t1, t2;
3391 if (pre_shift >= BITS_PER_WORD
3392 || post_shift >= BITS_PER_WORD)
3393 goto fail1;
3395 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3396 build_int_2 (pre_shift, 0),
3397 NULL_RTX, 1);
3398 extra_cost
3399 = (shift_cost[compute_mode][pre_shift]
3400 + shift_cost[compute_mode][post_shift]);
3401 t2 = expand_mult_highpart (compute_mode, t1, ml,
3402 NULL_RTX, 1,
3403 max_cost - extra_cost);
3404 if (t2 == 0)
3405 goto fail1;
3406 quotient
3407 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3408 build_int_2 (post_shift, 0),
3409 tquotient, 1);
3413 else /* Mode is too wide to use the tricky code. */
3414 break;
3416 insn = get_last_insn ();
3417 if (insn != last
3418 && (set = single_set (insn)) != 0
3419 && SET_DEST (set) == quotient)
3420 set_unique_reg_note (insn,
3421 REG_EQUAL,
3422 gen_rtx_UDIV (compute_mode, op0, op1));
3424 else /* TRUNC_DIV, signed */
3426 unsigned HOST_WIDE_INT ml;
3427 int lgup, post_shift;
3428 HOST_WIDE_INT d = INTVAL (op1);
3429 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3431 /* n rem d = n rem -d */
3432 if (rem_flag && d < 0)
3434 d = abs_d;
3435 op1 = gen_int_mode (abs_d, compute_mode);
3438 if (d == 1)
3439 quotient = op0;
3440 else if (d == -1)
3441 quotient = expand_unop (compute_mode, neg_optab, op0,
3442 tquotient, 0);
3443 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3445 /* This case is not handled correctly below. */
3446 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3447 compute_mode, 1, 1);
3448 if (quotient == 0)
3449 goto fail1;
3451 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3452 && (rem_flag ? smod_pow2_cheap[compute_mode]
3453 : sdiv_pow2_cheap[compute_mode])
3454 /* ??? The cheap metric is computed only for
3455 word_mode. If this operation is wider, this may
3456 not be so. Assume true if the optab has an
3457 expander for this mode. */
3458 && (((rem_flag ? smod_optab : sdiv_optab)
3459 ->handlers[compute_mode].insn_code
3460 != CODE_FOR_nothing)
3461 || (sdivmod_optab->handlers[compute_mode]
3462 .insn_code != CODE_FOR_nothing)))
3464 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3466 lgup = floor_log2 (abs_d);
3467 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3469 rtx label = gen_label_rtx ();
3470 rtx t1;
3472 t1 = copy_to_mode_reg (compute_mode, op0);
3473 do_cmp_and_jump (t1, const0_rtx, GE,
3474 compute_mode, label);
3475 expand_inc (t1, gen_int_mode (abs_d - 1,
3476 compute_mode));
3477 emit_label (label);
3478 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3479 build_int_2 (lgup, 0),
3480 tquotient, 0);
3482 else
3484 rtx t1, t2, t3;
3485 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3486 build_int_2 (size - 1, 0),
3487 NULL_RTX, 0);
3488 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3489 build_int_2 (size - lgup, 0),
3490 NULL_RTX, 1);
3491 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3492 op0, t2),
3493 NULL_RTX);
3494 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3495 build_int_2 (lgup, 0),
3496 tquotient, 0);
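/* Editor's sketch (not GCC code): the branchless t1/t2/t3 sequence
   just above, specialized to abs_d = 8 (lgup = 3) on 32-bit operands;
   it assumes an arithmetic right shift of signed values.  Adding 7 to
   negative dividends before shifting makes the shift round toward
   zero, as TRUNC_DIV_EXPR requires, rather than toward -infinity.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
sdiv8 (int32_t x)
{
  int32_t t1 = x >> 31;				/* 0 or -1 */
  uint32_t t2 = (uint32_t) t1 >> (32 - 3);	/* 0 or 7 */
  return (x + (int32_t) t2) >> 3;
}

int
main (void)
{
  assert (sdiv8 (25) == 3);
  assert (sdiv8 (-25) == -3);	/* plain >> 3 would give -4 */
  assert (sdiv8 (-1) == 0);
  return 0;
}
#endif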
3499 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3500 the quotient. */
3501 if (d < 0)
3503 insn = get_last_insn ();
3504 if (insn != last
3505 && (set = single_set (insn)) != 0
3506 && SET_DEST (set) == quotient
3507 && abs_d < ((unsigned HOST_WIDE_INT) 1
3508 << (HOST_BITS_PER_WIDE_INT - 1)))
3509 set_unique_reg_note (insn,
3510 REG_EQUAL,
3511 gen_rtx_DIV (compute_mode,
3512 op0,
3513 GEN_INT
3514 (trunc_int_for_mode
3515 (abs_d,
3516 compute_mode))));
3518 quotient = expand_unop (compute_mode, neg_optab,
3519 quotient, quotient, 0);
3522 else if (size <= HOST_BITS_PER_WIDE_INT)
3524 choose_multiplier (abs_d, size, size - 1,
3525 &ml, &post_shift, &lgup);
3526 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3528 rtx t1, t2, t3;
3530 if (post_shift >= BITS_PER_WORD
3531 || size - 1 >= BITS_PER_WORD)
3532 goto fail1;
3534 extra_cost = (shift_cost[compute_mode][post_shift]
3535 + shift_cost[compute_mode][size - 1]
3536 + add_cost[compute_mode]);
3537 t1 = expand_mult_highpart (compute_mode, op0, ml,
3538 NULL_RTX, 0,
3539 max_cost - extra_cost);
3540 if (t1 == 0)
3541 goto fail1;
3542 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3543 build_int_2 (post_shift, 0), NULL_RTX, 0);
3544 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3545 build_int_2 (size - 1, 0), NULL_RTX, 0);
3546 if (d < 0)
3547 quotient
3548 = force_operand (gen_rtx_MINUS (compute_mode,
3549 t3, t2),
3550 tquotient);
3551 else
3552 quotient
3553 = force_operand (gen_rtx_MINUS (compute_mode,
3554 t2, t3),
3555 tquotient);
3557 else
3559 rtx t1, t2, t3, t4;
3561 if (post_shift >= BITS_PER_WORD
3562 || size - 1 >= BITS_PER_WORD)
3563 goto fail1;
3565 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3566 extra_cost = (shift_cost[compute_mode][post_shift]
3567 + shift_cost[compute_mode][size - 1]
3568 + 2 * add_cost[compute_mode]);
3569 t1 = expand_mult_highpart (compute_mode, op0, ml,
3570 NULL_RTX, 0,
3571 max_cost - extra_cost);
3572 if (t1 == 0)
3573 goto fail1;
3574 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3575 t1, op0),
3576 NULL_RTX);
3577 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3578 build_int_2 (post_shift, 0),
3579 NULL_RTX, 0);
3580 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3581 build_int_2 (size - 1, 0),
3582 NULL_RTX, 0);
3583 if (d < 0)
3584 quotient
3585 = force_operand (gen_rtx_MINUS (compute_mode,
3586 t4, t3),
3587 tquotient);
3588 else
3589 quotient
3590 = force_operand (gen_rtx_MINUS (compute_mode,
3591 t3, t4),
3592 tquotient);
3595 else /* Too wide mode to use tricky code */
3596 break;
3598 insn = get_last_insn ();
3599 if (insn != last
3600 && (set = single_set (insn)) != 0
3601 && SET_DEST (set) == quotient)
3602 set_unique_reg_note (insn,
3603 REG_EQUAL,
3604 gen_rtx_DIV (compute_mode, op0, op1));
3606 break;
3608 fail1:
3609 delete_insns_since (last);
3610 break;
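/* For illustration only: the shape of the multiply-highpart sequence
   above, written in C for a 32-bit division by 5.  The constant
   0x66666667 and the post-shift of 1 stand in for what
   choose_multiplier computes (this is the ml < 2^(size-1) case), and
   the function name is hypothetical.

     static int32_t
     sdiv_by_5 (int32_t n)
     {
       int32_t t1 = (int32_t) (((int64_t) n * 0x66666667LL) >> 32);
       int32_t t2 = t1 >> 1;
       int32_t t3 = n >> 31;
       return t2 - t3;
     }

   T1 is the high half of the widened product, T2 applies the post
   shift, and subtracting the sign word T3 corrects the quotient for
   negative dividends; for a negative divisor the operands of the
   final subtraction are simply swapped, as in the code above.  */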
3612 case FLOOR_DIV_EXPR:
3613 case FLOOR_MOD_EXPR:
3614 /* We will come here only for signed operations. */
3615 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3617 unsigned HOST_WIDE_INT mh, ml;
3618 int pre_shift, lgup, post_shift;
3619 HOST_WIDE_INT d = INTVAL (op1);
3621 if (d > 0)
3623 /* We could just as easily deal with negative constants here,
3624 but it does not seem worth the trouble for GCC 2.6. */
3625 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3627 pre_shift = floor_log2 (d);
3628 if (rem_flag)
3630 remainder = expand_binop (compute_mode, and_optab, op0,
3631 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3632 remainder, 0, OPTAB_LIB_WIDEN);
3633 if (remainder)
3634 return gen_lowpart (mode, remainder);
3636 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3637 build_int_2 (pre_shift, 0),
3638 tquotient, 0);
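/* For illustration only: the power-of-two case just above in C,
   assuming 32-bit operands and arithmetic right shifts (the function
   name is hypothetical).  An arithmetic shift already rounds the
   quotient toward -infinity, and the floor remainder is just the
   masked low bits:

     static void
     floor_divmod_pow2 (int32_t n, int k, int32_t *q, int32_t *r)
     {
       *q = n >> k;
       *r = n & (((int32_t) 1 << k) - 1);
     }
*/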
3640 else
3642 rtx t1, t2, t3, t4;
3644 mh = choose_multiplier (d, size, size - 1,
3645 &ml, &post_shift, &lgup);
3646 if (mh)
3647 abort ();
3649 if (post_shift < BITS_PER_WORD
3650 && size - 1 < BITS_PER_WORD)
3652 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3653 build_int_2 (size - 1, 0),
3654 NULL_RTX, 0);
3655 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3656 NULL_RTX, 0, OPTAB_WIDEN);
3657 extra_cost = (shift_cost[compute_mode][post_shift]
3658 + shift_cost[compute_mode][size - 1]
3659 + 2 * add_cost[compute_mode]);
3660 t3 = expand_mult_highpart (compute_mode, t2, ml,
3661 NULL_RTX, 1,
3662 max_cost - extra_cost);
3663 if (t3 != 0)
3665 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3666 build_int_2 (post_shift, 0),
3667 NULL_RTX, 1);
3668 quotient = expand_binop (compute_mode, xor_optab,
3669 t4, t1, tquotient, 0,
3670 OPTAB_WIDEN);
3675 else
3677 rtx nsign, t1, t2, t3, t4;
3678 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3679 op0, constm1_rtx), NULL_RTX);
3680 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3681 0, OPTAB_WIDEN);
3682 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3683 build_int_2 (size - 1, 0), NULL_RTX, 0);
3684 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3685 NULL_RTX);
3686 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3687 NULL_RTX, 0);
3688 if (t4)
3690 rtx t5;
3691 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3692 NULL_RTX, 0);
3693 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3694 t4, t5),
3695 tquotient);
3700 if (quotient != 0)
3701 break;
3702 delete_insns_since (last);
3704 /* Try using an instruction that produces both the quotient and
3705 remainder, using truncation. We can easily compensate the quotient
3706 or remainder to get floor rounding, once we have the remainder.
3707 Notice that we also compute the final remainder value here,
3708 and return the result right away. */
3709 if (target == 0 || GET_MODE (target) != compute_mode)
3710 target = gen_reg_rtx (compute_mode);
3712 if (rem_flag)
3714 remainder
3715 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
3716 quotient = gen_reg_rtx (compute_mode);
3718 else
3720 quotient
3721 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
3722 remainder = gen_reg_rtx (compute_mode);
3725 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3726 quotient, remainder, 0))
3728 /* This could be computed with a branch-less sequence.
3729 Save that for later. */
3730 rtx tem;
3731 rtx label = gen_label_rtx ();
3732 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3733 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3734 NULL_RTX, 0, OPTAB_WIDEN);
3735 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3736 expand_dec (quotient, const1_rtx);
3737 expand_inc (remainder, op1);
3738 emit_label (label);
3739 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3742 /* No luck with division elimination or divmod. Have to do it
3743 by conditionally adjusting op0 *and* the result. */
3745 rtx label1, label2, label3, label4, label5;
3746 rtx adjusted_op0;
3747 rtx tem;
3749 quotient = gen_reg_rtx (compute_mode);
3750 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3751 label1 = gen_label_rtx ();
3752 label2 = gen_label_rtx ();
3753 label3 = gen_label_rtx ();
3754 label4 = gen_label_rtx ();
3755 label5 = gen_label_rtx ();
3756 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3757 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3758 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3759 quotient, 0, OPTAB_LIB_WIDEN);
3760 if (tem != quotient)
3761 emit_move_insn (quotient, tem);
3762 emit_jump_insn (gen_jump (label5));
3763 emit_barrier ();
3764 emit_label (label1);
3765 expand_inc (adjusted_op0, const1_rtx);
3766 emit_jump_insn (gen_jump (label4));
3767 emit_barrier ();
3768 emit_label (label2);
3769 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3770 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3771 quotient, 0, OPTAB_LIB_WIDEN);
3772 if (tem != quotient)
3773 emit_move_insn (quotient, tem);
3774 emit_jump_insn (gen_jump (label5));
3775 emit_barrier ();
3776 emit_label (label3);
3777 expand_dec (adjusted_op0, const1_rtx);
3778 emit_label (label4);
3779 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3780 quotient, 0, OPTAB_LIB_WIDEN);
3781 if (tem != quotient)
3782 emit_move_insn (quotient, tem);
3783 expand_dec (quotient, const1_rtx);
3784 emit_label (label5);
3786 break;
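/* For illustration only: the compensation applied above, in C.  A
   truncating divmod becomes a flooring one by adjusting whenever the
   remainder is nonzero and the operands have opposite signs (assumes
   32-bit operands, d != 0, no overflow; the function name is
   hypothetical):

     static void
     floor_divmod (int32_t n, int32_t d, int32_t *q, int32_t *r)
     {
       *q = n / d;
       *r = n % d;
       if (*r != 0 && (n ^ d) < 0)
         {
           *q -= 1;
           *r += d;
         }
     }

   This mirrors the branch sequence above: skip the adjustment when
   the remainder is zero or when op0 and op1 have the same sign.  */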
3788 case CEIL_DIV_EXPR:
3789 case CEIL_MOD_EXPR:
3790 if (unsignedp)
3792 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3794 rtx t1, t2, t3;
3795 unsigned HOST_WIDE_INT d = INTVAL (op1);
3796 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3797 build_int_2 (floor_log2 (d), 0),
3798 tquotient, 1);
3799 t2 = expand_binop (compute_mode, and_optab, op0,
3800 GEN_INT (d - 1),
3801 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3802 t3 = gen_reg_rtx (compute_mode);
3803 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3804 compute_mode, 1, 1);
3805 if (t3 == 0)
3807 rtx lab;
3808 lab = gen_label_rtx ();
3809 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3810 expand_inc (t1, const1_rtx);
3811 emit_label (lab);
3812 quotient = t1;
3814 else
3815 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3816 t1, t3),
3817 tquotient);
3818 break;
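/* For illustration only: the power-of-two sequence above in C,
   assuming 32-bit operands (the function name is hypothetical).
   T1 is the truncating quotient and T3 becomes 1 exactly when the
   discarded low bits were nonzero:

     static uint32_t
     ceil_udiv_pow2 (uint32_t n, int k)
     {
       uint32_t t1 = n >> k;
       uint32_t t2 = n & ((((uint32_t) 1) << k) - 1);
       uint32_t t3 = t2 != 0;
       return t1 + t3;
     }
*/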
3821 /* Try using an instruction that produces both the quotient and
3822 remainder, using truncation. We can easily compensate the
3823 quotient or remainder to get ceiling rounding, once we have the
3824 remainder. Notice that we also compute the final remainder
3825 value here, and return the result right away. */
3826 if (target == 0 || GET_MODE (target) != compute_mode)
3827 target = gen_reg_rtx (compute_mode);
3829 if (rem_flag)
3831 remainder = (REG_P (target)
3832 ? target : gen_reg_rtx (compute_mode));
3833 quotient = gen_reg_rtx (compute_mode);
3835 else
3837 quotient = (REG_P (target)
3838 ? target : gen_reg_rtx (compute_mode));
3839 remainder = gen_reg_rtx (compute_mode);
3842 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3843 remainder, 1))
3845 /* This could be computed with a branch-less sequence.
3846 Save that for later. */
3847 rtx label = gen_label_rtx ();
3848 do_cmp_and_jump (remainder, const0_rtx, EQ,
3849 compute_mode, label);
3850 expand_inc (quotient, const1_rtx);
3851 expand_dec (remainder, op1);
3852 emit_label (label);
3853 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3856 /* No luck with division elimination or divmod. Have to do it
3857 by conditionally adjusting op0 *and* the result. */
3859 rtx label1, label2;
3860 rtx adjusted_op0, tem;
3862 quotient = gen_reg_rtx (compute_mode);
3863 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3864 label1 = gen_label_rtx ();
3865 label2 = gen_label_rtx ();
3866 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3867 compute_mode, label1);
3868 emit_move_insn (quotient, const0_rtx);
3869 emit_jump_insn (gen_jump (label2));
3870 emit_barrier ();
3871 emit_label (label1);
3872 expand_dec (adjusted_op0, const1_rtx);
3873 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3874 quotient, 1, OPTAB_LIB_WIDEN);
3875 if (tem != quotient)
3876 emit_move_insn (quotient, tem);
3877 expand_inc (quotient, const1_rtx);
3878 emit_label (label2);
3881 else /* signed */
3883 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3884 && INTVAL (op1) >= 0)
3886 /* This is extremely similar to the code for the unsigned case
3887 above. For 2.7 we should merge these variants, but for
3888 2.6.1 I don't want to touch the code for unsigned since that
3889 gets used in C. The signed case will only be used by other
3890 languages (Ada). */
3892 rtx t1, t2, t3;
3893 unsigned HOST_WIDE_INT d = INTVAL (op1);
3894 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3895 build_int_2 (floor_log2 (d), 0),
3896 tquotient, 0);
3897 t2 = expand_binop (compute_mode, and_optab, op0,
3898 GEN_INT (d - 1),
3899 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3900 t3 = gen_reg_rtx (compute_mode);
3901 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3902 compute_mode, 1, 1);
3903 if (t3 == 0)
3905 rtx lab;
3906 lab = gen_label_rtx ();
3907 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3908 expand_inc (t1, const1_rtx);
3909 emit_label (lab);
3910 quotient = t1;
3912 else
3913 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3914 t1, t3),
3915 tquotient);
3916 break;
3919 /* Try using an instruction that produces both the quotient and
3920 remainder, using truncation. We can easily compensate the
3921 quotient or remainder to get ceiling rounding, once we have the
3922 remainder. Notice that we also compute the final remainder
3923 value here, and return the result right away. */
3924 if (target == 0 || GET_MODE (target) != compute_mode)
3925 target = gen_reg_rtx (compute_mode);
3926 if (rem_flag)
3928 remainder = (REG_P (target)
3929 ? target : gen_reg_rtx (compute_mode));
3930 quotient = gen_reg_rtx (compute_mode);
3932 else
3934 quotient = (REG_P (target)
3935 ? target : gen_reg_rtx (compute_mode));
3936 remainder = gen_reg_rtx (compute_mode);
3939 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3940 remainder, 0))
3942 /* This could be computed with a branch-less sequence.
3943 Save that for later. */
3944 rtx tem;
3945 rtx label = gen_label_rtx ();
3946 do_cmp_and_jump (remainder, const0_rtx, EQ,
3947 compute_mode, label);
3948 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3949 NULL_RTX, 0, OPTAB_WIDEN);
3950 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3951 expand_inc (quotient, const1_rtx);
3952 expand_dec (remainder, op1);
3953 emit_label (label);
3954 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3957 /* No luck with division elimination or divmod. Have to do it
3958 by conditionally adjusting op0 *and* the result. */
3960 rtx label1, label2, label3, label4, label5;
3961 rtx adjusted_op0;
3962 rtx tem;
3964 quotient = gen_reg_rtx (compute_mode);
3965 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3966 label1 = gen_label_rtx ();
3967 label2 = gen_label_rtx ();
3968 label3 = gen_label_rtx ();
3969 label4 = gen_label_rtx ();
3970 label5 = gen_label_rtx ();
3971 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3972 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3973 compute_mode, label1);
3974 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3975 quotient, 0, OPTAB_LIB_WIDEN);
3976 if (tem != quotient)
3977 emit_move_insn (quotient, tem);
3978 emit_jump_insn (gen_jump (label5));
3979 emit_barrier ();
3980 emit_label (label1);
3981 expand_dec (adjusted_op0, const1_rtx);
3982 emit_jump_insn (gen_jump (label4));
3983 emit_barrier ();
3984 emit_label (label2);
3985 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3986 compute_mode, label3);
3987 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3988 quotient, 0, OPTAB_LIB_WIDEN);
3989 if (tem != quotient)
3990 emit_move_insn (quotient, tem);
3991 emit_jump_insn (gen_jump (label5));
3992 emit_barrier ();
3993 emit_label (label3);
3994 expand_inc (adjusted_op0, const1_rtx);
3995 emit_label (label4);
3996 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3997 quotient, 0, OPTAB_LIB_WIDEN);
3998 if (tem != quotient)
3999 emit_move_insn (quotient, tem);
4000 expand_inc (quotient, const1_rtx);
4001 emit_label (label5);
4004 break;
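/* For illustration only: the signed compensation above, in C.  A
   truncating divmod becomes a ceiling one by adjusting whenever the
   remainder is nonzero and the operands have the same sign (assumes
   32-bit operands, d != 0, no overflow; the function name is
   hypothetical):

     static void
     ceil_divmod (int32_t n, int32_t d, int32_t *q, int32_t *r)
     {
       *q = n / d;
       *r = n % d;
       if (*r != 0 && (n ^ d) >= 0)
         {
           *q += 1;
           *r -= d;
         }
     }
*/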
4006 case EXACT_DIV_EXPR:
4007 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4009 HOST_WIDE_INT d = INTVAL (op1);
4010 unsigned HOST_WIDE_INT ml;
4011 int pre_shift;
4012 rtx t1;
4014 pre_shift = floor_log2 (d & -d);
4015 ml = invert_mod2n (d >> pre_shift, size);
4016 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4017 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
4018 quotient = expand_mult (compute_mode, t1,
4019 gen_int_mode (ml, compute_mode),
4020 NULL_RTX, 1);
4022 insn = get_last_insn ();
4023 set_unique_reg_note (insn,
4024 REG_EQUAL,
4025 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4026 compute_mode,
4027 op0, op1));
4029 break;
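/* For illustration only: what the invert_mod2n sequence amounts to in
   C for 32 bits (these function names are hypothetical, and
   __builtin_ctz plays the role of floor_log2 (d & -d)).  Every odd D
   has a multiplicative inverse mod 2^32, computable by Newton's
   iteration; each step doubles the number of correct low bits, and
   D * D == 1 (mod 8) gives three correct bits to start from:

     static uint32_t
     invert_mod2n_32 (uint32_t d)
     {
       uint32_t x = d;
       x *= 2 - d * x;
       x *= 2 - d * x;
       x *= 2 - d * x;
       x *= 2 - d * x;
       return x;
     }

     static uint32_t
     exact_udiv (uint32_t n, uint32_t d)
     {
       int k = __builtin_ctz (d);
       return (n >> k) * invert_mod2n_32 (d >> k);
     }

   This is valid only when N is known to be a multiple of D, which is
   exactly what EXACT_DIV_EXPR asserts; the wrapping multiply by the
   inverse then reconstructs the quotient exactly.  */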
4031 case ROUND_DIV_EXPR:
4032 case ROUND_MOD_EXPR:
4033 if (unsignedp)
4035 rtx tem;
4036 rtx label;
4037 label = gen_label_rtx ();
4038 quotient = gen_reg_rtx (compute_mode);
4039 remainder = gen_reg_rtx (compute_mode);
4040 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4042 rtx tem;
4043 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4044 quotient, 1, OPTAB_LIB_WIDEN);
4045 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4046 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4047 remainder, 1, OPTAB_LIB_WIDEN);
4049 tem = plus_constant (op1, -1);
4050 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4051 build_int_2 (1, 0), NULL_RTX, 1);
4052 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4053 expand_inc (quotient, const1_rtx);
4054 expand_dec (remainder, op1);
4055 emit_label (label);
4057 else
4059 rtx abs_rem, abs_op1, tem, mask;
4060 rtx label;
4061 label = gen_label_rtx ();
4062 quotient = gen_reg_rtx (compute_mode);
4063 remainder = gen_reg_rtx (compute_mode);
4064 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4066 rtx tem;
4067 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4068 quotient, 0, OPTAB_LIB_WIDEN);
4069 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4070 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4071 remainder, 0, OPTAB_LIB_WIDEN);
4073 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4074 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4075 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4076 build_int_2 (1, 0), NULL_RTX, 1);
4077 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4078 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4079 NULL_RTX, 0, OPTAB_WIDEN);
4080 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4081 build_int_2 (size - 1, 0), NULL_RTX, 0);
4082 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4083 NULL_RTX, 0, OPTAB_WIDEN);
4084 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4085 NULL_RTX, 0, OPTAB_WIDEN);
4086 expand_inc (quotient, tem);
4087 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4088 NULL_RTX, 0, OPTAB_WIDEN);
4089 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4090 NULL_RTX, 0, OPTAB_WIDEN);
4091 expand_dec (remainder, tem);
4092 emit_label (label);
4094 return gen_lowpart (mode, rem_flag ? remainder : quotient);
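/* For illustration only: the unsigned rounding test above, in C
   (assumes 32-bit operands, d != 0; the function name is
   hypothetical).  The quotient is bumped when the remainder exceeds
   (d - 1) / 2, i.e. when it is more than half the divisor, so exact
   halves round upward:

     static uint32_t
     round_udiv (uint32_t n, uint32_t d)
     {
       uint32_t q = n / d;
       uint32_t r = n % d;
       return q + (r > (d - 1) / 2);
     }
*/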
4096 default:
4097 abort ();
4100 if (quotient == 0)
4102 if (target && GET_MODE (target) != compute_mode)
4103 target = 0;
4105 if (rem_flag)
4107 /* Try to produce the remainder without producing the quotient.
4108 If we seem to have a divmod pattern that does not require widening,
4109 don't try widening here. We should really have a WIDEN argument
4110 to expand_twoval_binop, since what we'd really like to do here is
4111 1) try a mod insn in compute_mode
4112 2) try a divmod insn in compute_mode
4113 3) try a div insn in compute_mode and multiply-subtract to get
4114 remainder
4115 4) try the same things with widening allowed. */
4116 remainder
4117 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4118 op0, op1, target,
4119 unsignedp,
4120 ((optab2->handlers[compute_mode].insn_code
4121 != CODE_FOR_nothing)
4122 ? OPTAB_DIRECT : OPTAB_WIDEN));
4123 if (remainder == 0)
4125 /* No luck there. Can we do remainder and divide at once
4126 without a library call? */
4127 remainder = gen_reg_rtx (compute_mode);
4128 if (! expand_twoval_binop ((unsignedp
4129 ? udivmod_optab
4130 : sdivmod_optab),
4131 op0, op1,
4132 NULL_RTX, remainder, unsignedp))
4133 remainder = 0;
4136 if (remainder)
4137 return gen_lowpart (mode, remainder);
4140 /* Produce the quotient. Try a quotient insn, but not a library call.
4141 If we have a divmod in this mode, use it in preference to widening
4142 the div (for this test we assume it will not fail). Note that optab2
4143 is set to the one of the two optabs that the call below will use. */
4144 quotient
4145 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4146 op0, op1, rem_flag ? NULL_RTX : target,
4147 unsignedp,
4148 ((optab2->handlers[compute_mode].insn_code
4149 != CODE_FOR_nothing)
4150 ? OPTAB_DIRECT : OPTAB_WIDEN));
4152 if (quotient == 0)
4154 /* No luck there. Try a quotient-and-remainder insn,
4155 keeping the quotient alone. */
4156 quotient = gen_reg_rtx (compute_mode);
4157 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4158 op0, op1,
4159 quotient, NULL_RTX, unsignedp))
4161 quotient = 0;
4162 if (! rem_flag)
4163 /* Still no luck. If we are not computing the remainder,
4164 use a library call for the quotient. */
4165 quotient = sign_expand_binop (compute_mode,
4166 udiv_optab, sdiv_optab,
4167 op0, op1, target,
4168 unsignedp, OPTAB_LIB_WIDEN);
4173 if (rem_flag)
4175 if (target && GET_MODE (target) != compute_mode)
4176 target = 0;
4178 if (quotient == 0)
4179 /* No divide instruction either. Use library for remainder. */
4180 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4181 op0, op1, target,
4182 unsignedp, OPTAB_LIB_WIDEN);
4183 else
4185 /* We divided. Now finish doing X - Y * (X / Y). */
4186 remainder = expand_mult (compute_mode, quotient, op1,
4187 NULL_RTX, unsignedp);
4188 remainder = expand_binop (compute_mode, sub_optab, op0,
4189 remainder, target, unsignedp,
4190 OPTAB_LIB_WIDEN);
4194 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4197 /* Return a tree node with data type TYPE, describing the value of X.
4198 Usually this is an RTL_EXPR, if there is no obvious better choice.
4199 X may be an expression; however, we only support those expressions
4200 generated by loop.c. */
4202 tree
4203 make_tree (tree type, rtx x)
4205 tree t;
4207 switch (GET_CODE (x))
4209 case CONST_INT:
4210 t = build_int_2 (INTVAL (x),
4211 (TYPE_UNSIGNED (type)
4212 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4213 < HOST_BITS_PER_WIDE_INT))
4214 || INTVAL (x) >= 0 ? 0 : -1);
4215 TREE_TYPE (t) = type;
4216 return t;
4218 case CONST_DOUBLE:
4219 if (GET_MODE (x) == VOIDmode)
4221 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4222 TREE_TYPE (t) = type;
4224 else
4226 REAL_VALUE_TYPE d;
4228 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4229 t = build_real (type, d);
4232 return t;
4234 case CONST_VECTOR:
4236 int i, units;
4237 rtx elt;
4238 tree t = NULL_TREE;
4240 units = CONST_VECTOR_NUNITS (x);
4242 /* Build a tree with vector elements. */
4243 for (i = units - 1; i >= 0; --i)
4245 elt = CONST_VECTOR_ELT (x, i);
4246 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4249 return build_vector (type, t);
4252 case PLUS:
4253 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4254 make_tree (type, XEXP (x, 1))));
4256 case MINUS:
4257 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4258 make_tree (type, XEXP (x, 1))));
4260 case NEG:
4261 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4263 case MULT:
4264 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4265 make_tree (type, XEXP (x, 1))));
4267 case ASHIFT:
4268 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4269 make_tree (type, XEXP (x, 1))));
4271 case LSHIFTRT:
4272 t = lang_hooks.types.unsigned_type (type);
4273 return fold (convert (type,
4274 build (RSHIFT_EXPR, t,
4275 make_tree (t, XEXP (x, 0)),
4276 make_tree (type, XEXP (x, 1)))));
4278 case ASHIFTRT:
4279 t = lang_hooks.types.signed_type (type);
4280 return fold (convert (type,
4281 build (RSHIFT_EXPR, t,
4282 make_tree (t, XEXP (x, 0)),
4283 make_tree (type, XEXP (x, 1)))));
4285 case DIV:
4286 if (TREE_CODE (type) != REAL_TYPE)
4287 t = lang_hooks.types.signed_type (type);
4288 else
4289 t = type;
4291 return fold (convert (type,
4292 build (TRUNC_DIV_EXPR, t,
4293 make_tree (t, XEXP (x, 0)),
4294 make_tree (t, XEXP (x, 1)))));
4295 case UDIV:
4296 t = lang_hooks.types.unsigned_type (type);
4297 return fold (convert (type,
4298 build (TRUNC_DIV_EXPR, t,
4299 make_tree (t, XEXP (x, 0)),
4300 make_tree (t, XEXP (x, 1)))));
4302 case SIGN_EXTEND:
4303 case ZERO_EXTEND:
4304 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4305 GET_CODE (x) == ZERO_EXTEND);
4306 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4308 default:
4309 t = make_node (RTL_EXPR);
4310 TREE_TYPE (t) = type;
4312 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4313 ptr_mode. So convert. */
4314 if (POINTER_TYPE_P (type))
4315 x = convert_memory_address (TYPE_MODE (type), x);
4317 RTL_EXPR_RTL (t) = x;
4318 /* There are no insns to be output
4319 when this rtl_expr is used. */
4320 RTL_EXPR_SEQUENCE (t) = 0;
4321 return t;
4325 /* Check whether the multiplication X * MULT + ADD overflows.
4326 X, MULT and ADD must be CONST_*.
4327 MODE is the machine mode for the computation.
4328 X and MULT must have mode MODE. ADD may have a different mode
4329 (VOIDmode means the same as MODE).
4330 UNSIGNEDP is nonzero to do unsigned multiplication. */
4332 bool
4333 const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp)
4335 tree type, mult_type, add_type, result;
4337 type = lang_hooks.types.type_for_mode (mode, unsignedp);
4339 /* In order to get a proper overflow indication from an unsigned
4340 type, we have to pretend that it's a sizetype. */
4341 mult_type = type;
4342 if (unsignedp)
4344 mult_type = copy_node (type);
4345 TYPE_IS_SIZETYPE (mult_type) = 1;
4348 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4349 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
4351 result = fold (build (PLUS_EXPR, mult_type,
4352 fold (build (MULT_EXPR, mult_type,
4353 make_tree (mult_type, x),
4354 make_tree (mult_type, mult))),
4355 make_tree (add_type, add)));
4357 return TREE_CONSTANT_OVERFLOW (result);
4360 /* Return an rtx representing the value of X * MULT + ADD.
4361 TARGET is a suggestion for where to store the result (an rtx).
4362 MODE is the machine mode for the computation.
4363 X and MULT must have mode MODE. ADD may have a different mode
4364 (VOIDmode means the same as MODE).
4365 UNSIGNEDP is nonzero to do unsigned multiplication.
4366 This may emit insns. */
4368 rtx
4369 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4370 int unsignedp)
4372 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
4373 tree add_type = (GET_MODE (add) == VOIDmode
4374 ? type : lang_hooks.types.type_for_mode (GET_MODE (add),
4375 unsignedp));
4376 tree result = fold (build (PLUS_EXPR, type,
4377 fold (build (MULT_EXPR, type,
4378 make_tree (type, x),
4379 make_tree (type, mult))),
4380 make_tree (add_type, add)));
4382 return expand_expr (result, target, VOIDmode, 0);
4385 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4386 and returning TARGET.
4388 If TARGET is 0, a pseudo-register or constant is returned. */
4390 rtx
4391 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4393 rtx tem = 0;
4395 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4396 tem = simplify_binary_operation (AND, mode, op0, op1);
4397 if (tem == 0)
4398 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4400 if (target == 0)
4401 target = tem;
4402 else if (tem != target)
4403 emit_move_insn (target, tem);
4404 return target;
4407 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4408 and store the result in TARGET. Normally return TARGET.
4409 Return 0 if that cannot be done.
4411 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4412 it is VOIDmode, they cannot both be CONST_INT.
4414 UNSIGNEDP is for the case where we have to widen the operands
4415 to perform the operation. It says to use zero-extension.
4417 NORMALIZEP is 1 if we should convert the result to be either zero
4418 or one. NORMALIZEP is -1 if we should convert the result to be
4419 either zero or -1. If NORMALIZEP is zero, the result will be left
4420 "raw" out of the scc insn. */
4422 rtx
4423 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
4424 enum machine_mode mode, int unsignedp, int normalizep)
4426 rtx subtarget;
4427 enum insn_code icode;
4428 enum machine_mode compare_mode;
4429 enum machine_mode target_mode = GET_MODE (target);
4430 rtx tem;
4431 rtx last = get_last_insn ();
4432 rtx pattern, comparison;
4434 /* ??? Ok to do this and then fail? */
4435 op0 = protect_from_queue (op0, 0);
4436 op1 = protect_from_queue (op1, 0);
4438 if (unsignedp)
4439 code = unsigned_condition (code);
4441 /* If one operand is constant, make it the second one. Only do this
4442 if the other operand is not constant as well. */
4444 if (swap_commutative_operands_p (op0, op1))
4446 tem = op0;
4447 op0 = op1;
4448 op1 = tem;
4449 code = swap_condition (code);
4452 if (mode == VOIDmode)
4453 mode = GET_MODE (op0);
4455 /* For some comparisons with 1 and -1, we can convert this to
4456 comparisons with zero. This will often produce more opportunities for
4457 store-flag insns. */
4459 switch (code)
4461 case LT:
4462 if (op1 == const1_rtx)
4463 op1 = const0_rtx, code = LE;
4464 break;
4465 case LE:
4466 if (op1 == constm1_rtx)
4467 op1 = const0_rtx, code = LT;
4468 break;
4469 case GE:
4470 if (op1 == const1_rtx)
4471 op1 = const0_rtx, code = GT;
4472 break;
4473 case GT:
4474 if (op1 == constm1_rtx)
4475 op1 = const0_rtx, code = GE;
4476 break;
4477 case GEU:
4478 if (op1 == const1_rtx)
4479 op1 = const0_rtx, code = NE;
4480 break;
4481 case LTU:
4482 if (op1 == const1_rtx)
4483 op1 = const0_rtx, code = EQ;
4484 break;
4485 default:
4486 break;
4489 /* If we are comparing a double-word integer with zero, we can convert
4490 the comparison into one involving a single word. */
4491 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4492 && GET_MODE_CLASS (mode) == MODE_INT
4493 && op1 == const0_rtx
4494 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4496 if (code == EQ || code == NE)
4498 rtx op00, op01, op0both;
4500 /* Do a logical OR of the two words and compare the result. */
4501 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4502 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4503 op0both = expand_binop (word_mode, ior_optab, op00, op01,
4504 NULL_RTX, unsignedp, OPTAB_DIRECT);
4505 if (op0both != 0)
4506 return emit_store_flag (target, code, op0both, op1, word_mode,
4507 unsignedp, normalizep);
4509 else if (code == LT || code == GE)
4511 rtx op0h;
4513 /* If testing the sign bit, we can just test the high word. */
4514 op0h = simplify_gen_subreg (word_mode, op0, mode,
4515 subreg_highpart_offset (word_mode, mode));
4516 return emit_store_flag (target, code, op0h, op1, word_mode,
4517 unsignedp, normalizep);
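/* For illustration only: the two word-mode reductions above, in C for
   a double word split into 32-bit halves (these function names are
   hypothetical).  Equality with zero ORs the halves together; the
   sign test needs only the high word:

     static int
     di_eq_zero (uint32_t lo, uint32_t hi)
     {
       return (lo | hi) == 0;
     }

     static int
     di_lt_zero (uint32_t lo, uint32_t hi)
     {
       return (int32_t) hi < 0;
     }
*/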
4521 /* From now on, we won't change CODE, so set ICODE now. */
4522 icode = setcc_gen_code[(int) code];
4524 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4525 complement of A (for GE) and shifting the sign bit to the low bit. */
4526 if (op1 == const0_rtx && (code == LT || code == GE)
4527 && GET_MODE_CLASS (mode) == MODE_INT
4528 && (normalizep || STORE_FLAG_VALUE == 1
4529 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4530 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4531 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4533 subtarget = target;
4535 /* If the result is to be wider than OP0, it is best to convert it
4536 first. If it is to be narrower, it is *incorrect* to convert it
4537 first. */
4538 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4540 op0 = protect_from_queue (op0, 0);
4541 op0 = convert_modes (target_mode, mode, op0, 0);
4542 mode = target_mode;
4545 if (target_mode != mode)
4546 subtarget = 0;
4548 if (code == GE)
4549 op0 = expand_unop (mode, one_cmpl_optab, op0,
4550 ((STORE_FLAG_VALUE == 1 || normalizep)
4551 ? 0 : subtarget), 0);
4553 if (STORE_FLAG_VALUE == 1 || normalizep)
4554 /* If we are supposed to produce a 0/1 value, we want to do
4555 a logical shift from the sign bit to the low-order bit; for
4556 a -1/0 value, we do an arithmetic shift. */
4557 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4558 size_int (GET_MODE_BITSIZE (mode) - 1),
4559 subtarget, normalizep != -1);
4561 if (mode != target_mode)
4562 op0 = convert_modes (target_mode, mode, op0, 0);
4564 return op0;
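/* For illustration only: the same idea in C for 32 bits, assuming
   STORE_FLAG_VALUE == 1 (these function names are hypothetical).
   A < 0 is the sign bit shifted down logically; A >= 0 takes the
   ones complement first:

     static uint32_t
     lt_zero (int32_t a)
     {
       return (uint32_t) a >> 31;
     }

     static uint32_t
     ge_zero (int32_t a)
     {
       return (uint32_t) ~a >> 31;
     }

   For a -1/0 result (NORMALIZEP == -1) the shift is arithmetic
   instead, which is what the expand_shift call above selects.  */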
4567 if (icode != CODE_FOR_nothing)
4569 insn_operand_predicate_fn pred;
4571 /* We think we may be able to do this with a scc insn. Emit the
4572 comparison and then the scc insn.
4574 compare_from_rtx may call emit_queue, which would be deleted below
4575 if the scc insn fails. So call it ourselves before setting LAST.
4576 Likewise for do_pending_stack_adjust. */
4578 emit_queue ();
4579 do_pending_stack_adjust ();
4580 last = get_last_insn ();
4582 comparison
4583 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4584 if (CONSTANT_P (comparison))
4586 if (GET_CODE (comparison) == CONST_INT)
4588 if (comparison == const0_rtx)
4589 return const0_rtx;
4591 #ifdef FLOAT_STORE_FLAG_VALUE
4592 else if (GET_CODE (comparison) == CONST_DOUBLE)
4594 if (comparison == CONST0_RTX (GET_MODE (comparison)))
4595 return const0_rtx;
4597 #endif
4598 else
4599 abort ();
4600 if (normalizep == 1)
4601 return const1_rtx;
4602 if (normalizep == -1)
4603 return constm1_rtx;
4604 return const_true_rtx;
4607 /* The code of COMPARISON may not match CODE if compare_from_rtx
4608 decided to swap its operands and reverse the original code.
4610 We know that compare_from_rtx returns either a CONST_INT or
4611 a new comparison code, so it is safe to just extract the
4612 code from COMPARISON. */
4613 code = GET_CODE (comparison);
4615 /* Get a reference to the target in the proper mode for this insn. */
4616 compare_mode = insn_data[(int) icode].operand[0].mode;
4617 subtarget = target;
4618 pred = insn_data[(int) icode].operand[0].predicate;
4619 if (preserve_subexpressions_p ()
4620 || ! (*pred) (subtarget, compare_mode))
4621 subtarget = gen_reg_rtx (compare_mode);
4623 pattern = GEN_FCN (icode) (subtarget);
4624 if (pattern)
4626 emit_insn (pattern);
4628 /* If we are converting to a wider mode, first convert to
4629 TARGET_MODE, then normalize. This produces better combining
4630 opportunities on machines that have a SIGN_EXTRACT when we are
4631 testing a single bit. This mostly benefits the 68k.
4633 If STORE_FLAG_VALUE does not have the sign bit set when
4634 interpreted in COMPARE_MODE, we can do this conversion as
4635 unsigned, which is usually more efficient. */
4636 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4638 convert_move (target, subtarget,
4639 (GET_MODE_BITSIZE (compare_mode)
4640 <= HOST_BITS_PER_WIDE_INT)
4641 && 0 == (STORE_FLAG_VALUE
4642 & ((HOST_WIDE_INT) 1
4643 << (GET_MODE_BITSIZE (compare_mode) -1))));
4644 op0 = target;
4645 compare_mode = target_mode;
4647 else
4648 op0 = subtarget;
4650 /* If we want to keep subexpressions around, don't reuse our
4651 last target. */
4653 if (preserve_subexpressions_p ())
4654 subtarget = 0;
4656 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4657 we don't have to do anything. */
4658 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4660 /* STORE_FLAG_VALUE might be the most negative number, so write
4661 the comparison this way to avoid a compile-time warning. */
4662 else if (- normalizep == STORE_FLAG_VALUE)
4663 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4665 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4666 makes it hard to use a value of just the sign bit due to
4667 ANSI integer constant typing rules. */
4668 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4669 && (STORE_FLAG_VALUE
4670 & ((HOST_WIDE_INT) 1
4671 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4672 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4673 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4674 subtarget, normalizep == 1);
4675 else if (STORE_FLAG_VALUE & 1)
4677 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4678 if (normalizep == -1)
4679 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4681 else
4682 abort ();
4684 /* If we were converting to a smaller mode, do the
4685 conversion now. */
4686 if (target_mode != compare_mode)
4688 convert_move (target, op0, 0);
4689 return target;
4691 else
4692 return op0;
4696 delete_insns_since (last);
4698 /* If expensive optimizations, use different pseudo registers for each
4699 insn, instead of reusing the same pseudo. This leads to better CSE,
4700 but slows down the compiler, since there are more pseudos. */
4701 subtarget = (!flag_expensive_optimizations
4702 && (target_mode == mode)) ? target : NULL_RTX;
4704 /* If we reached here, we can't do this with a scc insn. However, there
4705 are some comparisons that can be done directly. For example, if
4706 this is an equality comparison of integers, we can try to exclusive-or
4707 (or subtract) the two operands and use a recursive call to try the
4708 comparison with zero. Don't do any of these cases if branches are
4709 very cheap. */
4711 if (BRANCH_COST > 0
4712 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4713 && op1 != const0_rtx)
4715 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4716 OPTAB_WIDEN);
4718 if (tem == 0)
4719 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4720 OPTAB_WIDEN);
4721 if (tem != 0)
4722 tem = emit_store_flag (target, code, tem, const0_rtx,
4723 mode, unsignedp, normalizep);
4724 if (tem == 0)
4725 delete_insns_since (last);
4726 return tem;
4729 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4730 the constant zero. Reject all other comparisons at this point. Only
4731 do LE and GT if branches are expensive since they are expensive on
4732 2-operand machines. */
4734 if (BRANCH_COST == 0
4735 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4736 || (code != EQ && code != NE
4737 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4738 return 0;
4740 /* See what we need to return. We can only return a 1, -1, or the
4741 sign bit. */
4743 if (normalizep == 0)
4745 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4746 normalizep = STORE_FLAG_VALUE;
4748 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4749 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4750 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4752 else
4753 return 0;
4756 /* Try to put the result of the comparison in the sign bit. Assume we can't
4757 do the necessary operation below. */
4759 tem = 0;
4761 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4762 the sign bit set. */
4764 if (code == LE)
4766 /* This is destructive, so SUBTARGET can't be OP0. */
4767 if (rtx_equal_p (subtarget, op0))
4768 subtarget = 0;
4770 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4771 OPTAB_WIDEN);
4772 if (tem)
4773 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4774 OPTAB_WIDEN);
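/* For illustration only, in C for 32 bits (the function name is
   hypothetical; the computation is done unsigned so A - 1 may wrap):

     static uint32_t
     le_zero (int32_t a)
     {
       uint32_t u = (uint32_t) a;
       return (u | (u - 1)) >> 31;
     }

   A < 0 sets the sign bit directly, A == 0 sets it through the borrow
   in A - 1, and for A > 0 neither term can set it.  */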
4777 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4778 number of bits in the mode of OP0, minus one. */
4780 if (code == GT)
4782 if (rtx_equal_p (subtarget, op0))
4783 subtarget = 0;
4785 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4786 size_int (GET_MODE_BITSIZE (mode) - 1),
4787 subtarget, 0);
4788 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4789 OPTAB_WIDEN);
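/* For illustration only, in C for 32 bits (the function name is
   hypothetical; assumes arithmetic right shift of signed values):

     static uint32_t
     gt_zero (int32_t a)
     {
       int32_t t = (a >> 31) - a;
       return (uint32_t) t >> 31;
     }

   A >> 31 is 0 for A >= 0 and -1 for A < 0, so the subtraction goes
   negative exactly when A > 0.  */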
4792 if (code == EQ || code == NE)
4794 /* For EQ or NE, one way to do the comparison is to apply an operation
4795 that converts the operand into a positive number if it is nonzero
4796 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4797 for NE we negate. This puts the result in the sign bit. Then we
4798 normalize with a shift, if needed.
4800 Two operations that can do the above actions are ABS and FFS, so try
4801 them. If that doesn't work, and MODE is smaller than a full word,
4802 we can use zero-extension to the wider mode (an unsigned conversion)
4803 as the operation. */
4805 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4806 that is compensated by the subsequent overflow when subtracting
4807 one / negating. */
4809 if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
4810 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4811 else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
4812 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4813 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4815 op0 = protect_from_queue (op0, 0);
4816 tem = convert_modes (word_mode, mode, op0, 1);
4817 mode = word_mode;
4820 if (tem != 0)
4822 if (code == EQ)
4823 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4824 0, OPTAB_WIDEN);
4825 else
4826 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
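/* For illustration only: the ABS variant in C for 32 bits (the
   function name is hypothetical; wrapping arithmetic is assumed, so
   the "absolute value" of INT_MIN stays INT_MIN and still works):

     static uint32_t
     ne_zero_abs (int32_t x)
     {
       uint32_t t = x < 0 ? 0u - (uint32_t) x : (uint32_t) x;
       return (0u - t) >> 31;
     }

   T is nonzero exactly when X is, so negating it sets the sign bit
   exactly when X != 0; for EQ the code subtracts one instead, which
   sets the sign bit only for T == 0.  */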
4829 /* If we couldn't do it that way, for NE we can "or" the two's complement
4830 of the value with itself. For EQ, we take the one's complement of
4831 that "or", which is an extra insn, so we only handle EQ if branches
4832 are expensive. */
4834 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4836 if (rtx_equal_p (subtarget, op0))
4837 subtarget = 0;
4839 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4840 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4841 OPTAB_WIDEN);
4843 if (tem && code == EQ)
4844 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
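/* For illustration only: the NE case in C for 32 bits (the function
   name is hypothetical; unsigned so the negation may wrap):

     static uint32_t
     ne_zero_or (int32_t x)
     {
       uint32_t u = (uint32_t) x;
       return ((0u - u) | u) >> 31;
     }

   Either U or -U has the sign bit set unless U is zero; for EQ the
   code complements the OR first, which costs the extra insn noted
   above.  */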
4848 if (tem && normalizep)
4849 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4850 size_int (GET_MODE_BITSIZE (mode) - 1),
4851 subtarget, normalizep == 1);
4853 if (tem)
4855 if (GET_MODE (tem) != target_mode)
4857 convert_move (target, tem, 0);
4858 tem = target;
4860 else if (!subtarget)
4862 emit_move_insn (target, tem);
4863 tem = target;
4866 else
4867 delete_insns_since (last);
4869 return tem;
4872 /* Like emit_store_flag, but always succeeds. */
4874 rtx
4875 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
4876 enum machine_mode mode, int unsignedp, int normalizep)
4878 rtx tem, label;
4880 /* First see if emit_store_flag can do the job. */
4881 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4882 if (tem != 0)
4883 return tem;
4885 if (normalizep == 0)
4886 normalizep = 1;
4888 /* If this failed, we have to do this with set/compare/jump/set code. */
4890 if (!REG_P (target)
4891 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4892 target = gen_reg_rtx (GET_MODE (target));
4894 emit_move_insn (target, const1_rtx);
4895 label = gen_label_rtx ();
4896 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4897 NULL_RTX, label);
4899 emit_move_insn (target, const0_rtx);
4900 emit_label (label);
4902 return target;
4905 /* Perform possibly multi-word comparison and conditional jump to LABEL
4906 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4908 The algorithm is based on the code in expr.c:do_jump.
4910 Note that this does not perform a general comparison. Only variants
4911 generated within expmed.c are correctly handled; others abort (but could
4912 be handled if needed). */
4914 static void
4915 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
4916 rtx label)
4918 /* If this mode is an integer too wide to compare properly,
4919 compare word by word. Rely on cse to optimize constant cases. */
4921 if (GET_MODE_CLASS (mode) == MODE_INT
4922 && ! can_compare_p (op, mode, ccp_jump))
4924 rtx label2 = gen_label_rtx ();
4926 switch (op)
4928 case LTU:
4929 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4930 break;
4932 case LEU:
4933 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4934 break;
4936 case LT:
4937 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4938 break;
4940 case GT:
4941 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4942 break;
4944 case GE:
4945 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4946 break;
4948 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4949 those are the only equality operations we do. */
4950 case EQ:
4951 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4952 abort ();
4953 do_jump_by_parts_equality_rtx (arg1, label2, label);
4954 break;
4956 case NE:
4957 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4958 abort ();
4959 do_jump_by_parts_equality_rtx (arg1, label, label2);
4960 break;
4962 default:
4963 abort ();
4966 emit_label (label2);
4968 else
4969 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);