/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
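
/* Editor's note, an illustrative sketch (not code from this file): the
   synthetic multiply/divide expansion elsewhere in this file consults these
   tables to decide between an expensive operation and a shift sequence.
   For example, x * 10 can be rewritten as ((x << 2) + x) << 1, which pays
   shiftadd_cost[2] + shift_cost[1]; the rewrite is only worthwhile when
   that sum beats mul_cost[(int) mode].  */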
void
init_expmed (void)
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
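
/* Illustrative example (editor's addition, not part of the original file):
   negate_rtx (SImode, GEN_INT (5)) folds at compile time to GEN_INT (-5)
   via simplify_unary_operation; for a REG operand the folding fails and
   expand_unop instead emits a (neg:SI (reg)) insn.  */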
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
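
/* Usage sketch (editor's addition): a caller that wants the mode the insv
   pattern expects for its field-value operand writes

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   as store_bit_field below does; the result is MAX_MACHINE_MODE when the
   target provides no insv pattern at all.  */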
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
		 rtx value, HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_set_optab->handlers[(int)GET_MODE (op0)].insn_code
	  != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
	  || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
	  || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return dest;
	}
    }

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	      || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, fieldmode, offset);
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
		  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
	fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum, fieldmode),
			   total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
			  ? word_mode : int_mode_for_mode (GET_MODE (value))),
			 value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
		  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.
	     Compute offset as multiple of this unit, counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = adjust_address (op0, bestmode, offset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (maxmode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = gen_int_mode (INTVAL (value), maxmode);
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
	}
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
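
/* Usage sketch (editor's addition, an illustration rather than code from
   this file): storing the constant 3 into a 4-bit field that starts at bit
   8 of a SImode pseudo REG could be requested as

     store_bit_field (reg, 4, 8, VOIDmode, GEN_INT (3), -1);

   On a target without an insv pattern this falls through to
   store_fixed_bit_field, which masks and ors the shifted value into
   place.  */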
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
		       unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
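
/* Editor's note, an illustrative sketch of the net effect for a
   non-constant VALUE (written as C-like pseudo-ops, not RTL):

     op0 = (op0 & mask_rtx (mode, bitpos, bitsize, 1))
	   | ((value & mask_rtx (mode, 0, bitsize, 0)) << bitpos);

   with the AND of OP0 skipped when the new bits are all ones and the IOR
   skipped when they are all zeros.  */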
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
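
/* Illustrative example (editor's addition): with 32-bit words, storing a
   16-bit field at bit position 24 of a multiword REG is split into two
   pieces: bits 24..31 of word 0 (thissize = 32 - 24 = 8) and bits 0..7 of
   word 1 (the remaining 8 bits), each handled by its own
   store_fixed_bit_field call.  */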
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
		   enum machine_mode mode, enum machine_mode tmode,
		   HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
	{
	  offset += (bitpos / unit);
	  bitpos %= unit;
	}
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_extract_optab->handlers[(int)GET_MODE (op0)].insn_code
	  != CODE_FOR_nothing)
      && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
	  == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
	dest = target;

      if (!dest)
	dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
	dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
	src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
	  || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
	  || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
	abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
	{
	  emit_insn (seq);
	  emit_insn (pat);
	  return extract_bit_field (dest, bitsize,
				    bitnum - pos * GET_MODE_BITSIZE (innermode),
				    unsignedp, target, mode, tmode, total_size);
	}
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
	   ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
	   : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	&& bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
	   /* ??? The big endian test here is wrong.  This is correct
	      if the value is in a register, and if mode_for_size is not
	      the same mode as op0.  This causes us to get unnecessarily
	      inefficient code from the Thumb port when -mbig-endian.  */
	   && (BYTES_BIG_ENDIAN
	       ? bitpos + bitsize == BITS_PER_WORD
	       : bitpos == 0)))
      && ((GET_CODE (op0) != MEM
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				     GET_MODE_BITSIZE (GET_MODE (op0)))
	   && GET_MODE_SIZE (mode1) != 0
	   && byte_offset % GET_MODE_SIZE (mode1) == 0)
	  || (GET_CODE (op0) == MEM
	      && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
		  || (offset * BITS_PER_UNIT % bitsize == 0
		      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		goto no_subreg_mode_swap;
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, mode1, offset);
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
	extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
	extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
	  && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  return gen_lowpart (tmode, target);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
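
/* Usage sketch (editor's addition, an illustration rather than code from
   this file): fetching bits 8..11 of a SImode pseudo REG as an unsigned
   QImode value could be requested as

     x = extract_bit_field (reg, 4, 8, 1, NULL_RTX, QImode, QImode, -1);

   On a target without an extzv pattern this falls through to
   extract_fixed_bit_field, which shifts the field to the lsb and masks
   off the upper bits.  */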
1633 /* Extract a bit field using shifts and boolean operations
1634 Returns an rtx to represent the value.
1635 OP0 addresses a register (word) or memory (byte).
1636 BITPOS says which bit within the word or byte the bit field starts in.
1637 OFFSET says how many bytes farther the bit field starts;
1638 it is 0 if OP0 is a register.
1639 BITSIZE says how many bits long the bit field is.
1640 (If OP0 is a register, it may be narrower than a full word,
1641 but BITPOS still counts within a full word,
1642 which is significant on bigendian machines.)
1644 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1645 If TARGET is nonzero, attempts to store the value there
1646 and return TARGET, but this is not guaranteed.
1647 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1649 static rtx
1650 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1651 unsigned HOST_WIDE_INT offset,
1652 unsigned HOST_WIDE_INT bitsize,
1653 unsigned HOST_WIDE_INT bitpos, rtx target,
1654 int unsignedp)
1656 unsigned int total_bits = BITS_PER_WORD;
1657 enum machine_mode mode;
1659 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1661 /* Special treatment for a bit field split across two registers. */
1662 if (bitsize + bitpos > BITS_PER_WORD)
1663 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1665 else
1667 /* Get the proper mode to use for this field. We want a mode that
1668 includes the entire field. If such a mode would be larger than
1669 a word, we won't be doing the extraction the normal way. */
1671 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1672 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1674 if (mode == VOIDmode)
1675 /* The only way this should occur is if the field spans word
1676 boundaries. */
1677 return extract_split_bit_field (op0, bitsize,
1678 bitpos + offset * BITS_PER_UNIT,
1679 unsignedp);
1681 total_bits = GET_MODE_BITSIZE (mode);
1683 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1684 be in the range 0 to total_bits-1, and put any excess bytes in
1685 OFFSET. */
1686 if (bitpos >= total_bits)
1688 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1689 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1690 * BITS_PER_UNIT);
1693 /* Get ref to an aligned byte, halfword, or word containing the field.
1694 Adjust BITPOS to be position within a word,
1695 and OFFSET to be the offset of that word.
1696 Then alter OP0 to refer to that word. */
1697 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1698 offset -= (offset % (total_bits / BITS_PER_UNIT));
1699 op0 = adjust_address (op0, mode, offset);
1702 mode = GET_MODE (op0);
1704 if (BYTES_BIG_ENDIAN)
1705 /* BITPOS is the distance between our msb and that of OP0.
1706 Convert it to the distance from the lsb. */
1707 bitpos = total_bits - bitsize - bitpos;
1709 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1710 We have reduced the big-endian case to the little-endian case. */
1712 if (unsignedp)
1714 if (bitpos)
1716 /* If the field does not already start at the lsb,
1717 shift it so it does. */
1718 tree amount = build_int_2 (bitpos, 0);
1719 /* Maybe propagate the target for the shift. */
1720 /* But not if we will return it--could confuse integrate.c. */
1721 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1722 && !REG_FUNCTION_VALUE_P (target)
1723 ? target : 0);
1724 if (tmode != mode) subtarget = 0;
1725 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1727 /* Convert the value to the desired mode. */
1728 if (mode != tmode)
1729 op0 = convert_to_mode (tmode, op0, 1);
1731 /* Unless the msb of the field used to be the msb when we shifted,
1732 mask out the upper bits. */
1734 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1735 return expand_binop (GET_MODE (op0), and_optab, op0,
1736 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1737 target, 1, OPTAB_LIB_WIDEN);
1738 return op0;
1741 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1742 then arithmetic-shift its lsb to the lsb of the word. */
1743 op0 = force_reg (mode, op0);
1744 if (mode != tmode)
1745 target = 0;
1747 /* Find the narrowest integer mode that contains the field. */
1749 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1750 mode = GET_MODE_WIDER_MODE (mode))
1751 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1753 op0 = convert_to_mode (mode, op0, 0);
1754 break;
1757 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1759 tree amount
1760 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1761 /* Maybe propagate the target for the shift. */
1762 /* But not if we will return the result--could confuse integrate.c. */
1763 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1764 && ! REG_FUNCTION_VALUE_P (target)
1765 ? target : 0);
1766 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1769 return expand_shift (RSHIFT_EXPR, mode, op0,
1770 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1771 target, 0);
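/* Illustrative sketch, not part of the original file: the shift-and-mask
   technique the unsigned path above implements, shown on a plain 32-bit
   word.  Assumes BITPOS counts from the lsb, 0 < BITSIZE, and
   BITPOS + BITSIZE < 32.  */
static unsigned int
example_extract_unsigned (unsigned int word, int bitpos, int bitsize)
{
  /* Shift the field down to the lsb, then mask off the bits above it.  */
  return (word >> bitpos) & ((1u << bitsize) - 1u);
}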
1774 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1775 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1776 complement of that if COMPLEMENT. The mask is truncated if
1777 necessary to the width of mode MODE. The mask is zero-extended if
1778 BITSIZE+BITPOS is too small for MODE. */
1780 static rtx
1781 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1783 HOST_WIDE_INT masklow, maskhigh;
1785 if (bitsize == 0)
1786 masklow = 0;
1787 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1788 masklow = (HOST_WIDE_INT) -1 << bitpos;
1789 else
1790 masklow = 0;
1792 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1793 masklow &= ((unsigned HOST_WIDE_INT) -1
1794 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1796 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1797 maskhigh = -1;
1798 else
1799 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1801 if (bitsize == 0)
1802 maskhigh = 0;
1803 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1804 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1805 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1806 else
1807 maskhigh = 0;
1809 if (complement)
1811 maskhigh = ~maskhigh;
1812 masklow = ~masklow;
1815 return immed_double_const (masklow, maskhigh, mode);
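/* Illustrative sketch, not part of the original file: the single-word
   analogue of the masklow computation above, for a 32-bit word.
   Assumes 0 < BITSIZE and BITPOS + BITSIZE <= 32.  */
static unsigned int
example_mask (int bitpos, int bitsize, int complement)
{
  unsigned int mask = ~0u << bitpos;            /* ones from BITPOS upward */
  if (bitpos + bitsize < 32)
    mask &= ~0u >> (32 - bitpos - bitsize);     /* clear bits above the field */
  return complement ? ~mask : mask;
}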
1818 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1819 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1821 static rtx
1822 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1824 unsigned HOST_WIDE_INT v = INTVAL (value);
1825 HOST_WIDE_INT low, high;
1827 if (bitsize < HOST_BITS_PER_WIDE_INT)
1828 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1830 if (bitpos < HOST_BITS_PER_WIDE_INT)
1832 low = v << bitpos;
1833 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1835 else
1837 low = 0;
1838 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1841 return immed_double_const (low, high, mode);
1844 /* Extract a bit field that is split across two words
1845 and return an RTX for the result.
1847 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1848 BITSIZE is the field width; BITPOS is the position of its first bit within the word.
1849 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend. */
1851 static rtx
1852 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1853 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1855 unsigned int unit;
1856 unsigned int bitsdone = 0;
1857 rtx result = NULL_RTX;
1858 int first = 1;
1860 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1861 much at a time. */
1862 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1863 unit = BITS_PER_WORD;
1864 else
1865 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1867 while (bitsdone < bitsize)
1869 unsigned HOST_WIDE_INT thissize;
1870 rtx part, word;
1871 unsigned HOST_WIDE_INT thispos;
1872 unsigned HOST_WIDE_INT offset;
1874 offset = (bitpos + bitsdone) / unit;
1875 thispos = (bitpos + bitsdone) % unit;
1877 /* THISSIZE must not overrun a word boundary. Otherwise,
1878 extract_fixed_bit_field will call us again, and we will mutually
1879 recurse forever. */
1880 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1881 thissize = MIN (thissize, unit - thispos);
1883 /* If OP0 is a register, then handle OFFSET here.
1885 When handling multiword bitfields, extract_bit_field may pass
1886 down a word_mode SUBREG of a larger REG for a bitfield that actually
1887 crosses a word boundary. Thus, for a SUBREG, we must find
1888 the current word starting from the base register. */
1889 if (GET_CODE (op0) == SUBREG)
1891 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1892 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1893 GET_MODE (SUBREG_REG (op0)));
1894 offset = 0;
1896 else if (GET_CODE (op0) == REG)
1898 word = operand_subword_force (op0, offset, GET_MODE (op0));
1899 offset = 0;
1901 else
1902 word = op0;
1904 /* Extract the parts in bit-counting order,
1905 whose meaning is determined by BYTES_BIG_ENDIAN.
1906 OFFSET is in UNITs, and UNIT is in bits.
1907 extract_fixed_bit_field wants offset in bytes. */
1908 part = extract_fixed_bit_field (word_mode, word,
1909 offset * unit / BITS_PER_UNIT,
1910 thissize, thispos, 0, 1);
1911 bitsdone += thissize;
1913 /* Shift this part into place for the result. */
1914 if (BYTES_BIG_ENDIAN)
1916 if (bitsize != bitsdone)
1917 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1918 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1920 else
1922 if (bitsdone != thissize)
1923 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1924 build_int_2 (bitsdone - thissize, 0), 0, 1);
1927 if (first)
1928 result = part;
1929 else
1930 /* Combine the parts with bitwise or. This works
1931 because we extracted each part as an unsigned bit field. */
1932 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1933 OPTAB_LIB_WIDEN);
1935 first = 0;
1938 /* Unsigned bit field: we are done. */
1939 if (unsignedp)
1940 return result;
1941 /* Signed bit field: sign-extend with two arithmetic shifts. */
1942 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1943 build_int_2 (BITS_PER_WORD - bitsize, 0),
1944 NULL_RTX, 0);
1945 return expand_shift (RSHIFT_EXPR, word_mode, result,
1946 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
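/* Illustrative sketch, not part of the original file: the two-shift sign
   extension performed just above, on a 32-bit word.  Assumes
   0 < BITSIZE < 32 and that signed right shift is arithmetic
   (implementation-defined in C, but true on the hosts GCC targets).  */
static int
example_sign_extend (unsigned int field, int bitsize)
{
  /* Push the field's msb into the word's msb, then shift back so the
     sign bit is replicated through the upper bits.  */
  return ((int) (field << (32 - bitsize))) >> (32 - bitsize);
}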
1949 /* Add INC into TARGET. */
1951 void
1952 expand_inc (rtx target, rtx inc)
1954 rtx value = expand_binop (GET_MODE (target), add_optab,
1955 target, inc,
1956 target, 0, OPTAB_LIB_WIDEN);
1957 if (value != target)
1958 emit_move_insn (target, value);
1961 /* Subtract DEC from TARGET. */
1963 void
1964 expand_dec (rtx target, rtx dec)
1966 rtx value = expand_binop (GET_MODE (target), sub_optab,
1967 target, dec,
1968 target, 0, OPTAB_LIB_WIDEN);
1969 if (value != target)
1970 emit_move_insn (target, value);
1973 /* Output a shift instruction for expression code CODE,
1974 with SHIFTED being the rtx for the value to shift,
1975 and AMOUNT the tree for the amount to shift by.
1976 Store the result in the rtx TARGET, if that is convenient.
1977 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1978 Return the rtx for where the value is. */
1981 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
1982 tree amount, rtx target, int unsignedp)
1984 rtx op1, temp = 0;
1985 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1986 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1987 int try;
1989 /* We previously detected shift counts computed by NEGATE_EXPR
1990 and shifted in the other direction, but that does not work
1991 on all machines. */
1993 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1995 if (SHIFT_COUNT_TRUNCATED)
1997 if (GET_CODE (op1) == CONST_INT
1998 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1999 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2000 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2001 % GET_MODE_BITSIZE (mode));
2002 else if (GET_CODE (op1) == SUBREG
2003 && subreg_lowpart_p (op1))
2004 op1 = SUBREG_REG (op1);
2007 if (op1 == const0_rtx)
2008 return shifted;
2010 for (try = 0; temp == 0 && try < 3; try++)
2012 enum optab_methods methods;
2014 if (try == 0)
2015 methods = OPTAB_DIRECT;
2016 else if (try == 1)
2017 methods = OPTAB_WIDEN;
2018 else
2019 methods = OPTAB_LIB_WIDEN;
2021 if (rotate)
2023 /* Widening does not work for rotation. */
2024 if (methods == OPTAB_WIDEN)
2025 continue;
2026 else if (methods == OPTAB_LIB_WIDEN)
2028 /* If we have been unable to open-code this by a rotation,
2029 do it as the IOR of two shifts. I.e., to rotate A
2030 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2031 where C is the bitsize of A.
2033 It is theoretically possible that the target machine might
2034 not be able to perform either shift and hence we would
2035 be making two libcalls rather than just the one for the
2036 shift (similarly if IOR could not be done). We will allow
2037 this extremely unlikely lossage to avoid complicating the
2038 code below. */
2040 rtx subtarget = target == shifted ? 0 : target;
2041 rtx temp1;
2042 tree type = TREE_TYPE (amount);
2043 tree new_amount = make_tree (type, op1);
2044 tree other_amount
2045 = fold (build (MINUS_EXPR, type,
2046 convert (type,
2047 build_int_2 (GET_MODE_BITSIZE (mode),
2048 0)),
2049 amount));
2051 shifted = force_reg (mode, shifted);
2053 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2054 mode, shifted, new_amount, subtarget, 1);
2055 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2056 mode, shifted, other_amount, 0, 1);
2057 return expand_binop (mode, ior_optab, temp, temp1, target,
2058 unsignedp, methods);
2061 temp = expand_binop (mode,
2062 left ? rotl_optab : rotr_optab,
2063 shifted, op1, target, unsignedp, methods);
2065 /* If we don't have the rotate, but we are rotating by a constant
2066 that is in range, try a rotate in the opposite direction. */
2068 if (temp == 0 && GET_CODE (op1) == CONST_INT
2069 && INTVAL (op1) > 0
2070 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2071 temp = expand_binop (mode,
2072 left ? rotr_optab : rotl_optab,
2073 shifted,
2074 GEN_INT (GET_MODE_BITSIZE (mode)
2075 - INTVAL (op1)),
2076 target, unsignedp, methods);
2078 else if (unsignedp)
2079 temp = expand_binop (mode,
2080 left ? ashl_optab : lshr_optab,
2081 shifted, op1, target, unsignedp, methods);
2083 /* Do arithmetic shifts.
2084 Also, if we are going to widen the operand, we can just as well
2085 use an arithmetic right-shift instead of a logical one. */
2086 if (temp == 0 && ! rotate
2087 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2089 enum optab_methods methods1 = methods;
2091 /* If trying to widen a logical shift to an arithmetic shift,
2092 don't accept an arithmetic shift of the same size. */
2093 if (unsignedp)
2094 methods1 = OPTAB_MUST_WIDEN;
2096 /* Arithmetic shift */
2098 temp = expand_binop (mode,
2099 left ? ashl_optab : ashr_optab,
2100 shifted, op1, target, unsignedp, methods1);
2103 /* We used to try extzv here for logical right shifts, but that was
2104 only useful for one machine, the VAX, and caused poor code
2105 generation there for lshrdi3, so the code was deleted and a
2106 define_expand for lshrsi3 was added to vax.md. */
2109 if (temp == 0)
2110 abort ();
2111 return temp;
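/* Illustrative sketch, not part of the original file: the IOR-of-two-
   shifts fallback for rotation described above, on a 32-bit word.
   Assumes 0 < N < 32 so that neither shift count is out of range.  */
static unsigned int
example_rotate_left (unsigned int a, int n)
{
  return (a << n) | (a >> (32 - n));
}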
2114 enum alg_code { alg_zero, alg_m, alg_shift,
2115 alg_add_t_m2, alg_sub_t_m2,
2116 alg_add_factor, alg_sub_factor,
2117 alg_add_t2_m, alg_sub_t2_m,
2118 alg_add, alg_subtract, alg_factor, alg_shiftop };
2120 /* This structure records a sequence of operations.
2121 `ops' is the number of operations recorded.
2122 `cost' is their total cost.
2123 The operations are stored in `op' and the corresponding
2124 logarithms of the integer coefficients in `log'.
2126 These are the operations:
2127 alg_zero total := 0;
2128 alg_m total := multiplicand;
2129 alg_shift total := total * coeff;
2130 alg_add_t_m2 total := total + multiplicand * coeff;
2131 alg_sub_t_m2 total := total - multiplicand * coeff;
2132 alg_add_factor total := total * coeff + total;
2133 alg_sub_factor total := total * coeff - total;
2134 alg_add_t2_m total := total * coeff + multiplicand;
2135 alg_sub_t2_m total := total * coeff - multiplicand;
2137 The first operand must be either alg_zero or alg_m. */
2139 struct algorithm
2141 short cost;
2142 short ops;
2143 /* The sizes of the OP and LOG fields are not directly related to the
2144 word size, but the worst case arises when the multiplicand has few
2145 consecutive ones or zeros, i.e., looks like 10101010101...
2146 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2147 in total wordsize operations. */
2148 enum alg_code op[MAX_BITS_PER_WORD];
2149 char log[MAX_BITS_PER_WORD];
2152 /* Indicates the type of fixup needed after a constant multiplication.
2153 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2154 the result should be negated, and ADD_VARIANT means that the
2155 multiplicand should be added to the result. */
2156 enum mult_variant {basic_variant, negate_variant, add_variant};
2158 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT, int);
2159 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2160 struct algorithm *, enum mult_variant *, int);
2161 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2162 const struct algorithm *, enum mult_variant);
2163 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2164 int, unsigned HOST_WIDE_INT *,
2165 int *, int *);
2166 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2167 static rtx extract_high_half (enum machine_mode, rtx);
2168 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2169 int, int);
2170 /* Compute and return the best algorithm for multiplying by T.
2171 The algorithm must cost less than COST_LIMIT.
2172 If retval.cost >= COST_LIMIT, no algorithm was found and all
2173 other fields of the returned struct are undefined. */
2175 static void
2176 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2177 int cost_limit)
2179 int m;
2180 struct algorithm *alg_in, *best_alg;
2181 int cost;
2182 unsigned HOST_WIDE_INT q;
2184 /* Indicate that no algorithm is yet found. If no algorithm
2185 is found, this value will be returned and indicate failure. */
2186 alg_out->cost = cost_limit;
2188 if (cost_limit <= 0)
2189 return;
2191 /* t == 1 can be done in zero cost. */
2192 if (t == 1)
2194 alg_out->ops = 1;
2195 alg_out->cost = 0;
2196 alg_out->op[0] = alg_m;
2197 return;
2200 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2201 fail now. */
2202 if (t == 0)
2204 if (zero_cost >= cost_limit)
2205 return;
2206 else
2208 alg_out->ops = 1;
2209 alg_out->cost = zero_cost;
2210 alg_out->op[0] = alg_zero;
2211 return;
2215 /* We'll be needing a couple extra algorithm structures now. */
2217 alg_in = alloca (sizeof (struct algorithm));
2218 best_alg = alloca (sizeof (struct algorithm));
2220 /* If we have a group of zero bits at the low-order part of T, try
2221 multiplying by the remaining bits and then doing a shift. */
2223 if ((t & 1) == 0)
2225 m = floor_log2 (t & -t); /* m = number of low zero bits */
2226 if (m < BITS_PER_WORD)
2228 q = t >> m;
2229 cost = shift_cost[m];
2230 synth_mult (alg_in, q, cost_limit - cost);
2232 cost += alg_in->cost;
2233 if (cost < cost_limit)
2235 struct algorithm *x;
2236 x = alg_in, alg_in = best_alg, best_alg = x;
2237 best_alg->log[best_alg->ops] = m;
2238 best_alg->op[best_alg->ops] = alg_shift;
2239 cost_limit = cost;
2244 /* If we have an odd number, add or subtract one. */
2245 if ((t & 1) != 0)
2247 unsigned HOST_WIDE_INT w;
2249 for (w = 1; (w & t) != 0; w <<= 1)
2251 /* If T was -1, then W will be zero after the loop. This is another
2252 case where T ends with ...111. Handling this with (T + 1) and
2253 subtracting 1 produces slightly better code and makes algorithm
2254 selection much faster than treating it like the ...0111 case
2255 below. */
2256 if (w == 0
2257 || (w > 2
2258 /* Reject the case where t is 3.
2259 Thus we prefer addition in that case. */
2260 && t != 3))
2262 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2264 cost = add_cost;
2265 synth_mult (alg_in, t + 1, cost_limit - cost);
2267 cost += alg_in->cost;
2268 if (cost < cost_limit)
2270 struct algorithm *x;
2271 x = alg_in, alg_in = best_alg, best_alg = x;
2272 best_alg->log[best_alg->ops] = 0;
2273 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2274 cost_limit = cost;
2277 else
2279 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2281 cost = add_cost;
2282 synth_mult (alg_in, t - 1, cost_limit - cost);
2284 cost += alg_in->cost;
2285 if (cost < cost_limit)
2287 struct algorithm *x;
2288 x = alg_in, alg_in = best_alg, best_alg = x;
2289 best_alg->log[best_alg->ops] = 0;
2290 best_alg->op[best_alg->ops] = alg_add_t_m2;
2291 cost_limit = cost;
2296 /* Look for factors of t of the form
2297 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2298 If we find such a factor, we can multiply by t using an algorithm that
2299 multiplies by q, shift the result by m and add/subtract it to itself.
2301 We search for large factors first and loop down, even if large factors
2302 are less probable than small ones; if we find a large factor we will find a
2303 good sequence quickly, and therefore be able to prune (by decreasing
2304 COST_LIMIT) the search. */
2306 for (m = floor_log2 (t - 1); m >= 2; m--)
2308 unsigned HOST_WIDE_INT d;
2310 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2311 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2313 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2314 synth_mult (alg_in, t / d, cost_limit - cost);
2316 cost += alg_in->cost;
2317 if (cost < cost_limit)
2319 struct algorithm *x;
2320 x = alg_in, alg_in = best_alg, best_alg = x;
2321 best_alg->log[best_alg->ops] = m;
2322 best_alg->op[best_alg->ops] = alg_add_factor;
2323 cost_limit = cost;
2325 /* Other factors will have been taken care of in the recursion. */
2326 break;
2329 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2330 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2332 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2333 synth_mult (alg_in, t / d, cost_limit - cost);
2335 cost += alg_in->cost;
2336 if (cost < cost_limit)
2338 struct algorithm *x;
2339 x = alg_in, alg_in = best_alg, best_alg = x;
2340 best_alg->log[best_alg->ops] = m;
2341 best_alg->op[best_alg->ops] = alg_sub_factor;
2342 cost_limit = cost;
2344 break;
2348 /* Try shift-and-add (load effective address) instructions,
2349 i.e. do a*3, a*5, a*9. */
2350 if ((t & 1) != 0)
2352 q = t - 1;
2353 q = q & -q;
2354 m = exact_log2 (q);
2355 if (m >= 0 && m < BITS_PER_WORD)
2357 cost = shiftadd_cost[m];
2358 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2360 cost += alg_in->cost;
2361 if (cost < cost_limit)
2363 struct algorithm *x;
2364 x = alg_in, alg_in = best_alg, best_alg = x;
2365 best_alg->log[best_alg->ops] = m;
2366 best_alg->op[best_alg->ops] = alg_add_t2_m;
2367 cost_limit = cost;
2371 q = t + 1;
2372 q = q & -q;
2373 m = exact_log2 (q);
2374 if (m >= 0 && m < BITS_PER_WORD)
2376 cost = shiftsub_cost[m];
2377 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2379 cost += alg_in->cost;
2380 if (cost < cost_limit)
2382 struct algorithm *x;
2383 x = alg_in, alg_in = best_alg, best_alg = x;
2384 best_alg->log[best_alg->ops] = m;
2385 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2386 cost_limit = cost;
2391 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2392 we have not found any algorithm. */
2393 if (cost_limit == alg_out->cost)
2394 return;
2396 /* If the sequence is too long for `struct algorithm'
2397 to record, make this search fail. */
2398 if (best_alg->ops == MAX_BITS_PER_WORD)
2399 return;
2401 /* Copy the algorithm from temporary space to the space at alg_out.
2402 We avoid using structure assignment because the majority of
2403 best_alg is normally undefined, and this is a critical function. */
2404 alg_out->ops = best_alg->ops + 1;
2405 alg_out->cost = cost_limit;
2406 memcpy (alg_out->op, best_alg->op,
2407 alg_out->ops * sizeof *alg_out->op);
2408 memcpy (alg_out->log, best_alg->log,
2409 alg_out->ops * sizeof *alg_out->log);
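/* Illustrative worked example, not part of the original file: one
   decomposition synth_mult can find for t == 10, costing one
   shift-add and one shift instead of a multiply.  */
static unsigned int
example_mul10 (unsigned int x)
{
  unsigned int total = x;       /* alg_m: total := multiplicand */
  total = (total << 2) + x;     /* alg_add_t2_m, log 2: total := total*4 + x */
  return total << 1;            /* alg_shift, log 1: total := total*2 */
}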
2412 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2413 Try three variations:
2415 - a shift/add sequence based on VAL itself
2416 - a shift/add sequence based on -VAL, followed by a negation
2417 - a shift/add sequence based on VAL - 1, followed by an addition.
2419 Return true if the cheapest of these costs less than MULT_COST,
2420 describing the algorithm in *ALG and final fixup in *VARIANT. */
2422 static bool
2423 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2424 struct algorithm *alg, enum mult_variant *variant,
2425 int mult_cost)
2427 struct algorithm alg2;
2429 *variant = basic_variant;
2430 synth_mult (alg, val, mult_cost);
2432 /* This works only if the inverted value actually fits in an
2433 `unsigned int'. */
2434 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2436 synth_mult (&alg2, -val, MIN (alg->cost, mult_cost) - negate_cost);
2437 alg2.cost += negate_cost;
2438 if (alg2.cost < alg->cost)
2439 *alg = alg2, *variant = negate_variant;
2442 /* This proves very useful for division-by-constant. */
2443 synth_mult (&alg2, val - 1, MIN (alg->cost, mult_cost) - add_cost);
2444 alg2.cost += add_cost;
2445 if (alg2.cost < alg->cost)
2446 *alg = alg2, *variant = add_variant;
2448 return alg->cost < mult_cost;
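/* Illustrative worked example, not part of the original file: for
   val == -3 the negate variant computes |val| by shift and add and
   then negates, which is typically cheaper than a real multiply.  */
static int
example_mul_minus3 (int x)
{
  int t = (x << 1) + x;         /* shift/add sequence for 3 */
  return -t;                    /* negate_variant fixup */
}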
2451 /* A subroutine of expand_mult, used for constant multiplications.
2452 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2453 convenient. Use the shift/add sequence described by ALG and apply
2454 the final fixup specified by VARIANT. */
2456 static rtx
2457 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2458 rtx target, const struct algorithm *alg,
2459 enum mult_variant variant)
2461 HOST_WIDE_INT val_so_far;
2462 rtx insn, accum, tem;
2463 int opno;
2464 enum machine_mode nmode;
2466 /* op0 must be a register to make mult_cost match the precomputed
2467 shiftadd_cost array. */
2468 op0 = protect_from_queue (op0, 0);
2470 /* Avoid referencing memory over and over.
2471 For speed, but also for correctness when mem is volatile. */
2472 if (GET_CODE (op0) == MEM)
2473 op0 = force_reg (mode, op0);
2475 /* ACCUM starts out either as OP0 or as a zero, depending on
2476 the first operation. */
2478 if (alg->op[0] == alg_zero)
2480 accum = copy_to_mode_reg (mode, const0_rtx);
2481 val_so_far = 0;
2483 else if (alg->op[0] == alg_m)
2485 accum = copy_to_mode_reg (mode, op0);
2486 val_so_far = 1;
2488 else
2489 abort ();
2491 for (opno = 1; opno < alg->ops; opno++)
2493 int log = alg->log[opno];
2494 int preserve = preserve_subexpressions_p ();
2495 rtx shift_subtarget = preserve ? 0 : accum;
2496 rtx add_target
2497 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2498 && ! preserve)
2499 ? target : 0;
2500 rtx accum_target = preserve ? 0 : accum;
2502 switch (alg->op[opno])
2504 case alg_shift:
2505 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2506 build_int_2 (log, 0), NULL_RTX, 0);
2507 val_so_far <<= log;
2508 break;
2510 case alg_add_t_m2:
2511 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2512 build_int_2 (log, 0), NULL_RTX, 0);
2513 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2514 add_target ? add_target : accum_target);
2515 val_so_far += (HOST_WIDE_INT) 1 << log;
2516 break;
2518 case alg_sub_t_m2:
2519 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2520 build_int_2 (log, 0), NULL_RTX, 0);
2521 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2522 add_target ? add_target : accum_target);
2523 val_so_far -= (HOST_WIDE_INT) 1 << log;
2524 break;
2526 case alg_add_t2_m:
2527 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2528 build_int_2 (log, 0), shift_subtarget,
2530 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2531 add_target ? add_target : accum_target);
2532 val_so_far = (val_so_far << log) + 1;
2533 break;
2535 case alg_sub_t2_m:
2536 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2537 build_int_2 (log, 0), shift_subtarget, 0);
2538 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2539 add_target ? add_target : accum_target);
2540 val_so_far = (val_so_far << log) - 1;
2541 break;
2543 case alg_add_factor:
2544 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2545 build_int_2 (log, 0), NULL_RTX, 0);
2546 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2547 add_target ? add_target : accum_target);
2548 val_so_far += val_so_far << log;
2549 break;
2551 case alg_sub_factor:
2552 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2553 build_int_2 (log, 0), NULL_RTX, 0);
2554 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2555 (add_target ? add_target
2556 : preserve ? 0 : tem));
2557 val_so_far = (val_so_far << log) - val_so_far;
2558 break;
2560 default:
2561 abort ();
2564 /* Write a REG_EQUAL note on the last insn so that we can cse
2565 multiplication sequences. Note that if ACCUM is a SUBREG,
2566 we've set the inner register and must properly indicate
2567 that. */
2569 tem = op0, nmode = mode;
2570 if (GET_CODE (accum) == SUBREG)
2572 nmode = GET_MODE (SUBREG_REG (accum));
2573 tem = gen_lowpart (nmode, op0);
2576 insn = get_last_insn ();
2577 set_unique_reg_note (insn, REG_EQUAL,
2578 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
2581 if (variant == negate_variant)
2583 val_so_far = -val_so_far;
2584 accum = expand_unop (mode, neg_optab, accum, target, 0);
2586 else if (variant == add_variant)
2588 val_so_far = val_so_far + 1;
2589 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2592 if (val != val_so_far)
2593 abort ();
2595 return accum;
2598 /* Perform a multiplication and return an rtx for the result.
2599 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2600 TARGET is a suggestion for where to store the result (an rtx).
2602 We check specially for a constant integer as OP1.
2603 If you want this check for OP0 as well, then before calling
2604 you should swap the two operands if OP0 would be constant. */
2607 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2608 int unsignedp)
2610 rtx const_op1 = op1;
2611 enum mult_variant variant;
2612 struct algorithm algorithm;
2614 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2615 less than or equal in size to `unsigned int' this doesn't matter.
2616 If the mode is larger than `unsigned int', then synth_mult works only
2617 if the constant value exactly fits in an `unsigned int' without any
2618 truncation. This means that multiplying by negative values does
2619 not work; results are off by 2^32 on a 32-bit machine. */
2621 /* If we are multiplying in DImode, it may still be a win
2622 to try to work with shifts and adds. */
2623 if (GET_CODE (op1) == CONST_DOUBLE
2624 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2625 && HOST_BITS_PER_INT >= BITS_PER_WORD
2626 && CONST_DOUBLE_HIGH (op1) == 0)
2627 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2628 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2629 && GET_CODE (op1) == CONST_INT
2630 && INTVAL (op1) < 0)
2631 const_op1 = 0;
2633 /* We used to test optimize here, on the grounds that it's better to
2634 produce a smaller program when -O is not used.
2635 But this causes such a terrible slowdown sometimes
2636 that it seems better to use synth_mult always. */
2638 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2639 && (unsignedp || !flag_trapv))
2641 int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2642 mult_cost = MIN (12 * add_cost, mult_cost);
2644 if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant,
2645 mult_cost))
2646 return expand_mult_const (mode, op0, INTVAL (const_op1), target,
2647 &algorithm, variant);
2650 if (GET_CODE (op0) == CONST_DOUBLE)
2652 rtx temp = op0;
2653 op0 = op1;
2654 op1 = temp;
2657 /* Expand x*2.0 as x+x. */
2658 if (GET_CODE (op1) == CONST_DOUBLE
2659 && GET_MODE_CLASS (mode) == MODE_FLOAT)
2661 REAL_VALUE_TYPE d;
2662 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
2664 if (REAL_VALUES_EQUAL (d, dconst2))
2666 op0 = force_reg (GET_MODE (op0), op0);
2667 return expand_binop (mode, add_optab, op0, op0,
2668 target, unsignedp, OPTAB_LIB_WIDEN);
2672 /* This used to use umul_optab if unsigned, but for non-widening multiply
2673 there is no difference between signed and unsigned. */
2674 op0 = expand_binop (mode,
2675 ! unsignedp
2676 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2677 ? smulv_optab : smul_optab,
2678 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2679 if (op0 == 0)
2680 abort ();
2681 return op0;
2684 /* Return the smallest n such that 2**n >= X. */
2687 ceil_log2 (unsigned HOST_WIDE_INT x)
2689 return floor_log2 (x - 1) + 1;
2692 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2693 replace division by D, and put the least significant N bits of the result
2694 in *MULTIPLIER_PTR and return the most significant bit.
2696 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2697 needed precision is in PRECISION (should be <= N).
2699 PRECISION should be as small as possible so this function can choose
2700 the multiplier more freely.
2702 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2703 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2705 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2706 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2708 static
2709 unsigned HOST_WIDE_INT
2710 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
2711 unsigned HOST_WIDE_INT *multiplier_ptr,
2712 int *post_shift_ptr, int *lgup_ptr)
2714 HOST_WIDE_INT mhigh_hi, mlow_hi;
2715 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2716 int lgup, post_shift;
2717 int pow, pow2;
2718 unsigned HOST_WIDE_INT nl, dummy1;
2719 HOST_WIDE_INT nh, dummy2;
2721 /* lgup = ceil(log2(divisor)); */
2722 lgup = ceil_log2 (d);
2724 if (lgup > n)
2725 abort ();
2727 pow = n + lgup;
2728 pow2 = n + lgup - precision;
2730 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2732 /* We could handle this with some effort, but this case is much better
2733 handled directly with a scc insn, so rely on the caller using that. */
2734 abort ();
2737 /* mlow = 2^(N + lgup)/d */
2738 if (pow >= HOST_BITS_PER_WIDE_INT)
2740 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2741 nl = 0;
2743 else
2745 nh = 0;
2746 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2748 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2749 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2751 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2752 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2753 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2754 else
2755 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2756 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2757 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2759 if (mhigh_hi && nh - d >= d)
2760 abort ();
2761 if (mhigh_hi > 1 || mlow_hi > 1)
2762 abort ();
2763 /* Assert that mlow < mhigh. */
2764 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2765 abort ();
2767 /* If precision == N, then mlow, mhigh exceed 2^N
2768 (but they do not exceed 2^(N+1)). */
2770 /* Reduce to lowest terms. */
2771 for (post_shift = lgup; post_shift > 0; post_shift--)
2773 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2774 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2775 if (ml_lo >= mh_lo)
2776 break;
2778 mlow_hi = 0;
2779 mlow_lo = ml_lo;
2780 mhigh_hi = 0;
2781 mhigh_lo = mh_lo;
2784 *post_shift_ptr = post_shift;
2785 *lgup_ptr = lgup;
2786 if (n < HOST_BITS_PER_WIDE_INT)
2788 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2789 *multiplier_ptr = mhigh_lo & mask;
2790 return mhigh_lo >= mask;
2792 else
2794 *multiplier_ptr = mhigh_lo;
2795 return mhigh_hi;
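/* Illustrative check, not part of the original file: for d == 3 with
   N == precision == 32, the procedure above yields multiplier
   0xAAAAAAAB (= ceil(2^33 / 3)) and a post shift of 1.  For d == 7 the
   multiplier needs N + 1 bits; its low N bits are 0x24924925 and the
   missing high bit is recovered by the add-and-shift fixup used later
   in expand_divmod.  Assumes a 64-bit unsigned long long.  */
static unsigned int
example_udiv3 (unsigned int x)
{
  return (unsigned int) (((unsigned long long) x * 0xAAAAAAABu) >> 33);
}

static unsigned int
example_udiv7 (unsigned int x)
{
  unsigned int t = (unsigned int) (((unsigned long long) x * 0x24924925u) >> 32);
  return (((x - t) >> 1) + t) >> 2;     /* q = ((x - t)/2 + t) >> 2 */
}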
2799 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2800 congruent to 1 (mod 2**N). */
2802 static unsigned HOST_WIDE_INT
2803 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
2805 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2807 /* The algorithm notes that the choice y = x satisfies
2808 x*y == 1 mod 2^3, since x is assumed odd.
2809 Each iteration doubles the number of bits of significance in y. */
2811 unsigned HOST_WIDE_INT mask;
2812 unsigned HOST_WIDE_INT y = x;
2813 int nbit = 3;
2815 mask = (n == HOST_BITS_PER_WIDE_INT
2816 ? ~(unsigned HOST_WIDE_INT) 0
2817 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2819 while (nbit < n)
2821 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2822 nbit *= 2;
2824 return y;
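/* Illustrative worked example, not part of the original file: the same
   iteration specialized to n == 8.  For x == 3 it returns 171, and
   (3 * 171) & 255 == 1 as required.  */
static unsigned int
example_invert_mod256 (unsigned int x)  /* X must be odd.  */
{
  unsigned int y = x;                   /* already correct to 3 bits */
  int nbit = 3;
  while (nbit < 8)
    {
      y = y * (2 - x * y) & 255u;       /* doubles the bits of accuracy */
      nbit *= 2;
    }
  return y;
}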
2827 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
2828 signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2829 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2830 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2831 become signed.
2833 The result is put in TARGET if that is convenient.
2835 MODE is the mode of operation. */
2838 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
2839 rtx op1, rtx target, int unsignedp)
2841 rtx tem;
2842 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2844 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2845 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2846 NULL_RTX, 0);
2847 tem = expand_and (mode, tem, op1, NULL_RTX);
2848 adj_operand
2849 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2850 adj_operand);
2852 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2853 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2854 NULL_RTX, 0);
2855 tem = expand_and (mode, tem, op0, NULL_RTX);
2856 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2857 target);
2859 return target;
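/* Illustrative sketch, not part of the original file: the adjustment
   above for a 32-bit mode.  If HI is the high half of the signed
   product of A and B, the high half of the unsigned product is
   HI + (A < 0 ? B : 0) + (B < 0 ? A : 0), modulo 2^32; the arithmetic
   shift by 31 stands in for the sign tests.  Assumes arithmetic right
   shift of negative ints.  */
static unsigned int
example_adjust_to_unsigned (unsigned int hi, int a, int b)
{
  hi += (unsigned int) ((a >> 31) & b);   /* add B when A is negative */
  hi += (unsigned int) ((b >> 31) & a);   /* add A when B is negative */
  return hi;
}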
2862 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
2864 static rtx
2865 extract_high_half (enum machine_mode mode, rtx op)
2867 enum machine_mode wider_mode;
2869 if (mode == word_mode)
2870 return gen_highpart (mode, op);
2872 wider_mode = GET_MODE_WIDER_MODE (mode);
2873 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
2874 build_int_2 (GET_MODE_BITSIZE (mode), 0), 0, 1);
2875 return convert_modes (mode, wider_mode, op, 0);
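/* Illustrative sketch, not part of the original file: what "widen,
   multiply, take the high half" computes for a 32-bit mode, assuming a
   64-bit unsigned long long on the host.  */
static unsigned int
example_umul_highpart (unsigned int a, unsigned int b)
{
  return (unsigned int) (((unsigned long long) a * b) >> 32);
}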
2878 /* Like expand_mult_highpart, but only consider using a multiplication
2879 optab. OP1 is an rtx for the constant operand. */
2881 static rtx
2882 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
2883 rtx target, int unsignedp, int max_cost)
2885 enum machine_mode wider_mode;
2886 optab moptab;
2887 rtx tem;
2888 int size;
2890 wider_mode = GET_MODE_WIDER_MODE (mode);
2891 size = GET_MODE_BITSIZE (mode);
2893 /* Firstly, try using a multiplication insn that only generates the needed
2894 high part of the product, and in the sign flavor of unsignedp. */
2895 if (mul_highpart_cost[(int) mode] < max_cost)
2897 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2898 tem = expand_binop (mode, moptab, op0, op1, target,
2899 unsignedp, OPTAB_DIRECT);
2900 if (tem)
2901 return tem;
2904 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2905 Need to adjust the result after the multiplication. */
2906 if (size - 1 < BITS_PER_WORD
2907 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2908 < max_cost))
2910 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2911 tem = expand_binop (mode, moptab, op0, op1, target,
2912 unsignedp, OPTAB_DIRECT);
2913 if (tem)
2914 /* We used the wrong signedness. Adjust the result. */
2915 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2916 tem, unsignedp);
2919 /* Try widening multiplication. */
2920 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2921 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2922 && mul_widen_cost[(int) wider_mode] < max_cost)
2924 tem = expand_binop (wider_mode, moptab, op0, op1, 0,
2925 unsignedp, OPTAB_WIDEN);
2926 if (tem)
2927 return extract_high_half (mode, tem);
2930 /* Try widening the mode and performing a non-widening multiplication. */
2931 moptab = smul_optab;
2932 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2933 && size - 1 < BITS_PER_WORD
2934 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2936 tem = expand_binop (wider_mode, moptab, op0, op1, 0,
2937 unsignedp, OPTAB_WIDEN);
2938 if (tem)
2939 return extract_high_half (mode, tem);
2942 /* Try widening multiplication of opposite signedness, and adjust. */
2943 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2944 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2945 && size - 1 < BITS_PER_WORD
2946 && (mul_widen_cost[(int) wider_mode]
2947 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2949 rtx regop1 = force_reg (mode, op1);
2950 tem = expand_binop (wider_mode, moptab, op0, regop1,
2951 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2952 if (tem != 0)
2954 tem = extract_high_half (mode, tem);
2955 /* We used the wrong signedness. Adjust the result. */
2956 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2957 target, unsignedp);
2961 return 0;
2964 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2965 in TARGET if that is convenient, and return where the result is. If the
2966 operation cannot be performed, 0 is returned.
2968 MODE is the mode of operation and result.
2970 UNSIGNEDP nonzero means unsigned multiply.
2972 MAX_COST is the total allowed cost for the expanded RTL. */
2975 expand_mult_highpart (enum machine_mode mode, rtx op0,
2976 unsigned HOST_WIDE_INT cnst1, rtx target,
2977 int unsignedp, int max_cost)
2979 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2980 int extra_cost;
2981 bool sign_adjust = false;
2982 enum mult_variant variant;
2983 struct algorithm alg;
2984 rtx op1, tem;
2986 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2987 if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
2988 abort ();
2990 op1 = gen_int_mode (cnst1, mode);
2991 cnst1 &= GET_MODE_MASK (mode);
2993 /* We can't optimize modes wider than BITS_PER_WORD.
2994 ??? We might be able to perform double-word arithmetic if
2995 mode == word_mode, however all the cost calculations in
2996 synth_mult etc. assume single-word operations. */
2997 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
2998 return expand_mult_highpart_optab (mode, op0, op1, target,
2999 unsignedp, max_cost);
3001 extra_cost = shift_cost[GET_MODE_BITSIZE (mode) - 1];
3003 /* Check whether we are trying to multiply by a negative constant. */
3004 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3006 sign_adjust = true;
3007 extra_cost += add_cost;
3010 /* See whether shift/add multiplication is cheap enough. */
3011 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3012 max_cost - extra_cost))
3014 /* See whether the specialized multiplication optabs are
3015 cheaper than the shift/add version. */
3016 tem = expand_mult_highpart_optab (mode, op0, op1, target,
3017 unsignedp, alg.cost + extra_cost);
3018 if (tem)
3019 return tem;
3021 tem = convert_to_mode (wider_mode, op0, unsignedp);
3022 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3023 tem = extract_high_half (mode, tem);
3025 /* Adjust result for signedness. */
3026 if (sign_adjust)
3027 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3029 return tem;
3031 return expand_mult_highpart_optab (mode, op0, op1, target,
3032 unsignedp, max_cost);
3035 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3036 if that is convenient, and returning where the result is.
3037 You may request either the quotient or the remainder as the result;
3038 specify REM_FLAG nonzero to get the remainder.
3040 CODE is the expression code for which kind of division this is;
3041 it controls how rounding is done. MODE is the machine mode to use.
3042 UNSIGNEDP nonzero means do unsigned division. */
3044 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3045 and then correct it by or'ing in missing high bits
3046 if result of ANDI is nonzero.
3047 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3048 This could optimize to a bfexts instruction.
3049 But C doesn't use these operations, so their optimizations are
3050 left for later. */
3051 /* ??? For modulo, we don't actually need the high part of the first product;
3052 the low part will do nicely. And for small divisors, the second multiply
3053 can also be a low-part only multiply or even be completely left out.
3054 E.g. to calculate the remainder of a division by 3 with a 32 bit
3055 multiply, multiply with 0x55555556 and extract the upper two bits;
3056 the result is exact for inputs up to 0x1fffffff.
3057 The input range can be reduced by using cross-sum rules.
3058 For odd divisors >= 3, the following table gives right shift counts
3059 so that if a number is shifted by an integer multiple of the given
3060 amount, the remainder stays the same:
3061 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3062 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3063 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3064 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3065 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3067 Cross-sum rules for even numbers can be derived by leaving as many bits
3068 to the right alone as the divisor has zeros to the right.
3069 E.g. if x is an unsigned 32 bit number:
3070 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
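/* Illustrative sketch, not part of the original file: the remainder-
   by-3 trick from the comment above.  The low 32 bits of
   x * 0x55555556 carry the fractional part of x/3 in their top two
   bits, so those two bits are the remainder; as stated above, this is
   exact for x up to 0x1fffffff.  */
static unsigned int
example_rem3 (unsigned int x)
{
  return (x * 0x55555556u) >> 30;   /* top two bits of the low product */
}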
3073 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
3076 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3077 rtx op0, rtx op1, rtx target, int unsignedp)
3079 enum machine_mode compute_mode;
3080 rtx tquotient;
3081 rtx quotient = 0, remainder = 0;
3082 rtx last;
3083 int size;
3084 rtx insn, set;
3085 optab optab1, optab2;
3086 int op1_is_constant, op1_is_pow2 = 0;
3087 int max_cost, extra_cost;
3088 static HOST_WIDE_INT last_div_const = 0;
3089 static HOST_WIDE_INT ext_op1;
3091 op1_is_constant = GET_CODE (op1) == CONST_INT;
3092 if (op1_is_constant)
3094 ext_op1 = INTVAL (op1);
3095 if (unsignedp)
3096 ext_op1 &= GET_MODE_MASK (mode);
3097 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3098 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3102 This is the structure of expand_divmod:
3104 First comes code to fix up the operands so we can perform the operations
3105 correctly and efficiently.
3107 Second comes a switch statement with code specific for each rounding mode.
3108 For some special operands this code emits all RTL for the desired
3109 operation, for other cases, it generates only a quotient and stores it in
3110 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3111 to indicate that it has not done anything.
3113 Last comes code that finishes the operation. If QUOTIENT is set and
3114 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3115 QUOTIENT is not set, it is computed using trunc rounding.
3117 We try to generate special code for division and remainder when OP1 is a
3118 constant. If |OP1| = 2**n we can use shifts and some other fast
3119 operations. For other values of OP1, we compute a carefully selected
3120 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3121 by m.
3123 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3124 half of the product. Different strategies for generating the product are
3125 implemented in expand_mult_highpart.
3127 If what we actually want is the remainder, we generate that by another
3128 by-constant multiplication and a subtraction. */
3130 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3131 code below will malfunction if we are, so check here and handle
3132 the special case if so. */
3133 if (op1 == const1_rtx)
3134 return rem_flag ? const0_rtx : op0;
3136 /* When dividing by -1, we could get an overflow.
3137 negv_optab can handle overflows. */
3138 if (! unsignedp && op1 == constm1_rtx)
3140 if (rem_flag)
3141 return const0_rtx;
3142 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3143 ? negv_optab : neg_optab, op0, target, 0);
3146 if (target
3147 /* Don't use the function value register as a target
3148 since we have to read it as well as write it,
3149 and function-inlining gets confused by this. */
3150 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3151 /* Don't clobber an operand while doing a multi-step calculation. */
3152 || ((rem_flag || op1_is_constant)
3153 && (reg_mentioned_p (target, op0)
3154 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3155 || reg_mentioned_p (target, op1)
3156 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3157 target = 0;
3159 /* Get the mode in which to perform this computation. Normally it will
3160 be MODE, but sometimes we can't do the desired operation in MODE.
3161 If so, pick a wider mode in which we can do the operation. Convert
3162 to that mode at the start to avoid repeated conversions.
3164 First see what operations we need. These depend on the expression
3165 we are evaluating. (We assume that divxx3 insns exist under the
3166 same conditions that modxx3 insns and that these insns don't normally
3167 fail. If these assumptions are not correct, we may generate less
3168 efficient code in some cases.)
3170 Then see if we find a mode in which we can open-code that operation
3171 (either a division, modulus, or shift). Finally, check for the smallest
3172 mode for which we can do the operation with a library call. */
3174 /* We might want to refine this now that we have division-by-constant
3175 optimization. Since expand_mult_highpart tries so many variants, it is
3176 not straightforward to generalize this. Maybe we should make an array
3177 of possible modes in init_expmed? Save this for GCC 2.7. */
3179 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3180 ? (unsignedp ? lshr_optab : ashr_optab)
3181 : (unsignedp ? udiv_optab : sdiv_optab));
3182 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3183 ? optab1
3184 : (unsignedp ? udivmod_optab : sdivmod_optab));
3186 for (compute_mode = mode; compute_mode != VOIDmode;
3187 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3188 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3189 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3190 break;
3192 if (compute_mode == VOIDmode)
3193 for (compute_mode = mode; compute_mode != VOIDmode;
3194 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3195 if (optab1->handlers[(int) compute_mode].libfunc
3196 || optab2->handlers[(int) compute_mode].libfunc)
3197 break;
3199 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3200 in expand_binop. */
3201 if (compute_mode == VOIDmode)
3202 compute_mode = mode;
3204 if (target && GET_MODE (target) == compute_mode)
3205 tquotient = target;
3206 else
3207 tquotient = gen_reg_rtx (compute_mode);
3209 size = GET_MODE_BITSIZE (compute_mode);
3210 #if 0
3211 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3212 (mode), and thereby get better code when OP1 is a constant. Do that
3213 later. It will require going over all usages of SIZE below. */
3214 size = GET_MODE_BITSIZE (mode);
3215 #endif
3217 /* Only deduct something for a REM if the last divide done was
3218 for a different constant. Then set the constant of the last
3219 divide. */
3220 max_cost = div_cost[(int) compute_mode]
3221 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3222 && INTVAL (op1) == last_div_const)
3223 ? mul_cost[(int) compute_mode] + add_cost : 0);
3225 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3227 /* Now convert to the best mode to use. */
3228 if (compute_mode != mode)
3230 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3231 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3233 /* convert_modes may have placed op1 into a register, so we
3234 must recompute the following. */
3235 op1_is_constant = GET_CODE (op1) == CONST_INT;
3236 op1_is_pow2 = (op1_is_constant
3237 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3238 || (! unsignedp
3239 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3242 /* If one of the operands is a volatile MEM, copy it into a register. */
3244 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3245 op0 = force_reg (compute_mode, op0);
3246 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3247 op1 = force_reg (compute_mode, op1);
3249 /* If we need the remainder or if OP1 is constant, we need to
3250 put OP0 in a register in case it has any queued subexpressions. */
3251 if (rem_flag || op1_is_constant)
3252 op0 = force_reg (compute_mode, op0);
3254 last = get_last_insn ();
3256 /* Promote floor rounding to trunc rounding for unsigned operations. */
3257 if (unsignedp)
3259 if (code == FLOOR_DIV_EXPR)
3260 code = TRUNC_DIV_EXPR;
3261 if (code == FLOOR_MOD_EXPR)
3262 code = TRUNC_MOD_EXPR;
3263 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3264 code = TRUNC_DIV_EXPR;
3267 if (op1 != const0_rtx)
3268 switch (code)
3270 case TRUNC_MOD_EXPR:
3271 case TRUNC_DIV_EXPR:
3272 if (op1_is_constant)
3274 if (unsignedp)
3276 unsigned HOST_WIDE_INT mh, ml;
3277 int pre_shift, post_shift;
3278 int dummy;
3279 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3280 & GET_MODE_MASK (compute_mode));
3282 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3284 pre_shift = floor_log2 (d);
3285 if (rem_flag)
3287 remainder
3288 = expand_binop (compute_mode, and_optab, op0,
3289 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3290 remainder, 1,
3291 OPTAB_LIB_WIDEN);
3292 if (remainder)
3293 return gen_lowpart (mode, remainder);
3295 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3296 build_int_2 (pre_shift, 0),
3297 tquotient, 1);
3299 else if (size <= HOST_BITS_PER_WIDE_INT)
3301 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3303 /* Most significant bit of divisor is set; emit an scc
3304 insn. */
3305 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3306 compute_mode, 1, 1);
3307 if (quotient == 0)
3308 goto fail1;
3310 else
3312 /* Find a suitable multiplier and right shift count
3313 instead of multiplying with D. */
3315 mh = choose_multiplier (d, size, size,
3316 &ml, &post_shift, &dummy);
3318 /* If the suggested multiplier is more than SIZE bits,
3319 we can do better for even divisors, using an
3320 initial right shift. */
3321 if (mh != 0 && (d & 1) == 0)
3323 pre_shift = floor_log2 (d & -d);
3324 mh = choose_multiplier (d >> pre_shift, size,
3325 size - pre_shift,
3326 &ml, &post_shift, &dummy);
3327 if (mh)
3328 abort ();
3330 else
3331 pre_shift = 0;
3333 if (mh != 0)
3335 rtx t1, t2, t3, t4;
3337 if (post_shift - 1 >= BITS_PER_WORD)
3338 goto fail1;
3340 extra_cost = (shift_cost[post_shift - 1]
3341 + shift_cost[1] + 2 * add_cost);
3342 t1 = expand_mult_highpart (compute_mode, op0, ml,
3343 NULL_RTX, 1,
3344 max_cost - extra_cost);
3345 if (t1 == 0)
3346 goto fail1;
3347 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3348 op0, t1),
3349 NULL_RTX);
3350 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3351 build_int_2 (1, 0), NULL_RTX,1);
3352 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3353 t1, t3),
3354 NULL_RTX);
3355 quotient
3356 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3357 build_int_2 (post_shift - 1, 0),
3358 tquotient, 1);
3360 else
3362 rtx t1, t2;
3364 if (pre_shift >= BITS_PER_WORD
3365 || post_shift >= BITS_PER_WORD)
3366 goto fail1;
3368 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3369 build_int_2 (pre_shift, 0),
3370 NULL_RTX, 1);
3371 extra_cost = (shift_cost[pre_shift]
3372 + shift_cost[post_shift]);
3373 t2 = expand_mult_highpart (compute_mode, t1, ml,
3374 NULL_RTX, 1,
3375 max_cost - extra_cost);
3376 if (t2 == 0)
3377 goto fail1;
3378 quotient
3379 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3380 build_int_2 (post_shift, 0),
3381 tquotient, 1);
3385 else /* Too wide mode to use tricky code */
3386 break;
3388 insn = get_last_insn ();
3389 if (insn != last
3390 && (set = single_set (insn)) != 0
3391 && SET_DEST (set) == quotient)
3392 set_unique_reg_note (insn,
3393 REG_EQUAL,
3394 gen_rtx_UDIV (compute_mode, op0, op1));
3396 else /* TRUNC_DIV, signed */
3398 unsigned HOST_WIDE_INT ml;
3399 int lgup, post_shift;
3400 HOST_WIDE_INT d = INTVAL (op1);
3401 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3403 /* n rem d = n rem -d */
3404 if (rem_flag && d < 0)
3406 d = abs_d;
3407 op1 = gen_int_mode (abs_d, compute_mode);
3410 if (d == 1)
3411 quotient = op0;
3412 else if (d == -1)
3413 quotient = expand_unop (compute_mode, neg_optab, op0,
3414 tquotient, 0);
3415 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3417 /* This case is not handled correctly below. */
3418 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3419 compute_mode, 1, 1);
3420 if (quotient == 0)
3421 goto fail1;
3423 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3424 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3425 /* ??? The cheap metric is computed only for
3426 word_mode. If this operation is wider, this may
3427 not be so. Assume true if the optab has an
3428 expander for this mode. */
3429 && (((rem_flag ? smod_optab : sdiv_optab)
3430 ->handlers[(int) compute_mode].insn_code
3431 != CODE_FOR_nothing)
3432 || (sdivmod_optab->handlers[(int) compute_mode]
3433 .insn_code != CODE_FOR_nothing)))
3435 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3437 lgup = floor_log2 (abs_d);
3438 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3440 rtx label = gen_label_rtx ();
3441 rtx t1;
3443 t1 = copy_to_mode_reg (compute_mode, op0);
3444 do_cmp_and_jump (t1, const0_rtx, GE,
3445 compute_mode, label);
3446 expand_inc (t1, gen_int_mode (abs_d - 1,
3447 compute_mode));
3448 emit_label (label);
3449 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3450 build_int_2 (lgup, 0),
3451 tquotient, 0);
3453 else
3455 rtx t1, t2, t3;
3456 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3457 build_int_2 (size - 1, 0),
3458 NULL_RTX, 0);
3459 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3460 build_int_2 (size - lgup, 0),
3461 NULL_RTX, 1);
3462 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3463 op0, t2),
3464 NULL_RTX);
3465 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3466 build_int_2 (lgup, 0),
3467 tquotient, 0);
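/* Illustrative worked example, not from the original source: for a
   32-bit mode and abs_d == 8 (lgup == 3), the branch-free sequence
   above computes
       t1 = x >> 31          (all ones if x < 0, else 0)
       t2 = t1 >> (32 - 3)   (7 if x < 0, else 0)
       t3 = x + t2
       q  = t3 >> 3
   i.e. it adds abs_d - 1 before shifting only for negative x, turning
   the arithmetic shift's floor rounding into truncation.  */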
3470 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3471 the quotient. */
3472 if (d < 0)
3474 insn = get_last_insn ();
3475 if (insn != last
3476 && (set = single_set (insn)) != 0
3477 && SET_DEST (set) == quotient
3478 && abs_d < ((unsigned HOST_WIDE_INT) 1
3479 << (HOST_BITS_PER_WIDE_INT - 1)))
3480 set_unique_reg_note (insn,
3481 REG_EQUAL,
3482 gen_rtx_DIV (compute_mode,
3483 op0,
3484 GEN_INT
3485 (trunc_int_for_mode
3486 (abs_d,
3487 compute_mode))));
3489 quotient = expand_unop (compute_mode, neg_optab,
3490 quotient, quotient, 0);
3493 else if (size <= HOST_BITS_PER_WIDE_INT)
3495 choose_multiplier (abs_d, size, size - 1,
3496 &ml, &post_shift, &lgup);
3497 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3499 rtx t1, t2, t3;
3501 if (post_shift >= BITS_PER_WORD
3502 || size - 1 >= BITS_PER_WORD)
3503 goto fail1;
3505 extra_cost = (shift_cost[post_shift]
3506 + shift_cost[size - 1] + add_cost);
3507 t1 = expand_mult_highpart (compute_mode, op0, ml,
3508 NULL_RTX, 0,
3509 max_cost - extra_cost);
3510 if (t1 == 0)
3511 goto fail1;
3512 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3513 build_int_2 (post_shift, 0), NULL_RTX, 0);
3514 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3515 build_int_2 (size - 1, 0), NULL_RTX, 0);
3516 if (d < 0)
3517 quotient
3518 = force_operand (gen_rtx_MINUS (compute_mode,
3519 t3, t2),
3520 tquotient);
3521 else
3522 quotient
3523 = force_operand (gen_rtx_MINUS (compute_mode,
3524 t2, t3),
3525 tquotient);
3527 else
3529 rtx t1, t2, t3, t4;
3531 if (post_shift >= BITS_PER_WORD
3532 || size - 1 >= BITS_PER_WORD)
3533 goto fail1;
3535 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3536 extra_cost = (shift_cost[post_shift]
3537 + shift_cost[size - 1] + 2 * add_cost);
3538 t1 = expand_mult_highpart (compute_mode, op0, ml,
3539 NULL_RTX, 0,
3540 max_cost - extra_cost);
3541 if (t1 == 0)
3542 goto fail1;
3543 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3544 t1, op0),
3545 NULL_RTX);
3546 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3547 build_int_2 (post_shift, 0),
3548 NULL_RTX, 0);
3549 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3550 build_int_2 (size - 1, 0),
3551 NULL_RTX, 0);
3552 if (d < 0)
3553 quotient
3554 = force_operand (gen_rtx_MINUS (compute_mode,
3555 t4, t3),
3556 tquotient);
3557 else
3558 quotient
3559 = force_operand (gen_rtx_MINUS (compute_mode,
3560 t3, t4),
3561 tquotient);
3564 else /* Too wide mode to use tricky code */
3565 break;
3567 insn = get_last_insn ();
3568 if (insn != last
3569 && (set = single_set (insn)) != 0
3570 && SET_DEST (set) == quotient)
3571 set_unique_reg_note (insn,
3572 REG_EQUAL,
3573 gen_rtx_DIV (compute_mode, op0, op1));
3575 break;
3577 fail1:
3578 delete_insns_since (last);
3579 break;
3581 case FLOOR_DIV_EXPR:
3582 case FLOOR_MOD_EXPR:
3583 /* We will come here only for signed operations. */
3584 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3586 unsigned HOST_WIDE_INT mh, ml;
3587 int pre_shift, lgup, post_shift;
3588 HOST_WIDE_INT d = INTVAL (op1);
3590 if (d > 0)
3592 /* We could just as easily deal with negative constants here,
3593 but it does not seem worth the trouble for GCC 2.6. */
3594 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3596 pre_shift = floor_log2 (d);
3597 if (rem_flag)
3599 remainder = expand_binop (compute_mode, and_optab, op0,
3600 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3601 remainder, 0, OPTAB_LIB_WIDEN);
3602 if (remainder)
3603 return gen_lowpart (mode, remainder);
3605 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3606 build_int_2 (pre_shift, 0),
3607 tquotient, 0);
3609 else
3611 rtx t1, t2, t3, t4;
3613 mh = choose_multiplier (d, size, size - 1,
3614 &ml, &post_shift, &lgup);
3615 if (mh)
3616 abort ();
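/* Editor's note: the branch below relies on the identity
   floor (n / d) == ~floor (~n / d) for d > 0, where ~n == -n - 1 is
   nonnegative when n < 0; conditionally complementing with the sign
   mask t1 around an unsigned multiply-high division is what lets a
   multiplier of only size - 1 bits of precision suffice (hence the
   mh == 0 check above). E.g. d == 3, n == -7: ~n == 6, 6 / 3 == 2,
   and ~2 == -3 == floor (-7 / 3).  */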
3618 if (post_shift < BITS_PER_WORD
3619 && size - 1 < BITS_PER_WORD)
3621 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3622 build_int_2 (size - 1, 0),
3623 NULL_RTX, 0);
3624 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3625 NULL_RTX, 0, OPTAB_WIDEN);
3626 extra_cost = (shift_cost[post_shift]
3627 + shift_cost[size - 1] + 2 * add_cost);
3628 t3 = expand_mult_highpart (compute_mode, t2, ml,
3629 NULL_RTX, 1,
3630 max_cost - extra_cost);
3631 if (t3 != 0)
3633 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3634 build_int_2 (post_shift, 0),
3635 NULL_RTX, 1);
3636 quotient = expand_binop (compute_mode, xor_optab,
3637 t4, t1, tquotient, 0,
3638 OPTAB_WIDEN);
3643 else
3645 rtx nsign, t1, t2, t3, t4;
3646 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3647 op0, constm1_rtx), NULL_RTX);
3648 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3649 0, OPTAB_WIDEN);
3650 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3651 build_int_2 (size - 1, 0), NULL_RTX, 0);
3652 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3653 NULL_RTX);
3654 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3655 NULL_RTX, 0);
3656 if (t4)
3658 rtx t5;
3659 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3660 NULL_RTX, 0);
3661 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3662 t4, t5),
3663 tquotient);
3668 if (quotient != 0)
3669 break;
3670 delete_insns_since (last);
3672 /* Try using an instruction that produces both the quotient and
3673 remainder, using truncation. We can easily compensate the quotient
3674 or remainder to get floor rounding, once we have the remainder.
3675 Notice that we also compute the final remainder value here,
3676 and return the result right away. */
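/* Editor's note: truncation and floor differ only when the remainder is
   nonzero and the operands have opposite signs; the code below then
   decrements the quotient and adds the divisor back into the remainder.
   E.g. -7 / 3: trunc gives q == -2, r == -1; adjusted to q == -3,
   r == 2, and indeed -7 == 3 * -3 + 2.  */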
3677 if (target == 0 || GET_MODE (target) != compute_mode)
3678 target = gen_reg_rtx (compute_mode);
3680 if (rem_flag)
3682 remainder
3683 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3684 quotient = gen_reg_rtx (compute_mode);
3686 else
3688 quotient
3689 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3690 remainder = gen_reg_rtx (compute_mode);
3693 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3694 quotient, remainder, 0))
3696 /* This could be computed with a branch-less sequence.
3697 Save that for later. */
3698 rtx tem;
3699 rtx label = gen_label_rtx ();
3700 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3701 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3702 NULL_RTX, 0, OPTAB_WIDEN);
3703 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3704 expand_dec (quotient, const1_rtx);
3705 expand_inc (remainder, op1);
3706 emit_label (label);
3707 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3710 /* No luck with division elimination or divmod. Have to do it
3711 by conditionally adjusting op0 *and* the result. */
3713 rtx label1, label2, label3, label4, label5;
3714 rtx adjusted_op0;
3715 rtx tem;
3717 quotient = gen_reg_rtx (compute_mode);
3718 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3719 label1 = gen_label_rtx ();
3720 label2 = gen_label_rtx ();
3721 label3 = gen_label_rtx ();
3722 label4 = gen_label_rtx ();
3723 label5 = gen_label_rtx ();
3724 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3725 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3726 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3727 quotient, 0, OPTAB_LIB_WIDEN);
3728 if (tem != quotient)
3729 emit_move_insn (quotient, tem);
3730 emit_jump_insn (gen_jump (label5));
3731 emit_barrier ();
3732 emit_label (label1);
3733 expand_inc (adjusted_op0, const1_rtx);
3734 emit_jump_insn (gen_jump (label4));
3735 emit_barrier ();
3736 emit_label (label2);
3737 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3738 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3739 quotient, 0, OPTAB_LIB_WIDEN);
3740 if (tem != quotient)
3741 emit_move_insn (quotient, tem);
3742 emit_jump_insn (gen_jump (label5));
3743 emit_barrier ();
3744 emit_label (label3);
3745 expand_dec (adjusted_op0, const1_rtx);
3746 emit_label (label4);
3747 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3748 quotient, 0, OPTAB_LIB_WIDEN);
3749 if (tem != quotient)
3750 emit_move_insn (quotient, tem);
3751 expand_dec (quotient, const1_rtx);
3752 emit_label (label5);
3754 break;
3756 case CEIL_DIV_EXPR:
3757 case CEIL_MOD_EXPR:
3758 if (unsignedp)
3760 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3762 rtx t1, t2, t3;
3763 unsigned HOST_WIDE_INT d = INTVAL (op1);
3764 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3765 build_int_2 (floor_log2 (d), 0),
3766 tquotient, 1);
3767 t2 = expand_binop (compute_mode, and_optab, op0,
3768 GEN_INT (d - 1),
3769 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3770 t3 = gen_reg_rtx (compute_mode);
3771 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3772 compute_mode, 1, 1);
3773 if (t3 == 0)
3775 rtx lab;
3776 lab = gen_label_rtx ();
3777 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3778 expand_inc (t1, const1_rtx);
3779 emit_label (lab);
3780 quotient = t1;
3782 else
3783 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3784 t1, t3),
3785 tquotient);
3786 break;
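/* Editor's note: the power-of-two path above computes
   q = (n >> log2 (d)) + ((n & (d - 1)) != 0); e.g. n == 13, d == 4:
   13 >> 2 == 3 and 13 & 3 != 0, so q == 4 == ceil (13 / 4). The
   do_cmp_and_jump variant is the fallback when emit_store_flag cannot
   produce the 0/1 value directly.  */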
3789 /* Try using an instruction that produces both the quotient and
3790 remainder, using truncation. We can easily compensate the
3791 quotient or remainder to get ceiling rounding, once we have the
3792 remainder. Notice that we also compute the final remainder
3793 value here, and return the result right away. */
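/* Editor's note: for unsigned ceiling the compensation mirrors the
   floor case: whenever the remainder is nonzero, increment the
   quotient and subtract the divisor from the remainder. E.g. 13 / 4:
   trunc gives q == 3, r == 1; adjusted to q == 4, r == -3, and
   13 == 4 * 4 - 3.  */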
3794 if (target == 0 || GET_MODE (target) != compute_mode)
3795 target = gen_reg_rtx (compute_mode);
3797 if (rem_flag)
3799 remainder = (GET_CODE (target) == REG
3800 ? target : gen_reg_rtx (compute_mode));
3801 quotient = gen_reg_rtx (compute_mode);
3803 else
3805 quotient = (GET_CODE (target) == REG
3806 ? target : gen_reg_rtx (compute_mode));
3807 remainder = gen_reg_rtx (compute_mode);
3810 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3811 remainder, 1))
3813 /* This could be computed with a branch-less sequence.
3814 Save that for later. */
3815 rtx label = gen_label_rtx ();
3816 do_cmp_and_jump (remainder, const0_rtx, EQ,
3817 compute_mode, label);
3818 expand_inc (quotient, const1_rtx);
3819 expand_dec (remainder, op1);
3820 emit_label (label);
3821 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3824 /* No luck with division elimination or divmod. Have to do it
3825 by conditionally adjusting op0 *and* the result. */
3827 rtx label1, label2;
3828 rtx adjusted_op0, tem;
3830 quotient = gen_reg_rtx (compute_mode);
3831 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3832 label1 = gen_label_rtx ();
3833 label2 = gen_label_rtx ();
3834 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3835 compute_mode, label1);
3836 emit_move_insn (quotient, const0_rtx);
3837 emit_jump_insn (gen_jump (label2));
3838 emit_barrier ();
3839 emit_label (label1);
3840 expand_dec (adjusted_op0, const1_rtx);
3841 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3842 quotient, 1, OPTAB_LIB_WIDEN);
3843 if (tem != quotient)
3844 emit_move_insn (quotient, tem);
3845 expand_inc (quotient, const1_rtx);
3846 emit_label (label2);
3849 else /* signed */
3851 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3852 && INTVAL (op1) >= 0)
3854 /* This is extremely similar to the code for the unsigned case
3855 above. For 2.7 we should merge these variants, but for
3856 2.6.1 I don't want to touch the code for unsigned since that
3857 gets used in C. The signed case will only be used by other
3858 languages (Ada). */
3860 rtx t1, t2, t3;
3861 unsigned HOST_WIDE_INT d = INTVAL (op1);
3862 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3863 build_int_2 (floor_log2 (d), 0),
3864 tquotient, 0);
3865 t2 = expand_binop (compute_mode, and_optab, op0,
3866 GEN_INT (d - 1),
3867 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3868 t3 = gen_reg_rtx (compute_mode);
3869 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3870 compute_mode, 1, 1);
3871 if (t3 == 0)
3873 rtx lab;
3874 lab = gen_label_rtx ();
3875 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3876 expand_inc (t1, const1_rtx);
3877 emit_label (lab);
3878 quotient = t1;
3880 else
3881 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3882 t1, t3),
3883 tquotient);
3884 break;
3887 /* Try using an instruction that produces both the quotient and
3888 remainder, using truncation. We can easily compensate the
3889 quotient or remainder to get ceiling rounding, once we have the
3890 remainder. Notice that we also compute the final remainder
3891 value here, and return the result right away. */
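/* Editor's note: the signed adjustment additionally requires the
   operands to have the same sign (otherwise the truncated quotient
   already equals the ceiling). E.g. 7 / 2: q == 3, r == 1, signs
   agree, so q becomes 4 == ceil (3.5); but 7 / -2: q == -3, r == 1,
   signs differ, and -3 == ceil (-3.5) already.  */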
3892 if (target == 0 || GET_MODE (target) != compute_mode)
3893 target = gen_reg_rtx (compute_mode);
3894 if (rem_flag)
3896 remainder = (GET_CODE (target) == REG
3897 ? target : gen_reg_rtx (compute_mode));
3898 quotient = gen_reg_rtx (compute_mode);
3900 else
3902 quotient = (GET_CODE (target) == REG
3903 ? target : gen_reg_rtx (compute_mode));
3904 remainder = gen_reg_rtx (compute_mode);
3907 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3908 remainder, 0))
3910 /* This could be computed with a branch-less sequence.
3911 Save that for later. */
3912 rtx tem;
3913 rtx label = gen_label_rtx ();
3914 do_cmp_and_jump (remainder, const0_rtx, EQ,
3915 compute_mode, label);
3916 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3917 NULL_RTX, 0, OPTAB_WIDEN);
3918 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3919 expand_inc (quotient, const1_rtx);
3920 expand_dec (remainder, op1);
3921 emit_label (label);
3922 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3925 /* No luck with division elimination or divmod. Have to do it
3926 by conditionally adjusting op0 *and* the result. */
3928 rtx label1, label2, label3, label4, label5;
3929 rtx adjusted_op0;
3930 rtx tem;
3932 quotient = gen_reg_rtx (compute_mode);
3933 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3934 label1 = gen_label_rtx ();
3935 label2 = gen_label_rtx ();
3936 label3 = gen_label_rtx ();
3937 label4 = gen_label_rtx ();
3938 label5 = gen_label_rtx ();
3939 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3940 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3941 compute_mode, label1);
3942 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3943 quotient, 0, OPTAB_LIB_WIDEN);
3944 if (tem != quotient)
3945 emit_move_insn (quotient, tem);
3946 emit_jump_insn (gen_jump (label5));
3947 emit_barrier ();
3948 emit_label (label1);
3949 expand_dec (adjusted_op0, const1_rtx);
3950 emit_jump_insn (gen_jump (label4));
3951 emit_barrier ();
3952 emit_label (label2);
3953 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3954 compute_mode, label3);
3955 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3956 quotient, 0, OPTAB_LIB_WIDEN);
3957 if (tem != quotient)
3958 emit_move_insn (quotient, tem);
3959 emit_jump_insn (gen_jump (label5));
3960 emit_barrier ();
3961 emit_label (label3);
3962 expand_inc (adjusted_op0, const1_rtx);
3963 emit_label (label4);
3964 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3965 quotient, 0, OPTAB_LIB_WIDEN);
3966 if (tem != quotient)
3967 emit_move_insn (quotient, tem);
3968 expand_inc (quotient, const1_rtx);
3969 emit_label (label5);
3972 break;
3974 case EXACT_DIV_EXPR:
3975 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3977 HOST_WIDE_INT d = INTVAL (op1);
3978 unsigned HOST_WIDE_INT ml;
3979 int pre_shift;
3980 rtx t1;
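/* Editor's note: because the division is known to be exact, it can be
   done as a right shift by the number of trailing zero bits of d,
   followed by a multiplication with the inverse of the odd part of d
   modulo 2^size. Illustration for size == 32, d == 3: invert_mod2n
   returns 0xAAAAAAAB (3 * 0xAAAAAAAB == 1 mod 2^32), and e.g.
   9 * 0xAAAAAAAB mod 2^32 == 3 == 9 / 3.  */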
3982 pre_shift = floor_log2 (d & -d);
3983 ml = invert_mod2n (d >> pre_shift, size);
3984 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3985 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3986 quotient = expand_mult (compute_mode, t1,
3987 gen_int_mode (ml, compute_mode),
3988 NULL_RTX, 1);
3990 insn = get_last_insn ();
3991 set_unique_reg_note (insn,
3992 REG_EQUAL,
3993 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3994 compute_mode,
3995 op0, op1));
3997 break;
3999 case ROUND_DIV_EXPR:
4000 case ROUND_MOD_EXPR:
4001 if (unsignedp)
4003 rtx tem;
4004 rtx label;
4005 label = gen_label_rtx ();
4006 quotient = gen_reg_rtx (compute_mode);
4007 remainder = gen_reg_rtx (compute_mode);
4008 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4010 rtx tem;
4011 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4012 quotient, 1, OPTAB_LIB_WIDEN);
4013 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4014 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4015 remainder, 1, OPTAB_LIB_WIDEN);
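/* Editor's note: round-to-nearest compares the remainder against
   floor ((d - 1) / 2); if it is larger, the quotient is bumped and
   the divisor pulled out of the remainder. E.g. 8 / 3: q == 2,
   r == 2 > 1, so q becomes 3 (8 / 3 == 2.67); exact halves round up,
   e.g. 6 / 4 -> 2.  */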
4017 tem = plus_constant (op1, -1);
4018 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4019 build_int_2 (1, 0), NULL_RTX, 1);
4020 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4021 expand_inc (quotient, const1_rtx);
4022 expand_dec (remainder, op1);
4023 emit_label (label);
4025 else
4027 rtx abs_rem, abs_op1, tem, mask;
4028 rtx label;
4029 label = gen_label_rtx ();
4030 quotient = gen_reg_rtx (compute_mode);
4031 remainder = gen_reg_rtx (compute_mode);
4032 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4034 rtx tem;
4035 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4036 quotient, 0, OPTAB_LIB_WIDEN);
4037 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4038 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4039 remainder, 0, OPTAB_LIB_WIDEN);
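/* Editor's note: the signed variant compares 2 * |r| against |d| and,
   when an adjustment is due, forms sign (op0 ^ op1) as
   (mask ^ 1) - mask, which is +1 when the signs agree (mask == 0) and
   -1 when they differ (mask == -1); likewise (mask ^ op1) - mask is
   op1 or -op1. E.g. -8 / 3: q == -2, r == -2, 2 * |r| == 4 >= 3 and
   mask == -1, so q becomes -3 and r becomes 1 (-8 == 3 * -3 + 1).  */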
4041 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4042 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4043 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4044 build_int_2 (1, 0), NULL_RTX, 1);
4045 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4046 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4047 NULL_RTX, 0, OPTAB_WIDEN);
4048 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4049 build_int_2 (size - 1, 0), NULL_RTX, 0);
4050 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4051 NULL_RTX, 0, OPTAB_WIDEN);
4052 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4053 NULL_RTX, 0, OPTAB_WIDEN);
4054 expand_inc (quotient, tem);
4055 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4056 NULL_RTX, 0, OPTAB_WIDEN);
4057 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4058 NULL_RTX, 0, OPTAB_WIDEN);
4059 expand_dec (remainder, tem);
4060 emit_label (label);
4062 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4064 default:
4065 abort ();
4068 if (quotient == 0)
4070 if (target && GET_MODE (target) != compute_mode)
4071 target = 0;
4073 if (rem_flag)
4075 /* Try to produce the remainder without producing the quotient.
4076 If we seem to have a divmod pattern that does not require widening,
4077 don't try widening here. We should really have a WIDEN argument
4078 to expand_twoval_binop, since what we'd really like to do here is
4079 1) try a mod insn in compute_mode
4080 2) try a divmod insn in compute_mode
4081 3) try a div insn in compute_mode and multiply-subtract to get
4082 remainder
4083 4) try the same things with widening allowed. */
4084 remainder
4085 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4086 op0, op1, target,
4087 unsignedp,
4088 ((optab2->handlers[(int) compute_mode].insn_code
4089 != CODE_FOR_nothing)
4090 ? OPTAB_DIRECT : OPTAB_WIDEN));
4091 if (remainder == 0)
4093 /* No luck there. Can we do remainder and divide at once
4094 without a library call? */
4095 remainder = gen_reg_rtx (compute_mode);
4096 if (! expand_twoval_binop ((unsignedp
4097 ? udivmod_optab
4098 : sdivmod_optab),
4099 op0, op1,
4100 NULL_RTX, remainder, unsignedp))
4101 remainder = 0;
4104 if (remainder)
4105 return gen_lowpart (mode, remainder);
4108 /* Produce the quotient. Try a quotient insn, but not a library call.
4109 If we have a divmod in this mode, use it in preference to widening
4110 the div (for this test we assume it will not fail). Note that optab2
4111 is set to whichever of the two optabs the call below will use. */
4112 quotient
4113 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4114 op0, op1, rem_flag ? NULL_RTX : target,
4115 unsignedp,
4116 ((optab2->handlers[(int) compute_mode].insn_code
4117 != CODE_FOR_nothing)
4118 ? OPTAB_DIRECT : OPTAB_WIDEN));
4120 if (quotient == 0)
4122 /* No luck there. Try a quotient-and-remainder insn,
4123 keeping the quotient alone. */
4124 quotient = gen_reg_rtx (compute_mode);
4125 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4126 op0, op1,
4127 quotient, NULL_RTX, unsignedp))
4129 quotient = 0;
4130 if (! rem_flag)
4131 /* Still no luck. If we are not computing the remainder,
4132 use a library call for the quotient. */
4133 quotient = sign_expand_binop (compute_mode,
4134 udiv_optab, sdiv_optab,
4135 op0, op1, target,
4136 unsignedp, OPTAB_LIB_WIDEN);
4141 if (rem_flag)
4143 if (target && GET_MODE (target) != compute_mode)
4144 target = 0;
4146 if (quotient == 0)
4147 /* No divide instruction either. Use library for remainder. */
4148 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4149 op0, op1, target,
4150 unsignedp, OPTAB_LIB_WIDEN);
4151 else
4153 /* We divided. Now finish doing X - Y * (X / Y). */
4154 remainder = expand_mult (compute_mode, quotient, op1,
4155 NULL_RTX, unsignedp);
4156 remainder = expand_binop (compute_mode, sub_optab, op0,
4157 remainder, target, unsignedp,
4158 OPTAB_LIB_WIDEN);
4162 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4165 /* Return a tree node with data type TYPE, describing the value of X.
4166 Usually this is an RTL_EXPR, if there is no obvious better choice.
4167 X may be an expression; however, we only support those expressions
4168 generated by loop.c. */
4170 tree
4171 make_tree (tree type, rtx x)
4173 tree t;
4175 switch (GET_CODE (x))
4177 case CONST_INT:
4178 t = build_int_2 (INTVAL (x),
4179 (TREE_UNSIGNED (type)
4180 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4181 || INTVAL (x) >= 0 ? 0 : -1);
4182 TREE_TYPE (t) = type;
4183 return t;
4185 case CONST_DOUBLE:
4186 if (GET_MODE (x) == VOIDmode)
4188 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4189 TREE_TYPE (t) = type;
4191 else
4193 REAL_VALUE_TYPE d;
4195 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4196 t = build_real (type, d);
4199 return t;
4201 case CONST_VECTOR:
4203 int i, units;
4204 rtx elt;
4205 tree t = NULL_TREE;
4207 units = CONST_VECTOR_NUNITS (x);
4209 /* Build a tree with vector elements. */
4210 for (i = units - 1; i >= 0; --i)
4212 elt = CONST_VECTOR_ELT (x, i);
4213 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4216 return build_vector (type, t);
4219 case PLUS:
4220 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4221 make_tree (type, XEXP (x, 1))));
4223 case MINUS:
4224 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4225 make_tree (type, XEXP (x, 1))));
4227 case NEG:
4228 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4230 case MULT:
4231 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4232 make_tree (type, XEXP (x, 1))));
4234 case ASHIFT:
4235 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4236 make_tree (type, XEXP (x, 1))));
4238 case LSHIFTRT:
4239 t = lang_hooks.types.unsigned_type (type);
4240 return fold (convert (type,
4241 build (RSHIFT_EXPR, t,
4242 make_tree (t, XEXP (x, 0)),
4243 make_tree (type, XEXP (x, 1)))));
4245 case ASHIFTRT:
4246 t = lang_hooks.types.signed_type (type);
4247 return fold (convert (type,
4248 build (RSHIFT_EXPR, t,
4249 make_tree (t, XEXP (x, 0)),
4250 make_tree (type, XEXP (x, 1)))));
4252 case DIV:
4253 if (TREE_CODE (type) != REAL_TYPE)
4254 t = lang_hooks.types.signed_type (type);
4255 else
4256 t = type;
4258 return fold (convert (type,
4259 build (TRUNC_DIV_EXPR, t,
4260 make_tree (t, XEXP (x, 0)),
4261 make_tree (t, XEXP (x, 1)))));
4262 case UDIV:
4263 t = lang_hooks.types.unsigned_type (type);
4264 return fold (convert (type,
4265 build (TRUNC_DIV_EXPR, t,
4266 make_tree (t, XEXP (x, 0)),
4267 make_tree (t, XEXP (x, 1)))));
4269 case SIGN_EXTEND:
4270 case ZERO_EXTEND:
4271 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4272 GET_CODE (x) == ZERO_EXTEND);
4273 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4275 default:
4276 t = make_node (RTL_EXPR);
4277 TREE_TYPE (t) = type;
4279 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4280 ptr_mode. So convert. */
4281 if (POINTER_TYPE_P (type))
4282 x = convert_memory_address (TYPE_MODE (type), x);
4284 RTL_EXPR_RTL (t) = x;
4285 /* There are no insns to be output
4286 when this rtl_expr is used. */
4287 RTL_EXPR_SEQUENCE (t) = 0;
4288 return t;
4292 /* Check whether the multiplication X * MULT + ADD overflows.
4293 X, MULT and ADD must be CONST_*.
4294 MODE is the machine mode for the computation.
4295 X and MULT must have mode MODE. ADD may have a different mode
4296 or VOIDmode, in which case it is taken to have mode MODE.
4297 UNSIGNEDP is nonzero to do unsigned multiplication. */
4299 bool
4300 const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp)
4302 tree type, mult_type, add_type, result;
4304 type = lang_hooks.types.type_for_mode (mode, unsignedp);
4306 /* In order to get a proper overflow indication from an unsigned
4307 type, we have to pretend that it's a sizetype. */
4308 mult_type = type;
4309 if (unsignedp)
4311 mult_type = copy_node (type);
4312 TYPE_IS_SIZETYPE (mult_type) = 1;
4315 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4316 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
4318 result = fold (build (PLUS_EXPR, mult_type,
4319 fold (build (MULT_EXPR, mult_type,
4320 make_tree (mult_type, x),
4321 make_tree (mult_type, mult))),
4322 make_tree (add_type, add)));
4324 return TREE_CONSTANT_OVERFLOW (result);
4327 /* Return an rtx representing the value of X * MULT + ADD.
4328 TARGET is a suggestion for where to store the result (an rtx).
4329 MODE is the machine mode for the computation.
4330 X and MULT must have mode MODE. ADD may have a different mode
4331 or VOIDmode, in which case it is taken to have mode MODE.
4332 UNSIGNEDP is nonzero to do unsigned multiplication.
4333 This may emit insns. */
4335 rtx
4336 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4337 int unsignedp)
4339 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
4340 tree add_type = (GET_MODE (add) == VOIDmode
4341 ? type: lang_hooks.types.type_for_mode (GET_MODE (add),
4342 unsignedp));
4343 tree result = fold (build (PLUS_EXPR, type,
4344 fold (build (MULT_EXPR, type,
4345 make_tree (type, x),
4346 make_tree (type, mult))),
4347 make_tree (add_type, add)));
4349 return expand_expr (result, target, VOIDmode, 0);
4352 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4353 and returning TARGET.
4355 If TARGET is 0, a pseudo-register or constant is returned. */
4357 rtx
4358 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4360 rtx tem = 0;
4362 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4363 tem = simplify_binary_operation (AND, mode, op0, op1);
4364 if (tem == 0)
4365 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4367 if (target == 0)
4368 target = tem;
4369 else if (tem != target)
4370 emit_move_insn (target, tem);
4371 return target;
4374 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4375 and storing in TARGET. Normally return TARGET.
4376 Return 0 if that cannot be done.
4378 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4379 it is VOIDmode, they cannot both be CONST_INT.
4381 UNSIGNEDP is for the case where we have to widen the operands
4382 to perform the operation. It says to use zero-extension.
4384 NORMALIZEP is 1 if we should convert the result to be either zero
4385 or one. NORMALIZEP is -1 if we should convert the result to be
4386 either zero or -1. If NORMALIZEP is zero, the result will be left
4387 "raw" out of the scc insn. */
4389 rtx
4390 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
4391 enum machine_mode mode, int unsignedp, int normalizep)
4393 rtx subtarget;
4394 enum insn_code icode;
4395 enum machine_mode compare_mode;
4396 enum machine_mode target_mode = GET_MODE (target);
4397 rtx tem;
4398 rtx last = get_last_insn ();
4399 rtx pattern, comparison;
4401 /* ??? Ok to do this and then fail? */
4402 op0 = protect_from_queue (op0, 0);
4403 op1 = protect_from_queue (op1, 0);
4405 if (unsignedp)
4406 code = unsigned_condition (code);
4408 /* If one operand is constant, make it the second one. Only do this
4409 if the other operand is not constant as well. */
4411 if (swap_commutative_operands_p (op0, op1))
4413 tem = op0;
4414 op0 = op1;
4415 op1 = tem;
4416 code = swap_condition (code);
4419 if (mode == VOIDmode)
4420 mode = GET_MODE (op0);
4422 /* For some comparisons with 1 and -1, we can convert this to
4423 comparisons with zero. This will often produce more opportunities for
4424 store-flag insns. */
4426 switch (code)
4428 case LT:
4429 if (op1 == const1_rtx)
4430 op1 = const0_rtx, code = LE;
4431 break;
4432 case LE:
4433 if (op1 == constm1_rtx)
4434 op1 = const0_rtx, code = LT;
4435 break;
4436 case GE:
4437 if (op1 == const1_rtx)
4438 op1 = const0_rtx, code = GT;
4439 break;
4440 case GT:
4441 if (op1 == constm1_rtx)
4442 op1 = const0_rtx, code = GE;
4443 break;
4444 case GEU:
4445 if (op1 == const1_rtx)
4446 op1 = const0_rtx, code = NE;
4447 break;
4448 case LTU:
4449 if (op1 == const1_rtx)
4450 op1 = const0_rtx, code = EQ;
4451 break;
4452 default:
4453 break;
4456 /* If we are comparing a double-word integer with zero, we can convert
4457 the comparison into one involving a single word. */
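/* Editor's note: e.g. with 32-bit words, a 64-bit value is zero iff
   the inclusive OR of its two halves is zero, and its sign bit lives
   in the high half, so EQ/NE can test (low | high) while LT/GE can
   test the high word alone.  */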
4458 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4459 && GET_MODE_CLASS (mode) == MODE_INT
4460 && op1 == const0_rtx
4461 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4463 if (code == EQ || code == NE)
4465 rtx op00, op01, op0both;
4467 /* Do a logical OR of the two words and compare the result. */
4468 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4469 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4470 op0both = expand_binop (word_mode, ior_optab, op00, op01,
4471 NULL_RTX, unsignedp, OPTAB_DIRECT);
4472 if (op0both != 0)
4473 return emit_store_flag (target, code, op0both, op1, word_mode,
4474 unsignedp, normalizep);
4476 else if (code == LT || code == GE)
4478 rtx op0h;
4480 /* If testing the sign bit, can just test on high word. */
4481 op0h = simplify_gen_subreg (word_mode, op0, mode,
4482 subreg_highpart_offset (word_mode, mode));
4483 return emit_store_flag (target, code, op0h, op1, word_mode,
4484 unsignedp, normalizep);
4488 /* From now on, we won't change CODE, so set ICODE now. */
4489 icode = setcc_gen_code[(int) code];
4491 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4492 complement of A (for GE) and shifting the sign bit to the low bit. */
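/* Editor's note: a C sketch of the trick for a 32-bit mode with
   STORE_FLAG_VALUE == 1 (function names hypothetical):

     int is_negative (int a)    { return (unsigned) a >> 31; }    // a < 0
     int is_nonnegative (int a) { return (unsigned) ~a >> 31; }   // a >= 0

   For the -1/0 normalization an arithmetic shift is used instead of
   the logical one.  */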
4493 if (op1 == const0_rtx && (code == LT || code == GE)
4494 && GET_MODE_CLASS (mode) == MODE_INT
4495 && (normalizep || STORE_FLAG_VALUE == 1
4496 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4497 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4498 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4500 subtarget = target;
4502 /* If the result is to be wider than OP0, it is best to convert it
4503 first. If it is to be narrower, it is *incorrect* to convert it
4504 first. */
4505 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4507 op0 = protect_from_queue (op0, 0);
4508 op0 = convert_modes (target_mode, mode, op0, 0);
4509 mode = target_mode;
4512 if (target_mode != mode)
4513 subtarget = 0;
4515 if (code == GE)
4516 op0 = expand_unop (mode, one_cmpl_optab, op0,
4517 ((STORE_FLAG_VALUE == 1 || normalizep)
4518 ? 0 : subtarget), 0);
4520 if (STORE_FLAG_VALUE == 1 || normalizep)
4521 /* If we are supposed to produce a 0/1 value, we want to do
4522 a logical shift from the sign bit to the low-order bit; for
4523 a -1/0 value, we do an arithmetic shift. */
4524 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4525 size_int (GET_MODE_BITSIZE (mode) - 1),
4526 subtarget, normalizep != -1);
4528 if (mode != target_mode)
4529 op0 = convert_modes (target_mode, mode, op0, 0);
4531 return op0;
4534 if (icode != CODE_FOR_nothing)
4536 insn_operand_predicate_fn pred;
4538 /* We think we may be able to do this with a scc insn. Emit the
4539 comparison and then the scc insn.
4541 compare_from_rtx may call emit_queue, which would be deleted below
4542 if the scc insn fails. So call it ourselves before setting LAST.
4543 Likewise for do_pending_stack_adjust. */
4545 emit_queue ();
4546 do_pending_stack_adjust ();
4547 last = get_last_insn ();
4549 comparison
4550 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4551 if (GET_CODE (comparison) == CONST_INT)
4552 return (comparison == const0_rtx ? const0_rtx
4553 : normalizep == 1 ? const1_rtx
4554 : normalizep == -1 ? constm1_rtx
4555 : const_true_rtx);
4557 /* The code of COMPARISON may not match CODE if compare_from_rtx
4558 decided to swap its operands and reverse the original code.
4560 We know that compare_from_rtx returns either a CONST_INT or
4561 a new comparison code, so it is safe to just extract the
4562 code from COMPARISON. */
4563 code = GET_CODE (comparison);
4565 /* Get a reference to the target in the proper mode for this insn. */
4566 compare_mode = insn_data[(int) icode].operand[0].mode;
4567 subtarget = target;
4568 pred = insn_data[(int) icode].operand[0].predicate;
4569 if (preserve_subexpressions_p ()
4570 || ! (*pred) (subtarget, compare_mode))
4571 subtarget = gen_reg_rtx (compare_mode);
4573 pattern = GEN_FCN (icode) (subtarget);
4574 if (pattern)
4576 emit_insn (pattern);
4578 /* If we are converting to a wider mode, first convert to
4579 TARGET_MODE, then normalize. This produces better combining
4580 opportunities on machines that have a SIGN_EXTRACT when we are
4581 testing a single bit. This mostly benefits the 68k.
4583 If STORE_FLAG_VALUE does not have the sign bit set when
4584 interpreted in COMPARE_MODE, we can do this conversion as
4585 unsigned, which is usually more efficient. */
4586 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4588 convert_move (target, subtarget,
4589 (GET_MODE_BITSIZE (compare_mode)
4590 <= HOST_BITS_PER_WIDE_INT)
4591 && 0 == (STORE_FLAG_VALUE
4592 & ((HOST_WIDE_INT) 1
4593 << (GET_MODE_BITSIZE (compare_mode) -1))));
4594 op0 = target;
4595 compare_mode = target_mode;
4597 else
4598 op0 = subtarget;
4600 /* If we want to keep subexpressions around, don't reuse our
4601 last target. */
4603 if (preserve_subexpressions_p ())
4604 subtarget = 0;
4606 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4607 we don't have to do anything. */
4608 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4610 /* STORE_FLAG_VALUE might be the most negative number, so write
4611 the comparison this way to avoid a compiler-time warning. */
4612 else if (- normalizep == STORE_FLAG_VALUE)
4613 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4615 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4616 makes it hard to use a value of just the sign bit due to
4617 ANSI integer constant typing rules. */
4618 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4619 && (STORE_FLAG_VALUE
4620 & ((HOST_WIDE_INT) 1
4621 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4622 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4623 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4624 subtarget, normalizep == 1);
4625 else if (STORE_FLAG_VALUE & 1)
4627 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4628 if (normalizep == -1)
4629 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4631 else
4632 abort ();
4634 /* If we were converting to a smaller mode, do the
4635 conversion now. */
4636 if (target_mode != compare_mode)
4638 convert_move (target, op0, 0);
4639 return target;
4641 else
4642 return op0;
4646 delete_insns_since (last);
4648 /* If expensive optimizations are enabled, use different pseudo registers
4649 for each insn, instead of reusing the same pseudo. This leads to better
4650 CSE, but slows down the compiler, since there are more pseudos. */
4651 subtarget = (!flag_expensive_optimizations
4652 && (target_mode == mode)) ? target : NULL_RTX;
4654 /* If we reached here, we can't do this with a scc insn. However, there
4655 are some comparisons that can be done directly. For example, if
4656 this is an equality comparison of integers, we can try to exclusive-or
4657 (or subtract) the two operands and use a recursive call to try the
4658 comparison with zero. Don't do any of these cases if branches are
4659 very cheap. */
4661 if (BRANCH_COST > 0
4662 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4663 && op1 != const0_rtx)
4665 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4666 OPTAB_WIDEN);
4668 if (tem == 0)
4669 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4670 OPTAB_WIDEN);
4671 if (tem != 0)
4672 tem = emit_store_flag (target, code, tem, const0_rtx,
4673 mode, unsignedp, normalizep);
4674 if (tem == 0)
4675 delete_insns_since (last);
4676 return tem;
4679 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4680 the constant zero. Reject all other comparisons at this point. Only
4681 do LE and GT if branches are expensive since they are expensive on
4682 2-operand machines. */
4684 if (BRANCH_COST == 0
4685 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4686 || (code != EQ && code != NE
4687 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4688 return 0;
4690 /* See what we need to return. We can only return a 1, -1, or the
4691 sign bit. */
4693 if (normalizep == 0)
4695 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4696 normalizep = STORE_FLAG_VALUE;
4698 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4699 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4700 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4702 else
4703 return 0;
4706 /* Try to put the result of the comparison in the sign bit. Assume we can't
4707 do the necessary operation below. */
4709 tem = 0;
4711 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4712 the sign bit set. */
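/* Editor's note: e.g. a == 0 gives 0 | -1 == -1 (sign set); a == 5
   gives 5 | 4 == 5 (sign clear); any negative a already has the sign
   bit set in a itself.  */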
4714 if (code == LE)
4716 /* This is destructive, so SUBTARGET can't be OP0. */
4717 if (rtx_equal_p (subtarget, op0))
4718 subtarget = 0;
4720 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4721 OPTAB_WIDEN);
4722 if (tem)
4723 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4724 OPTAB_WIDEN);
4727 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4728 number of bits in the mode of OP0, minus one. */
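/* Editor's note: the result is (a >> 31) - a for a 32-bit mode; e.g.
   a == 5: 0 - 5 == -5 (sign set); a == 0: 0 - 0 == 0; a == -5:
   -1 + 5 == 4 (sign clear); a == INT_MIN: -1 - INT_MIN == INT_MAX
   (sign clear).  */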
4730 if (code == GT)
4732 if (rtx_equal_p (subtarget, op0))
4733 subtarget = 0;
4735 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4736 size_int (GET_MODE_BITSIZE (mode) - 1),
4737 subtarget, 0);
4738 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4739 OPTAB_WIDEN);
4742 if (code == EQ || code == NE)
4744 /* For EQ or NE, one way to do the comparison is to apply an operation
4745 that converts the operand into a positive number if it is nonzero
4746 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4747 for NE we negate. This puts the result in the sign bit. Then we
4748 normalize with a shift, if needed.
4750 Two operations that can do the above actions are ABS and FFS, so try
4751 them. If that doesn't work, and MODE is smaller than a full word,
4752 we can use zero-extension to the wider mode (an unsigned conversion)
4753 as the operation. */
4755 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4756 that is compensated by the subsequent overflow when subtracting
4757 one / negating. */
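/* Editor's note: with the ABS variant, a != 0 makes abs (a) >= 1, so
   abs (a) - 1 is nonnegative (EQ false) and -abs (a) is negative (NE
   true); a == 0 gives -1 resp. 0. For a == INT_MIN, abs wraps back to
   INT_MIN and the subsequent subtract/negate wraps again to INT_MAX
   resp. INT_MIN, which still carry the correct sign bit.  */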
4759 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4760 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4761 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4762 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4763 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4765 op0 = protect_from_queue (op0, 0);
4766 tem = convert_modes (word_mode, mode, op0, 1);
4767 mode = word_mode;
4770 if (tem != 0)
4772 if (code == EQ)
4773 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4774 0, OPTAB_WIDEN);
4775 else
4776 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4779 /* If we couldn't do it that way, for NE we can "or" the two's complement
4780 of the value with itself. For EQ, we take the one's complement of
4781 that "or", which is an extra insn, so we only handle EQ if branches
4782 are expensive. */
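/* Editor's note: a | -a is negative exactly when a is nonzero,
   INT_MIN included (-INT_MIN wraps to INT_MIN); e.g. a == 3 gives
   3 | -3 == -1 (sign set), while a == 0 gives 0.  */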
4784 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4786 if (rtx_equal_p (subtarget, op0))
4787 subtarget = 0;
4789 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4790 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4791 OPTAB_WIDEN);
4793 if (tem && code == EQ)
4794 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4798 if (tem && normalizep)
4799 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4800 size_int (GET_MODE_BITSIZE (mode) - 1),
4801 subtarget, normalizep == 1);
4803 if (tem)
4805 if (GET_MODE (tem) != target_mode)
4807 convert_move (target, tem, 0);
4808 tem = target;
4810 else if (!subtarget)
4812 emit_move_insn (target, tem);
4813 tem = target;
4816 else
4817 delete_insns_since (last);
4819 return tem;
4822 /* Like emit_store_flag, but always succeeds. */
4824 rtx
4825 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
4826 enum machine_mode mode, int unsignedp, int normalizep)
4828 rtx tem, label;
4830 /* First see if emit_store_flag can do the job. */
4831 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4832 if (tem != 0)
4833 return tem;
4835 if (normalizep == 0)
4836 normalizep = 1;
4838 /* If this failed, we have to do this with set/compare/jump/set code. */
4840 if (GET_CODE (target) != REG
4841 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4842 target = gen_reg_rtx (GET_MODE (target));
4844 emit_move_insn (target, const1_rtx);
4845 label = gen_label_rtx ();
4846 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4847 NULL_RTX, label);
4849 emit_move_insn (target, const0_rtx);
4850 emit_label (label);
4852 return target;
4855 /* Perform possibly multi-word comparison and conditional jump to LABEL
4856 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4858 The algorithm is based on the code in expr.c:do_jump.
4860 Note that this does not perform a general comparison. Only variants
4861 generated within expmed.c are correctly handled, others abort (but could
4862 be handled if needed). */
4864 static void
4865 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
4866 rtx label)
4868 /* If this mode is an integer too wide to compare properly,
4869 compare word by word. Rely on cse to optimize constant cases. */
4871 if (GET_MODE_CLASS (mode) == MODE_INT
4872 && ! can_compare_p (op, mode, ccp_jump))
4874 rtx label2 = gen_label_rtx ();
4876 switch (op)
4878 case LTU:
4879 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4880 break;
4882 case LEU:
4883 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4884 break;
4886 case LT:
4887 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4888 break;
4890 case GT:
4891 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4892 break;
4894 case GE:
4895 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4896 break;
4898 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4899 those are the only equality operations we do. */
4900 case EQ:
4901 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4902 abort ();
4903 do_jump_by_parts_equality_rtx (arg1, label2, label);
4904 break;
4906 case NE:
4907 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4908 abort ();
4909 do_jump_by_parts_equality_rtx (arg1, label, label2);
4910 break;
4912 default:
4913 abort ();
4916 emit_label (label2);
4918 else
4919 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);