/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed (void)
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}
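
/* Editor's note: a minimal sketch, not part of the original file, of how
   the cost tables filled in above are meant to be read.  After
   init_expmed, shift_cost[m] approximates the rtx_cost of (ashift reg m)
   and shiftadd_cost[m] that of (plus (mult reg 2^m) reg); 32000 marks
   shift counts the target cannot do in one recognized insn.  The helper
   name below is hypothetical.  */
#if 0
static int
example_cheaper_to_shiftadd (int m)
{
  /* Nonzero if multiplying by (2^m + 1) via one shift-and-add insn beats
     a shift followed by a separate add on this target.  */
  return shiftadd_cost[m] < shift_cost[m] + add_cost;
}
#endif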
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
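
/* Editor's note: an illustrative, hypothetical caller, not from the
   original file.  negate_rtx either folds the negation at compile time
   (e.g. for a CONST_INT) or falls back to emitting a neg insn.  */
#if 0
static rtx
example_negate_constant (void)
{
  /* Folds immediately: returns (const_int -5) with no insn emitted.  */
  return negate_rtx (SImode, GEN_INT (5));
}
#endif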
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
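
/* Editor's note: illustrative use only, not from the original file.
   Callers query the operand mode an extraction pattern insists on
   before building its operands.  */
#if 0
static void
example_query_insv (void)
{
  /* Mode the insv pattern wants for its value operand (operand 3);
     MAX_MACHINE_MODE means the target has no insv at all.  */
  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
  if (op_mode != MAX_MACHINE_MODE)
    {
      /* Safe to build insv operands in op_mode here.  */
    }
}
#endif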
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value, HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);
  /* Use vec_set patterns for storing parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_set_optab->handlers[(int)GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      /* The position operand must satisfy operand 2's predicate; the copy
         must therefore be made in MODE2, not MODE1.  */
      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }
  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode),
                           total_size);
        }
      return value;
    }
  /* From here on we can assume that the field to be stored fits within
     a single word (whatever mode that is), since it is narrower than a
     word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);
  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
                          ? word_mode : int_mode_for_mode (GET_MODE (value))),
                         value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment. If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode. Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);

  return value;
}
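
/* Editor's note: an illustrative, hypothetical caller, not from the
   original file.  Stores an 8-bit field at bit 8 of a 32-bit pseudo,
   leaving the other 24 bits intact.  */
#if 0
static void
example_store_bit_field (void)
{
  rtx reg = gen_reg_rtx (SImode);
  /* bitsize 8, bitnum 8, field mode QImode, structure size unknown.  */
  store_bit_field (reg, 8, 8, QImode, GEN_INT (0x5a), -1);
}
#endif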
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
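
/* Editor's note: a worked example, not from the original file, of the
   mask-and-ior sequence above in plain C.  Storing the value 5 into a
   3-bit field at BITPOS 4 of a byte clears the field with the
   complemented mask (mask_rtx (mode, 4, 3, 1), i.e. ~0x70) and ORs in
   the shifted value 0x50.  */
#if 0
static unsigned char
example_store_fixed (unsigned char op0)
{
  /* op0 = (op0 AND ~mask) OR (value << bitpos), as the RTL does.  */
  return (op0 & ~(0x7 << 4)) | ((5 & 0x7) << 4);
}
#endif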
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
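
/* Editor's note: a sketch, not from the original file, mirroring the
   chunking arithmetic of the loop above for a little-endian target with
   byte-aligned memory (unit = 8): a 12-bit field at BITPOS 6 is stored
   as 2 bits at bit 6 of byte 0, then 8 bits at byte 1, then 2 bits at
   byte 2, each chunk via store_fixed_bit_field.  */
#if 0
static void
example_split_chunks (void)
{
  unsigned bitsdone = 0, bitpos = 6, bitsize = 12, unit = 8;
  while (bitsdone < bitsize)
    {
      unsigned offset = (bitpos + bitsdone) / unit;   /* byte index */
      unsigned thispos = (bitpos + bitsdone) % unit;  /* bit within byte */
      unsigned thissize = bitsize - bitsdone;
      if (thissize > unit - thispos)
        thissize = unit - thispos;
      /* Chunks produced: (offset 0, pos 6, size 2), (1, 0, 8), (2, 0, 2).  */
      bitsdone += thissize;
    }
}
#endif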
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the field (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode,
                   HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_extract_optab->handlers[(int)GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      /* The position operand must satisfy operand 2's predicate; the copy
         must therefore be made in MODE2, not MODE1.  */
      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return extract_bit_field (dest, bitsize,
                                    bitnum - pos * GET_MODE_BITSIZE (innermode),
                                    unsignedp, target, mode, tmode, total_size);
        }
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((GET_CODE (op0) != MEM
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (GET_CODE (op0) == MEM
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();   /* Should probably push op0 out to memory and then
                   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);
  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
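
/* Editor's note: an illustrative, hypothetical caller, not from the
   original file.  Pulls an unsigned 8-bit field starting at bit 8 out
   of a 32-bit pseudo.  */
#if 0
static rtx
example_extract_bit_field (rtx reg)
{
  /* bitsize 8, bitnum 8, unsignedp 1, no preferred target; natural
     field mode QImode, desired mode SImode, container size unknown.  */
  return extract_bit_field (reg, 8, 8, 1, NULL_RTX,
                            QImode, SImode, -1);
}
#endif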
1633 /* Extract a bit field using shifts and boolean operations
1634 Returns an rtx to represent the value.
1635 OP0 addresses a register (word) or memory (byte).
1636 BITPOS says which bit within the word or byte the bit field starts in.
1637 OFFSET says how many bytes farther the bit field starts;
1638 it is 0 if OP0 is a register.
1639 BITSIZE says how many bits long the bit field is.
1640 (If OP0 is a register, it may be narrower than a full word,
1641 but BITPOS still counts within a full word,
1642 which is significant on bigendian machines.)
1644 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1645 If TARGET is nonzero, attempts to store the value there
1646 and return TARGET, but this is not guaranteed.
1647 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1649 static rtx
1650 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1651 unsigned HOST_WIDE_INT offset,
1652 unsigned HOST_WIDE_INT bitsize,
1653 unsigned HOST_WIDE_INT bitpos, rtx target,
1654 int unsignedp)
1656 unsigned int total_bits = BITS_PER_WORD;
1657 enum machine_mode mode;
1659 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1661 /* Special treatment for a bit field split across two registers. */
1662 if (bitsize + bitpos > BITS_PER_WORD)
1663 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1665 else
1667 /* Get the proper mode to use for this field. We want a mode that
1668 includes the entire field. If such a mode would be larger than
1669 a word, we won't be doing the extraction the normal way. */
1671 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1672 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1674 if (mode == VOIDmode)
1675 /* The only way this should occur is if the field spans word
1676 boundaries. */
1677 return extract_split_bit_field (op0, bitsize,
1678 bitpos + offset * BITS_PER_UNIT,
1679 unsignedp);
1681 total_bits = GET_MODE_BITSIZE (mode);
1683 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1684 be in the range 0 to total_bits-1, and put any excess bytes in
1685 OFFSET. */
1686 if (bitpos >= total_bits)
1688 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1689 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1690 * BITS_PER_UNIT);
1693 /* Get ref to an aligned byte, halfword, or word containing the field.
1694 Adjust BITPOS to be position within a word,
1695 and OFFSET to be the offset of that word.
1696 Then alter OP0 to refer to that word. */
1697 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1698 offset -= (offset % (total_bits / BITS_PER_UNIT));
1699 op0 = adjust_address (op0, mode, offset);
1702 mode = GET_MODE (op0);
1704 if (BYTES_BIG_ENDIAN)
1705 /* BITPOS is the distance between our msb and that of OP0.
1706 Convert it to the distance from the lsb. */
1707 bitpos = total_bits - bitsize - bitpos;
1709 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1710 We have reduced the big-endian case to the little-endian case. */
1712 if (unsignedp)
1714 if (bitpos)
1716 /* If the field does not already start at the lsb,
1717 shift it so it does. */
1718 tree amount = build_int_2 (bitpos, 0);
1719 /* Maybe propagate the target for the shift. */
1720 /* But not if we will return it--could confuse integrate.c. */
1721 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1722 && !REG_FUNCTION_VALUE_P (target)
1723 ? target : 0);
1724 if (tmode != mode) subtarget = 0;
1725 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1727 /* Convert the value to the desired mode. */
1728 if (mode != tmode)
1729 op0 = convert_to_mode (tmode, op0, 1);
1731 /* Unless the msb of the field used to be the msb when we shifted,
1732 mask out the upper bits. */
1734 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1735 return expand_binop (GET_MODE (op0), and_optab, op0,
1736 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1737 target, 1, OPTAB_LIB_WIDEN);
1738 return op0;
1741 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1742 then arithmetic-shift its lsb to the lsb of the word. */
1743 op0 = force_reg (mode, op0);
1744 if (mode != tmode)
1745 target = 0;
1747 /* Find the narrowest integer mode that contains the field. */
1749 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1750 mode = GET_MODE_WIDER_MODE (mode))
1751 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1753 op0 = convert_to_mode (mode, op0, 0);
1754 break;
1757 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1759 tree amount
1760 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1761 /* Maybe propagate the target for the shift. */
1762 /* But not if we will return the result--could confuse integrate.c. */
1763 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1764 && ! REG_FUNCTION_VALUE_P (target)
1765 ? target : 0);
1766 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1769 return expand_shift (RSHIFT_EXPR, mode, op0,
1770 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1771 target, 0);
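/* Illustrative sketch, not part of expmed.c: the shift-and-mask
   technique above written out for a plain 32-bit word.  The
   example_* names are invented for illustration; the signed case
   assumes >> on a negative int32_t is an arithmetic shift, which the
   rtl sequences above guarantee by construction.  */
#if 0
#include <stdint.h>

/* Unsigned field: shift the field's lsb down to bit 0, then mask off
   everything above BITSIZE bits.  */
static uint32_t
example_extract_unsigned (uint32_t word, int bitpos, int bitsize)
{
  uint32_t t = word >> bitpos;
  if (bitsize == 32)
    return t;
  return t & (((uint32_t) 1 << bitsize) - 1);
}

/* Signed field: shift the field's msb up to the word's msb, then
   arithmetic-shift its lsb back down to bit 0.  */
static int32_t
example_extract_signed (uint32_t word, int bitpos, int bitsize)
{
  int32_t t = (int32_t) (word << (32 - bitsize - bitpos));
  return t >> (32 - bitsize);
}
#endif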
1774 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1775 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1776 complement of that if COMPLEMENT. The mask is truncated if
1777 necessary to the width of mode MODE. The mask is zero-extended if
1778 BITSIZE+BITPOS is too small for MODE. */
1780 static rtx
1781 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1783 HOST_WIDE_INT masklow, maskhigh;
1785 if (bitsize == 0)
1786 masklow = 0;
1787 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1788 masklow = (HOST_WIDE_INT) -1 << bitpos;
1789 else
1790 masklow = 0;
1792 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1793 masklow &= ((unsigned HOST_WIDE_INT) -1
1794 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1796 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1797 maskhigh = -1;
1798 else
1799 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1801 if (bitsize == 0)
1802 maskhigh = 0;
1803 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1804 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1805 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1806 else
1807 maskhigh = 0;
1809 if (complement)
1811 maskhigh = ~maskhigh;
1812 masklow = ~masklow;
1815 return immed_double_const (masklow, maskhigh, mode);
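/* Illustrative sketch, not part of expmed.c: the single-word case of
   this mask on a 64-bit host word, assuming BITPOS + BITSIZE <= 64.
   The example_mask name is invented for illustration.  */
#if 0
#include <stdint.h>

static uint64_t
example_mask (int bitpos, int bitsize, int complement)
{
  uint64_t ones = (bitsize >= 64
		   ? ~(uint64_t) 0
		   : ((uint64_t) 1 << bitsize) - 1);
  uint64_t mask = ones << bitpos;   /* bitpos 3, bitsize 4 -> 0x78 */
  return complement ? ~mask : mask;
}
#endif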
1818 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1819 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1821 static rtx
1822 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1824 unsigned HOST_WIDE_INT v = INTVAL (value);
1825 HOST_WIDE_INT low, high;
1827 if (bitsize < HOST_BITS_PER_WIDE_INT)
1828 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1830 if (bitpos < HOST_BITS_PER_WIDE_INT)
1832 low = v << bitpos;
1833 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1835 else
1837 low = 0;
1838 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1841 return immed_double_const (low, high, mode);
1844 /* Extract a bit field that is split across two words
1845 and return an RTX for the result.
1847 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1848 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1849 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1851 static rtx
1852 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1853 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1855 unsigned int unit;
1856 unsigned int bitsdone = 0;
1857 rtx result = NULL_RTX;
1858 int first = 1;
1860 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1861 much at a time. */
1862 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1863 unit = BITS_PER_WORD;
1864 else
1865 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1867 while (bitsdone < bitsize)
1869 unsigned HOST_WIDE_INT thissize;
1870 rtx part, word;
1871 unsigned HOST_WIDE_INT thispos;
1872 unsigned HOST_WIDE_INT offset;
1874 offset = (bitpos + bitsdone) / unit;
1875 thispos = (bitpos + bitsdone) % unit;
1877 /* THISSIZE must not overrun a word boundary. Otherwise,
1878 extract_fixed_bit_field will call us again, and we will mutually
1879 recurse forever. */
1880 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1881 thissize = MIN (thissize, unit - thispos);
1883 /* If OP0 is a register, then handle OFFSET here.
1885 When handling multiword bitfields, extract_bit_field may pass
1886 down a word_mode SUBREG of a larger REG for a bitfield that actually
1887 crosses a word boundary. Thus, for a SUBREG, we must find
1888 the current word starting from the base register. */
1889 if (GET_CODE (op0) == SUBREG)
1891 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1892 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1893 GET_MODE (SUBREG_REG (op0)));
1894 offset = 0;
1896 else if (GET_CODE (op0) == REG)
1898 word = operand_subword_force (op0, offset, GET_MODE (op0));
1899 offset = 0;
1901 else
1902 word = op0;
1904 /* Extract the parts in bit-counting order,
1905 whose meaning is determined by BYTES_PER_UNIT.
1906 OFFSET is in UNITs, and UNIT is in bits.
1907 extract_fixed_bit_field wants offset in bytes. */
1908 part = extract_fixed_bit_field (word_mode, word,
1909 offset * unit / BITS_PER_UNIT,
1910 thissize, thispos, 0, 1);
1911 bitsdone += thissize;
1913 /* Shift this part into place for the result. */
1914 if (BYTES_BIG_ENDIAN)
1916 if (bitsize != bitsdone)
1917 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1918 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1920 else
1922 if (bitsdone != thissize)
1923 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1924 build_int_2 (bitsdone - thissize, 0), 0, 1);
1927 if (first)
1928 result = part;
1929 else
1930 /* Combine the parts with bitwise or. This works
1931 because we extracted each part as an unsigned bit field. */
1932 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1933 OPTAB_LIB_WIDEN);
1935 first = 0;
1938 /* Unsigned bit field: we are done. */
1939 if (unsignedp)
1940 return result;
1941 /* Signed bit field: sign-extend with two arithmetic shifts. */
1942 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1943 build_int_2 (BITS_PER_WORD - bitsize, 0),
1944 NULL_RTX, 0);
1945 return expand_shift (RSHIFT_EXPR, word_mode, result,
1946 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
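/* Illustrative sketch, not part of expmed.c: the final pair of shifts
   above, i.e. sign-extending a BITSIZE-bit field sitting in the low
   bits of a 32-bit word.  The example_sign_extend name is invented;
   it assumes >> on a negative int32_t is an arithmetic shift.  */
#if 0
#include <stdint.h>

static int32_t
example_sign_extend (uint32_t field, int bitsize)
{
  int shift = 32 - bitsize;
  /* example_sign_extend (0x7, 3) == -1; example_sign_extend (0x3, 3) == 3.  */
  return (int32_t) (field << shift) >> shift;
}
#endif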
1949 /* Add INC into TARGET. */
1951 void
1952 expand_inc (rtx target, rtx inc)
1954 rtx value = expand_binop (GET_MODE (target), add_optab,
1955 target, inc,
1956 target, 0, OPTAB_LIB_WIDEN);
1957 if (value != target)
1958 emit_move_insn (target, value);
1961 /* Subtract DEC from TARGET. */
1963 void
1964 expand_dec (rtx target, rtx dec)
1966 rtx value = expand_binop (GET_MODE (target), sub_optab,
1967 target, dec,
1968 target, 0, OPTAB_LIB_WIDEN);
1969 if (value != target)
1970 emit_move_insn (target, value);
1973 /* Output a shift instruction for expression code CODE,
1974 with SHIFTED being the rtx for the value to shift,
1975 and AMOUNT the tree for the amount to shift by.
1976 Store the result in the rtx TARGET, if that is convenient.
1977 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1978 Return the rtx for where the value is. */
1980 rtx
1981 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
1982 tree amount, rtx target, int unsignedp)
1984 rtx op1, temp = 0;
1985 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1986 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1987 int try;
1989 /* Previously detected shift-counts computed by NEGATE_EXPR
1990 and shifted in the other direction; but that does not work
1991 on all machines. */
1993 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1995 if (SHIFT_COUNT_TRUNCATED)
1997 if (GET_CODE (op1) == CONST_INT
1998 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1999 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2000 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2001 % GET_MODE_BITSIZE (mode));
2002 else if (GET_CODE (op1) == SUBREG
2003 && subreg_lowpart_p (op1))
2004 op1 = SUBREG_REG (op1);
2007 if (op1 == const0_rtx)
2008 return shifted;
2010 for (try = 0; temp == 0 && try < 3; try++)
2012 enum optab_methods methods;
2014 if (try == 0)
2015 methods = OPTAB_DIRECT;
2016 else if (try == 1)
2017 methods = OPTAB_WIDEN;
2018 else
2019 methods = OPTAB_LIB_WIDEN;
2021 if (rotate)
2023 /* Widening does not work for rotation. */
2024 if (methods == OPTAB_WIDEN)
2025 continue;
2026 else if (methods == OPTAB_LIB_WIDEN)
2028 /* If we have been unable to open-code this by a rotation,
2029 do it as the IOR of two shifts. I.e., to rotate A
2030 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2031 where C is the bitsize of A.
2033 It is theoretically possible that the target machine might
2034 not be able to perform either shift and hence we would
2035 be making two libcalls rather than just the one for the
2036 shift (similarly if IOR could not be done). We will allow
2037 this extremely unlikely lossage to avoid complicating the
2038 code below. */
2040 rtx subtarget = target == shifted ? 0 : target;
2041 rtx temp1;
2042 tree type = TREE_TYPE (amount);
2043 tree new_amount = make_tree (type, op1);
2044 tree other_amount
2045 = fold (build (MINUS_EXPR, type,
2046 convert (type,
2047 build_int_2 (GET_MODE_BITSIZE (mode),
2048 0)),
2049 amount));
2051 shifted = force_reg (mode, shifted);
2053 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2054 mode, shifted, new_amount, subtarget, 1);
2055 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2056 mode, shifted, other_amount, 0, 1);
2057 return expand_binop (mode, ior_optab, temp, temp1, target,
2058 unsignedp, methods);
2061 temp = expand_binop (mode,
2062 left ? rotl_optab : rotr_optab,
2063 shifted, op1, target, unsignedp, methods);
2065 /* If we don't have the rotate, but we are rotating by a constant
2066 that is in range, try a rotate in the opposite direction. */
2068 if (temp == 0 && GET_CODE (op1) == CONST_INT
2069 && INTVAL (op1) > 0
2070 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2071 temp = expand_binop (mode,
2072 left ? rotr_optab : rotl_optab,
2073 shifted,
2074 GEN_INT (GET_MODE_BITSIZE (mode)
2075 - INTVAL (op1)),
2076 target, unsignedp, methods);
2078 else if (unsignedp)
2079 temp = expand_binop (mode,
2080 left ? ashl_optab : lshr_optab,
2081 shifted, op1, target, unsignedp, methods);
2083 /* Do arithmetic shifts.
2084 Also, if we are going to widen the operand, we can just as well
2085 use an arithmetic right-shift instead of a logical one. */
2086 if (temp == 0 && ! rotate
2087 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2089 enum optab_methods methods1 = methods;
2091 /* If trying to widen a log shift to an arithmetic shift,
2092 don't accept an arithmetic shift of the same size. */
2093 if (unsignedp)
2094 methods1 = OPTAB_MUST_WIDEN;
2096 /* Arithmetic shift */
2098 temp = expand_binop (mode,
2099 left ? ashl_optab : ashr_optab,
2100 shifted, op1, target, unsignedp, methods1);
2103 /* We used to try extzv here for logical right shifts, but that was
2104 only useful for one machine, the VAX, and caused poor code
2105 generation there for lshrdi3, so the code was deleted and a
2106 define_expand for lshrsi3 was added to vax.md. */
2109 if (temp == 0)
2110 abort ();
2111 return temp;
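/* Illustrative sketch, not part of expmed.c: the two-shift rotate
   fallback above, for a 32-bit value.  The (-n & 31) form also covers
   n == 0 without shifting by the full width, which C leaves
   undefined; the rtl above instead returns early when the count is
   the constant zero.  The example_rotl32 name is invented.  */
#if 0
#include <stdint.h>

static uint32_t
example_rotl32 (uint32_t x, unsigned int n)
{
  n &= 31;
  return (x << n) | (x >> (-n & 31));
}
#endif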
2114 enum alg_code { alg_zero, alg_m, alg_shift,
2115 alg_add_t_m2, alg_sub_t_m2,
2116 alg_add_factor, alg_sub_factor,
2117 alg_add_t2_m, alg_sub_t2_m,
2118 alg_add, alg_subtract, alg_factor, alg_shiftop };
2120 /* This structure records a sequence of operations.
2121 `ops' is the number of operations recorded.
2122 `cost' is their total cost.
2123 The operations are stored in `op' and the corresponding
2124 logarithms of the integer coefficients in `log'.
2126 These are the operations:
2127 alg_zero total := 0;
2128 alg_m total := multiplicand;
2129 alg_shift total := total * coeff;
2130 alg_add_t_m2 total := total + multiplicand * coeff;
2131 alg_sub_t_m2 total := total - multiplicand * coeff;
2132 alg_add_factor total := total * coeff + total;
2133 alg_sub_factor total := total * coeff - total;
2134 alg_add_t2_m total := total * coeff + multiplicand;
2135 alg_sub_t2_m total := total * coeff - multiplicand;
2137 The first operand must be either alg_zero or alg_m. */
2139 struct algorithm
2141 short cost;
2142 short ops;
2143 /* The size of the OP and LOG fields are not directly related to the
2144 word size, but the worst-case algorithms will be if we have few
2145 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2146 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2147 in total wordsize operations. */
2148 enum alg_code op[MAX_BITS_PER_WORD];
2149 char log[MAX_BITS_PER_WORD];
2152 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT, int);
2153 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2154 int, unsigned HOST_WIDE_INT *,
2155 int *, int *);
2156 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2157 /* Compute and return the best algorithm for multiplying by T.
2158 The algorithm must cost less than COST_LIMIT.
2159 If retval.cost >= COST_LIMIT, no algorithm was found and all
2160 other fields of the returned struct are undefined. */
2162 static void
2163 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2164 int cost_limit)
2166 int m;
2167 struct algorithm *alg_in, *best_alg;
2168 int cost;
2169 unsigned HOST_WIDE_INT q;
2171 /* Indicate that no algorithm is yet found. If no algorithm
2172 is found, this value will be returned and indicate failure. */
2173 alg_out->cost = cost_limit;
2175 if (cost_limit <= 0)
2176 return;
2178 /* t == 1 can be done in zero cost. */
2179 if (t == 1)
2181 alg_out->ops = 1;
2182 alg_out->cost = 0;
2183 alg_out->op[0] = alg_m;
2184 return;
2187 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2188 fail now. */
2189 if (t == 0)
2191 if (zero_cost >= cost_limit)
2192 return;
2193 else
2195 alg_out->ops = 1;
2196 alg_out->cost = zero_cost;
2197 alg_out->op[0] = alg_zero;
2198 return;
2202 /* We'll need a couple of extra algorithm structures now. */
2204 alg_in = alloca (sizeof (struct algorithm));
2205 best_alg = alloca (sizeof (struct algorithm));
2207 /* If we have a group of zero bits at the low-order part of T, try
2208 multiplying by the remaining bits and then doing a shift. */
2210 if ((t & 1) == 0)
2212 m = floor_log2 (t & -t); /* m = number of low zero bits */
2213 if (m < BITS_PER_WORD)
2215 q = t >> m;
2216 cost = shift_cost[m];
2217 synth_mult (alg_in, q, cost_limit - cost);
2219 cost += alg_in->cost;
2220 if (cost < cost_limit)
2222 struct algorithm *x;
2223 x = alg_in, alg_in = best_alg, best_alg = x;
2224 best_alg->log[best_alg->ops] = m;
2225 best_alg->op[best_alg->ops] = alg_shift;
2226 cost_limit = cost;
2231 /* If we have an odd number, add or subtract one. */
2232 if ((t & 1) != 0)
2234 unsigned HOST_WIDE_INT w;
2236 for (w = 1; (w & t) != 0; w <<= 1)
2238 /* If T was -1, then W will be zero after the loop. This is another
2239 case where T ends with ...111. Handling it by multiplying by (T + 1)
2240 and subtracting 1 produces slightly better code, and selects an
2241 algorithm much faster than treating it like the ...0111 case
2242 below. */
2243 if (w == 0
2244 || (w > 2
2245 /* Reject the case where t is 3.
2246 Thus we prefer addition in that case. */
2247 && t != 3))
2249 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2251 cost = add_cost;
2252 synth_mult (alg_in, t + 1, cost_limit - cost);
2254 cost += alg_in->cost;
2255 if (cost < cost_limit)
2257 struct algorithm *x;
2258 x = alg_in, alg_in = best_alg, best_alg = x;
2259 best_alg->log[best_alg->ops] = 0;
2260 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2261 cost_limit = cost;
2264 else
2266 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2268 cost = add_cost;
2269 synth_mult (alg_in, t - 1, cost_limit - cost);
2271 cost += alg_in->cost;
2272 if (cost < cost_limit)
2274 struct algorithm *x;
2275 x = alg_in, alg_in = best_alg, best_alg = x;
2276 best_alg->log[best_alg->ops] = 0;
2277 best_alg->op[best_alg->ops] = alg_add_t_m2;
2278 cost_limit = cost;
2283 /* Look for factors of t of the form
2284 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2285 If we find such a factor, we can multiply by t using an algorithm that
2286 multiplies by q, shift the result by m and add/subtract it to itself.
2288 We search for large factors first and loop down, even if large factors
2289 are less probable than small; if we find a large factor we will find a
2290 good sequence quickly, and therefore be able to prune (by decreasing
2291 COST_LIMIT) the search. */
2293 for (m = floor_log2 (t - 1); m >= 2; m--)
2295 unsigned HOST_WIDE_INT d;
2297 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2298 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2300 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2301 synth_mult (alg_in, t / d, cost_limit - cost);
2303 cost += alg_in->cost;
2304 if (cost < cost_limit)
2306 struct algorithm *x;
2307 x = alg_in, alg_in = best_alg, best_alg = x;
2308 best_alg->log[best_alg->ops] = m;
2309 best_alg->op[best_alg->ops] = alg_add_factor;
2310 cost_limit = cost;
2312 /* Other factors will have been taken care of in the recursion. */
2313 break;
2316 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2317 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2319 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2320 synth_mult (alg_in, t / d, cost_limit - cost);
2322 cost += alg_in->cost;
2323 if (cost < cost_limit)
2325 struct algorithm *x;
2326 x = alg_in, alg_in = best_alg, best_alg = x;
2327 best_alg->log[best_alg->ops] = m;
2328 best_alg->op[best_alg->ops] = alg_sub_factor;
2329 cost_limit = cost;
2331 break;
2335 /* Try shift-and-add (load effective address) instructions,
2336 i.e. do a*3, a*5, a*9. */
2337 if ((t & 1) != 0)
2339 q = t - 1;
2340 q = q & -q;
2341 m = exact_log2 (q);
2342 if (m >= 0 && m < BITS_PER_WORD)
2344 cost = shiftadd_cost[m];
2345 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2347 cost += alg_in->cost;
2348 if (cost < cost_limit)
2350 struct algorithm *x;
2351 x = alg_in, alg_in = best_alg, best_alg = x;
2352 best_alg->log[best_alg->ops] = m;
2353 best_alg->op[best_alg->ops] = alg_add_t2_m;
2354 cost_limit = cost;
2358 q = t + 1;
2359 q = q & -q;
2360 m = exact_log2 (q);
2361 if (m >= 0 && m < BITS_PER_WORD)
2363 cost = shiftsub_cost[m];
2364 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2366 cost += alg_in->cost;
2367 if (cost < cost_limit)
2369 struct algorithm *x;
2370 x = alg_in, alg_in = best_alg, best_alg = x;
2371 best_alg->log[best_alg->ops] = m;
2372 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2373 cost_limit = cost;
2378 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2379 we have not found any algorithm. */
2380 if (cost_limit == alg_out->cost)
2381 return;
2383 /* If we are getting a too long sequence for `struct algorithm'
2384 to record, make this search fail. */
2385 if (best_alg->ops == MAX_BITS_PER_WORD)
2386 return;
2388 /* Copy the algorithm from temporary space to the space at alg_out.
2389 We avoid using structure assignment because the majority of
2390 best_alg is normally undefined, and this is a critical function. */
2391 alg_out->ops = best_alg->ops + 1;
2392 alg_out->cost = cost_limit;
2393 memcpy (alg_out->op, best_alg->op,
2394 alg_out->ops * sizeof *alg_out->op);
2395 memcpy (alg_out->log, best_alg->log,
2396 alg_out->ops * sizeof *alg_out->log);
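/* Illustrative sketch, not part of expmed.c: one sequence this search
   can find for t == 10, namely { alg_m, alg_add_t2_m (log 2),
   alg_shift (log 1) }.  The example_times_10 name is invented.  */
#if 0
static unsigned int
example_times_10 (unsigned int x)
{
  unsigned int total = x;	/* alg_m: total := multiplicand	    */
  total = (total << 2) + x;	/* alg_add_t2_m: total*4 + x == 5x  */
  total <<= 1;			/* alg_shift: 5x * 2 == 10x	    */
  return total;
}
#endif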
2399 /* Perform a multiplication and return an rtx for the result.
2400 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2401 TARGET is a suggestion for where to store the result (an rtx).
2403 We check specially for a constant integer as OP1.
2404 If you want this check for OP0 as well, then before calling
2405 you should swap the two operands if OP0 would be constant. */
2407 rtx
2408 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2409 int unsignedp)
2411 rtx const_op1 = op1;
2413 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2414 less than or equal in size to `unsigned int' this doesn't matter.
2415 If the mode is larger than `unsigned int', then synth_mult works only
2416 if the constant value exactly fits in an `unsigned int' without any
2417 truncation. This means that multiplying by negative values does
2418 not work; results are off by 2^32 on a 32-bit machine. */
2420 /* If we are multiplying in DImode, it may still be a win
2421 to try to work with shifts and adds. */
2422 if (GET_CODE (op1) == CONST_DOUBLE
2423 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2424 && HOST_BITS_PER_INT >= BITS_PER_WORD
2425 && CONST_DOUBLE_HIGH (op1) == 0)
2426 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2427 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2428 && GET_CODE (op1) == CONST_INT
2429 && INTVAL (op1) < 0)
2430 const_op1 = 0;
2432 /* We used to test optimize here, on the grounds that it's better to
2433 produce a smaller program when -O is not used.
2434 But this causes such a terrible slowdown sometimes
2435 that it seems better to use synth_mult always. */
2437 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2438 && (unsignedp || ! flag_trapv))
2440 struct algorithm alg;
2441 struct algorithm alg2;
2442 HOST_WIDE_INT val = INTVAL (op1);
2443 HOST_WIDE_INT val_so_far;
2444 rtx insn;
2445 int mult_cost;
2446 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2448 /* op0 must be register to make mult_cost match the precomputed
2449 shiftadd_cost array. */
2450 op0 = force_reg (mode, op0);
2452 /* Try to do the computation three ways: multiply by the negative of OP1
2453 and then negate, do the multiplication directly, or do multiplication
2454 by OP1 - 1. */
2456 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2457 mult_cost = MIN (12 * add_cost, mult_cost);
2459 synth_mult (&alg, val, mult_cost);
2461 /* This works only if the inverted value actually fits in an
2462 `unsigned int' */
2463 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2465 synth_mult (&alg2, - val,
2466 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2467 if (alg2.cost + negate_cost < alg.cost)
2468 alg = alg2, variant = negate_variant;
2471 /* This proves very useful for division-by-constant. */
2472 synth_mult (&alg2, val - 1,
2473 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2474 if (alg2.cost + add_cost < alg.cost)
2475 alg = alg2, variant = add_variant;
2477 if (alg.cost < mult_cost)
2479 /* We found something cheaper than a multiply insn. */
2480 int opno;
2481 rtx accum, tem;
2482 enum machine_mode nmode;
2484 op0 = protect_from_queue (op0, 0);
2486 /* Avoid referencing memory over and over.
2487 For speed, but also for correctness when mem is volatile. */
2488 if (GET_CODE (op0) == MEM)
2489 op0 = force_reg (mode, op0);
2491 /* ACCUM starts out either as OP0 or as a zero, depending on
2492 the first operation. */
2494 if (alg.op[0] == alg_zero)
2496 accum = copy_to_mode_reg (mode, const0_rtx);
2497 val_so_far = 0;
2499 else if (alg.op[0] == alg_m)
2501 accum = copy_to_mode_reg (mode, op0);
2502 val_so_far = 1;
2504 else
2505 abort ();
2507 for (opno = 1; opno < alg.ops; opno++)
2509 int log = alg.log[opno];
2510 int preserve = preserve_subexpressions_p ();
2511 rtx shift_subtarget = preserve ? 0 : accum;
2512 rtx add_target
2513 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2514 && ! preserve)
2515 ? target : 0;
2516 rtx accum_target = preserve ? 0 : accum;
2518 switch (alg.op[opno])
2520 case alg_shift:
2521 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2522 build_int_2 (log, 0), NULL_RTX, 0);
2523 val_so_far <<= log;
2524 break;
2526 case alg_add_t_m2:
2527 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2528 build_int_2 (log, 0), NULL_RTX, 0);
2529 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2530 add_target
2531 ? add_target : accum_target);
2532 val_so_far += (HOST_WIDE_INT) 1 << log;
2533 break;
2535 case alg_sub_t_m2:
2536 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2537 build_int_2 (log, 0), NULL_RTX, 0);
2538 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2539 add_target
2540 ? add_target : accum_target);
2541 val_so_far -= (HOST_WIDE_INT) 1 << log;
2542 break;
2544 case alg_add_t2_m:
2545 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2546 build_int_2 (log, 0), shift_subtarget,
2547 0);
2548 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2549 add_target
2550 ? add_target : accum_target);
2551 val_so_far = (val_so_far << log) + 1;
2552 break;
2554 case alg_sub_t2_m:
2555 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2556 build_int_2 (log, 0), shift_subtarget,
2557 0);
2558 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2559 add_target
2560 ? add_target : accum_target);
2561 val_so_far = (val_so_far << log) - 1;
2562 break;
2564 case alg_add_factor:
2565 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2566 build_int_2 (log, 0), NULL_RTX, 0);
2567 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2568 add_target
2569 ? add_target : accum_target);
2570 val_so_far += val_so_far << log;
2571 break;
2573 case alg_sub_factor:
2574 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2575 build_int_2 (log, 0), NULL_RTX, 0);
2576 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2577 (add_target ? add_target
2578 : preserve ? 0 : tem));
2579 val_so_far = (val_so_far << log) - val_so_far;
2580 break;
2582 default:
2583 abort ();
2586 /* Write a REG_EQUAL note on the last insn so that we can cse
2587 multiplication sequences. Note that if ACCUM is a SUBREG,
2588 we've set the inner register and must properly indicate
2589 that. */
2591 tem = op0, nmode = mode;
2592 if (GET_CODE (accum) == SUBREG)
2594 nmode = GET_MODE (SUBREG_REG (accum));
2595 tem = gen_lowpart (nmode, op0);
2598 insn = get_last_insn ();
2599 set_unique_reg_note (insn,
2600 REG_EQUAL,
2601 gen_rtx_MULT (nmode, tem,
2602 GEN_INT (val_so_far)));
2605 if (variant == negate_variant)
2607 val_so_far = - val_so_far;
2608 accum = expand_unop (mode, neg_optab, accum, target, 0);
2610 else if (variant == add_variant)
2612 val_so_far = val_so_far + 1;
2613 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2616 if (val != val_so_far)
2617 abort ();
2619 return accum;
2623 if (GET_CODE (op0) == CONST_DOUBLE)
2625 rtx temp = op0;
2626 op0 = op1;
2627 op1 = temp;
2630 /* Expand x*2.0 as x+x. */
2631 if (GET_CODE (op1) == CONST_DOUBLE
2632 && GET_MODE_CLASS (mode) == MODE_FLOAT)
2634 REAL_VALUE_TYPE d;
2635 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
2637 if (REAL_VALUES_EQUAL (d, dconst2))
2639 op0 = force_reg (GET_MODE (op0), op0);
2640 return expand_binop (mode, add_optab, op0, op0,
2641 target, unsignedp, OPTAB_LIB_WIDEN);
2645 /* This used to use umul_optab if unsigned, but for non-widening multiply
2646 there is no difference between signed and unsigned. */
2647 op0 = expand_binop (mode,
2648 ! unsignedp
2649 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2650 ? smulv_optab : smul_optab,
2651 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2652 if (op0 == 0)
2653 abort ();
2654 return op0;
2657 /* Return the smallest n such that 2**n >= X. */
2659 int
2660 ceil_log2 (unsigned HOST_WIDE_INT x)
2662 return floor_log2 (x - 1) + 1;
2665 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2666 replace division by D, and put the least significant N bits of the result
2667 in *MULTIPLIER_PTR and return the most significant bit.
2669 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2670 needed precision is in PRECISION (should be <= N).
2672 PRECISION should be as small as possible so this function can choose the
2673 multiplier more freely.
2675 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2676 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2678 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2679 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2681 static
2682 unsigned HOST_WIDE_INT
2683 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
2684 unsigned HOST_WIDE_INT *multiplier_ptr,
2685 int *post_shift_ptr, int *lgup_ptr)
2687 HOST_WIDE_INT mhigh_hi, mlow_hi;
2688 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2689 int lgup, post_shift;
2690 int pow, pow2;
2691 unsigned HOST_WIDE_INT nl, dummy1;
2692 HOST_WIDE_INT nh, dummy2;
2694 /* lgup = ceil(log2(divisor)); */
2695 lgup = ceil_log2 (d);
2697 if (lgup > n)
2698 abort ();
2700 pow = n + lgup;
2701 pow2 = n + lgup - precision;
2703 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2705 /* We could handle this with some effort, but this case is much better
2706 handled directly with a scc insn, so rely on the caller using that. */
2707 abort ();
2710 /* mlow = 2^(N + lgup)/d */
2711 if (pow >= HOST_BITS_PER_WIDE_INT)
2713 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2714 nl = 0;
2716 else
2718 nh = 0;
2719 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2721 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2722 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2724 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2725 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2726 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2727 else
2728 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2729 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2730 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2732 if (mhigh_hi && nh - d >= d)
2733 abort ();
2734 if (mhigh_hi > 1 || mlow_hi > 1)
2735 abort ();
2736 /* Assert that mlow < mhigh. */
2737 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2738 abort ();
2740 /* If precision == N, then mlow, mhigh exceed 2^N
2741 (but they do not exceed 2^(N+1)). */
2743 /* Reduce to lowest terms. */
2744 for (post_shift = lgup; post_shift > 0; post_shift--)
2746 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2747 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2748 if (ml_lo >= mh_lo)
2749 break;
2751 mlow_hi = 0;
2752 mlow_lo = ml_lo;
2753 mhigh_hi = 0;
2754 mhigh_lo = mh_lo;
2757 *post_shift_ptr = post_shift;
2758 *lgup_ptr = lgup;
2759 if (n < HOST_BITS_PER_WIDE_INT)
2761 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2762 *multiplier_ptr = mhigh_lo & mask;
2763 return mhigh_lo >= mask;
2765 else
2767 *multiplier_ptr = mhigh_lo;
2768 return mhigh_hi;
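/* Illustrative sketch, not part of expmed.c: for 32-bit unsigned
   division by 3 this function arrives at the classic pair
   multiplier = 0xAAAAAAAB (= ceil(2^33 / 3)) and post_shift = 1, so
   the quotient is the 33-bit-scaled product shifted down by
   32 + post_shift.  The example_udiv3 name is invented.  */
#if 0
#include <stdint.h>

static uint32_t
example_udiv3 (uint32_t x)	/* == x / 3 for every 32-bit x */
{
  return (uint32_t) (((uint64_t) x * 0xAAAAAAABu) >> 33);
}
#endif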
2772 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2773 congruent to 1 (mod 2**N). */
2775 static unsigned HOST_WIDE_INT
2776 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
2778 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2780 /* The algorithm notes that the choice y = x satisfies
2781 x*y == 1 mod 2^3, since x is assumed odd.
2782 Each iteration doubles the number of bits of significance in y. */
2784 unsigned HOST_WIDE_INT mask;
2785 unsigned HOST_WIDE_INT y = x;
2786 int nbit = 3;
2788 mask = (n == HOST_BITS_PER_WIDE_INT
2789 ? ~(unsigned HOST_WIDE_INT) 0
2790 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2792 while (nbit < n)
2794 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2795 nbit *= 2;
2797 return y;
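/* Illustrative sketch, not part of expmed.c: the same Newton
   iteration over a fixed 32-bit word.  Starting from y = x (correct
   to 3 bits), each step doubles the number of correct low-order
   bits, so four steps reach 48 > 32.  The example_invert_mod2_32
   name is invented.  */
#if 0
#include <stdint.h>

static uint32_t
example_invert_mod2_32 (uint32_t x)	/* X must be odd.  */
{
  uint32_t y = x;		/*  3 bits */
  y *= 2 - x * y;		/*  6 bits */
  y *= 2 - x * y;		/* 12 bits */
  y *= 2 - x * y;		/* 24 bits */
  y *= 2 - x * y;		/* 48 bits; x * y == 1 (mod 2^32) */
  return y;
}
#endif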
2800 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2801 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2802 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2803 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2804 become signed.
2806 The result is put in TARGET if that is convenient.
2808 MODE is the mode of operation. */
2810 rtx
2811 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
2812 rtx op1, rtx target, int unsignedp)
2814 rtx tem;
2815 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2817 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2818 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2819 NULL_RTX, 0);
2820 tem = expand_and (mode, tem, op1, NULL_RTX);
2821 adj_operand
2822 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2823 adj_operand);
2825 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2826 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2827 NULL_RTX, 0);
2828 tem = expand_and (mode, tem, op0, NULL_RTX);
2829 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2830 target);
2832 return target;
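/* Illustrative sketch, not part of expmed.c: the identity behind the
   adjustment, for 32-bit operands.  If UHIGH is the high word of the
   unsigned 64-bit product of A and B, the high word of the signed
   product differs from it by the two conditional terms below (the
   reverse direction adds them instead).  The example_signed_high
   name is invented.  */
#if 0
#include <stdint.h>

static int32_t
example_signed_high (int32_t a, int32_t b, uint32_t uhigh)
{
  uint32_t t = uhigh;
  t -= (a < 0 ? (uint32_t) b : 0);   /* (a >> 31) & b in the rtl above */
  t -= (b < 0 ? (uint32_t) a : 0);   /* (b >> 31) & a in the rtl above */
  return (int32_t) t;
}
#endif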
2835 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2836 in TARGET if that is convenient, and return where the result is. If the
2837 operation can not be performed, 0 is returned.
2839 MODE is the mode of operation and result.
2841 UNSIGNEDP nonzero means unsigned multiply.
2843 MAX_COST is the total allowed cost for the expanded RTL. */
2845 rtx
2846 expand_mult_highpart (enum machine_mode mode, rtx op0,
2847 unsigned HOST_WIDE_INT cnst1, rtx target,
2848 int unsignedp, int max_cost)
2850 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2851 optab mul_highpart_optab;
2852 optab moptab;
2853 rtx tem;
2854 int size = GET_MODE_BITSIZE (mode);
2855 rtx op1, wide_op1;
2857 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2858 if (size > HOST_BITS_PER_WIDE_INT)
2859 abort ();
2861 op1 = gen_int_mode (cnst1, mode);
2863 wide_op1
2864 = immed_double_const (cnst1,
2865 (unsignedp
2866 ? (HOST_WIDE_INT) 0
2867 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2868 wider_mode);
2870 /* expand_mult handles constant multiplication of word_mode
2871 or narrower. It does a poor job for large modes. */
2872 if (size < BITS_PER_WORD
2873 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2875 /* We have to do this, since expand_binop doesn't do conversion for
2876 multiply. Maybe change expand_binop to handle widening multiply? */
2877 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2879 /* We know that this can't have signed overflow, so pretend this is
2880 an unsigned multiply. */
2881 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2882 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2883 build_int_2 (size, 0), NULL_RTX, 1);
2884 return convert_modes (mode, wider_mode, tem, unsignedp);
2887 if (target == 0)
2888 target = gen_reg_rtx (mode);
2890 /* Firstly, try using a multiplication insn that only generates the needed
2891 high part of the product, in the signedness flavor given by UNSIGNEDP. */
2892 if (mul_highpart_cost[(int) mode] < max_cost)
2894 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2895 target = expand_binop (mode, mul_highpart_optab,
2896 op0, op1, target, unsignedp, OPTAB_DIRECT);
2897 if (target)
2898 return target;
2901 /* Secondly, same as above, but use the signedness flavor opposite to
2902 UNSIGNEDP, and then adjust the result after the multiplication. */
2903 if (size - 1 < BITS_PER_WORD
2904 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2905 < max_cost))
2907 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2908 target = expand_binop (mode, mul_highpart_optab,
2909 op0, op1, target, unsignedp, OPTAB_DIRECT);
2910 if (target)
2911 /* We used the wrong signedness. Adjust the result. */
2912 return expand_mult_highpart_adjust (mode, target, op0,
2913 op1, target, unsignedp);
2916 /* Try widening multiplication. */
2917 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2918 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2919 && mul_widen_cost[(int) wider_mode] < max_cost)
2921 op1 = force_reg (mode, op1);
2922 goto try;
2925 /* Try widening the mode and perform a non-widening multiplication. */
2926 moptab = smul_optab;
2927 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2928 && size - 1 < BITS_PER_WORD
2929 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2931 op1 = wide_op1;
2932 goto try;
2935 /* Try widening multiplication of opposite signedness, and adjust. */
2936 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2937 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2938 && size - 1 < BITS_PER_WORD
2939 && (mul_widen_cost[(int) wider_mode]
2940 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2942 rtx regop1 = force_reg (mode, op1);
2943 tem = expand_binop (wider_mode, moptab, op0, regop1,
2944 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2945 if (tem != 0)
2947 /* Extract the high half of the just generated product. */
2948 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2949 build_int_2 (size, 0), NULL_RTX, 1);
2950 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2951 /* We used the wrong signedness. Adjust the result. */
2952 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2953 target, unsignedp);
2957 return 0;
2959 try:
2960 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2961 tem = expand_binop (wider_mode, moptab, op0, op1,
2962 NULL_RTX, unsignedp, OPTAB_WIDEN);
2963 if (tem == 0)
2964 return 0;
2966 /* Extract the high half of the just generated product. */
2967 if (mode == word_mode)
2969 return gen_highpart (mode, tem);
2971 else
2973 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2974 build_int_2 (size, 0), NULL_RTX, 1);
2975 return convert_modes (mode, wider_mode, tem, unsignedp);
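/* Illustrative sketch, not part of expmed.c: the widening path in
   miniature -- multiply in a mode twice as wide, then take the high
   half by shifting SIZE bits.  The example_umulhi32 name is
   invented.  */
#if 0
#include <stdint.h>

static uint32_t
example_umulhi32 (uint32_t a, uint32_t b)
{
  return (uint32_t) (((uint64_t) a * b) >> 32);
}
#endif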
2979 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2980 if that is convenient, and returning where the result is.
2981 You may request either the quotient or the remainder as the result;
2982 specify REM_FLAG nonzero to get the remainder.
2984 CODE is the expression code for which kind of division this is;
2985 it controls how rounding is done. MODE is the machine mode to use.
2986 UNSIGNEDP nonzero means do unsigned division. */
2988 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2989 and then correct it by or'ing in missing high bits
2990 if result of ANDI is nonzero.
2991 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2992 This could optimize to a bfexts instruction.
2993 But C doesn't use these operations, so their optimizations are
2994 left for later. */
2995 /* ??? For modulo, we don't actually need the highpart of the first product,
2996 the low part will do nicely. And for small divisors, the second multiply
2997 can also be a low-part only multiply or even be completely left out.
2998 E.g. to calculate the remainder of a division by 3 with a 32 bit
2999 multiply, multiply with 0x55555556 and extract the upper two bits;
3000 the result is exact for inputs up to 0x1fffffff.
3001 The input range can be reduced by using cross-sum rules.
3002 For odd divisors >= 3, the following table gives right shift counts
3003 so that if a number is shifted by an integer multiple of the given
3004 amount, the remainder stays the same:
3005 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3006 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3007 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3008 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3009 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3011 Cross-sum rules for even numbers can be derived by leaving as many bits
3012 to the right alone as the divisor has zeros to the right.
3013 E.g. if x is an unsigned 32 bit number:
3014 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3015 */
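/* Illustrative sketch, not part of expmed.c: the division-by-3
   remainder trick from the comment above.  0x55555556 is about
   2^32 / 3, so after one 32-bit multiply the remainder sits in the
   top two bits while the quotient-dependent term 2*(x/3) stays below
   bit 30 for x <= 0x1fffffff.  The example_rem3 name is invented.  */
#if 0
#include <stdint.h>

static uint32_t
example_rem3 (uint32_t x)	/* == x % 3 for x <= 0x1fffffff */
{
  return (x * 0x55555556u) >> 30;
}
#endif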
3017 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
3019 rtx
3020 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3021 rtx op0, rtx op1, rtx target, int unsignedp)
3023 enum machine_mode compute_mode;
3024 rtx tquotient;
3025 rtx quotient = 0, remainder = 0;
3026 rtx last;
3027 int size;
3028 rtx insn, set;
3029 optab optab1, optab2;
3030 int op1_is_constant, op1_is_pow2 = 0;
3031 int max_cost, extra_cost;
3032 static HOST_WIDE_INT last_div_const = 0;
3033 static HOST_WIDE_INT ext_op1;
3035 op1_is_constant = GET_CODE (op1) == CONST_INT;
3036 if (op1_is_constant)
3038 ext_op1 = INTVAL (op1);
3039 if (unsignedp)
3040 ext_op1 &= GET_MODE_MASK (mode);
3041 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3042 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3046 /* This is the structure of expand_divmod:
3048 First comes code to fix up the operands so we can perform the operations
3049 correctly and efficiently.
3051 Second comes a switch statement with code specific for each rounding mode.
3052 For some special operands this code emits all RTL for the desired
3053 operation, for other cases, it generates only a quotient and stores it in
3054 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3055 to indicate that it has not done anything.
3057 Last comes code that finishes the operation. If QUOTIENT is set and
3058 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3059 QUOTIENT is not set, it is computed using trunc rounding.
3061 We try to generate special code for division and remainder when OP1 is a
3062 constant. If |OP1| = 2**n we can use shifts and some other fast
3063 operations. For other values of OP1, we compute a carefully selected
3064 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3065 by m.
3067 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3068 half of the product. Different strategies for generating the product are
3069 implemented in expand_mult_highpart.
3071 If what we actually want is the remainder, we generate that by another
3072 by-constant multiplication and a subtraction. */
3074 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3075 code below will malfunction if we are, so check here and handle
3076 the special case if so. */
3077 if (op1 == const1_rtx)
3078 return rem_flag ? const0_rtx : op0;
3080 /* When dividing by -1, we could get an overflow.
3081 negv_optab can handle overflows. */
3082 if (! unsignedp && op1 == constm1_rtx)
3084 if (rem_flag)
3085 return const0_rtx;
3086 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3087 ? negv_optab : neg_optab, op0, target, 0);
3090 if (target
3091 /* Don't use the function value register as a target
3092 since we have to read it as well as write it,
3093 and function-inlining gets confused by this. */
3094 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3095 /* Don't clobber an operand while doing a multi-step calculation. */
3096 || ((rem_flag || op1_is_constant)
3097 && (reg_mentioned_p (target, op0)
3098 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3099 || reg_mentioned_p (target, op1)
3100 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3101 target = 0;
3103 /* Get the mode in which to perform this computation. Normally it will
3104 be MODE, but sometimes we can't do the desired operation in MODE.
3105 If so, pick a wider mode in which we can do the operation. Convert
3106 to that mode at the start to avoid repeated conversions.
3108 First see what operations we need. These depend on the expression
3109 we are evaluating. (We assume that divxx3 insns exist under the
3110 same conditions that modxx3 insns and that these insns don't normally
3111 fail. If these assumptions are not correct, we may generate less
3112 efficient code in some cases.)
3114 Then see if we find a mode in which we can open-code that operation
3115 (either a division, modulus, or shift). Finally, check for the smallest
3116 mode for which we can do the operation with a library call. */
3118 /* We might want to refine this now that we have division-by-constant
3119 optimization. Since expand_mult_highpart tries so many variants, it is
3120 not straightforward to generalize this. Maybe we should make an array
3121 of possible modes in init_expmed? Save this for GCC 2.7. */
3123 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3124 ? (unsignedp ? lshr_optab : ashr_optab)
3125 : (unsignedp ? udiv_optab : sdiv_optab));
3126 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3127 ? optab1
3128 : (unsignedp ? udivmod_optab : sdivmod_optab));
3130 for (compute_mode = mode; compute_mode != VOIDmode;
3131 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3132 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3133 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3134 break;
3136 if (compute_mode == VOIDmode)
3137 for (compute_mode = mode; compute_mode != VOIDmode;
3138 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3139 if (optab1->handlers[(int) compute_mode].libfunc
3140 || optab2->handlers[(int) compute_mode].libfunc)
3141 break;
3143 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3144 in expand_binop. */
3145 if (compute_mode == VOIDmode)
3146 compute_mode = mode;
3148 if (target && GET_MODE (target) == compute_mode)
3149 tquotient = target;
3150 else
3151 tquotient = gen_reg_rtx (compute_mode);
3153 size = GET_MODE_BITSIZE (compute_mode);
3154 #if 0
3155 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3156 (mode), and thereby get better code when OP1 is a constant. Do that
3157 later. It will require going over all usages of SIZE below. */
3158 size = GET_MODE_BITSIZE (mode);
3159 #endif
3161 /* Only deduct something for a REM if the last divide done was
3162 for a different constant. Then set the constant of the last
3163 divide. */
3164 max_cost = div_cost[(int) compute_mode]
3165 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3166 && INTVAL (op1) == last_div_const)
3167 ? mul_cost[(int) compute_mode] + add_cost : 0);
3169 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3171 /* Now convert to the best mode to use. */
3172 if (compute_mode != mode)
3174 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3175 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3177 /* convert_modes may have placed op1 into a register, so we
3178 must recompute the following. */
3179 op1_is_constant = GET_CODE (op1) == CONST_INT;
3180 op1_is_pow2 = (op1_is_constant
3181 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3182 || (! unsignedp
3183 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3186 /* If one of the operands is a volatile MEM, copy it into a register. */
3188 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3189 op0 = force_reg (compute_mode, op0);
3190 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3191 op1 = force_reg (compute_mode, op1);
3193 /* If we need the remainder or if OP1 is constant, we need to
3194 put OP0 in a register in case it has any queued subexpressions. */
3195 if (rem_flag || op1_is_constant)
3196 op0 = force_reg (compute_mode, op0);
3198 last = get_last_insn ();
3200 /* Promote floor rounding to trunc rounding for unsigned operations. */
3201 if (unsignedp)
3203 if (code == FLOOR_DIV_EXPR)
3204 code = TRUNC_DIV_EXPR;
3205 if (code == FLOOR_MOD_EXPR)
3206 code = TRUNC_MOD_EXPR;
3207 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3208 code = TRUNC_DIV_EXPR;
3211 if (op1 != const0_rtx)
3212 switch (code)
3214 case TRUNC_MOD_EXPR:
3215 case TRUNC_DIV_EXPR:
3216 if (op1_is_constant)
3218 if (unsignedp)
3220 unsigned HOST_WIDE_INT mh, ml;
3221 int pre_shift, post_shift;
3222 int dummy;
3223 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3224 & GET_MODE_MASK (compute_mode));
3226 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3228 pre_shift = floor_log2 (d);
3229 if (rem_flag)
3231 remainder
3232 = expand_binop (compute_mode, and_optab, op0,
3233 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3234 remainder, 1,
3235 OPTAB_LIB_WIDEN);
3236 if (remainder)
3237 return gen_lowpart (mode, remainder);
3239 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3240 build_int_2 (pre_shift, 0),
3241 tquotient, 1);
3243 else if (size <= HOST_BITS_PER_WIDE_INT)
3245 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3247 /* Most significant bit of divisor is set; emit an scc
3248 insn. */
3249 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3250 compute_mode, 1, 1);
3251 if (quotient == 0)
3252 goto fail1;
3254 else
3256 /* Find a suitable multiplier and right shift count
3257 instead of multiplying with D. */
3259 mh = choose_multiplier (d, size, size,
3260 &ml, &post_shift, &dummy);
3262 /* If the suggested multiplier is more than SIZE bits,
3263 we can do better for even divisors, using an
3264 initial right shift. */
3265 if (mh != 0 && (d & 1) == 0)
3267 pre_shift = floor_log2 (d & -d);
3268 mh = choose_multiplier (d >> pre_shift, size,
3269 size - pre_shift,
3270 &ml, &post_shift, &dummy);
3271 if (mh)
3272 abort ();
3274 else
3275 pre_shift = 0;
3277 if (mh != 0)
3279 rtx t1, t2, t3, t4;
3281 if (post_shift - 1 >= BITS_PER_WORD)
3282 goto fail1;
3284 extra_cost = (shift_cost[post_shift - 1]
3285 + shift_cost[1] + 2 * add_cost);
3286 t1 = expand_mult_highpart (compute_mode, op0, ml,
3287 NULL_RTX, 1,
3288 max_cost - extra_cost);
3289 if (t1 == 0)
3290 goto fail1;
3291 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3292 op0, t1),
3293 NULL_RTX);
3294 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3295 build_int_2 (1, 0), NULL_RTX, 1);
3296 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3297 t1, t3),
3298 NULL_RTX);
3299 quotient
3300 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3301 build_int_2 (post_shift - 1, 0),
3302 tquotient, 1);
3304 else
3306 rtx t1, t2;
3308 if (pre_shift >= BITS_PER_WORD
3309 || post_shift >= BITS_PER_WORD)
3310 goto fail1;
3312 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3313 build_int_2 (pre_shift, 0),
3314 NULL_RTX, 1);
3315 extra_cost = (shift_cost[pre_shift]
3316 + shift_cost[post_shift]);
3317 t2 = expand_mult_highpart (compute_mode, t1, ml,
3318 NULL_RTX, 1,
3319 max_cost - extra_cost);
3320 if (t2 == 0)
3321 goto fail1;
3322 quotient
3323 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3324 build_int_2 (post_shift, 0),
3325 tquotient, 1);
3329 else /* Too wide mode to use tricky code */
3330 break;
3332 insn = get_last_insn ();
3333 if (insn != last
3334 && (set = single_set (insn)) != 0
3335 && SET_DEST (set) == quotient)
3336 set_unique_reg_note (insn,
3337 REG_EQUAL,
3338 gen_rtx_UDIV (compute_mode, op0, op1));
3340 else /* TRUNC_DIV, signed */
3342 unsigned HOST_WIDE_INT ml;
3343 int lgup, post_shift;
3344 HOST_WIDE_INT d = INTVAL (op1);
3345 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3347 /* n rem d = n rem -d */
3348 if (rem_flag && d < 0)
3350 d = abs_d;
3351 op1 = gen_int_mode (abs_d, compute_mode);
3354 if (d == 1)
3355 quotient = op0;
3356 else if (d == -1)
3357 quotient = expand_unop (compute_mode, neg_optab, op0,
3358 tquotient, 0);
3359 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3361 /* This case is not handled correctly below. */
3362 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3363 compute_mode, 1, 1);
3364 if (quotient == 0)
3365 goto fail1;
3367 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3368 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3369 /* ??? The cheap metric is computed only for
3370 word_mode. If this operation is wider, this may
3371 not be so. Assume true if the optab has an
3372 expander for this mode. */
3373 && (((rem_flag ? smod_optab : sdiv_optab)
3374 ->handlers[(int) compute_mode].insn_code
3375 != CODE_FOR_nothing)
3376 || (sdivmod_optab->handlers[(int) compute_mode]
3377 .insn_code != CODE_FOR_nothing)))
3379 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3381 lgup = floor_log2 (abs_d);
3382 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3384 rtx label = gen_label_rtx ();
3385 rtx t1;
3387 t1 = copy_to_mode_reg (compute_mode, op0);
3388 do_cmp_and_jump (t1, const0_rtx, GE,
3389 compute_mode, label);
3390 expand_inc (t1, gen_int_mode (abs_d - 1,
3391 compute_mode));
3392 emit_label (label);
3393 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3394 build_int_2 (lgup, 0),
3395 tquotient, 0);
3397 else
3399 rtx t1, t2, t3;
3400 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3401 build_int_2 (size - 1, 0),
3402 NULL_RTX, 0);
3403 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3404 build_int_2 (size - lgup, 0),
3405 NULL_RTX, 1);
3406 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3407 op0, t2),
3408 NULL_RTX);
3409 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3410 build_int_2 (lgup, 0),
3411 tquotient, 0);
3414 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3415 the quotient. */
3416 if (d < 0)
3418 insn = get_last_insn ();
3419 if (insn != last
3420 && (set = single_set (insn)) != 0
3421 && SET_DEST (set) == quotient
3422 && abs_d < ((unsigned HOST_WIDE_INT) 1
3423 << (HOST_BITS_PER_WIDE_INT - 1)))
3424 set_unique_reg_note (insn,
3425 REG_EQUAL,
3426 gen_rtx_DIV (compute_mode,
3427 op0,
3428 GEN_INT
3429 (trunc_int_for_mode
3430 (abs_d,
3431 compute_mode))));
3433 quotient = expand_unop (compute_mode, neg_optab,
3434 quotient, quotient, 0);
3437 else if (size <= HOST_BITS_PER_WIDE_INT)
3439 choose_multiplier (abs_d, size, size - 1,
3440 &ml, &post_shift, &lgup);
3441 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3443 rtx t1, t2, t3;
3445 if (post_shift >= BITS_PER_WORD
3446 || size - 1 >= BITS_PER_WORD)
3447 goto fail1;
3449 extra_cost = (shift_cost[post_shift]
3450 + shift_cost[size - 1] + add_cost);
3451 t1 = expand_mult_highpart (compute_mode, op0, ml,
3452 NULL_RTX, 0,
3453 max_cost - extra_cost);
3454 if (t1 == 0)
3455 goto fail1;
3456 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3457 build_int_2 (post_shift, 0), NULL_RTX, 0);
3458 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3459 build_int_2 (size - 1, 0), NULL_RTX, 0);
3460 if (d < 0)
3461 quotient
3462 = force_operand (gen_rtx_MINUS (compute_mode,
3463 t3, t2),
3464 tquotient);
3465 else
3466 quotient
3467 = force_operand (gen_rtx_MINUS (compute_mode,
3468 t2, t3),
3469 tquotient);
3471 else
3473 rtx t1, t2, t3, t4;
3475 if (post_shift >= BITS_PER_WORD
3476 || size - 1 >= BITS_PER_WORD)
3477 goto fail1;
3479 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3480 extra_cost = (shift_cost[post_shift]
3481 + shift_cost[size - 1] + 2 * add_cost);
3482 t1 = expand_mult_highpart (compute_mode, op0, ml,
3483 NULL_RTX, 0,
3484 max_cost - extra_cost);
3485 if (t1 == 0)
3486 goto fail1;
3487 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3488 t1, op0),
3489 NULL_RTX);
3490 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3491 build_int_2 (post_shift, 0),
3492 NULL_RTX, 0);
3493 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3494 build_int_2 (size - 1, 0),
3495 NULL_RTX, 0);
3496 if (d < 0)
3497 quotient
3498 = force_operand (gen_rtx_MINUS (compute_mode,
3499 t4, t3),
3500 tquotient);
3501 else
3502 quotient
3503 = force_operand (gen_rtx_MINUS (compute_mode,
3504 t3, t4),
3505 tquotient);
3508 else /* Too wide mode to use tricky code */
3509 break;
3511 insn = get_last_insn ();
3512 if (insn != last
3513 && (set = single_set (insn)) != 0
3514 && SET_DEST (set) == quotient)
3515 set_unique_reg_note (insn,
3516 REG_EQUAL,
3517 gen_rtx_DIV (compute_mode, op0, op1));
3519 break;
3521 fail1:
3522 delete_insns_since (last);
3523 break;
3525 case FLOOR_DIV_EXPR:
3526 case FLOOR_MOD_EXPR:
3527 /* We will come here only for signed operations. */
3528 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3530 unsigned HOST_WIDE_INT mh, ml;
3531 int pre_shift, lgup, post_shift;
3532 HOST_WIDE_INT d = INTVAL (op1);
3534 if (d > 0)
3536 /* We could just as easily deal with negative constants here,
3537 but it does not seem worth the trouble for GCC 2.6. */
3538 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3540 pre_shift = floor_log2 (d);
3541 if (rem_flag)
3543 remainder = expand_binop (compute_mode, and_optab, op0,
3544 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3545 remainder, 0, OPTAB_LIB_WIDEN);
3546 if (remainder)
3547 return gen_lowpart (mode, remainder);
3549 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3550 build_int_2 (pre_shift, 0),
3551 tquotient, 0);
3553 else
3555 rtx t1, t2, t3, t4;
3557 mh = choose_multiplier (d, size, size - 1,
3558 &ml, &post_shift, &lgup);
3559 if (mh)
3560 abort ();
3562 if (post_shift < BITS_PER_WORD
3563 && size - 1 < BITS_PER_WORD)
3565 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3566 build_int_2 (size - 1, 0),
3567 NULL_RTX, 0);
3568 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3569 NULL_RTX, 0, OPTAB_WIDEN);
3570 extra_cost = (shift_cost[post_shift]
3571 + shift_cost[size - 1] + 2 * add_cost);
3572 t3 = expand_mult_highpart (compute_mode, t2, ml,
3573 NULL_RTX, 1,
3574 max_cost - extra_cost);
3575 if (t3 != 0)
3577 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3578 build_int_2 (post_shift, 0),
3579 NULL_RTX, 1);
3580 quotient = expand_binop (compute_mode, xor_optab,
3581 t4, t1, tquotient, 0,
3582 OPTAB_WIDEN);
3587 else
3589 rtx nsign, t1, t2, t3, t4;
3590 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3591 op0, constm1_rtx), NULL_RTX);
3592 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3593 0, OPTAB_WIDEN);
3594 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3595 build_int_2 (size - 1, 0), NULL_RTX, 0);
3596 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3597 NULL_RTX);
3598 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3599 NULL_RTX, 0);
3600 if (t4)
3602 rtx t5;
3603 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3604 NULL_RTX, 0);
3605 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3606 t4, t5),
3607 tquotient);
3612 if (quotient != 0)
3613 break;
3614 delete_insns_since (last);
3616 /* Try using an instruction that produces both the quotient and
3617 remainder, using truncation. We can easily compensate the quotient
3618 or remainder to get floor rounding, once we have the remainder.
3619 Notice that we also compute the final remainder value here,
3620 and return the result right away. */
3621 if (target == 0 || GET_MODE (target) != compute_mode)
3622 target = gen_reg_rtx (compute_mode);
3624 if (rem_flag)
3626 remainder
3627 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3628 quotient = gen_reg_rtx (compute_mode);
3630 else
3632 quotient
3633 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3634 remainder = gen_reg_rtx (compute_mode);
3637 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3638 quotient, remainder, 0))
3640 /* This could be computed with a branch-less sequence.
3641 Save that for later. */
3642 rtx tem;
3643 rtx label = gen_label_rtx ();
3644 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3645 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3646 NULL_RTX, 0, OPTAB_WIDEN);
3647 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3648 expand_dec (quotient, const1_rtx);
3649 expand_inc (remainder, op1);
3650 emit_label (label);
3651 return gen_lowpart (mode, rem_flag ? remainder : quotient);
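#if 0
/* The compensation above in miniature, assuming C's truncating division
   (which is what sdivmod produces): floor rounding differs from
   truncation only when the remainder is nonzero and the operands have
   opposite signs.  */
static void
floor_divmod_sketch (long a, long b, long *q, long *r)
{
  *q = a / b;
  *r = a % b;
  if (*r != 0 && (a ^ b) < 0)	/* opposite signs */
    {
      *q -= 1;
      *r += b;
    }
}
#endif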
3654 /* No luck with division elimination or divmod. Have to do it
3655 by conditionally adjusting op0 *and* the result. */
3657 rtx label1, label2, label3, label4, label5;
3658 rtx adjusted_op0;
3659 rtx tem;
3661 quotient = gen_reg_rtx (compute_mode);
3662 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3663 label1 = gen_label_rtx ();
3664 label2 = gen_label_rtx ();
3665 label3 = gen_label_rtx ();
3666 label4 = gen_label_rtx ();
3667 label5 = gen_label_rtx ();
3668 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3669 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3670 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3671 quotient, 0, OPTAB_LIB_WIDEN);
3672 if (tem != quotient)
3673 emit_move_insn (quotient, tem);
3674 emit_jump_insn (gen_jump (label5));
3675 emit_barrier ();
3676 emit_label (label1);
3677 expand_inc (adjusted_op0, const1_rtx);
3678 emit_jump_insn (gen_jump (label4));
3679 emit_barrier ();
3680 emit_label (label2);
3681 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3682 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3683 quotient, 0, OPTAB_LIB_WIDEN);
3684 if (tem != quotient)
3685 emit_move_insn (quotient, tem);
3686 emit_jump_insn (gen_jump (label5));
3687 emit_barrier ();
3688 emit_label (label3);
3689 expand_dec (adjusted_op0, const1_rtx);
3690 emit_label (label4);
3691 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3692 quotient, 0, OPTAB_LIB_WIDEN);
3693 if (tem != quotient)
3694 emit_move_insn (quotient, tem);
3695 expand_dec (quotient, const1_rtx);
3696 emit_label (label5);
3698 break;
3700 case CEIL_DIV_EXPR:
3701 case CEIL_MOD_EXPR:
3702 if (unsignedp)
3704 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3706 rtx t1, t2, t3;
3707 unsigned HOST_WIDE_INT d = INTVAL (op1);
3708 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3709 build_int_2 (floor_log2 (d), 0),
3710 tquotient, 1);
3711 t2 = expand_binop (compute_mode, and_optab, op0,
3712 GEN_INT (d - 1),
3713 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3714 t3 = gen_reg_rtx (compute_mode);
3715 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3716 compute_mode, 1, 1);
3717 if (t3 == 0)
3719 rtx lab;
3720 lab = gen_label_rtx ();
3721 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3722 expand_inc (t1, const1_rtx);
3723 emit_label (lab);
3724 quotient = t1;
3726 else
3727 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3728 t1, t3),
3729 tquotient);
3730 break;
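#if 0
/* The power-of-two path above in miniature: for unsigned x and d = 2^k,
   the ceiling quotient is the logical shift, plus one whenever any of
   the k low bits survive the mask.  */
static unsigned int
ceil_udiv_pow2_sketch (unsigned int x, int k)
{
  unsigned int q = x >> k;				/* t1 */
  unsigned int low = x & (((unsigned int) 1 << k) - 1);	/* t2 */
  return q + (low != 0);		/* t3 via store-flag, else a jump */
}
#endif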
3733 /* Try using an instruction that produces both the quotient and
3734 remainder, using truncation. We can easily compensate the
3735 quotient or remainder to get ceiling rounding, once we have the
3736 remainder. Notice that we also compute the final remainder
3737 value here, and return the result right away. */
3738 if (target == 0 || GET_MODE (target) != compute_mode)
3739 target = gen_reg_rtx (compute_mode);
3741 if (rem_flag)
3743 remainder = (GET_CODE (target) == REG
3744 ? target : gen_reg_rtx (compute_mode));
3745 quotient = gen_reg_rtx (compute_mode);
3747 else
3749 quotient = (GET_CODE (target) == REG
3750 ? target : gen_reg_rtx (compute_mode));
3751 remainder = gen_reg_rtx (compute_mode);
3754 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3755 remainder, 1))
3757 /* This could be computed with a branch-less sequence.
3758 Save that for later. */
3759 rtx label = gen_label_rtx ();
3760 do_cmp_and_jump (remainder, const0_rtx, EQ,
3761 compute_mode, label);
3762 expand_inc (quotient, const1_rtx);
3763 expand_dec (remainder, op1);
3764 emit_label (label);
3765 return gen_lowpart (mode, rem_flag ? remainder : quotient);
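#if 0
/* The unsigned compensation above in miniature: with a truncating
   divmod in hand, ceiling rounding just bumps the quotient when the
   remainder is nonzero, and the ceiling-mode remainder is r - b.  */
static void
ceil_udivmod_sketch (unsigned long a, unsigned long b,
		     unsigned long *q, unsigned long *r)
{
  *q = a / b;
  *r = a % b;
  if (*r != 0)
    {
      *q += 1;
      *r -= b;
    }
}
#endif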
3768 /* No luck with division elimination or divmod. Have to do it
3769 by conditionally adjusting op0 *and* the result. */
3771 rtx label1, label2;
3772 rtx adjusted_op0, tem;
3774 quotient = gen_reg_rtx (compute_mode);
3775 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3776 label1 = gen_label_rtx ();
3777 label2 = gen_label_rtx ();
3778 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3779 compute_mode, label1);
3780 emit_move_insn (quotient, const0_rtx);
3781 emit_jump_insn (gen_jump (label2));
3782 emit_barrier ();
3783 emit_label (label1);
3784 expand_dec (adjusted_op0, const1_rtx);
3785 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3786 quotient, 1, OPTAB_LIB_WIDEN);
3787 if (tem != quotient)
3788 emit_move_insn (quotient, tem);
3789 expand_inc (quotient, const1_rtx);
3790 emit_label (label2);
3793 else /* signed */
3795 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3796 && INTVAL (op1) >= 0)
3798 /* This is extremely similar to the code for the unsigned case
3799 above. For 2.7 we should merge these variants, but for
3800 2.6.1 I don't want to touch the code for unsigned since that
3801 gets used in C. The signed case will only be used by other
3802 languages (Ada). */
3804 rtx t1, t2, t3;
3805 unsigned HOST_WIDE_INT d = INTVAL (op1);
3806 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3807 build_int_2 (floor_log2 (d), 0),
3808 tquotient, 0);
3809 t2 = expand_binop (compute_mode, and_optab, op0,
3810 GEN_INT (d - 1),
3811 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3812 t3 = gen_reg_rtx (compute_mode);
3813 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3814 compute_mode, 1, 1);
3815 if (t3 == 0)
3817 rtx lab;
3818 lab = gen_label_rtx ();
3819 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3820 expand_inc (t1, const1_rtx);
3821 emit_label (lab);
3822 quotient = t1;
3824 else
3825 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3826 t1, t3),
3827 tquotient);
3828 break;
3831 /* Try using an instruction that produces both the quotient and
3832 remainder, using truncation. We can easily compensate the
3833 quotient or remainder to get ceiling rounding, once we have the
3834 remainder. Notice that we also compute the final remainder
3835 value here, and return the result right away. */
3836 if (target == 0 || GET_MODE (target) != compute_mode)
3837 target = gen_reg_rtx (compute_mode);
3838 if (rem_flag)
3840 remainder = (GET_CODE (target) == REG
3841 ? target : gen_reg_rtx (compute_mode));
3842 quotient = gen_reg_rtx (compute_mode);
3844 else
3846 quotient = (GET_CODE (target) == REG
3847 ? target : gen_reg_rtx (compute_mode));
3848 remainder = gen_reg_rtx (compute_mode);
3851 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3852 remainder, 0))
3854 /* This could be computed with a branch-less sequence.
3855 Save that for later. */
3856 rtx tem;
3857 rtx label = gen_label_rtx ();
3858 do_cmp_and_jump (remainder, const0_rtx, EQ,
3859 compute_mode, label);
3860 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3861 NULL_RTX, 0, OPTAB_WIDEN);
3862 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3863 expand_inc (quotient, const1_rtx);
3864 expand_dec (remainder, op1);
3865 emit_label (label);
3866 return gen_lowpart (mode, rem_flag ? remainder : quotient);
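#if 0
/* The signed compensation above in miniature: ceiling rounding differs
   from truncation only when the remainder is nonzero and the operands
   have the same sign, which is what the XOR-and-branch tests.  */
static void
ceil_sdivmod_sketch (long a, long b, long *q, long *r)
{
  *q = a / b;
  *r = a % b;
  if (*r != 0 && (a ^ b) >= 0)	/* same sign: exact quotient positive */
    {
      *q += 1;
      *r -= b;
    }
}
#endif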
3869 /* No luck with division elimination or divmod. Have to do it
3870 by conditionally adjusting op0 *and* the result. */
3872 rtx label1, label2, label3, label4, label5;
3873 rtx adjusted_op0;
3874 rtx tem;
3876 quotient = gen_reg_rtx (compute_mode);
3877 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3878 label1 = gen_label_rtx ();
3879 label2 = gen_label_rtx ();
3880 label3 = gen_label_rtx ();
3881 label4 = gen_label_rtx ();
3882 label5 = gen_label_rtx ();
3883 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3884 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3885 compute_mode, label1);
3886 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3887 quotient, 0, OPTAB_LIB_WIDEN);
3888 if (tem != quotient)
3889 emit_move_insn (quotient, tem);
3890 emit_jump_insn (gen_jump (label5));
3891 emit_barrier ();
3892 emit_label (label1);
3893 expand_dec (adjusted_op0, const1_rtx);
3894 emit_jump_insn (gen_jump (label4));
3895 emit_barrier ();
3896 emit_label (label2);
3897 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3898 compute_mode, label3);
3899 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3900 quotient, 0, OPTAB_LIB_WIDEN);
3901 if (tem != quotient)
3902 emit_move_insn (quotient, tem);
3903 emit_jump_insn (gen_jump (label5));
3904 emit_barrier ();
3905 emit_label (label3);
3906 expand_inc (adjusted_op0, const1_rtx);
3907 emit_label (label4);
3908 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3909 quotient, 0, OPTAB_LIB_WIDEN);
3910 if (tem != quotient)
3911 emit_move_insn (quotient, tem);
3912 expand_inc (quotient, const1_rtx);
3913 emit_label (label5);
3916 break;
3918 case EXACT_DIV_EXPR:
3919 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3921 HOST_WIDE_INT d = INTVAL (op1);
3922 unsigned HOST_WIDE_INT ml;
3923 int pre_shift;
3924 rtx t1;
3926 pre_shift = floor_log2 (d & -d);
3927 ml = invert_mod2n (d >> pre_shift, size);
3928 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3929 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3930 quotient = expand_mult (compute_mode, t1,
3931 gen_int_mode (ml, compute_mode),
3932 NULL_RTX, 1);
3934 insn = get_last_insn ();
3935 set_unique_reg_note (insn,
3936 REG_EQUAL,
3937 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3938 compute_mode,
3939 op0, op1));
3941 break;
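#if 0
/* The EXACT_DIV_EXPR trick above in miniature, assuming 32-bit
   arithmetic: an odd d has a multiplicative inverse mod 2^32 (this is
   what invert_mod2n computes), so dividing a known multiple of d is a
   single multiply; an even d first sheds its trailing zero bits via
   pre_shift.  For d = 3 the inverse is 0xAAAAAAAB, since
   3 * 0xAAAAAAAB == 0x200000001 == 1 (mod 2^32).  */
static unsigned int
exact_div_by_3_sketch (unsigned int x)	/* x must be a multiple of 3 */
{
  return x * 0xAAAAAAABu;
}
#endif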
3943 case ROUND_DIV_EXPR:
3944 case ROUND_MOD_EXPR:
3945 if (unsignedp)
3947 rtx tem;
3948 rtx label;
3949 label = gen_label_rtx ();
3950 quotient = gen_reg_rtx (compute_mode);
3951 remainder = gen_reg_rtx (compute_mode);
3952 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3954 rtx tem;
3955 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3956 quotient, 1, OPTAB_LIB_WIDEN);
3957 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3958 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3959 remainder, 1, OPTAB_LIB_WIDEN);
3961 tem = plus_constant (op1, -1);
3962 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3963 build_int_2 (1, 0), NULL_RTX, 1);
3964 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3965 expand_inc (quotient, const1_rtx);
3966 expand_dec (remainder, op1);
3967 emit_label (label);
3969 else
3971 rtx abs_rem, abs_op1, tem, mask;
3972 rtx label;
3973 label = gen_label_rtx ();
3974 quotient = gen_reg_rtx (compute_mode);
3975 remainder = gen_reg_rtx (compute_mode);
3976 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3978 rtx tem;
3979 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3980 quotient, 0, OPTAB_LIB_WIDEN);
3981 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3982 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3983 remainder, 0, OPTAB_LIB_WIDEN);
3985 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3986 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3987 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3988 build_int_2 (1, 0), NULL_RTX, 1);
3989 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3990 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3991 NULL_RTX, 0, OPTAB_WIDEN);
3992 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3993 build_int_2 (size - 1, 0), NULL_RTX, 0);
3994 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3995 NULL_RTX, 0, OPTAB_WIDEN);
3996 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3997 NULL_RTX, 0, OPTAB_WIDEN);
3998 expand_inc (quotient, tem);
3999 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4000 NULL_RTX, 0, OPTAB_WIDEN);
4001 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4002 NULL_RTX, 0, OPTAB_WIDEN);
4003 expand_dec (remainder, tem);
4004 emit_label (label);
4006 return gen_lowpart (mode, rem_flag ? remainder : quotient);
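#if 0
/* The signed round-to-nearest compensation above in miniature: after a
   truncating divmod, bump the quotient away from zero when twice the
   remainder's magnitude reaches the divisor's, so halves round away
   from zero just as in the branchy sequence.  */
static void
round_sdivmod_sketch (long a, long b, long *q, long *r)
{
  long ar, ab, adj;
  *q = a / b;
  *r = a % b;
  ar = *r < 0 ? -*r : *r;
  ab = b < 0 ? -b : b;
  if (2 * ar >= ab)
    {
      adj = (a ^ b) < 0 ? -1 : 1;
      *q += adj;
      *r -= adj * b;
    }
}
#endif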
4008 default:
4009 abort ();
4012 if (quotient == 0)
4014 if (target && GET_MODE (target) != compute_mode)
4015 target = 0;
4017 if (rem_flag)
4019 /* Try to produce the remainder without producing the quotient.
4020 If we seem to have a divmod pattern that does not require widening,
4021 don't try widening here. We should really have a WIDEN argument
4022 to expand_twoval_binop, since what we'd really like to do here is
4023 1) try a mod insn in compute_mode
4024 2) try a divmod insn in compute_mode
4025 3) try a div insn in compute_mode and multiply-subtract to get
4026 remainder
4027 4) try the same things with widening allowed. */
4028 remainder
4029 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4030 op0, op1, target,
4031 unsignedp,
4032 ((optab2->handlers[(int) compute_mode].insn_code
4033 != CODE_FOR_nothing)
4034 ? OPTAB_DIRECT : OPTAB_WIDEN));
4035 if (remainder == 0)
4037 /* No luck there. Can we do remainder and divide at once
4038 without a library call? */
4039 remainder = gen_reg_rtx (compute_mode);
4040 if (! expand_twoval_binop ((unsignedp
4041 ? udivmod_optab
4042 : sdivmod_optab),
4043 op0, op1,
4044 NULL_RTX, remainder, unsignedp))
4045 remainder = 0;
4048 if (remainder)
4049 return gen_lowpart (mode, remainder);
4052 /* Produce the quotient. Try a quotient insn, but not a library call.
4053 If we have a divmod in this mode, use it in preference to widening
4054 the div (for this test we assume it will not fail). Note that optab2
4055 is set to the one of the two optabs that the call below will use. */
4056 quotient
4057 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4058 op0, op1, rem_flag ? NULL_RTX : target,
4059 unsignedp,
4060 ((optab2->handlers[(int) compute_mode].insn_code
4061 != CODE_FOR_nothing)
4062 ? OPTAB_DIRECT : OPTAB_WIDEN));
4064 if (quotient == 0)
4066 /* No luck there. Try a quotient-and-remainder insn,
4067 keeping the quotient alone. */
4068 quotient = gen_reg_rtx (compute_mode);
4069 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4070 op0, op1,
4071 quotient, NULL_RTX, unsignedp))
4073 quotient = 0;
4074 if (! rem_flag)
4075 /* Still no luck. If we are not computing the remainder,
4076 use a library call for the quotient. */
4077 quotient = sign_expand_binop (compute_mode,
4078 udiv_optab, sdiv_optab,
4079 op0, op1, target,
4080 unsignedp, OPTAB_LIB_WIDEN);
4085 if (rem_flag)
4087 if (target && GET_MODE (target) != compute_mode)
4088 target = 0;
4090 if (quotient == 0)
4091 /* No divide instruction either. Use library for remainder. */
4092 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4093 op0, op1, target,
4094 unsignedp, OPTAB_LIB_WIDEN);
4095 else
4097 /* We divided. Now finish doing X - Y * (X / Y). */
4098 remainder = expand_mult (compute_mode, quotient, op1,
4099 NULL_RTX, unsignedp);
4100 remainder = expand_binop (compute_mode, sub_optab, op0,
4101 remainder, target, unsignedp,
4102 OPTAB_LIB_WIDEN);
4106 return gen_lowpart (mode, rem_flag ? remainder : quotient);
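#if 0
/* The final fallback above in miniature: once any quotient is in hand,
   the remainder follows from r = x - (x / y) * y, which holds for
   truncating division whether or not the operands are signed.  */
static long
remainder_from_quotient_sketch (long x, long y, long q)
{
  return x - q * y;
}
#endif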
4109 /* Return a tree node with data type TYPE, describing the value of X.
4110 Usually this is an RTL_EXPR, if there is no obvious better choice.
4111 X may be an expression; however, we only support those expressions
4112 generated by loop.c. */
4114 tree
4115 make_tree (tree type, rtx x)
4117 tree t;
4119 switch (GET_CODE (x))
4121 case CONST_INT:
4122 t = build_int_2 (INTVAL (x),
4123 (TREE_UNSIGNED (type)
4124 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4125 || INTVAL (x) >= 0 ? 0 : -1);
4126 TREE_TYPE (t) = type;
4127 return t;
4129 case CONST_DOUBLE:
4130 if (GET_MODE (x) == VOIDmode)
4132 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4133 TREE_TYPE (t) = type;
4135 else
4137 REAL_VALUE_TYPE d;
4139 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4140 t = build_real (type, d);
4143 return t;
4145 case CONST_VECTOR:
4147 int i, units;
4148 rtx elt;
4149 tree t = NULL_TREE;
4151 units = CONST_VECTOR_NUNITS (x);
4153 /* Build a tree with vector elements. */
4154 for (i = units - 1; i >= 0; --i)
4156 elt = CONST_VECTOR_ELT (x, i);
4157 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4160 return build_vector (type, t);
4163 case PLUS:
4164 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4165 make_tree (type, XEXP (x, 1))));
4167 case MINUS:
4168 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4169 make_tree (type, XEXP (x, 1))));
4171 case NEG:
4172 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4174 case MULT:
4175 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4176 make_tree (type, XEXP (x, 1))));
4178 case ASHIFT:
4179 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4180 make_tree (type, XEXP (x, 1))));
4182 case LSHIFTRT:
4183 t = (*lang_hooks.types.unsigned_type) (type);
4184 return fold (convert (type,
4185 build (RSHIFT_EXPR, t,
4186 make_tree (t, XEXP (x, 0)),
4187 make_tree (type, XEXP (x, 1)))));
4189 case ASHIFTRT:
4190 t = (*lang_hooks.types.signed_type) (type);
4191 return fold (convert (type,
4192 build (RSHIFT_EXPR, t,
4193 make_tree (t, XEXP (x, 0)),
4194 make_tree (type, XEXP (x, 1)))));
4196 case DIV:
4197 if (TREE_CODE (type) != REAL_TYPE)
4198 t = (*lang_hooks.types.signed_type) (type);
4199 else
4200 t = type;
4202 return fold (convert (type,
4203 build (TRUNC_DIV_EXPR, t,
4204 make_tree (t, XEXP (x, 0)),
4205 make_tree (t, XEXP (x, 1)))));
4206 case UDIV:
4207 t = (*lang_hooks.types.unsigned_type) (type);
4208 return fold (convert (type,
4209 build (TRUNC_DIV_EXPR, t,
4210 make_tree (t, XEXP (x, 0)),
4211 make_tree (t, XEXP (x, 1)))));
4213 case SIGN_EXTEND:
4214 case ZERO_EXTEND:
4215 t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
4216 GET_CODE (x) == ZERO_EXTEND);
4217 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4219 default:
4220 t = make_node (RTL_EXPR);
4221 TREE_TYPE (t) = type;
4223 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4224 ptr_mode. So convert. */
4225 if (POINTER_TYPE_P (type))
4226 x = convert_memory_address (TYPE_MODE (type), x);
4228 RTL_EXPR_RTL (t) = x;
4229 /* There are no insns to be output
4230 when this rtl_expr is used. */
4231 RTL_EXPR_SEQUENCE (t) = 0;
4232 return t;
4236 /* Check whether the multiplication X * MULT + ADD overflows.
4237 X, MULT and ADD must be CONST_*.
4238 MODE is the machine mode for the computation.
4239 X and MULT must have mode MODE. ADD may have a different mode,
4240 or VOIDmode, which is treated as MODE.
4241 UNSIGNEDP is nonzero to do unsigned multiplication. */
4243 bool
4244 const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp)
4246 tree type, mult_type, add_type, result;
4248 type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4250 /* In order to get a proper overflow indication from an unsigned
4251 type, we have to pretend that it's a sizetype. */
4252 mult_type = type;
4253 if (unsignedp)
4255 mult_type = copy_node (type);
4256 TYPE_IS_SIZETYPE (mult_type) = 1;
4259 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4260 : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4262 result = fold (build (PLUS_EXPR, mult_type,
4263 fold (build (MULT_EXPR, mult_type,
4264 make_tree (mult_type, x),
4265 make_tree (mult_type, mult))),
4266 make_tree (add_type, add)));
4268 return TREE_CONSTANT_OVERFLOW (result);
4271 /* Return an rtx representing the value of X * MULT + ADD.
4272 TARGET is a suggestion for where to store the result (an rtx).
4273 MODE is the machine mode for the computation.
4274 X and MULT must have mode MODE. ADD may have a different mode,
4275 or VOIDmode, which is treated as MODE.
4276 UNSIGNEDP is nonzero to do unsigned multiplication.
4277 This may emit insns. */
4279 rtx
4280 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4281 int unsignedp)
4283 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4284 tree add_type = (GET_MODE (add) == VOIDmode
4285 ? type : (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4286 unsignedp));
4287 tree result = fold (build (PLUS_EXPR, type,
4288 fold (build (MULT_EXPR, type,
4289 make_tree (type, x),
4290 make_tree (type, mult))),
4291 make_tree (add_type, add)));
4293 return expand_expr (result, target, VOIDmode, 0);
4296 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4297 and returning TARGET.
4299 If TARGET is 0, a pseudo-register or constant is returned. */
4301 rtx
4302 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4304 rtx tem = 0;
4306 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4307 tem = simplify_binary_operation (AND, mode, op0, op1);
4308 if (tem == 0)
4309 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4311 if (target == 0)
4312 target = tem;
4313 else if (tem != target)
4314 emit_move_insn (target, tem);
4315 return target;
4318 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4319 and storing in TARGET. Normally return TARGET.
4320 Return 0 if that cannot be done.
4322 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4323 it is VOIDmode, they cannot both be CONST_INT.
4325 UNSIGNEDP is for the case where we have to widen the operands
4326 to perform the operation. It says to use zero-extension.
4328 NORMALIZEP is 1 if we should convert the result to be either zero
4329 or one. NORMALIZEP is -1 if we should convert the result to be
4330 either zero or -1. If NORMALIZEP is zero, the result will be left
4331 "raw" out of the scc insn. */
4333 rtx
4334 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
4335 enum machine_mode mode, int unsignedp, int normalizep)
4337 rtx subtarget;
4338 enum insn_code icode;
4339 enum machine_mode compare_mode;
4340 enum machine_mode target_mode = GET_MODE (target);
4341 rtx tem;
4342 rtx last = get_last_insn ();
4343 rtx pattern, comparison;
4345 /* ??? Ok to do this and then fail? */
4346 op0 = protect_from_queue (op0, 0);
4347 op1 = protect_from_queue (op1, 0);
4349 if (unsignedp)
4350 code = unsigned_condition (code);
4352 /* If one operand is constant, make it the second one. Only do this
4353 if the other operand is not constant as well. */
4355 if (swap_commutative_operands_p (op0, op1))
4357 tem = op0;
4358 op0 = op1;
4359 op1 = tem;
4360 code = swap_condition (code);
4363 if (mode == VOIDmode)
4364 mode = GET_MODE (op0);
4366 /* For some comparisons with 1 and -1, we can convert this to
4367 comparisons with zero. This will often produce more opportunities for
4368 store-flag insns. */
4370 switch (code)
4372 case LT:
4373 if (op1 == const1_rtx)
4374 op1 = const0_rtx, code = LE;
4375 break;
4376 case LE:
4377 if (op1 == constm1_rtx)
4378 op1 = const0_rtx, code = LT;
4379 break;
4380 case GE:
4381 if (op1 == const1_rtx)
4382 op1 = const0_rtx, code = GT;
4383 break;
4384 case GT:
4385 if (op1 == constm1_rtx)
4386 op1 = const0_rtx, code = GE;
4387 break;
4388 case GEU:
4389 if (op1 == const1_rtx)
4390 op1 = const0_rtx, code = NE;
4391 break;
4392 case LTU:
4393 if (op1 == const1_rtx)
4394 op1 = const0_rtx, code = EQ;
4395 break;
4396 default:
4397 break;
4400 /* If we are comparing a double-word integer with zero, we can convert
4401 the comparison into one involving a single word. */
4402 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4403 && GET_MODE_CLASS (mode) == MODE_INT
4404 && op1 == const0_rtx
4405 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4407 if (code == EQ || code == NE)
4409 rtx op00, op01, op0both;
4411 /* Do a logical OR of the two words and compare the result. */
4412 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4413 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4414 op0both = expand_binop (word_mode, ior_optab, op00, op01,
4415 NULL_RTX, unsignedp, OPTAB_DIRECT);
4416 if (op0both != 0)
4417 return emit_store_flag (target, code, op0both, op1, word_mode,
4418 unsignedp, normalizep);
4420 else if (code == LT || code == GE)
4422 rtx op0h;
4424 /* If testing the sign bit, can just test on high word. */
4425 op0h = simplify_gen_subreg (word_mode, op0, mode,
4426 subreg_highpart_offset (word_mode, mode));
4427 return emit_store_flag (target, code, op0h, op1, word_mode,
4428 unsignedp, normalizep);
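#if 0
/* The double-word reductions above in miniature, for a 64-bit value
   split into 32-bit words HI and LO: equality against zero needs only
   the OR of the words, and the sign test needs only the high word.  */
static int
dword_eq0_sketch (unsigned int hi, unsigned int lo)
{
  return (hi | lo) == 0;
}

static int
dword_lt0_sketch (int hi)
{
  return hi < 0;	/* LO cannot affect the sign */
}
#endif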
4432 /* From now on, we won't change CODE, so set ICODE now. */
4433 icode = setcc_gen_code[(int) code];
4435 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4436 complement of A (for GE) and shifting the sign bit to the low bit. */
4437 if (op1 == const0_rtx && (code == LT || code == GE)
4438 && GET_MODE_CLASS (mode) == MODE_INT
4439 && (normalizep || STORE_FLAG_VALUE == 1
4440 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4441 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4442 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4444 subtarget = target;
4446 /* If the result is to be wider than OP0, it is best to convert it
4447 first. If it is to be narrower, it is *incorrect* to convert it
4448 first. */
4449 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4451 op0 = protect_from_queue (op0, 0);
4452 op0 = convert_modes (target_mode, mode, op0, 0);
4453 mode = target_mode;
4456 if (target_mode != mode)
4457 subtarget = 0;
4459 if (code == GE)
4460 op0 = expand_unop (mode, one_cmpl_optab, op0,
4461 ((STORE_FLAG_VALUE == 1 || normalizep)
4462 ? 0 : subtarget), 0);
4464 if (STORE_FLAG_VALUE == 1 || normalizep)
4465 /* If we are supposed to produce a 0/1 value, we want to do
4466 a logical shift from the sign bit to the low-order bit; for
4467 a -1/0 value, we do an arithmetic shift. */
4468 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4469 size_int (GET_MODE_BITSIZE (mode) - 1),
4470 subtarget, normalizep != -1);
4472 if (mode != target_mode)
4473 op0 = convert_modes (target_mode, mode, op0, 0);
4475 return op0;
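#if 0
/* The sign-bit path above in miniature, assuming a 32-bit int: A < 0
   is the sign bit brought down by a logical shift (for a 0/1 result;
   an arithmetic shift gives the 0/-1 form), and A >= 0 goes through
   the ones' complement first.  */
static int
lt0_sketch (int a)
{
  return (unsigned int) a >> 31;	/* 1 iff a < 0 */
}

static int
ge0_sketch (int a)
{
  return (unsigned int) ~a >> 31;	/* 1 iff a >= 0 */
}
#endif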
4478 if (icode != CODE_FOR_nothing)
4480 insn_operand_predicate_fn pred;
4482 /* We think we may be able to do this with a scc insn. Emit the
4483 comparison and then the scc insn.
4485 compare_from_rtx may call emit_queue, which would be deleted below
4486 if the scc insn fails. So call it ourselves before setting LAST.
4487 Likewise for do_pending_stack_adjust. */
4489 emit_queue ();
4490 do_pending_stack_adjust ();
4491 last = get_last_insn ();
4493 comparison
4494 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4495 if (GET_CODE (comparison) == CONST_INT)
4496 return (comparison == const0_rtx ? const0_rtx
4497 : normalizep == 1 ? const1_rtx
4498 : normalizep == -1 ? constm1_rtx
4499 : const_true_rtx);
4501 /* The code of COMPARISON may not match CODE if compare_from_rtx
4502 decided to swap its operands and reverse the original code.
4504 We know that compare_from_rtx returns either a CONST_INT or
4505 a new comparison code, so it is safe to just extract the
4506 code from COMPARISON. */
4507 code = GET_CODE (comparison);
4509 /* Get a reference to the target in the proper mode for this insn. */
4510 compare_mode = insn_data[(int) icode].operand[0].mode;
4511 subtarget = target;
4512 pred = insn_data[(int) icode].operand[0].predicate;
4513 if (preserve_subexpressions_p ()
4514 || ! (*pred) (subtarget, compare_mode))
4515 subtarget = gen_reg_rtx (compare_mode);
4517 pattern = GEN_FCN (icode) (subtarget);
4518 if (pattern)
4520 emit_insn (pattern);
4522 /* If we are converting to a wider mode, first convert to
4523 TARGET_MODE, then normalize. This produces better combining
4524 opportunities on machines that have a SIGN_EXTRACT when we are
4525 testing a single bit. This mostly benefits the 68k.
4527 If STORE_FLAG_VALUE does not have the sign bit set when
4528 interpreted in COMPARE_MODE, we can do this conversion as
4529 unsigned, which is usually more efficient. */
4530 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4532 convert_move (target, subtarget,
4533 (GET_MODE_BITSIZE (compare_mode)
4534 <= HOST_BITS_PER_WIDE_INT)
4535 && 0 == (STORE_FLAG_VALUE
4536 & ((HOST_WIDE_INT) 1
4537 << (GET_MODE_BITSIZE (compare_mode) - 1))));
4538 op0 = target;
4539 compare_mode = target_mode;
4541 else
4542 op0 = subtarget;
4544 /* If we want to keep subexpressions around, don't reuse our
4545 last target. */
4547 if (preserve_subexpressions_p ())
4548 subtarget = 0;
4550 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4551 we don't have to do anything. */
4552 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4554 /* STORE_FLAG_VALUE might be the most negative number, so write
4555 the comparison this way to avoid a compile-time warning. */
4556 else if (- normalizep == STORE_FLAG_VALUE)
4557 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4559 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4560 makes it hard to use a value of just the sign bit due to
4561 ANSI integer constant typing rules. */
4562 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4563 && (STORE_FLAG_VALUE
4564 & ((HOST_WIDE_INT) 1
4565 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4566 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4567 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4568 subtarget, normalizep == 1);
4569 else if (STORE_FLAG_VALUE & 1)
4571 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4572 if (normalizep == -1)
4573 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4575 else
4576 abort ();
4578 /* If we were converting to a smaller mode, do the
4579 conversion now. */
4580 if (target_mode != compare_mode)
4582 convert_move (target, op0, 0);
4583 return target;
4585 else
4586 return op0;
4590 delete_insns_since (last);
4592 /* If expensive optimizations are enabled, use a different pseudo register
4593 for each insn, instead of reusing the same pseudo. This leads to better
4594 CSE, but slows down the compiler, since there are more pseudos. */
4595 subtarget = (!flag_expensive_optimizations
4596 && (target_mode == mode)) ? target : NULL_RTX;
4598 /* If we reached here, we can't do this with a scc insn. However, there
4599 are some comparisons that can be done directly. For example, if
4600 this is an equality comparison of integers, we can try to exclusive-or
4601 (or subtract) the two operands and use a recursive call to try the
4602 comparison with zero. Don't do any of these cases if branches are
4603 very cheap. */
4605 if (BRANCH_COST > 0
4606 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4607 && op1 != const0_rtx)
4609 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4610 OPTAB_WIDEN);
4612 if (tem == 0)
4613 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4614 OPTAB_WIDEN);
4615 if (tem != 0)
4616 tem = emit_store_flag (target, code, tem, const0_rtx,
4617 mode, unsignedp, normalizep);
4618 if (tem == 0)
4619 delete_insns_since (last);
4620 return tem;
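#if 0
/* The reduction above in miniature: an equality test against a nonzero
   operand becomes a test of the XOR (or the difference) against zero,
   which the recursive emit_store_flag call may then handle directly.  */
static int
ne_sketch (unsigned int a, unsigned int b)
{
  return (a ^ b) != 0;
}
#endif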
4623 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4624 the constant zero. Reject all other comparisons at this point. Only
4625 do LE and GT if branches are expensive since they are expensive on
4626 2-operand machines. */
4628 if (BRANCH_COST == 0
4629 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4630 || (code != EQ && code != NE
4631 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4632 return 0;
4634 /* See what we need to return. We can only return a 1, -1, or the
4635 sign bit. */
4637 if (normalizep == 0)
4639 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4640 normalizep = STORE_FLAG_VALUE;
4642 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4643 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4644 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4646 else
4647 return 0;
4650 /* Try to put the result of the comparison in the sign bit. Assume we can't
4651 do the necessary operation below. */
4653 tem = 0;
4655 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4656 the sign bit set. */
4658 if (code == LE)
4660 /* This is destructive, so SUBTARGET can't be OP0. */
4661 if (rtx_equal_p (subtarget, op0))
4662 subtarget = 0;
4664 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4665 OPTAB_WIDEN);
4666 if (tem)
4667 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4668 OPTAB_WIDEN);
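#if 0
/* The identity above in miniature, assuming a 32-bit int with wrapping
   subtraction: A <= 0 iff (A | (A - 1)) has the sign bit set, since
   A == 0 wraps the subtraction to -1 and any A > 0 leaves both terms
   nonnegative.  */
static int
le0_sketch (int a)
{
  return (unsigned int) (a | (a - 1)) >> 31;
}
#endif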
4671 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4672 number of bits in the mode of OP0, minus one. */
4674 if (code == GT)
4676 if (rtx_equal_p (subtarget, op0))
4677 subtarget = 0;
4679 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4680 size_int (GET_MODE_BITSIZE (mode) - 1),
4681 subtarget, 0);
4682 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4683 OPTAB_WIDEN);
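#if 0
/* The identity above in miniature, assuming a 32-bit int and an
   arithmetic right shift: A >> 31 is 0 for nonnegative A and -1 for
   negative A, so (A >> 31) - A is negative exactly when A > 0.  */
static int
gt0_sketch (int a)
{
  return (unsigned int) ((a >> 31) - a) >> 31;
}
#endif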
4686 if (code == EQ || code == NE)
4688 /* For EQ or NE, one way to do the comparison is to apply an operation
4689 that converts the operand into a positive number if it is nonzero
4690 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4691 for NE we negate. This puts the result in the sign bit. Then we
4692 normalize with a shift, if needed.
4694 Two operations that can do the above actions are ABS and FFS, so try
4695 them. If that doesn't work, and MODE is smaller than a full word,
4696 we can use zero-extension to the wider mode (an unsigned conversion)
4697 as the operation. */
4699 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4700 that is compensated by the subsequent overflow when subtracting
4701 one / negating. */
4703 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4704 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4705 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4706 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4707 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4709 op0 = protect_from_queue (op0, 0);
4710 tem = convert_modes (word_mode, mode, op0, 1);
4711 mode = word_mode;
4714 if (tem != 0)
4716 if (code == EQ)
4717 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4718 0, OPTAB_WIDEN);
4719 else
4720 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
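#if 0
/* The ABS-based variant above in miniature, assuming a 32-bit int with
   wrapping arithmetic: |A| is zero only for A == 0 (INT_MIN comes out
   right thanks to the wrap in the subtract or negate), so the sign bit
   of |A| - 1 answers EQ and the sign bit of -|A| answers NE.  */
static int
eq0_sketch (int a)
{
  int t = a < 0 ? -a : a;		/* stands in for ABS (or FFS) */
  return (unsigned int) (t - 1) >> 31;	/* 1 iff a == 0 */
}

static int
ne0_sketch (int a)
{
  int t = a < 0 ? -a : a;
  return (unsigned int) -t >> 31;	/* 1 iff a != 0 */
}
#endif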
4723 /* If we couldn't do it that way, for NE we can "or" the two's complement
4724 of the value with itself. For EQ, we take the one's complement of
4725 that "or", which is an extra insn, so we only handle EQ if branches
4726 are expensive. */
4728 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4730 if (rtx_equal_p (subtarget, op0))
4731 subtarget = 0;
4733 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4734 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4735 OPTAB_WIDEN);
4737 if (tem && code == EQ)
4738 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4742 if (tem && normalizep)
4743 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4744 size_int (GET_MODE_BITSIZE (mode) - 1),
4745 subtarget, normalizep == 1);
4747 if (tem)
4749 if (GET_MODE (tem) != target_mode)
4751 convert_move (target, tem, 0);
4752 tem = target;
4754 else if (!subtarget)
4756 emit_move_insn (target, tem);
4757 tem = target;
4760 else
4761 delete_insns_since (last);
4763 return tem;
4766 /* Like emit_store_flag, but always succeeds. */
4768 rtx
4769 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
4770 enum machine_mode mode, int unsignedp, int normalizep)
4772 rtx tem, label;
4774 /* First see if emit_store_flag can do the job. */
4775 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4776 if (tem != 0)
4777 return tem;
4779 if (normalizep == 0)
4780 normalizep = 1;
4782 /* If this failed, we have to do this with set/compare/jump/set code. */
4784 if (GET_CODE (target) != REG
4785 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4786 target = gen_reg_rtx (GET_MODE (target));
4788 emit_move_insn (target, const1_rtx);
4789 label = gen_label_rtx ();
4790 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4791 NULL_RTX, label);
4793 emit_move_insn (target, const0_rtx);
4794 emit_label (label);
4796 return target;
4799 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4800 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4802 The algorithm is based on the code in expr.c:do_jump.
4804 Note that this does not perform a general comparison. Only variants
4805 generated within expmed.c are correctly handled; others abort (but could
4806 be handled if needed). */
4808 static void
4809 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
4810 rtx label)
4812 /* If this mode is an integer too wide to compare properly,
4813 compare word by word. Rely on cse to optimize constant cases. */
4815 if (GET_MODE_CLASS (mode) == MODE_INT
4816 && ! can_compare_p (op, mode, ccp_jump))
4818 rtx label2 = gen_label_rtx ();
4820 switch (op)
4822 case LTU:
4823 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4824 break;
4826 case LEU:
4827 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4828 break;
4830 case LT:
4831 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4832 break;
4834 case GT:
4835 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4836 break;
4838 case GE:
4839 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4840 break;
4842 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4843 that's the only equality operation we do. */
4844 case EQ:
4845 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4846 abort ();
4847 do_jump_by_parts_equality_rtx (arg1, label2, label);
4848 break;
4850 case NE:
4851 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4852 abort ();
4853 do_jump_by_parts_equality_rtx (arg1, label, label2);
4854 break;
4856 default:
4857 abort ();
4860 emit_label (label2);
4862 else
4863 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);