/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
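
/* Illustrative note, not part of the original file: with the default
   above, an unaligned access counts as slow exactly when the target
   requires strict alignment.  A target with fast unaligned loads and
   stores can override this in its target header, e.g.

     #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 0

   so that the bit-field routines below prefer one wide access over a
   shift-and-mask sequence.  */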

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed (void)
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}
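
/* Illustrative note, not part of the original file: once init_expmed
   has run, code elsewhere in this file can compare table entries such
   as shift_cost[m] against add_cost to choose between a shift and an
   add sequence; any shift the target's insns do not recognize keeps
   the sentinel cost 32000 assigned above.  */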

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
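
/* Illustrative usage, not part of the original file: callers ask for
   the mode of one operand of a named pattern, e.g.

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   which yields the mode insv wants for the inserted value (word_mode
   when the pattern leaves that operand VOIDmode), or MAX_MACHINE_MODE
   when the target provides no insv at all.  */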

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value, HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_set_optab->handlers[(int)GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode),
                           total_size);
        }
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
                          ? word_mode : int_mode_for_mode (GET_MODE (value))),
                         value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment. If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);

  return value;
}
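
/* Illustrative usage, not part of the original file: per the comment
   in the multi-word case above, store_field typically reaches here
   with a VOIDmode FIELDMODE to mark a bit field, e.g.

     store_bit_field (str_rtx, 12, 4, VOIDmode, value, -1);

   to store a 12-bit field starting 4 bits into STR_RTX when the size
   of the containing structure is unknown.  */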

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
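
/* Worked example, not part of the original file: on a little-endian
   target, storing a 16-bit field at BITPOS 24 into a MEM whose
   alignment gives UNIT = 32 takes two trips through the loop above:
   first bits 0..7 of VALUE with offset 0, thispos 24 (THISSIZE is
   limited to 32 - 24 = 8), then bits 8..15 with offset 1, thispos 0.  */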

/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode,
                   HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_extract_optab->handlers[(int)GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return extract_bit_field (dest, bitsize,
                                    bitnum - pos * GET_MODE_BITSIZE (innermode),
                                    unsignedp, target, mode, tmode, total_size);
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((GET_CODE (op0) != MEM
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (GET_CODE (op0) == MEM
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
      extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
      extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
			 unsigned HOST_WIDE_INT offset,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, rtx target,
			 int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
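
/* A minimal standalone sketch of the shift-and-mask extraction above,
   restated for plain 32-bit `unsigned int' instead of rtl.  The helper
   names and the fixed 32-bit width are illustrative assumptions, so the
   block is guarded out of the build; compile it separately to experiment.
   Note that right-shifting a negative value is implementation-defined in
   ISO C; the expander relies on the target's arithmetic shift insn.  */
#if 0
static unsigned int
extract_unsigned_32 (unsigned int word, int bitpos, int bitsize)
{
  /* Shift the field to the lsb, then mask off the upper bits
     (unless the field's msb was already the word's msb).  */
  unsigned int v = word >> bitpos;
  if (bitpos + bitsize < 32)
    v &= (1u << bitsize) - 1;
  return v;
}

static int
extract_signed_32 (unsigned int word, int bitpos, int bitsize)
{
  /* Shift the field's msb up to the word's msb, then arithmetic-shift
     its lsb back down to the word's lsb, sign-extending on the way.  */
  int v = (int) (word << (32 - bitpos - bitsize));
  return v >> (32 - bitsize);
}
#endif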
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
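
/* The same mask, computed in one step with a 64-bit type for the case
   where HOST_WIDE_INT is 32 bits; mask_rtx above splits this into the
   MASKLOW/MASKHIGH halves by hand.  A hypothetical sketch, guarded out
   of the build (assumes 0 <= bitpos and bitpos + bitsize <= 64).  */
#if 0
static unsigned long long
mask64 (int bitpos, int bitsize)
{
  /* BITSIZE ones followed by BITPOS zeros.  */
  if (bitsize == 0)
    return 0;
  if (bitpos + bitsize >= 64)
    return ~0ull << bitpos;
  return ((1ull << bitsize) - 1) << bitpos;
}
#endif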
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BITS_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsdone - thissize, 0), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
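
/* The split-field combination above, restated for a field spanning two
   32-bit little-endian words.  Assumes the field is genuinely split
   (bitpos > 0, bitpos + bitsize > 32, bitsize <= 32); names and widths
   are illustrative only, so the block is guarded out of the build.  */
#if 0
static int
extract_split_32 (unsigned int w0, unsigned int w1, int bitpos, int bitsize,
		  int unsignedp)
{
  int lowbits = 32 - bitpos;			/* bits taken from W0 */
  unsigned int part0 = w0 >> bitpos;		/* low part, at the lsb */
  unsigned int part1 = w1 & ((1u << (bitsize - lowbits)) - 1);
  unsigned int result = part0 | (part1 << lowbits);

  if (unsignedp)
    return (int) result;
  /* Sign-extend with two shifts, as the LSHIFT_EXPR/RSHIFT_EXPR pair
     above does in word_mode.  */
  return ((int) (result << (32 - bitsize))) >> (32 - bitsize);
}
#endif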
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* We used to detect shift counts computed by NEGATE_EXPR and shift in
     the other direction, but that does not work on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1))
	op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
	methods = OPTAB_DIRECT;
      else if (try == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code below.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx temp1;
	      tree type = TREE_TYPE (amount);
	      tree new_amount = make_tree (type, op1);
	      tree other_amount
		= fold (build (MINUS_EXPR, type,
			       convert (type,
					build_int_2 (GET_MODE_BITSIZE (mode),
						     0)),
			       amount));

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				   mode, shifted, new_amount, subtarget, 1);
	      temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				    mode, shifted, other_amount, 0, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? rotl_optab : rotr_optab,
			       shifted, op1, target, unsignedp, methods);

	  /* If we don't have the rotate, but we are rotating by a constant
	     that is in range, try a rotate in the opposite direction.  */

	  if (temp == 0 && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) > 0
	      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
	    temp = expand_binop (mode,
				 left ? rotr_optab : rotl_optab,
				 shifted,
				 GEN_INT (GET_MODE_BITSIZE (mode)
					  - INTVAL (op1)),
				 target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? ashl_optab : lshr_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? ashl_optab : ashr_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  if (temp == 0)
    abort ();
  return temp;
}
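
/* The OPTAB_LIB_WIDEN fallback above open-codes a rotate as the IOR of
   two shifts.  The same identity for a 32-bit word, valid because a zero
   shift count was already returned early (op1 == const0_rtx), so both
   counts stay in range.  Guarded illustrative sketch.  */
#if 0
static unsigned int
rotate_left_32 (unsigned int a, int n)		/* 0 < n < 32 */
{
  /* (A << N) | ((unsigned) A >> (C - N)), with C the bitsize of A.  */
  return (a << n) | (a >> (32 - n));
}
#endif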
enum alg_code { alg_zero, alg_m, alg_shift,
		alg_add_t_m2, alg_sub_t_m2,
		alg_add_factor, alg_sub_factor,
		alg_add_t2_m, alg_sub_t2_m,
		alg_add, alg_subtract, alg_factor, alg_shiftop };

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero		total := 0;
   alg_m		total := multiplicand;
   alg_shift		total := total * coeff;
   alg_add_t_m2		total := total + multiplicand * coeff;
   alg_sub_t_m2		total := total - multiplicand * coeff;
   alg_add_factor	total := total * coeff + total;
   alg_sub_factor	total := total * coeff - total;
   alg_add_t2_m		total := total * coeff + multiplicand;
   alg_sub_t2_m		total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  short cost;
  short ops;
  /* The sizes of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT, int);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
						 int, unsigned HOST_WIDE_INT *,
						 int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    int cost_limit)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  int cost;
  unsigned HOST_WIDE_INT q;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost = cost_limit;

  if (cost_limit <= 0)
    return;

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (zero_cost >= cost_limit)
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost = zero_cost;
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = alloca (sizeof (struct algorithm));
  best_alg = alloca (sizeof (struct algorithm));

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < BITS_PER_WORD)
	{
	  q = t >> m;
	  cost = shift_cost[m];
	  synth_mult (alg_in, q, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	      cost_limit = cost;
	    }
	}
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  cost = add_cost;
	  synth_mult (alg_in, t + 1, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	      cost_limit = cost;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  cost = add_cost;
	  synth_mult (alg_in, t - 1, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	      cost_limit = cost;
	    }
	}
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shifts the result by m, and adds/subtracts it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < BITS_PER_WORD)
	{
	  cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
	  synth_mult (alg_in, t / d, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	      cost_limit = cost;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < BITS_PER_WORD)
	{
	  cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
	  synth_mult (alg_in, t / d, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	      cost_limit = cost;
	    }
	  break;
	}
    }

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < BITS_PER_WORD)
	{
	  cost = shiftadd_cost[m];
	  synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	      cost_limit = cost;
	    }
	}

      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < BITS_PER_WORD)
	{
	  cost = shiftsub_cost[m];
	  synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	      cost_limit = cost;
	    }
	}
    }

  /* If cost_limit has not decreased since we stored it in alg_out->cost,
     we have not found any algorithm.  */
  if (cost_limit == alg_out->cost)
    return;

  /* If we are getting too long a sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = cost_limit;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
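
/* One sequence the search above can record, for t = 45: the factor case
   matches d = 2**4 - 1 = 15 (45 = 3 * 15) and recurses on 3 (= 2 + 1):

       alg_m		total := x
       alg_add_t2_m	total := total*2 + x		(log 1; gives x*3)
       alg_sub_factor	total := total*16 - total	(log 4; gives x*45)

   The sketch below replays any such op/log sequence with plain integers,
   mirroring how expand_mult_const interprets it; guarded out of the build
   and kept only as an executable restatement.  */
#if 0
static int
replay_alg (const struct algorithm *alg, int x)
{
  int total = (alg->op[0] == alg_zero ? 0 : x);
  int i;

  for (i = 1; i < alg->ops; i++)
    switch (alg->op[i])
      {
      case alg_shift:		total <<= alg->log[i]; break;
      case alg_add_t_m2:	total += x << alg->log[i]; break;
      case alg_sub_t_m2:	total -= x << alg->log[i]; break;
      case alg_add_t2_m:	total = (total << alg->log[i]) + x; break;
      case alg_sub_t2_m:	total = (total << alg->log[i]) - x; break;
      case alg_add_factor:	total = (total << alg->log[i]) + total; break;
      case alg_sub_factor:	total = (total << alg->log[i]) - total; break;
      default:			break;
      }
  return total;
}
#endif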
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;

  *variant = basic_variant;
  synth_mult (alg, val, mult_cost);

  /* This works only if the inverted value actually fits in an
     `unsigned int'.  */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      synth_mult (&alg2, -val, MIN (alg->cost, mult_cost) - negate_cost);
      alg2.cost += negate_cost;
      if (alg2.cost < alg->cost)
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  synth_mult (&alg2, val - 1, MIN (alg->cost, mult_cost) - add_cost);
  alg2.cost += add_cost;
  if (alg2.cost < alg->cost)
    *alg = alg2, *variant = add_variant;

  return alg->cost < mult_cost;
}
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* op0 must be a register to make mult_cost match the precomputed
     shiftadd_cost array.  */
  op0 = protect_from_queue (op0, 0);

  /* Avoid referencing memory over and over.
     For speed, but also for correctness when mem is volatile.  */
  if (GET_CODE (op0) == MEM)
    op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    abort ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      int preserve = preserve_subexpressions_p ();
      rtx shift_subtarget = preserve ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && ! preserve)
	  ? target : 0;
      rtx accum_target = preserve ? 0 : accum;

      switch (alg->op[opno])
	{
	case alg_shift:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_2 (log, 0), NULL_RTX, 0);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_2 (log, 0), NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_2 (log, 0), NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_2 (log, 0), shift_subtarget,
				0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_2 (log, 0), shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_2 (log, 0), NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_2 (log, 0), NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target ? add_target
				  : preserve ? 0 : tem));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  abort ();
	}

      /* Write a REG_EQUAL note on the last insn so that we can cse
	 multiplication sequences.  Note that if ACCUM is a SUBREG,
	 we've set the inner register and must properly indicate
	 that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
	{
	  nmode = GET_MODE (SUBREG_REG (accum));
	  tem = gen_lowpart (nmode, op0);
	}

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
			   gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  if (val != val_so_far)
    abort ();

  return accum;
}
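
/* Worked example of the VAL_SO_FAR bookkeeping above: for val = 10 the
   sequence alg_m, alg_add_t2_m (log 2), alg_shift (log 1) computes
   ((x << 2) + x) << 1, and val_so_far tracks 1 -> (1<<2)+1 = 5 -> 5<<1 = 10,
   so the final abort () cross-checks the emitted ops against VAL.  */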
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  rtx const_op1 = op1;
  enum mult_variant variant;
  struct algorithm algorithm;

  /* synth_mult does an `unsigned int' multiply.  As long as the mode is
     less than or equal in size to `unsigned int' this doesn't matter.
     If the mode is larger than `unsigned int', then synth_mult works only
     if the constant value exactly fits in an `unsigned int' without any
     truncation.  This means that multiplying by negative values does
     not work; results are off by 2^32 on a 32 bit machine.  */

  /* If we are multiplying in DImode, it may still be a win
     to try to work with shifts and adds.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
      && HOST_BITS_PER_INT >= BITS_PER_WORD
      && CONST_DOUBLE_HIGH (op1) == 0)
    const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
  else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
	   && GET_CODE (op1) == CONST_INT
	   && INTVAL (op1) < 0)
    const_op1 = 0;

  /* We used to test optimize here, on the grounds that it's better to
     produce a smaller program when -O is not used.
     But this causes such a terrible slowdown sometimes
     that it seems better to use synth_mult always.  */

  if (const_op1 && GET_CODE (const_op1) == CONST_INT
      && (unsignedp || !flag_trapv))
    {
      int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
      mult_cost = MIN (12 * add_cost, mult_cost);

      if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant,
			       mult_cost))
	return expand_mult_const (mode, op0, INTVAL (const_op1), target,
				  &algorithm, variant);
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
		      ! unsignedp
		      && flag_trapv && (GET_MODE_CLASS (mode) == MODE_INT)
		      ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  if (op0 == 0)
    abort ();
  return op0;
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose the
   multiplier more freely.

   The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  if (lgup > n)
    abort ();

  pow = n + lgup;
  pow2 = n + lgup - precision;

  if (pow == 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* We could handle this with some effort, but this case is much better
	 handled directly with a scc insn, so rely on caller using that.  */
      abort ();
    }

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  if (mhigh_hi && nh - d >= d)
    abort ();
  if (mhigh_hi > 1 || mlow_hi > 1)
    abort ();
  /* Assert that mlow < mhigh.  */
  if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
    abort ();

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo
	= (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo
	= (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh_lo & mask;
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh_lo;
      return mhigh_hi;
    }
}
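
/* Concrete values this function produces for N = 32, PRECISION = 32:
   for d = 5, lgup = 3, the reduction loop settles on multiplier
   0xCCCCCCCD with post_shift 2 and returns 0, i.e. for every 32-bit x,
   x/5 == (x * 0xCCCCCCCD) >> 34.  For d = 7 the minimal multiplier needs
   33 bits, so the function returns nonzero and expand_divmod below falls
   back to the add/shift fixup path.  A guarded standalone check (the
   helper name is hypothetical):  */
#if 0
#include <assert.h>
static void
check_magic_div5 (void)
{
  unsigned int x = 4000000019u;
  assert ((unsigned int) (((unsigned long long) x * 0xCCCCCCCDull) >> 34)
	  == x / 5);
}
#endif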
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
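
/* The same doubling iteration, standalone for n = 32: y = x is already
   correct to 3 bits for odd x, and each y *= (2 - x*y) step doubles the
   number of correct low-order bits (unsigned arithmetic is already
   mod 2^32).  Guarded illustrative sketch; e.g. invert_mod2_32 (3)
   == 0xAAAAAAAB, and 3u * 0xAAAAAAAB == 1.  */
#if 0
static unsigned int
invert_mod2_32 (unsigned int x)		/* X must be odd.  */
{
  unsigned int y = x;
  int nbit;

  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);
  return y;
}
#endif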
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
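
/* The fixup above implements the identity relating the two high halves:
   reading an s-bit operand as unsigned rather than signed raises its value
   by 2^s exactly when it is negative, so

     unsigned_high (a, b) == signed_high (a, b)
			     + (a < 0 ? b : 0) + (b < 0 ? a : 0)

   and the signed result subtracts the same two terms.  The
   (op >> (size-1)) & other computations above are those conditional
   addends (the arithmetic shift yields all ones for a negative operand).
   A 32-bit restatement, guarded out of the build:  */
#if 0
static unsigned int
unsigned_high_32 (int a, int b, unsigned int signed_high)
{
  return signed_high
	 + (a < 0 ? (unsigned int) b : 0u)
	 + (b < 0 ? (unsigned int) a : 0u);
}
#endif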
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
		     build_int_2 (GET_MODE_BITSIZE (mode), 0), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}
/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[(int) mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use the sign flavor opposite of unsignedp.
     We need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
	  < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[(int) wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and performing a non-widening multiplication.  */
  moptab = smul_optab;
  if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[(int) wider_mode]
	  + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
/* Emit code to multiply OP0 and CNST1, putting the high half of the result
   in TARGET if that is convenient, and return where the result is.  If the
   operation cannot be performed, 0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

rtx
expand_mult_highpart (enum machine_mode mode, rtx op0,
		      unsigned HOST_WIDE_INT cnst1, rtx target,
		      int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx op1, tem;

  /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
  if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
    abort ();

  op1 = gen_int_mode (cnst1, wider_mode);
  cnst1 &= GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expand_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost[GET_MODE_BITSIZE (mode) - 1];

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost;
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expand_mult_highpart_optab (mode, op0, op1, target,
					unsignedp, alg.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expand_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
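
/* What all the strategies above compute, stated directly for a 32-bit
   mode using 64-bit host arithmetic (the shift/add path reaches the same
   value via expand_mult_const in WIDER_MODE plus extract_high_half).
   Hypothetical sketch, guarded out of the build.  */
#if 0
static unsigned int
umul_highpart_32 (unsigned int x, unsigned int cnst1)
{
  return (unsigned int) (((unsigned long long) x * cnst1) >> 32);
}
#endif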
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */
/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
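
/* One concrete reading of the 0x55555556 remark above, for unsigned
   32-bit x: the low half of x * 0x55555556 is approximately
   (x mod 3) * 2^32 / 3, so its top two bits are the remainder itself.
   Exact for x up to 0x1fffffff, as stated; e.g. rem3 (5) == 2,
   rem3 (6) == 0.  Guarded illustrative sketch.  */
#if 0
static unsigned int
rem3 (unsigned int x)			/* X <= 0x1fffffff.  */
{
  return (x * 0x55555556u) >> 30;	/* low 32 bits of the product */
}
#endif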
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)

rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
	       rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;

  op1_is_constant = GET_CODE (op1) == CONST_INT;
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
	ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
		     || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation; for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
	return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
			  ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
	 since we have to read it as well as write it,
	 and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
	  /* Don't clobber an operand while doing a multi-step calculation.  */
	  || ((rem_flag || op1_is_constant)
	      && (reg_mentioned_p (target, op0)
		  || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
	  || reg_mentioned_p (target, op1)
	  || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns do, and that these insns don't
     normally fail.  If these assumptions are not correct, we may generate
     less efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? (unsignedp ? lshr_optab : ashr_optab)
	    : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
	    ? optab1
	    : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
	|| optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[(int) compute_mode].libfunc
	  || optab2->handlers[(int) compute_mode].libfunc)
	break;

  /* If we still couldn't find a mode, use MODE, but we'll probably abort
     in expand_binop.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = div_cost[(int) compute_mode]
    - (rem_flag && ! (last_div_const != 0 && op1_is_constant
		      && INTVAL (op1) == last_div_const)
       ? mul_cost[(int) compute_mode] + add_cost : 0);

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
	 must recompute the following.  */
      op1_is_constant = GET_CODE (op1) == CONST_INT;
      op1_is_pow2 = (op1_is_constant
		     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
			  || (! unsignedp
			      && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);
  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
	code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
	code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
	code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
	if (op1_is_constant)
	  {
	    if (unsignedp)
	      {
		unsigned HOST_WIDE_INT mh, ml;
		int pre_shift, post_shift;
		int dummy;
		unsigned HOST_WIDE_INT d = (INTVAL (op1)
					    & GET_MODE_MASK (compute_mode));

		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder
			  = expand_binop (compute_mode, and_optab, op0,
					  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
					  remainder, 1,
					  OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					     build_int_2 (pre_shift, 0),
					     tquotient, 1);
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
		      {
			/* Most significant bit of divisor is set; emit an scc
			   insn.  */
			quotient = emit_store_flag (tquotient, GEU, op0, op1,
						    compute_mode, 1, 1);
			if (quotient == 0)
			  goto fail1;
		      }
		    else
		      {
			/* Find a suitable multiplier and right shift count
			   instead of multiplying with D.  */

			mh = choose_multiplier (d, size, size,
						&ml, &post_shift, &dummy);

			/* If the suggested multiplier is more than SIZE bits,
			   we can do better for even divisors, using an
			   initial right shift.  */
			if (mh != 0 && (d & 1) == 0)
			  {
			    pre_shift = floor_log2 (d & -d);
			    mh = choose_multiplier (d >> pre_shift, size,
						    size - pre_shift,
						    &ml, &post_shift, &dummy);
			    if (mh)
			      abort ();
			  }
			else
			  pre_shift = 0;

			if (mh != 0)
			  {
			    rtx t1, t2, t3, t4;

			    if (post_shift - 1 >= BITS_PER_WORD)
			      goto fail1;

			    extra_cost = (shift_cost[post_shift - 1]
					  + shift_cost[1] + 2 * add_cost);
			    t1 = expand_mult_highpart (compute_mode, op0, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t1 == 0)
			      goto fail1;
			    t2 = force_operand (gen_rtx_MINUS (compute_mode,
							       op0, t1),
						NULL_RTX);
			    t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
					       build_int_2 (1, 0), NULL_RTX, 1);
			    t4 = force_operand (gen_rtx_PLUS (compute_mode,
							      t1, t3),
						NULL_RTX);
			    quotient
			      = expand_shift (RSHIFT_EXPR, compute_mode, t4,
					      build_int_2 (post_shift - 1, 0),
					      tquotient, 1);
			  }
			else
			  {
			    rtx t1, t2;

			    if (pre_shift >= BITS_PER_WORD
				|| post_shift >= BITS_PER_WORD)
			      goto fail1;

			    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					       build_int_2 (pre_shift, 0),
					       NULL_RTX, 1);
			    extra_cost = (shift_cost[pre_shift]
					  + shift_cost[post_shift]);
			    t2 = expand_mult_highpart (compute_mode, t1, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t2 == 0)
			      goto fail1;
			    quotient
			      = expand_shift (RSHIFT_EXPR, compute_mode, t2,
					      build_int_2 (post_shift, 0),
					      tquotient, 1);
			  }
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  set_unique_reg_note (insn,
				       REG_EQUAL,
				       gen_rtx_UDIV (compute_mode, op0, op1));
	      }
3391 else /* TRUNC_DIV, signed */
3393 unsigned HOST_WIDE_INT ml;
3394 int lgup, post_shift;
3395 HOST_WIDE_INT d = INTVAL (op1);
3396 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3398 /* n rem d = n rem -d */
3399 if (rem_flag && d < 0)
3401 d = abs_d;
3402 op1 = gen_int_mode (abs_d, compute_mode);
3405 if (d == 1)
3406 quotient = op0;
3407 else if (d == -1)
3408 quotient = expand_unop (compute_mode, neg_optab, op0,
3409 tquotient, 0);
3410 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3412 /* This case is not handled correctly below. */
3413 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3414 compute_mode, 1, 1);
3415 if (quotient == 0)
3416 goto fail1;
3418 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3419 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3420 /* ??? The cheap metric is computed only for
3421 word_mode. If this operation is wider, this may
3422 not be so. Assume true if the optab has an
3423 expander for this mode. */
3424 && (((rem_flag ? smod_optab : sdiv_optab)
3425 ->handlers[(int) compute_mode].insn_code
3426 != CODE_FOR_nothing)
3427 || (sdivmod_optab->handlers[(int) compute_mode]
3428 .insn_code != CODE_FOR_nothing)))
3430 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3432 lgup = floor_log2 (abs_d);
3433 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3435 rtx label = gen_label_rtx ();
3436 rtx t1;
3438 t1 = copy_to_mode_reg (compute_mode, op0);
3439 do_cmp_and_jump (t1, const0_rtx, GE,
3440 compute_mode, label);
3441 expand_inc (t1, gen_int_mode (abs_d - 1,
3442 compute_mode));
3443 emit_label (label);
3444 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3445 build_int_2 (lgup, 0),
3446 tquotient, 0);
3448 else
3450 rtx t1, t2, t3;
3451 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3452 build_int_2 (size - 1, 0),
3453 NULL_RTX, 0);
3454 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3455 build_int_2 (size - lgup, 0),
3456 NULL_RTX, 1);
3457 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3458 op0, t2),
3459 NULL_RTX);
3460 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3461 build_int_2 (lgup, 0),
3462 tquotient, 0);
3465 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3466 the quotient. */
3467 if (d < 0)
3469 insn = get_last_insn ();
3470 if (insn != last
3471 && (set = single_set (insn)) != 0
3472 && SET_DEST (set) == quotient
3473 && abs_d < ((unsigned HOST_WIDE_INT) 1
3474 << (HOST_BITS_PER_WIDE_INT - 1)))
3475 set_unique_reg_note (insn,
3476 REG_EQUAL,
3477 gen_rtx_DIV (compute_mode,
3478 op0,
3479 GEN_INT
3480 (trunc_int_for_mode
3481 (abs_d,
3482 compute_mode))));
3484 quotient = expand_unop (compute_mode, neg_optab,
3485 quotient, quotient, 0);
3488 else if (size <= HOST_BITS_PER_WIDE_INT)
3490 choose_multiplier (abs_d, size, size - 1,
3491 &ml, &post_shift, &lgup);
3492 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3494 rtx t1, t2, t3;
3496 if (post_shift >= BITS_PER_WORD
3497 || size - 1 >= BITS_PER_WORD)
3498 goto fail1;
3500 extra_cost = (shift_cost[post_shift]
3501 + shift_cost[size - 1] + add_cost);
3502 t1 = expand_mult_highpart (compute_mode, op0, ml,
3503 NULL_RTX, 0,
3504 max_cost - extra_cost);
3505 if (t1 == 0)
3506 goto fail1;
3507 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3508 build_int_2 (post_shift, 0), NULL_RTX, 0);
3509 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3510 build_int_2 (size - 1, 0), NULL_RTX, 0);
3511 if (d < 0)
3512 quotient
3513 = force_operand (gen_rtx_MINUS (compute_mode,
3514 t3, t2),
3515 tquotient);
3516 else
3517 quotient
3518 = force_operand (gen_rtx_MINUS (compute_mode,
3519 t2, t3),
3520 tquotient);
3522 else
3524 rtx t1, t2, t3, t4;
3526 if (post_shift >= BITS_PER_WORD
3527 || size - 1 >= BITS_PER_WORD)
3528 goto fail1;
3530 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3531 extra_cost = (shift_cost[post_shift]
3532 + shift_cost[size - 1] + 2 * add_cost);
3533 t1 = expand_mult_highpart (compute_mode, op0, ml,
3534 NULL_RTX, 0,
3535 max_cost - extra_cost);
3536 if (t1 == 0)
3537 goto fail1;
3538 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3539 t1, op0),
3540 NULL_RTX);
3541 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3542 build_int_2 (post_shift, 0),
3543 NULL_RTX, 0);
3544 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3545 build_int_2 (size - 1, 0),
3546 NULL_RTX, 0);
3547 if (d < 0)
3548 quotient
3549 = force_operand (gen_rtx_MINUS (compute_mode,
3550 t4, t3),
3551 tquotient);
3552 else
3553 quotient
3554 = force_operand (gen_rtx_MINUS (compute_mode,
3555 t3, t4),
3556 tquotient);
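/* Worked example (the constants shown are one possible output of
   choose_multiplier, given here purely for illustration): for
   size == 32 and d == 7 we get ml == 0x92492493 and post_shift == 2.
   Since ml >= 2^31 the second branch above is used: for op0 == 100
   the signed high part t1 of 100 * (ml - 2^32) is -43, so
   t2 = 100 + (-43) = 57, t3 = 57 >> 2 = 14, t4 = 100 >> 31 = 0,
   and quotient = t3 - t4 = 14 = 100/7.  */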
3559 else /* Mode too wide to use the tricky code above.  */
3560 break;
3562 insn = get_last_insn ();
3563 if (insn != last
3564 && (set = single_set (insn)) != 0
3565 && SET_DEST (set) == quotient)
3566 set_unique_reg_note (insn,
3567 REG_EQUAL,
3568 gen_rtx_DIV (compute_mode, op0, op1));
3570 break;
3572 fail1:
3573 delete_insns_since (last);
3574 break;
3576 case FLOOR_DIV_EXPR:
3577 case FLOOR_MOD_EXPR:
3578 /* We will come here only for signed operations. */
3579 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3581 unsigned HOST_WIDE_INT mh, ml;
3582 int pre_shift, lgup, post_shift;
3583 HOST_WIDE_INT d = INTVAL (op1);
3585 if (d > 0)
3587 /* We could just as easily deal with negative constants here,
3588 but it does not seem worth the trouble for GCC 2.6. */
3589 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3591 pre_shift = floor_log2 (d);
3592 if (rem_flag)
3594 remainder = expand_binop (compute_mode, and_optab, op0,
3595 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3596 remainder, 0, OPTAB_LIB_WIDEN);
3597 if (remainder)
3598 return gen_lowpart (mode, remainder);
3600 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3601 build_int_2 (pre_shift, 0),
3602 tquotient, 0);
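/* Example: for d == 4 (pre_shift == 2) and op0 == -9, the
   arithmetic shift gives -9 >> 2 = -3 = floor (-9/4), and the
   remainder is -9 & 3 = 3, exactly as floor rounding requires;
   no sign fixups are needed.  */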
3604 else
3606 rtx t1, t2, t3, t4;
3608 mh = choose_multiplier (d, size, size - 1,
3609 &ml, &post_shift, &lgup);
3610 if (mh)
3611 abort ();
3613 if (post_shift < BITS_PER_WORD
3614 && size - 1 < BITS_PER_WORD)
3616 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3617 build_int_2 (size - 1, 0),
3618 NULL_RTX, 0);
3619 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3620 NULL_RTX, 0, OPTAB_WIDEN);
3621 extra_cost = (shift_cost[post_shift]
3622 + shift_cost[size - 1] + 2 * add_cost);
3623 t3 = expand_mult_highpart (compute_mode, t2, ml,
3624 NULL_RTX, 1,
3625 max_cost - extra_cost);
3626 if (t3 != 0)
3628 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3629 build_int_2 (post_shift, 0),
3630 NULL_RTX, 1);
3631 quotient = expand_binop (compute_mode, xor_optab,
3632 t4, t1, tquotient, 0,
3633 OPTAB_WIDEN);
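/* The XOR trick above relies on the identity, for d > 0,
   floor (n/d) = ~floor ((~n)/d) when n < 0; for n >= 0 the mask t1
   is zero and the XORs are no-ops.  E.g. n == -9, d == 4:
   ~n == 8, 8/4 == 2, ~2 == -3 == floor (-9/4).  */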
3638 else
3640 rtx nsign, t1, t2, t3, t4;
3641 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3642 op0, constm1_rtx), NULL_RTX);
3643 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3644 0, OPTAB_WIDEN);
3645 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3646 build_int_2 (size - 1, 0), NULL_RTX, 0);
3647 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3648 NULL_RTX);
3649 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3650 NULL_RTX, 0);
3651 if (t4)
3653 rtx t5;
3654 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3655 NULL_RTX, 0);
3656 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3657 t4, t5),
3658 tquotient);
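/* This fallback is reached for negative divisors (the fast paths
   above require d > 0).  E.g. n == 7, d == -4: t1 = 6,
   t2 = 7 | 6 = 7, nsign = 0, t3 = 6, t4 = trunc (6 / -4) = -1,
   t5 = ~0 = -1, so quotient = -2 = floor (7 / -4).  */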
3663 if (quotient != 0)
3664 break;
3665 delete_insns_since (last);
3667 /* Try using an instruction that produces both the quotient and
3668 remainder, using truncation. We can easily compensate the quotient
3669 or remainder to get floor rounding, once we have the remainder.
3670 Notice that we also compute the final remainder value here,
3671 and return the result right away. */
3672 if (target == 0 || GET_MODE (target) != compute_mode)
3673 target = gen_reg_rtx (compute_mode);
3675 if (rem_flag)
3677 remainder
3678 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3679 quotient = gen_reg_rtx (compute_mode);
3681 else
3683 quotient
3684 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3685 remainder = gen_reg_rtx (compute_mode);
3688 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3689 quotient, remainder, 0))
3691 /* This could be computed with a branch-less sequence.
3692 Save that for later. */
3693 rtx tem;
3694 rtx label = gen_label_rtx ();
3695 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3696 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3697 NULL_RTX, 0, OPTAB_WIDEN);
3698 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3699 expand_dec (quotient, const1_rtx);
3700 expand_inc (remainder, op1);
3701 emit_label (label);
3702 return gen_lowpart (mode, rem_flag ? remainder : quotient);
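/* Example of the fixup above: n == -7, d == 2 gives the truncating
   pair q == -3, r == -1; since r != 0 and n and d differ in sign,
   q becomes -4 and r becomes 1, the floor results
   (-7 == 2 * -4 + 1).  */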
3705 /* No luck with division elimination or divmod. Have to do it
3706 by conditionally adjusting op0 *and* the result. */
3708 rtx label1, label2, label3, label4, label5;
3709 rtx adjusted_op0;
3710 rtx tem;
3712 quotient = gen_reg_rtx (compute_mode);
3713 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3714 label1 = gen_label_rtx ();
3715 label2 = gen_label_rtx ();
3716 label3 = gen_label_rtx ();
3717 label4 = gen_label_rtx ();
3718 label5 = gen_label_rtx ();
3719 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3720 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3721 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3722 quotient, 0, OPTAB_LIB_WIDEN);
3723 if (tem != quotient)
3724 emit_move_insn (quotient, tem);
3725 emit_jump_insn (gen_jump (label5));
3726 emit_barrier ();
3727 emit_label (label1);
3728 expand_inc (adjusted_op0, const1_rtx);
3729 emit_jump_insn (gen_jump (label4));
3730 emit_barrier ();
3731 emit_label (label2);
3732 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3733 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3734 quotient, 0, OPTAB_LIB_WIDEN);
3735 if (tem != quotient)
3736 emit_move_insn (quotient, tem);
3737 emit_jump_insn (gen_jump (label5));
3738 emit_barrier ();
3739 emit_label (label3);
3740 expand_dec (adjusted_op0, const1_rtx);
3741 emit_label (label4);
3742 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3743 quotient, 0, OPTAB_LIB_WIDEN);
3744 if (tem != quotient)
3745 emit_move_insn (quotient, tem);
3746 expand_dec (quotient, const1_rtx);
3747 emit_label (label5);
3749 break;
3751 case CEIL_DIV_EXPR:
3752 case CEIL_MOD_EXPR:
3753 if (unsignedp)
3755 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3757 rtx t1, t2, t3;
3758 unsigned HOST_WIDE_INT d = INTVAL (op1);
3759 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3760 build_int_2 (floor_log2 (d), 0),
3761 tquotient, 1);
3762 t2 = expand_binop (compute_mode, and_optab, op0,
3763 GEN_INT (d - 1),
3764 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3765 t3 = gen_reg_rtx (compute_mode);
3766 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3767 compute_mode, 1, 1);
3768 if (t3 == 0)
3770 rtx lab;
3771 lab = gen_label_rtx ();
3772 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3773 expand_inc (t1, const1_rtx);
3774 emit_label (lab);
3775 quotient = t1;
3777 else
3778 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3779 t1, t3),
3780 tquotient);
3781 break;
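/* Example: op0 == 13, d == 4: t1 = 13 >> 2 = 3, t2 = 13 & 3 = 1,
   t3 = (t2 != 0) = 1, so quotient = 3 + 1 = 4 = ceil (13/4).  */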
3784 /* Try using an instruction that produces both the quotient and
3785 remainder, using truncation. We can easily compensate the
3786 quotient or remainder to get ceiling rounding, once we have the
3787 remainder. Notice that we also compute the final remainder
3788 value here, and return the result right away. */
3789 if (target == 0 || GET_MODE (target) != compute_mode)
3790 target = gen_reg_rtx (compute_mode);
3792 if (rem_flag)
3794 remainder = (GET_CODE (target) == REG
3795 ? target : gen_reg_rtx (compute_mode));
3796 quotient = gen_reg_rtx (compute_mode);
3798 else
3800 quotient = (GET_CODE (target) == REG
3801 ? target : gen_reg_rtx (compute_mode));
3802 remainder = gen_reg_rtx (compute_mode);
3805 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3806 remainder, 1))
3808 /* This could be computed with a branch-less sequence.
3809 Save that for later. */
3810 rtx label = gen_label_rtx ();
3811 do_cmp_and_jump (remainder, const0_rtx, EQ,
3812 compute_mode, label);
3813 expand_inc (quotient, const1_rtx);
3814 expand_dec (remainder, op1);
3815 emit_label (label);
3816 return gen_lowpart (mode, rem_flag ? remainder : quotient);
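/* Example of the fixup above: 13 ceil-divmod 4 starts from the
   truncating pair q == 3, r == 1; since r != 0, q becomes 4 and
   r becomes -3 (13 == 4 * 4 - 3).  */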
3819 /* No luck with division elimination or divmod. Have to do it
3820 by conditionally adjusting op0 *and* the result. */
3822 rtx label1, label2;
3823 rtx adjusted_op0, tem;
3825 quotient = gen_reg_rtx (compute_mode);
3826 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3827 label1 = gen_label_rtx ();
3828 label2 = gen_label_rtx ();
3829 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3830 compute_mode, label1);
3831 emit_move_insn (quotient, const0_rtx);
3832 emit_jump_insn (gen_jump (label2));
3833 emit_barrier ();
3834 emit_label (label1);
3835 expand_dec (adjusted_op0, const1_rtx);
3836 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3837 quotient, 1, OPTAB_LIB_WIDEN);
3838 if (tem != quotient)
3839 emit_move_insn (quotient, tem);
3840 expand_inc (quotient, const1_rtx);
3841 emit_label (label2);
3844 else /* signed */
3846 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3847 && INTVAL (op1) >= 0)
3849 /* This is extremely similar to the code for the unsigned case
3850 above. For 2.7 we should merge these variants, but for
3851 2.6.1 I don't want to touch the code for unsigned since that
3852 gets used in C. The signed case will only be used by other
3853 languages (Ada). */
3855 rtx t1, t2, t3;
3856 unsigned HOST_WIDE_INT d = INTVAL (op1);
3857 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3858 build_int_2 (floor_log2 (d), 0),
3859 tquotient, 0);
3860 t2 = expand_binop (compute_mode, and_optab, op0,
3861 GEN_INT (d - 1),
3862 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3863 t3 = gen_reg_rtx (compute_mode);
3864 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3865 compute_mode, 1, 1);
3866 if (t3 == 0)
3868 rtx lab;
3869 lab = gen_label_rtx ();
3870 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3871 expand_inc (t1, const1_rtx);
3872 emit_label (lab);
3873 quotient = t1;
3875 else
3876 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3877 t1, t3),
3878 tquotient);
3879 break;
3882 /* Try using an instruction that produces both the quotient and
3883 remainder, using truncation. We can easily compensate the
3884 quotient or remainder to get ceiling rounding, once we have the
3885 remainder. Notice that we also compute the final remainder
3886 value here, and return the result right away. */
3887 if (target == 0 || GET_MODE (target) != compute_mode)
3888 target = gen_reg_rtx (compute_mode);
3889 if (rem_flag)
3891 remainder = (GET_CODE (target) == REG
3892 ? target : gen_reg_rtx (compute_mode));
3893 quotient = gen_reg_rtx (compute_mode);
3895 else
3897 quotient = (GET_CODE (target) == REG
3898 ? target : gen_reg_rtx (compute_mode));
3899 remainder = gen_reg_rtx (compute_mode);
3902 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3903 remainder, 0))
3905 /* This could be computed with a branch-less sequence.
3906 Save that for later. */
3907 rtx tem;
3908 rtx label = gen_label_rtx ();
3909 do_cmp_and_jump (remainder, const0_rtx, EQ,
3910 compute_mode, label);
3911 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3912 NULL_RTX, 0, OPTAB_WIDEN);
3913 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3914 expand_inc (quotient, const1_rtx);
3915 expand_dec (remainder, op1);
3916 emit_label (label);
3917 return gen_lowpart (mode, rem_flag ? remainder : quotient);
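/* Example of the fixup above: n == 7, d == 2 starts from q == 3,
   r == 1; r != 0 and n ^ d >= 0, so q becomes 4 and r becomes -1,
   i.e. ceil (7/2) == 4.  With opposite signs (n == -7), truncation
   already rounds toward +infinity and no adjustment is made.  */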
3920 /* No luck with division elimination or divmod. Have to do it
3921 by conditionally adjusting op0 *and* the result. */
3923 rtx label1, label2, label3, label4, label5;
3924 rtx adjusted_op0;
3925 rtx tem;
3927 quotient = gen_reg_rtx (compute_mode);
3928 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3929 label1 = gen_label_rtx ();
3930 label2 = gen_label_rtx ();
3931 label3 = gen_label_rtx ();
3932 label4 = gen_label_rtx ();
3933 label5 = gen_label_rtx ();
3934 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3935 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3936 compute_mode, label1);
3937 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3938 quotient, 0, OPTAB_LIB_WIDEN);
3939 if (tem != quotient)
3940 emit_move_insn (quotient, tem);
3941 emit_jump_insn (gen_jump (label5));
3942 emit_barrier ();
3943 emit_label (label1);
3944 expand_dec (adjusted_op0, const1_rtx);
3945 emit_jump_insn (gen_jump (label4));
3946 emit_barrier ();
3947 emit_label (label2);
3948 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3949 compute_mode, label3);
3950 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3951 quotient, 0, OPTAB_LIB_WIDEN);
3952 if (tem != quotient)
3953 emit_move_insn (quotient, tem);
3954 emit_jump_insn (gen_jump (label5));
3955 emit_barrier ();
3956 emit_label (label3);
3957 expand_inc (adjusted_op0, const1_rtx);
3958 emit_label (label4);
3959 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3960 quotient, 0, OPTAB_LIB_WIDEN);
3961 if (tem != quotient)
3962 emit_move_insn (quotient, tem);
3963 expand_inc (quotient, const1_rtx);
3964 emit_label (label5);
3967 break;
3969 case EXACT_DIV_EXPR:
3970 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3972 HOST_WIDE_INT d = INTVAL (op1);
3973 unsigned HOST_WIDE_INT ml;
3974 int pre_shift;
3975 rtx t1;
3977 pre_shift = floor_log2 (d & -d);
3978 ml = invert_mod2n (d >> pre_shift, size);
3979 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3980 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3981 quotient = expand_mult (compute_mode, t1,
3982 gen_int_mode (ml, compute_mode),
3983 NULL_RTX, 1);
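/* Worked example, assuming size == 32: for d == 6 the odd part is 3
   and pre_shift == 1; invert_mod2n gives ml == 0xAAAAAAAB, the
   inverse of 3 mod 2^32 (3 * ml == 1 mod 2^32).  For the exact
   dividend op0 == 18: t1 = 18 >> 1 = 9, and
   9 * ml mod 2^32 == 3 == 18/6.  */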
3985 insn = get_last_insn ();
3986 set_unique_reg_note (insn,
3987 REG_EQUAL,
3988 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3989 compute_mode,
3990 op0, op1));
3992 break;
3994 case ROUND_DIV_EXPR:
3995 case ROUND_MOD_EXPR:
3996 if (unsignedp)
3998 rtx tem;
3999 rtx label;
4000 label = gen_label_rtx ();
4001 quotient = gen_reg_rtx (compute_mode);
4002 remainder = gen_reg_rtx (compute_mode);
4003 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4005 rtx tem;
4006 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4007 quotient, 1, OPTAB_LIB_WIDEN);
4008 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4009 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4010 remainder, 1, OPTAB_LIB_WIDEN);
4012 tem = plus_constant (op1, -1);
4013 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4014 build_int_2 (1, 0), NULL_RTX, 1);
4015 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4016 expand_inc (quotient, const1_rtx);
4017 expand_dec (remainder, op1);
4018 emit_label (label);
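/* Example of the rounding fixup: 10 round-div 4 starts from q == 2,
   r == 2; the threshold is (4 - 1) >> 1 == 1, and r > 1, so q
   becomes 3 and r becomes -2: 2.5 rounds up to 3.  */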
4020 else
4022 rtx abs_rem, abs_op1, tem, mask;
4023 rtx label;
4024 label = gen_label_rtx ();
4025 quotient = gen_reg_rtx (compute_mode);
4026 remainder = gen_reg_rtx (compute_mode);
4027 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4029 rtx tem;
4030 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4031 quotient, 0, OPTAB_LIB_WIDEN);
4032 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4033 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4034 remainder, 0, OPTAB_LIB_WIDEN);
4036 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4037 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4038 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4039 build_int_2 (1, 0), NULL_RTX, 1);
4040 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4041 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4042 NULL_RTX, 0, OPTAB_WIDEN);
4043 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4044 build_int_2 (size - 1, 0), NULL_RTX, 0);
4045 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4046 NULL_RTX, 0, OPTAB_WIDEN);
4047 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4048 NULL_RTX, 0, OPTAB_WIDEN);
4049 expand_inc (quotient, tem);
4050 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4051 NULL_RTX, 0, OPTAB_WIDEN);
4052 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4053 NULL_RTX, 0, OPTAB_WIDEN);
4054 expand_dec (remainder, tem);
4055 emit_label (label);
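/* Example of the mask arithmetic above: n == -10, d == 4 gives
   q == -2, r == -2; 2*|r| == 8 >= |d|, and mask = sign (n ^ d) = -1,
   so (mask ^ 1) - mask == -1 is added to q and
   (mask ^ d) - mask == -4 is subtracted from r, yielding q == -3,
   r == 2: -2.5 rounds away from zero.  */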
4057 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4059 default:
4060 abort ();
4063 if (quotient == 0)
4065 if (target && GET_MODE (target) != compute_mode)
4066 target = 0;
4068 if (rem_flag)
4070 /* Try to produce the remainder without producing the quotient.
4071 If we seem to have a divmod pattern that does not require widening,
4072 don't try widening here. We should really have a WIDEN argument
4073 to expand_twoval_binop, since what we'd really like to do here is
4074 1) try a mod insn in compute_mode
4075 2) try a divmod insn in compute_mode
4076 3) try a div insn in compute_mode and multiply-subtract to get
4077 remainder
4078 4) try the same things with widening allowed. */
4079 remainder
4080 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4081 op0, op1, target,
4082 unsignedp,
4083 ((optab2->handlers[(int) compute_mode].insn_code
4084 != CODE_FOR_nothing)
4085 ? OPTAB_DIRECT : OPTAB_WIDEN));
4086 if (remainder == 0)
4088 /* No luck there. Can we do remainder and divide at once
4089 without a library call? */
4090 remainder = gen_reg_rtx (compute_mode);
4091 if (! expand_twoval_binop ((unsignedp
4092 ? udivmod_optab
4093 : sdivmod_optab),
4094 op0, op1,
4095 NULL_RTX, remainder, unsignedp))
4096 remainder = 0;
4099 if (remainder)
4100 return gen_lowpart (mode, remainder);
4103 /* Produce the quotient. Try a quotient insn, but not a library call.
4104 If we have a divmod in this mode, use it in preference to widening
4105 the div (for this test we assume it will not fail). Note that optab2
4106 is set to whichever of the two optabs the call below will use.
4107 quotient
4108 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4109 op0, op1, rem_flag ? NULL_RTX : target,
4110 unsignedp,
4111 ((optab2->handlers[(int) compute_mode].insn_code
4112 != CODE_FOR_nothing)
4113 ? OPTAB_DIRECT : OPTAB_WIDEN));
4115 if (quotient == 0)
4117 /* No luck there. Try a quotient-and-remainder insn,
4118 keeping the quotient alone. */
4119 quotient = gen_reg_rtx (compute_mode);
4120 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4121 op0, op1,
4122 quotient, NULL_RTX, unsignedp))
4124 quotient = 0;
4125 if (! rem_flag)
4126 /* Still no luck. If we are not computing the remainder,
4127 use a library call for the quotient. */
4128 quotient = sign_expand_binop (compute_mode,
4129 udiv_optab, sdiv_optab,
4130 op0, op1, target,
4131 unsignedp, OPTAB_LIB_WIDEN);
4136 if (rem_flag)
4138 if (target && GET_MODE (target) != compute_mode)
4139 target = 0;
4141 if (quotient == 0)
4142 /* No divide instruction either. Use library for remainder. */
4143 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4144 op0, op1, target,
4145 unsignedp, OPTAB_LIB_WIDEN);
4146 else
4148 /* We divided. Now finish doing X - Y * (X / Y). */
4149 remainder = expand_mult (compute_mode, quotient, op1,
4150 NULL_RTX, unsignedp);
4151 remainder = expand_binop (compute_mode, sub_optab, op0,
4152 remainder, target, unsignedp,
4153 OPTAB_LIB_WIDEN);
4157 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4160 /* Return a tree node with data type TYPE, describing the value of X.
4161 Usually this is an RTL_EXPR, if there is no obviously better choice.
4162 X may be an expression; however, we only support those expressions
4163 generated by loop.c. */
4165 tree
4166 make_tree (tree type, rtx x)
4168 tree t;
4170 switch (GET_CODE (x))
4172 case CONST_INT:
4173 t = build_int_2 (INTVAL (x),
4174 (TYPE_UNSIGNED (type)
4175 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4176 < HOST_BITS_PER_WIDE_INT))
4177 || INTVAL (x) >= 0 ? 0 : -1);
4178 TREE_TYPE (t) = type;
4179 return t;
4181 case CONST_DOUBLE:
4182 if (GET_MODE (x) == VOIDmode)
4184 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4185 TREE_TYPE (t) = type;
4187 else
4189 REAL_VALUE_TYPE d;
4191 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4192 t = build_real (type, d);
4195 return t;
4197 case CONST_VECTOR:
4199 int i, units;
4200 rtx elt;
4201 tree t = NULL_TREE;
4203 units = CONST_VECTOR_NUNITS (x);
4205 /* Build a tree with vector elements. */
4206 for (i = units - 1; i >= 0; --i)
4208 elt = CONST_VECTOR_ELT (x, i);
4209 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4212 return build_vector (type, t);
4215 case PLUS:
4216 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4217 make_tree (type, XEXP (x, 1))));
4219 case MINUS:
4220 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4221 make_tree (type, XEXP (x, 1))));
4223 case NEG:
4224 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4226 case MULT:
4227 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4228 make_tree (type, XEXP (x, 1))));
4230 case ASHIFT:
4231 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4232 make_tree (type, XEXP (x, 1))));
4234 case LSHIFTRT:
4235 t = lang_hooks.types.unsigned_type (type);
4236 return fold (convert (type,
4237 build (RSHIFT_EXPR, t,
4238 make_tree (t, XEXP (x, 0)),
4239 make_tree (type, XEXP (x, 1)))));
4241 case ASHIFTRT:
4242 t = lang_hooks.types.signed_type (type);
4243 return fold (convert (type,
4244 build (RSHIFT_EXPR, t,
4245 make_tree (t, XEXP (x, 0)),
4246 make_tree (type, XEXP (x, 1)))));
4248 case DIV:
4249 if (TREE_CODE (type) != REAL_TYPE)
4250 t = lang_hooks.types.signed_type (type);
4251 else
4252 t = type;
4254 return fold (convert (type,
4255 build (TRUNC_DIV_EXPR, t,
4256 make_tree (t, XEXP (x, 0)),
4257 make_tree (t, XEXP (x, 1)))));
4258 case UDIV:
4259 t = lang_hooks.types.unsigned_type (type);
4260 return fold (convert (type,
4261 build (TRUNC_DIV_EXPR, t,
4262 make_tree (t, XEXP (x, 0)),
4263 make_tree (t, XEXP (x, 1)))));
4265 case SIGN_EXTEND:
4266 case ZERO_EXTEND:
4267 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4268 GET_CODE (x) == ZERO_EXTEND);
4269 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4271 default:
4272 t = make_node (RTL_EXPR);
4273 TREE_TYPE (t) = type;
4275 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4276 ptr_mode. So convert. */
4277 if (POINTER_TYPE_P (type))
4278 x = convert_memory_address (TYPE_MODE (type), x);
4280 RTL_EXPR_RTL (t) = x;
4281 /* There are no insns to be output
4282 when this rtl_expr is used. */
4283 RTL_EXPR_SEQUENCE (t) = 0;
4284 return t;
4288 /* Check whether the multiplication X * MULT + ADD overflows.
4289 X, MULT and ADD must be CONST_*.
4290 MODE is the machine mode for the computation.
4292 X and MULT must have mode MODE. ADD may have a different mode;
4293 if it has VOIDmode, it is treated as having mode MODE.
4293 UNSIGNEDP is nonzero to do unsigned multiplication. */
4295 bool
4296 const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp)
4298 tree type, mult_type, add_type, result;
4300 type = lang_hooks.types.type_for_mode (mode, unsignedp);
4302 /* In order to get a proper overflow indication from an unsigned
4303 type, we have to pretend that it's a sizetype. */
4304 mult_type = type;
4305 if (unsignedp)
4307 mult_type = copy_node (type);
4308 TYPE_IS_SIZETYPE (mult_type) = 1;
4311 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4312 : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
4314 result = fold (build (PLUS_EXPR, mult_type,
4315 fold (build (MULT_EXPR, mult_type,
4316 make_tree (mult_type, x),
4317 make_tree (mult_type, mult))),
4318 make_tree (add_type, add)));
4320 return TREE_CONSTANT_OVERFLOW (result);
4323 /* Return an rtx representing the value of X * MULT + ADD.
4324 TARGET is a suggestion for where to store the result (an rtx).
4325 MODE is the machine mode for the computation.
4326 X and MULT must have mode MODE. ADD may have a different mode;
4327 if it has VOIDmode, it is treated as having mode MODE.
4328 UNSIGNEDP is nonzero to do unsigned multiplication.
4329 This may emit insns. */
4332 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4333 int unsignedp)
4335 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
4336 tree add_type = (GET_MODE (add) == VOIDmode
4337 ? type : lang_hooks.types.type_for_mode (GET_MODE (add),
4338 unsignedp));
4339 tree result = fold (build (PLUS_EXPR, type,
4340 fold (build (MULT_EXPR, type,
4341 make_tree (type, x),
4342 make_tree (type, mult))),
4343 make_tree (add_type, add)));
4345 return expand_expr (result, target, VOIDmode, 0);
4348 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4349 and returning TARGET.
4351 If TARGET is 0, a pseudo-register or constant is returned. */
4354 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4356 rtx tem = 0;
4358 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4359 tem = simplify_binary_operation (AND, mode, op0, op1);
4360 if (tem == 0)
4361 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4363 if (target == 0)
4364 target = tem;
4365 else if (tem != target)
4366 emit_move_insn (target, tem);
4367 return target;
4370 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4371 and storing in TARGET. Normally return TARGET.
4372 Return 0 if that cannot be done.
4374 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4375 it is VOIDmode, they cannot both be CONST_INT.
4377 UNSIGNEDP is for the case where we have to widen the operands
4378 to perform the operation. It says to use zero-extension.
4380 NORMALIZEP is 1 if we should convert the result to be either zero
4381 or one. NORMALIZEP is -1 if we should convert the result to be
4382 either zero or -1. If NORMALIZEP is zero, the result will be left
4383 "raw" out of the scc insn. */
4386 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
4387 enum machine_mode mode, int unsignedp, int normalizep)
4389 rtx subtarget;
4390 enum insn_code icode;
4391 enum machine_mode compare_mode;
4392 enum machine_mode target_mode = GET_MODE (target);
4393 rtx tem;
4394 rtx last = get_last_insn ();
4395 rtx pattern, comparison;
4397 /* ??? Ok to do this and then fail? */
4398 op0 = protect_from_queue (op0, 0);
4399 op1 = protect_from_queue (op1, 0);
4401 if (unsignedp)
4402 code = unsigned_condition (code);
4404 /* If one operand is constant, make it the second one. Only do this
4405 if the other operand is not constant as well. */
4407 if (swap_commutative_operands_p (op0, op1))
4409 tem = op0;
4410 op0 = op1;
4411 op1 = tem;
4412 code = swap_condition (code);
4415 if (mode == VOIDmode)
4416 mode = GET_MODE (op0);
4418 /* For some comparisons with 1 and -1, we can convert this to
4419 comparisons with zero. This will often produce more opportunities for
4420 store-flag insns. */
4422 switch (code)
4424 case LT:
4425 if (op1 == const1_rtx)
4426 op1 = const0_rtx, code = LE;
4427 break;
4428 case LE:
4429 if (op1 == constm1_rtx)
4430 op1 = const0_rtx, code = LT;
4431 break;
4432 case GE:
4433 if (op1 == const1_rtx)
4434 op1 = const0_rtx, code = GT;
4435 break;
4436 case GT:
4437 if (op1 == constm1_rtx)
4438 op1 = const0_rtx, code = GE;
4439 break;
4440 case GEU:
4441 if (op1 == const1_rtx)
4442 op1 = const0_rtx, code = NE;
4443 break;
4444 case LTU:
4445 if (op1 == const1_rtx)
4446 op1 = const0_rtx, code = EQ;
4447 break;
4448 default:
4449 break;
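/* E.g. for unsigned operands, A >= 1 is exactly A != 0 and A < 1 is
   exactly A == 0; for signed operands, A < 1 is A <= 0.  */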
4452 /* If we are comparing a double-word integer with zero, we can convert
4453 the comparison into one involving a single word. */
4454 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4455 && GET_MODE_CLASS (mode) == MODE_INT
4456 && op1 == const0_rtx
4457 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4459 if (code == EQ || code == NE)
4461 rtx op00, op01, op0both;
4463 /* Do a logical OR of the two words and compare the result. */
4464 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4465 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4466 op0both = expand_binop (word_mode, ior_optab, op00, op01,
4467 NULL_RTX, unsignedp, OPTAB_DIRECT);
4468 if (op0both != 0)
4469 return emit_store_flag (target, code, op0both, op1, word_mode,
4470 unsignedp, normalizep);
4472 else if (code == LT || code == GE)
4474 rtx op0h;
4476 /* If testing the sign bit, can just test on high word. */
4477 op0h = simplify_gen_subreg (word_mode, op0, mode,
4478 subreg_highpart_offset (word_mode, mode));
4479 return emit_store_flag (target, code, op0h, op1, word_mode,
4480 unsignedp, normalizep);
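/* E.g. on a 32-bit target, a DImode X compares against zero as
   (Xlow | Xhigh) == 0 for EQ/NE, while for LT/GE only Xhigh, which
   holds the sign bit, needs to be examined.  */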
4484 /* From now on, we won't change CODE, so set ICODE now. */
4485 icode = setcc_gen_code[(int) code];
4487 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4488 complement of A (for GE) and shifting the sign bit to the low bit. */
4489 if (op1 == const0_rtx && (code == LT || code == GE)
4490 && GET_MODE_CLASS (mode) == MODE_INT
4491 && (normalizep || STORE_FLAG_VALUE == 1
4492 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4493 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4494 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4496 subtarget = target;
4498 /* If the result is to be wider than OP0, it is best to convert it
4499 first. If it is to be narrower, it is *incorrect* to convert it
4500 first. */
4501 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4503 op0 = protect_from_queue (op0, 0);
4504 op0 = convert_modes (target_mode, mode, op0, 0);
4505 mode = target_mode;
4508 if (target_mode != mode)
4509 subtarget = 0;
4511 if (code == GE)
4512 op0 = expand_unop (mode, one_cmpl_optab, op0,
4513 ((STORE_FLAG_VALUE == 1 || normalizep)
4514 ? 0 : subtarget), 0);
4516 if (STORE_FLAG_VALUE == 1 || normalizep)
4517 /* If we are supposed to produce a 0/1 value, we want to do
4518 a logical shift from the sign bit to the low-order bit; for
4519 a -1/0 value, we do an arithmetic shift. */
4520 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4521 size_int (GET_MODE_BITSIZE (mode) - 1),
4522 subtarget, normalizep != -1);
4524 if (mode != target_mode)
4525 op0 = convert_modes (target_mode, mode, op0, 0);
4527 return op0;
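/* Illustration in an 8-bit mode: for A == -5 (0xfb), a logical
   shift of the sign bit gives 0xfb >> 7 == 1, i.e. A < 0 is true;
   for GE the complement is shifted instead, and ~(-5) == 4 has a
   clear sign bit, giving 0.  An arithmetic shift instead yields the
   -1/0 forms.  */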
4530 if (icode != CODE_FOR_nothing)
4532 insn_operand_predicate_fn pred;
4534 /* We think we may be able to do this with a scc insn. Emit the
4535 comparison and then the scc insn.
4537 compare_from_rtx may call emit_queue, which would be deleted below
4538 if the scc insn fails. So call it ourselves before setting LAST.
4539 Likewise for do_pending_stack_adjust. */
4541 emit_queue ();
4542 do_pending_stack_adjust ();
4543 last = get_last_insn ();
4545 comparison
4546 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4547 if (GET_CODE (comparison) == CONST_INT)
4548 return (comparison == const0_rtx ? const0_rtx
4549 : normalizep == 1 ? const1_rtx
4550 : normalizep == -1 ? constm1_rtx
4551 : const_true_rtx);
4553 /* The code of COMPARISON may not match CODE if compare_from_rtx
4554 decided to swap its operands and reverse the original code.
4556 We know that compare_from_rtx returns either a CONST_INT or
4557 a new comparison code, so it is safe to just extract the
4558 code from COMPARISON. */
4559 code = GET_CODE (comparison);
4561 /* Get a reference to the target in the proper mode for this insn. */
4562 compare_mode = insn_data[(int) icode].operand[0].mode;
4563 subtarget = target;
4564 pred = insn_data[(int) icode].operand[0].predicate;
4565 if (preserve_subexpressions_p ()
4566 || ! (*pred) (subtarget, compare_mode))
4567 subtarget = gen_reg_rtx (compare_mode);
4569 pattern = GEN_FCN (icode) (subtarget);
4570 if (pattern)
4572 emit_insn (pattern);
4574 /* If we are converting to a wider mode, first convert to
4575 TARGET_MODE, then normalize. This produces better combining
4576 opportunities on machines that have a SIGN_EXTRACT when we are
4577 testing a single bit. This mostly benefits the 68k.
4579 If STORE_FLAG_VALUE does not have the sign bit set when
4580 interpreted in COMPARE_MODE, we can do this conversion as
4581 unsigned, which is usually more efficient. */
4582 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4584 convert_move (target, subtarget,
4585 (GET_MODE_BITSIZE (compare_mode)
4586 <= HOST_BITS_PER_WIDE_INT)
4587 && 0 == (STORE_FLAG_VALUE
4588 & ((HOST_WIDE_INT) 1
4589 << (GET_MODE_BITSIZE (compare_mode) - 1))));
4590 op0 = target;
4591 compare_mode = target_mode;
4593 else
4594 op0 = subtarget;
4596 /* If we want to keep subexpressions around, don't reuse our
4597 last target. */
4599 if (preserve_subexpressions_p ())
4600 subtarget = 0;
4602 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4603 we don't have to do anything. */
4604 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4606 /* STORE_FLAG_VALUE might be the most negative number, so write
4607 the comparison this way to avoid a compiler-time warning. */
4608 else if (- normalizep == STORE_FLAG_VALUE)
4609 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4611 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4612 makes it hard to use a value of just the sign bit due to
4613 ANSI integer constant typing rules. */
4614 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4615 && (STORE_FLAG_VALUE
4616 & ((HOST_WIDE_INT) 1
4617 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4618 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4619 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4620 subtarget, normalizep == 1);
4621 else if (STORE_FLAG_VALUE & 1)
4623 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4624 if (normalizep == -1)
4625 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4627 else
4628 abort ();
4630 /* If we were converting to a smaller mode, do the
4631 conversion now. */
4632 if (target_mode != compare_mode)
4634 convert_move (target, op0, 0);
4635 return target;
4637 else
4638 return op0;
4642 delete_insns_since (last);
4644 /* If expensive optimizations, use different pseudo registers for each
4645 insn, instead of reusing the same pseudo. This leads to better CSE,
4646 but slows down the compiler, since there are more pseudos.  */
4647 subtarget = (!flag_expensive_optimizations
4648 && (target_mode == mode)) ? target : NULL_RTX;
4650 /* If we reached here, we can't do this with a scc insn. However, there
4651 are some comparisons that can be done directly. For example, if
4652 this is an equality comparison of integers, we can try to exclusive-or
4653 (or subtract) the two operands and use a recursive call to try the
4654 comparison with zero. Don't do any of these cases if branches are
4655 very cheap. */
4657 if (BRANCH_COST > 0
4658 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4659 && op1 != const0_rtx)
4661 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4662 OPTAB_WIDEN);
4664 if (tem == 0)
4665 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4666 OPTAB_WIDEN);
4667 if (tem != 0)
4668 tem = emit_store_flag (target, code, tem, const0_rtx,
4669 mode, unsignedp, normalizep);
4670 if (tem == 0)
4671 delete_insns_since (last);
4672 return tem;
4675 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4676 the constant zero. Reject all other comparisons at this point. Only
4677 do LE and GT if branches are expensive since they are expensive on
4678 2-operand machines. */
4680 if (BRANCH_COST == 0
4681 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4682 || (code != EQ && code != NE
4683 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4684 return 0;
4686 /* See what we need to return. We can only return a 1, -1, or the
4687 sign bit. */
4689 if (normalizep == 0)
4691 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4692 normalizep = STORE_FLAG_VALUE;
4694 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4695 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4696 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4698 else
4699 return 0;
4702 /* Try to put the result of the comparison in the sign bit. Assume we can't
4703 do the necessary operation below. */
4705 tem = 0;
4707 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4708 the sign bit set. */
4710 if (code == LE)
4712 /* This is destructive, so SUBTARGET can't be OP0. */
4713 if (rtx_equal_p (subtarget, op0))
4714 subtarget = 0;
4716 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4717 OPTAB_WIDEN);
4718 if (tem)
4719 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4720 OPTAB_WIDEN);
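/* E.g. in an 8-bit mode: A == 0 gives 0 | -1 == -1 (sign bit set,
   A <= 0 is true); A == 5 gives 5 | 4 == 5 (sign clear, false);
   A == -3 gives -3 | -4 == -3 (sign set, true).  */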
4723 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4724 number of bits in the mode of OP0, minus one. */
4726 if (code == GT)
4728 if (rtx_equal_p (subtarget, op0))
4729 subtarget = 0;
4731 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4732 size_int (GET_MODE_BITSIZE (mode) - 1),
4733 subtarget, 0);
4734 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4735 OPTAB_WIDEN);
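/* E.g. in an 8-bit mode: A == 5 gives (5 >> 7) - 5 == -5 (sign bit
   set, A > 0 is true); A == -5 gives (-1) - (-5) == 4 (sign clear,
   false); A == 0 gives 0 (sign clear, false).  */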
4738 if (code == EQ || code == NE)
4740 /* For EQ or NE, one way to do the comparison is to apply an operation
4741 that converts the operand into a positive number if it is nonzero
4742 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4743 for NE we negate. This puts the result in the sign bit. Then we
4744 normalize with a shift, if needed.
4746 Two operations that can do the above actions are ABS and FFS, so try
4747 them. If that doesn't work, and MODE is smaller than a full word,
4748 we can use zero-extension to the wider mode (an unsigned conversion)
4749 as the operation. */
4751 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4752 that is compensated by the subsequent overflow when subtracting
4753 one / negating. */
4755 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4756 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4757 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4758 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4759 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4761 op0 = protect_from_queue (op0, 0);
4762 tem = convert_modes (word_mode, mode, op0, 1);
4763 mode = word_mode;
4766 if (tem != 0)
4768 if (code == EQ)
4769 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4770 0, OPTAB_WIDEN);
4771 else
4772 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
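/* E.g. with ABS: for EQ, abs (A) - 1 is negative exactly when
   A == 0 (abs (0) - 1 == -1); A == INT_MIN overflows to INT_MIN,
   and INT_MIN - 1 wraps to INT_MAX, which correctly has a clear
   sign bit.  For NE, -abs (A) is negative exactly when A != 0.  */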
4775 /* If we couldn't do it that way, for NE we can "or" the two's complement
4776 of the value with itself. For EQ, we take the one's complement of
4777 that "or", which is an extra insn, so we only handle EQ if branches
4778 are expensive. */
4780 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4782 if (rtx_equal_p (subtarget, op0))
4783 subtarget = 0;
4785 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4786 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4787 OPTAB_WIDEN);
4789 if (tem && code == EQ)
4790 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
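/* E.g. A == 5: -5 | 5 has the sign bit set (A != 0 is true);
   A == 0: 0 | 0 is zero (false).  A == INT_MIN negates to itself,
   but the OR still has the sign bit set, as required.  */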
4794 if (tem && normalizep)
4795 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4796 size_int (GET_MODE_BITSIZE (mode) - 1),
4797 subtarget, normalizep == 1);
4799 if (tem)
4801 if (GET_MODE (tem) != target_mode)
4803 convert_move (target, tem, 0);
4804 tem = target;
4806 else if (!subtarget)
4808 emit_move_insn (target, tem);
4809 tem = target;
4812 else
4813 delete_insns_since (last);
4815 return tem;
4818 /* Like emit_store_flag, but always succeeds. */
4821 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
4822 enum machine_mode mode, int unsignedp, int normalizep)
4824 rtx tem, label;
4826 /* First see if emit_store_flag can do the job. */
4827 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4828 if (tem != 0)
4829 return tem;
4831 if (normalizep == 0)
4832 normalizep = 1;
4834 /* If this failed, we have to do this with set/compare/jump/set code. */
4836 if (GET_CODE (target) != REG
4837 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4838 target = gen_reg_rtx (GET_MODE (target));
4840 emit_move_insn (target, const1_rtx);
4841 label = gen_label_rtx ();
4842 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4843 NULL_RTX, label);
4845 emit_move_insn (target, const0_rtx);
4846 emit_label (label);
4848 return target;
4851 /* Perform possibly multi-word comparison and conditional jump to LABEL
4852 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4854 The algorithm is based on the code in expr.c:do_jump.
4856 Note that this does not perform a general comparison. Only variants
4857 generated within expmed.c are correctly handled; others abort (but could
4858 be handled if needed). */
4860 static void
4861 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
4862 rtx label)
4864 /* If this mode is an integer too wide to compare properly,
4865 compare word by word. Rely on cse to optimize constant cases. */
4867 if (GET_MODE_CLASS (mode) == MODE_INT
4868 && ! can_compare_p (op, mode, ccp_jump))
4870 rtx label2 = gen_label_rtx ();
4872 switch (op)
4874 case LTU:
4875 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4876 break;
4878 case LEU:
4879 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4880 break;
4882 case LT:
4883 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4884 break;
4886 case GT:
4887 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4888 break;
4890 case GE:
4891 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4892 break;
4894 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4895 those are the only equality operations we do.  */
4896 case EQ:
4897 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4898 abort ();
4899 do_jump_by_parts_equality_rtx (arg1, label2, label);
4900 break;
4902 case NE:
4903 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4904 abort ();
4905 do_jump_by_parts_equality_rtx (arg1, label, label2);
4906 break;
4908 default:
4909 abort ();
4912 emit_label (label2);
4914 else
4915 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);