/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
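
/* For illustration (editorial note, not from the original source):
   "cheap" here refers to the branch-free expansion of a signed divide
   by a power of two, which rounds toward zero by adding (divisor - 1)
   to negative operands before shifting; e.g. for x / 8 on a 32-bit
   host:

       q = (x + ((x >> 31) & 7)) >> 3;

   When the DIV (or MOD) rtx costs no more than two adds, it is emitted
   directly and the MD file is trusted to produce such a sequence.  */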

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
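
/* For illustration (editorial note): shiftadd_cost[m] prices rtl of the
   shape (plus (mult x 2^m) y), the shift-and-add form used when
   synthesizing multiplications, e.g. x * 9 as (x << 3) + x;
   shiftsub_cost[m] prices the matching (minus (mult x 2^m) y),
   e.g. x * 7 as (x << 3) - x.  */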

void
init_expmed (void)
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT
                         (wider_mode,
                          gen_rtx_MULT (wider_mode,
                                        gen_rtx_ZERO_EXTEND
                                        (wider_mode, reg),
                                        gen_rtx_ZERO_EXTEND
                                        (wider_mode, reg)),
                          GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
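
/* Usage sketch (editorial illustration, not part of the original file):
   callers ask for the mode of a particular operand of the pattern, as
   store_bit_field does below for operand 3 of insv:

       enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   A MAX_MACHINE_MODE result means the target has no such pattern; a
   VOIDmode operand entry has already been folded to word_mode.  */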

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value, HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_set_optab->handlers[(int) GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode),
                           total_size);
        }
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
                          ? word_mode : int_mode_for_mode (GET_MODE (value))),
                         value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);

  return value;
}
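
#if 0
/* Usage sketch (editorial illustration; hypothetical caller, not part
   of the original file): store the constant 21 into a 5-bit field
   starting at bit 3 of a word-mode pseudo.  TOTAL_SIZE of -1 means the
   size of the containing object is unknown or varying.  */
rtx reg = gen_reg_rtx (word_mode);
store_bit_field (reg, 5, 3, word_mode, GEN_INT (21), -1);
#endif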

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
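
#if 0
/* Host-side sketch (editorial illustration, hypothetical helper) of the
   clear-then-ior technique above, for a field of SIZE bits whose lsb
   sits at bit POS of a single 32-bit word:  */
static unsigned int
store_field_32 (unsigned int word, unsigned int value, int pos, int size)
{
  unsigned int mask = (size == 32 ? ~0u : (1u << size) - 1u) << pos;

  /* AND away the old field bits, then OR in the shifted, masked value.  */
  return (word & ~mask) | ((value << pos) & mask);
}
#endif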

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
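
#if 0
/* Host-side sketch (editorial illustration, hypothetical helper) of the
   splitting loop above: store SIZE bits of VALUE at bit position POS in
   an array of little-endian 32-bit words, never letting a single step
   cross a word boundary.  */
static void
store_split_32 (unsigned int *words, unsigned long long value,
                int pos, int size)
{
  int done = 0;

  while (done < size)
    {
      int word_i = (pos + done) / 32;
      int bit_i = (pos + done) % 32;
      int chunk = size - done;
      unsigned int mask, part;

      if (chunk > 32 - bit_i)
        chunk = 32 - bit_i;     /* Do not overrun this word.  */

      mask = (chunk == 32 ? ~0u : (1u << chunk) - 1u) << bit_i;
      part = (unsigned int) (value >> done);
      words[word_i] = (words[word_i] & ~mask) | ((part << bit_i) & mask);
      done += chunk;
    }
}
#endif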

/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode,
                   HOST_WIDE_INT total_size)
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && GET_CODE (op0) != MEM
      && (vec_extract_optab->handlers[(int) GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[(int) outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (!dest)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
          || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
          || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        abort ();
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return extract_bit_field (dest, bitsize,
                                    bitnum - pos * GET_MODE_BITSIZE (innermode),
                                    unsignedp, target, mode, tmode, total_size);
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((GET_CODE (op0) != MEM
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (GET_CODE (op0) == MEM
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
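
#if 0
/* Usage sketch (editorial illustration; hypothetical caller): fetch the
   same 5-bit field at bit 3 back out of REG, zero-extended into a
   word-mode value.  */
rtx piece = extract_bit_field (reg, 5, 3, 1, NULL_RTX,
                               word_mode, word_mode, -1);
#endif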
1633 /* Extract a bit field using shifts and boolean operations
1634 Returns an rtx to represent the value.
1635 OP0 addresses a register (word) or memory (byte).
1636 BITPOS says which bit within the word or byte the bit field starts in.
1637 OFFSET says how many bytes farther the bit field starts;
1638 it is 0 if OP0 is a register.
1639 BITSIZE says how many bits long the bit field is.
1640 (If OP0 is a register, it may be narrower than a full word,
1641 but BITPOS still counts within a full word,
1642 which is significant on bigendian machines.)
1644 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1645 If TARGET is nonzero, attempts to store the value there
1646 and return TARGET, but this is not guaranteed.
1647 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1649 static rtx
1650 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1651 unsigned HOST_WIDE_INT offset,
1652 unsigned HOST_WIDE_INT bitsize,
1653 unsigned HOST_WIDE_INT bitpos, rtx target,
1654 int unsignedp)
1656 unsigned int total_bits = BITS_PER_WORD;
1657 enum machine_mode mode;
1659 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1661 /* Special treatment for a bit field split across two registers. */
1662 if (bitsize + bitpos > BITS_PER_WORD)
1663 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1665 else
1667 /* Get the proper mode to use for this field. We want a mode that
1668 includes the entire field. If such a mode would be larger than
1669 a word, we won't be doing the extraction the normal way. */
1671 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1672 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1674 if (mode == VOIDmode)
1675 /* The only way this should occur is if the field spans word
1676 boundaries. */
1677 return extract_split_bit_field (op0, bitsize,
1678 bitpos + offset * BITS_PER_UNIT,
1679 unsignedp);
1681 total_bits = GET_MODE_BITSIZE (mode);
1683 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1684 be in the range 0 to total_bits-1, and put any excess bytes in
1685 OFFSET. */
1686 if (bitpos >= total_bits)
1688 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1689 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1690 * BITS_PER_UNIT);
1693 /* Get ref to an aligned byte, halfword, or word containing the field.
1694 Adjust BITPOS to be position within a word,
1695 and OFFSET to be the offset of that word.
1696 Then alter OP0 to refer to that word. */
1697 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1698 offset -= (offset % (total_bits / BITS_PER_UNIT));
1699 op0 = adjust_address (op0, mode, offset);
1702 mode = GET_MODE (op0);
1704 if (BYTES_BIG_ENDIAN)
1705 /* BITPOS is the distance between our msb and that of OP0.
1706 Convert it to the distance from the lsb. */
1707 bitpos = total_bits - bitsize - bitpos;
1709 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1710 We have reduced the big-endian case to the little-endian case. */
1712 if (unsignedp)
1714 if (bitpos)
1716 /* If the field does not already start at the lsb,
1717 shift it so it does. */
1718 tree amount = build_int_2 (bitpos, 0);
1719 /* Maybe propagate the target for the shift. */
1720 /* But not if we will return it--could confuse integrate.c. */
1721 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1722 && !REG_FUNCTION_VALUE_P (target)
1723 ? target : 0);
1724 if (tmode != mode) subtarget = 0;
1725 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1727 /* Convert the value to the desired mode. */
1728 if (mode != tmode)
1729 op0 = convert_to_mode (tmode, op0, 1);
1731 /* Unless the msb of the field used to be the msb when we shifted,
1732 mask out the upper bits. */
1734 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1735 return expand_binop (GET_MODE (op0), and_optab, op0,
1736 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1737 target, 1, OPTAB_LIB_WIDEN);
1738 return op0;
1741 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1742 then arithmetic-shift its lsb to the lsb of the word. */
1743 op0 = force_reg (mode, op0);
1744 if (mode != tmode)
1745 target = 0;
1747 /* Find the narrowest integer mode that contains the field. */
1749 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1750 mode = GET_MODE_WIDER_MODE (mode))
1751 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1753 op0 = convert_to_mode (mode, op0, 0);
1754 break;
1757 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1759 tree amount
1760 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1761 /* Maybe propagate the target for the shift. */
1762 /* But not if we will return the result--could confuse integrate.c. */
1763 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1764 && ! REG_FUNCTION_VALUE_P (target)
1765 ? target : 0);
1766 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1769 return expand_shift (RSHIFT_EXPR, mode, op0,
1770 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1771 target, 0);
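
/* Illustrative sketch, not part of GCC: the host-arithmetic analogue of
   the two paths above for a 32-bit word W, assuming 0 < BITSIZE and
   BITPOS + BITSIZE <= 32.  The unsigned case shifts the field down to the
   lsb and masks; the signed case shifts the field's msb up to the word's
   msb and arithmetic-shifts it back down.  */

static unsigned int
extract_unsigned_example (unsigned int w, int bitpos, int bitsize)
{
  return (w >> bitpos) & (bitsize < 32 ? (1u << bitsize) - 1 : ~0u);
}

static int
extract_signed_example (unsigned int w, int bitpos, int bitsize)
{
  /* Right-shifting a negative int is implementation-defined in ISO C;
     the RTL above requests the arithmetic shift explicitly by passing
     unsignedp == 0.  */
  return ((int) (w << (32 - bitpos - bitsize))) >> (32 - bitsize);
}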
1774 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1775 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1776 complement of that if COMPLEMENT. The mask is truncated if
1777 necessary to the width of mode MODE. The mask is zero-extended if
1778 BITSIZE+BITPOS is too small for MODE. */
1780 static rtx
1781 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1783 HOST_WIDE_INT masklow, maskhigh;
1785 if (bitsize == 0)
1786 masklow = 0;
1787 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1788 masklow = (HOST_WIDE_INT) -1 << bitpos;
1789 else
1790 masklow = 0;
1792 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1793 masklow &= ((unsigned HOST_WIDE_INT) -1
1794 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1796 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1797 maskhigh = -1;
1798 else
1799 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1801 if (bitsize == 0)
1802 maskhigh = 0;
1803 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1804 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1805 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1806 else
1807 maskhigh = 0;
1809 if (complement)
1811 maskhigh = ~maskhigh;
1812 masklow = ~masklow;
1815 return immed_double_const (masklow, maskhigh, mode);
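
/* A minimal single-word sketch of the mask built above, not GCC code:
   BITSIZE ones followed by BITPOS zeros in a 32-bit word, assuming
   0 <= BITPOS < 32.  */

static unsigned int
mask32_example (int bitpos, int bitsize, int complement)
{
  unsigned int mask
    = (bitsize < 32 ? (1u << bitsize) - 1 : ~0u) << bitpos;
  return complement ? ~mask : mask;
}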
1818 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1819 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1821 static rtx
1822 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1824 unsigned HOST_WIDE_INT v = INTVAL (value);
1825 HOST_WIDE_INT low, high;
1827 if (bitsize < HOST_BITS_PER_WIDE_INT)
1828 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1830 if (bitpos < HOST_BITS_PER_WIDE_INT)
1832 low = v << bitpos;
1833 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1835 else
1837 low = 0;
1838 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1841 return immed_double_const (low, high, mode);
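
/* Worked example of the two-word split above, assuming 64-bit
   HOST_WIDE_INTs: VALUE = 0xFF, BITPOS = 60, BITSIZE = 8 yields
   low = 0xF000000000000000 and high = 0xF, i.e. the double-word
   constant 0xFF << 60.  */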
1844 /* Extract a bit field that is split across two words
1845 and return an RTX for the result.
1847 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1848 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1849 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1851 static rtx
1852 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1853 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1855 unsigned int unit;
1856 unsigned int bitsdone = 0;
1857 rtx result = NULL_RTX;
1858 int first = 1;
1860 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1861 much at a time. */
1862 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1863 unit = BITS_PER_WORD;
1864 else
1865 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1867 while (bitsdone < bitsize)
1869 unsigned HOST_WIDE_INT thissize;
1870 rtx part, word;
1871 unsigned HOST_WIDE_INT thispos;
1872 unsigned HOST_WIDE_INT offset;
1874 offset = (bitpos + bitsdone) / unit;
1875 thispos = (bitpos + bitsdone) % unit;
1877 /* THISSIZE must not overrun a word boundary. Otherwise,
1878 extract_fixed_bit_field will call us again, and we will mutually
1879 recurse forever. */
1880 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1881 thissize = MIN (thissize, unit - thispos);
1883 /* If OP0 is a register, then handle OFFSET here.
1885 When handling multiword bitfields, extract_bit_field may pass
1886 down a word_mode SUBREG of a larger REG for a bitfield that actually
1887 crosses a word boundary. Thus, for a SUBREG, we must find
1888 the current word starting from the base register. */
1889 if (GET_CODE (op0) == SUBREG)
1891 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1892 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1893 GET_MODE (SUBREG_REG (op0)));
1894 offset = 0;
1896 else if (GET_CODE (op0) == REG)
1898 word = operand_subword_force (op0, offset, GET_MODE (op0));
1899 offset = 0;
1901 else
1902 word = op0;
1904 /* Extract the parts in bit-counting order,
1905 whose meaning is determined by BYTES_BIG_ENDIAN.
1906 OFFSET is in UNITs, and UNIT is in bits.
1907 extract_fixed_bit_field wants offset in bytes. */
1908 part = extract_fixed_bit_field (word_mode, word,
1909 offset * unit / BITS_PER_UNIT,
1910 thissize, thispos, 0, 1);
1911 bitsdone += thissize;
1913 /* Shift this part into place for the result. */
1914 if (BYTES_BIG_ENDIAN)
1916 if (bitsize != bitsdone)
1917 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1918 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1920 else
1922 if (bitsdone != thissize)
1923 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1924 build_int_2 (bitsdone - thissize, 0), 0, 1);
1927 if (first)
1928 result = part;
1929 else
1930 /* Combine the parts with bitwise or. This works
1931 because we extracted each part as an unsigned bit field. */
1932 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1933 OPTAB_LIB_WIDEN);
1935 first = 0;
1938 /* Unsigned bit field: we are done. */
1939 if (unsignedp)
1940 return result;
1941 /* Signed bit field: sign-extend with two arithmetic shifts. */
1942 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1943 build_int_2 (BITS_PER_WORD - bitsize, 0),
1944 NULL_RTX, 0);
1945 return expand_shift (RSHIFT_EXPR, word_mode, result,
1946 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
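
/* Worked example, assuming 32-bit words and a little-endian target: a
   12-bit field starting at bit 26 is fetched as two parts, bits 26..31 of
   word 0 (6 bits, landing in result bits 0..5) and bits 0..5 of word 1
   (6 bits, shifted left by BITSDONE - THISSIZE = 6 into result bits
   6..11); a signed field is then sign-extended by the two shifts above.  */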
1949 /* Add INC into TARGET. */
1951 void
1952 expand_inc (rtx target, rtx inc)
1954 rtx value = expand_binop (GET_MODE (target), add_optab,
1955 target, inc,
1956 target, 0, OPTAB_LIB_WIDEN);
1957 if (value != target)
1958 emit_move_insn (target, value);
1961 /* Subtract DEC from TARGET. */
1963 void
1964 expand_dec (rtx target, rtx dec)
1966 rtx value = expand_binop (GET_MODE (target), sub_optab,
1967 target, dec,
1968 target, 0, OPTAB_LIB_WIDEN);
1969 if (value != target)
1970 emit_move_insn (target, value);
1973 /* Output a shift instruction for expression code CODE,
1974 with SHIFTED being the rtx for the value to shift,
1975 and AMOUNT the tree for the amount to shift by.
1976 Store the result in the rtx TARGET, if that is convenient.
1977 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1978 Return the rtx for where the value is. */
1980 rtx
1981 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
1982 tree amount, rtx target, int unsignedp)
1984 rtx op1, temp = 0;
1985 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1986 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1987 int try;
1989 /* Previously we detected shift counts computed by NEGATE_EXPR
1990 and shifted in the other direction, but that does not work
1991 on all machines. */
1993 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1995 #ifdef SHIFT_COUNT_TRUNCATED
1996 if (SHIFT_COUNT_TRUNCATED)
1998 if (GET_CODE (op1) == CONST_INT
1999 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2000 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2001 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2002 % GET_MODE_BITSIZE (mode));
2003 else if (GET_CODE (op1) == SUBREG
2004 && subreg_lowpart_p (op1))
2005 op1 = SUBREG_REG (op1);
2007 #endif
2009 if (op1 == const0_rtx)
2010 return shifted;
2012 for (try = 0; temp == 0 && try < 3; try++)
2014 enum optab_methods methods;
2016 if (try == 0)
2017 methods = OPTAB_DIRECT;
2018 else if (try == 1)
2019 methods = OPTAB_WIDEN;
2020 else
2021 methods = OPTAB_LIB_WIDEN;
2023 if (rotate)
2025 /* Widening does not work for rotation. */
2026 if (methods == OPTAB_WIDEN)
2027 continue;
2028 else if (methods == OPTAB_LIB_WIDEN)
2030 /* If we have been unable to open-code this by a rotation,
2031 do it as the IOR of two shifts. I.e., to rotate A
2032 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2033 where C is the bitsize of A.
2035 It is theoretically possible that the target machine might
2036 not be able to perform either shift and hence we would
2037 be making two libcalls rather than just the one for the
2038 shift (similarly if IOR could not be done). We will allow
2039 this extremely unlikely lossage to avoid complicating the
2040 code below. */
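
 /* For example, in a 32-bit mode with 0 < N < 32, rotating A left by N
    becomes (A << N) | ((unsigned) A >> (32 - N)); the force_reg below
    makes both shifts read the same copy of A.  */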
2042 rtx subtarget = target == shifted ? 0 : target;
2043 rtx temp1;
2044 tree type = TREE_TYPE (amount);
2045 tree new_amount = make_tree (type, op1);
2046 tree other_amount
2047 = fold (build (MINUS_EXPR, type,
2048 convert (type,
2049 build_int_2 (GET_MODE_BITSIZE (mode),
2050 0)),
2051 amount));
2053 shifted = force_reg (mode, shifted);
2055 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2056 mode, shifted, new_amount, subtarget, 1);
2057 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2058 mode, shifted, other_amount, 0, 1);
2059 return expand_binop (mode, ior_optab, temp, temp1, target,
2060 unsignedp, methods);
2063 temp = expand_binop (mode,
2064 left ? rotl_optab : rotr_optab,
2065 shifted, op1, target, unsignedp, methods);
2067 /* If we don't have the rotate, but we are rotating by a constant
2068 that is in range, try a rotate in the opposite direction. */
2070 if (temp == 0 && GET_CODE (op1) == CONST_INT
2071 && INTVAL (op1) > 0
2072 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2073 temp = expand_binop (mode,
2074 left ? rotr_optab : rotl_optab,
2075 shifted,
2076 GEN_INT (GET_MODE_BITSIZE (mode)
2077 - INTVAL (op1)),
2078 target, unsignedp, methods);
2080 else if (unsignedp)
2081 temp = expand_binop (mode,
2082 left ? ashl_optab : lshr_optab,
2083 shifted, op1, target, unsignedp, methods);
2085 /* Do arithmetic shifts.
2086 Also, if we are going to widen the operand, we can just as well
2087 use an arithmetic right-shift instead of a logical one. */
2088 if (temp == 0 && ! rotate
2089 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2091 enum optab_methods methods1 = methods;
2093 /* If trying to widen a logical shift to an arithmetic shift,
2094 don't accept an arithmetic shift of the same size. */
2095 if (unsignedp)
2096 methods1 = OPTAB_MUST_WIDEN;
2098 /* Arithmetic shift */
2100 temp = expand_binop (mode,
2101 left ? ashl_optab : ashr_optab,
2102 shifted, op1, target, unsignedp, methods1);
2105 /* We used to try extzv here for logical right shifts, but that was
2106 only useful for one machine, the VAX, and caused poor code
2107 generation there for lshrdi3, so the code was deleted and a
2108 define_expand for lshrsi3 was added to vax.md. */
2111 if (temp == 0)
2112 abort ();
2113 return temp;
2116 enum alg_code { alg_zero, alg_m, alg_shift,
2117 alg_add_t_m2, alg_sub_t_m2,
2118 alg_add_factor, alg_sub_factor,
2119 alg_add_t2_m, alg_sub_t2_m,
2120 alg_add, alg_subtract, alg_factor, alg_shiftop };
2122 /* This structure records a sequence of operations.
2123 `ops' is the number of operations recorded.
2124 `cost' is their total cost.
2125 The operations are stored in `op' and the corresponding
2126 logarithms of the integer coefficients in `log'.
2128 These are the operations:
2129 alg_zero total := 0;
2130 alg_m total := multiplicand;
2131 alg_shift total := total * coeff;
2132 alg_add_t_m2 total := total + multiplicand * coeff;
2133 alg_sub_t_m2 total := total - multiplicand * coeff;
2134 alg_add_factor total := total * coeff + total;
2135 alg_sub_factor total := total * coeff - total;
2136 alg_add_t2_m total := total * coeff + multiplicand;
2137 alg_sub_t2_m total := total * coeff - multiplicand;
2139 The first operation must be either alg_zero or alg_m. */
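
/* Worked example: t = 45 can be recorded as the three-operation sequence
   alg_m (total = x), alg_add_t2_m with log 3 (total = total*8 + x = 9x),
   then alg_add_factor with log 2 (total = total*4 + total = 45x), using
   only shifts and adds.  */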
2141 struct algorithm
2143 short cost;
2144 short ops;
2145 /* The sizes of the OP and LOG fields are not directly related to the
2146 word size, but the worst-case algorithms arise when we have few
2147 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2148 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2149 wordsize operations in total. */
2150 enum alg_code op[MAX_BITS_PER_WORD];
2151 char log[MAX_BITS_PER_WORD];
2154 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT, int);
2155 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2156 int, unsigned HOST_WIDE_INT *,
2157 int *, int *);
2158 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2159 /* Compute and return the best algorithm for multiplying by T.
2160 The algorithm must cost less than COST_LIMIT.
2161 If retval.cost >= COST_LIMIT, no algorithm was found and all
2162 other fields of the returned struct are undefined. */
2164 static void
2165 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2166 int cost_limit)
2168 int m;
2169 struct algorithm *alg_in, *best_alg;
2170 int cost;
2171 unsigned HOST_WIDE_INT q;
2173 /* Indicate that no algorithm is yet found. If no algorithm
2174 is found, this value will be returned, indicating failure. */
2175 alg_out->cost = cost_limit;
2177 if (cost_limit <= 0)
2178 return;
2180 /* t == 1 can be done at zero cost. */
2181 if (t == 1)
2183 alg_out->ops = 1;
2184 alg_out->cost = 0;
2185 alg_out->op[0] = alg_m;
2186 return;
2189 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2190 fail now. */
2191 if (t == 0)
2193 if (zero_cost >= cost_limit)
2194 return;
2195 else
2197 alg_out->ops = 1;
2198 alg_out->cost = zero_cost;
2199 alg_out->op[0] = alg_zero;
2200 return;
2204 /* We'll be needing a couple extra algorithm structures now. */
2206 alg_in = alloca (sizeof (struct algorithm));
2207 best_alg = alloca (sizeof (struct algorithm));
2209 /* If we have a group of zero bits at the low-order part of T, try
2210 multiplying by the remaining bits and then doing a shift. */
2212 if ((t & 1) == 0)
2214 m = floor_log2 (t & -t); /* m = number of low zero bits */
2215 if (m < BITS_PER_WORD)
2217 q = t >> m;
2218 cost = shift_cost[m];
2219 synth_mult (alg_in, q, cost_limit - cost);
2221 cost += alg_in->cost;
2222 if (cost < cost_limit)
2224 struct algorithm *x;
2225 x = alg_in, alg_in = best_alg, best_alg = x;
2226 best_alg->log[best_alg->ops] = m;
2227 best_alg->op[best_alg->ops] = alg_shift;
2228 cost_limit = cost;
2233 /* If we have an odd number, add or subtract one. */
2234 if ((t & 1) != 0)
2236 unsigned HOST_WIDE_INT w;
2238 for (w = 1; (w & t) != 0; w <<= 1)
2240 /* If T was -1, then W will be zero after the loop. This is another
2241 case where T ends with ...111. Handling this with (T + 1) and
2242 subtracting 1 produces slightly better code, and selects an
2243 algorithm much faster, than treating it like the ...0111 case
2244 below. */
2245 if (w == 0
2246 || (w > 2
2247 /* Reject the case where t is 3.
2248 Thus we prefer addition in that case. */
2249 && t != 3))
2251 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2253 cost = add_cost;
2254 synth_mult (alg_in, t + 1, cost_limit - cost);
2256 cost += alg_in->cost;
2257 if (cost < cost_limit)
2259 struct algorithm *x;
2260 x = alg_in, alg_in = best_alg, best_alg = x;
2261 best_alg->log[best_alg->ops] = 0;
2262 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2263 cost_limit = cost;
2266 else
2268 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2270 cost = add_cost;
2271 synth_mult (alg_in, t - 1, cost_limit - cost);
2273 cost += alg_in->cost;
2274 if (cost < cost_limit)
2276 struct algorithm *x;
2277 x = alg_in, alg_in = best_alg, best_alg = x;
2278 best_alg->log[best_alg->ops] = 0;
2279 best_alg->op[best_alg->ops] = alg_add_t_m2;
2280 cost_limit = cost;
2285 /* Look for factors of t of the form
2286 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2287 If we find such a factor, we can multiply by t using an algorithm that
2288 multiplies by q, shifts the result by m, and adds or subtracts it to itself.
2290 We search for large factors first and loop down, even if large factors
2291 are less probable than small ones; if we find a large factor we will find a
2292 good sequence quickly, and therefore be able to prune (by decreasing
2293 COST_LIMIT) the search. */
2295 for (m = floor_log2 (t - 1); m >= 2; m--)
2297 unsigned HOST_WIDE_INT d;
2299 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2300 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2302 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2303 synth_mult (alg_in, t / d, cost_limit - cost);
2305 cost += alg_in->cost;
2306 if (cost < cost_limit)
2308 struct algorithm *x;
2309 x = alg_in, alg_in = best_alg, best_alg = x;
2310 best_alg->log[best_alg->ops] = m;
2311 best_alg->op[best_alg->ops] = alg_add_factor;
2312 cost_limit = cost;
2314 /* Other factors will have been taken care of in the recursion. */
2315 break;
2318 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2319 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2321 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2322 synth_mult (alg_in, t / d, cost_limit - cost);
2324 cost += alg_in->cost;
2325 if (cost < cost_limit)
2327 struct algorithm *x;
2328 x = alg_in, alg_in = best_alg, best_alg = x;
2329 best_alg->log[best_alg->ops] = m;
2330 best_alg->op[best_alg->ops] = alg_sub_factor;
2331 cost_limit = cost;
2333 break;
2337 /* Try shift-and-add (load effective address) instructions,
2338 i.e. do a*3, a*5, a*9. */
2339 if ((t & 1) != 0)
2341 q = t - 1;
2342 q = q & -q;
2343 m = exact_log2 (q);
2344 if (m >= 0 && m < BITS_PER_WORD)
2346 cost = shiftadd_cost[m];
2347 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2349 cost += alg_in->cost;
2350 if (cost < cost_limit)
2352 struct algorithm *x;
2353 x = alg_in, alg_in = best_alg, best_alg = x;
2354 best_alg->log[best_alg->ops] = m;
2355 best_alg->op[best_alg->ops] = alg_add_t2_m;
2356 cost_limit = cost;
2360 q = t + 1;
2361 q = q & -q;
2362 m = exact_log2 (q);
2363 if (m >= 0 && m < BITS_PER_WORD)
2365 cost = shiftsub_cost[m];
2366 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2368 cost += alg_in->cost;
2369 if (cost < cost_limit)
2371 struct algorithm *x;
2372 x = alg_in, alg_in = best_alg, best_alg = x;
2373 best_alg->log[best_alg->ops] = m;
2374 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2375 cost_limit = cost;
2380 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2381 we have not found any algorithm. */
2382 if (cost_limit == alg_out->cost)
2383 return;
2385 /* If we are getting too long a sequence for `struct algorithm'
2386 to record, make this search fail. */
2387 if (best_alg->ops == MAX_BITS_PER_WORD)
2388 return;
2390 /* Copy the algorithm from temporary space to the space at alg_out.
2391 We avoid using structure assignment because the majority of
2392 best_alg is normally undefined, and this is a critical function. */
2393 alg_out->ops = best_alg->ops + 1;
2394 alg_out->cost = cost_limit;
2395 memcpy (alg_out->op, best_alg->op,
2396 alg_out->ops * sizeof *alg_out->op);
2397 memcpy (alg_out->log, best_alg->log,
2398 alg_out->ops * sizeof *alg_out->log);
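
/* Illustrative sketch, not part of GCC: the straight-line host code that
   the t = 45 example above corresponds to once expand_mult (below) emits
   the recorded operations.  */

static unsigned int
mult_by_45_example (unsigned int x)
{
  unsigned int total = x;          /* alg_m */
  total = (total << 3) + x;        /* alg_add_t2_m, log 3: 9x */
  total = (total << 2) + total;    /* alg_add_factor, log 2: 45x */
  return total;
}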
2401 /* Perform a multiplication and return an rtx for the result.
2402 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2403 TARGET is a suggestion for where to store the result (an rtx).
2405 We check specially for a constant integer as OP1.
2406 If you want this check for OP0 as well, then before calling
2407 you should swap the two operands if OP0 would be constant. */
2409 rtx
2410 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2411 int unsignedp)
2413 rtx const_op1 = op1;
2415 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2416 less than or equal in size to `unsigned int' this doesn't matter.
2417 If the mode is larger than `unsigned int', then synth_mult works only
2418 if the constant value exactly fits in an `unsigned int' without any
2419 truncation. This means that multiplying by negative values does
2420 not work; results are off by 2^32 on a 32-bit machine. */
2422 /* If we are multiplying in DImode, it may still be a win
2423 to try to work with shifts and adds. */
2424 if (GET_CODE (op1) == CONST_DOUBLE
2425 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2426 && HOST_BITS_PER_INT >= BITS_PER_WORD
2427 && CONST_DOUBLE_HIGH (op1) == 0)
2428 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2429 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2430 && GET_CODE (op1) == CONST_INT
2431 && INTVAL (op1) < 0)
2432 const_op1 = 0;
2434 /* We used to test optimize here, on the grounds that it's better to
2435 produce a smaller program when -O is not used.
2436 But this causes such a terrible slowdown sometimes
2437 that it seems better to use synth_mult always. */
2439 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2440 && (unsignedp || ! flag_trapv))
2442 struct algorithm alg;
2443 struct algorithm alg2;
2444 HOST_WIDE_INT val = INTVAL (op1);
2445 HOST_WIDE_INT val_so_far;
2446 rtx insn;
2447 int mult_cost;
2448 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2450 /* op0 must be a register to make mult_cost match the precomputed
2451 shiftadd_cost array. */
2452 op0 = force_reg (mode, op0);
2454 /* Try to do the computation three ways: multiply by the negative of OP1
2455 and then negate, do the multiplication directly, or do multiplication
2456 by OP1 - 1 and then add one more OP0. */
2458 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2459 mult_cost = MIN (12 * add_cost, mult_cost);
2461 synth_mult (&alg, val, mult_cost);
2463 /* This works only if the inverted value actually fits in an
2464 `unsigned int'. */
2465 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2467 synth_mult (&alg2, - val,
2468 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2469 if (alg2.cost + negate_cost < alg.cost)
2470 alg = alg2, variant = negate_variant;
2473 /* This proves very useful for division-by-constant. */
2474 synth_mult (&alg2, val - 1,
2475 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2476 if (alg2.cost + add_cost < alg.cost)
2477 alg = alg2, variant = add_variant;
2479 if (alg.cost < mult_cost)
2481 /* We found something cheaper than a multiply insn. */
2482 int opno;
2483 rtx accum, tem;
2484 enum machine_mode nmode;
2486 op0 = protect_from_queue (op0, 0);
2488 /* Avoid referencing memory over and over.
2489 For speed, but also for correctness when mem is volatile. */
2490 if (GET_CODE (op0) == MEM)
2491 op0 = force_reg (mode, op0);
2493 /* ACCUM starts out either as OP0 or as a zero, depending on
2494 the first operation. */
2496 if (alg.op[0] == alg_zero)
2498 accum = copy_to_mode_reg (mode, const0_rtx);
2499 val_so_far = 0;
2501 else if (alg.op[0] == alg_m)
2503 accum = copy_to_mode_reg (mode, op0);
2504 val_so_far = 1;
2506 else
2507 abort ();
2509 for (opno = 1; opno < alg.ops; opno++)
2511 int log = alg.log[opno];
2512 int preserve = preserve_subexpressions_p ();
2513 rtx shift_subtarget = preserve ? 0 : accum;
2514 rtx add_target
2515 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2516 && ! preserve)
2517 ? target : 0;
2518 rtx accum_target = preserve ? 0 : accum;
2520 switch (alg.op[opno])
2522 case alg_shift:
2523 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2524 build_int_2 (log, 0), NULL_RTX, 0);
2525 val_so_far <<= log;
2526 break;
2528 case alg_add_t_m2:
2529 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2530 build_int_2 (log, 0), NULL_RTX, 0);
2531 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2532 add_target
2533 ? add_target : accum_target);
2534 val_so_far += (HOST_WIDE_INT) 1 << log;
2535 break;
2537 case alg_sub_t_m2:
2538 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2539 build_int_2 (log, 0), NULL_RTX, 0);
2540 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2541 add_target
2542 ? add_target : accum_target);
2543 val_so_far -= (HOST_WIDE_INT) 1 << log;
2544 break;
2546 case alg_add_t2_m:
2547 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2548 build_int_2 (log, 0), shift_subtarget,
2549 0);
2550 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2551 add_target
2552 ? add_target : accum_target);
2553 val_so_far = (val_so_far << log) + 1;
2554 break;
2556 case alg_sub_t2_m:
2557 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2558 build_int_2 (log, 0), shift_subtarget,
2559 0);
2560 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2561 add_target
2562 ? add_target : accum_target);
2563 val_so_far = (val_so_far << log) - 1;
2564 break;
2566 case alg_add_factor:
2567 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2568 build_int_2 (log, 0), NULL_RTX, 0);
2569 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2570 add_target
2571 ? add_target : accum_target);
2572 val_so_far += val_so_far << log;
2573 break;
2575 case alg_sub_factor:
2576 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2577 build_int_2 (log, 0), NULL_RTX, 0);
2578 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2579 (add_target ? add_target
2580 : preserve ? 0 : tem));
2581 val_so_far = (val_so_far << log) - val_so_far;
2582 break;
2584 default:
2585 abort ();
2588 /* Write a REG_EQUAL note on the last insn so that we can cse
2589 multiplication sequences. Note that if ACCUM is a SUBREG,
2590 we've set the inner register and must properly indicate
2591 that. */
2593 tem = op0, nmode = mode;
2594 if (GET_CODE (accum) == SUBREG)
2596 nmode = GET_MODE (SUBREG_REG (accum));
2597 tem = gen_lowpart (nmode, op0);
2600 insn = get_last_insn ();
2601 set_unique_reg_note (insn,
2602 REG_EQUAL,
2603 gen_rtx_MULT (nmode, tem,
2604 GEN_INT (val_so_far)));
2607 if (variant == negate_variant)
2609 val_so_far = - val_so_far;
2610 accum = expand_unop (mode, neg_optab, accum, target, 0);
2612 else if (variant == add_variant)
2614 val_so_far = val_so_far + 1;
2615 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2618 if (val != val_so_far)
2619 abort ();
2621 return accum;
2625 if (GET_CODE (op0) == CONST_DOUBLE)
2627 rtx temp = op0;
2628 op0 = op1;
2629 op1 = temp;
2632 /* Expand x*2.0 as x+x. */
2633 if (GET_CODE (op1) == CONST_DOUBLE
2634 && GET_MODE_CLASS (mode) == MODE_FLOAT)
2636 REAL_VALUE_TYPE d;
2637 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
2639 if (REAL_VALUES_EQUAL (d, dconst2))
2641 op0 = force_reg (GET_MODE (op0), op0);
2642 return expand_binop (mode, add_optab, op0, op0,
2643 target, unsignedp, OPTAB_LIB_WIDEN);
2647 /* This used to use umul_optab if unsigned, but for non-widening multiply
2648 there is no difference between signed and unsigned. */
2649 op0 = expand_binop (mode,
2650 ! unsignedp
2651 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2652 ? smulv_optab : smul_optab,
2653 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2654 if (op0 == 0)
2655 abort ();
2656 return op0;
2659 /* Return the smallest n such that 2**n >= X. */
2661 int
2662 ceil_log2 (unsigned HOST_WIDE_INT x)
2664 return floor_log2 (x - 1) + 1;
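
/* E.g. ceil_log2 (8) == 3 and ceil_log2 (9) == 4; since floor_log2 (0)
   is -1, ceil_log2 (1) == 0.  */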
2667 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2668 replace division by D, and put the least significant N bits of the result
2669 in *MULTIPLIER_PTR and return the most significant bit.
2671 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2672 needed precision is in PRECISION (should be <= N).
2674 PRECISION should be as small as possible so this function can choose the
2675 multiplier more freely.
2677 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2678 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2680 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2681 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2683 static
2684 unsigned HOST_WIDE_INT
2685 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
2686 unsigned HOST_WIDE_INT *multiplier_ptr,
2687 int *post_shift_ptr, int *lgup_ptr)
2689 HOST_WIDE_INT mhigh_hi, mlow_hi;
2690 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2691 int lgup, post_shift;
2692 int pow, pow2;
2693 unsigned HOST_WIDE_INT nl, dummy1;
2694 HOST_WIDE_INT nh, dummy2;
2696 /* lgup = ceil(log2(divisor)); */
2697 lgup = ceil_log2 (d);
2699 if (lgup > n)
2700 abort ();
2702 pow = n + lgup;
2703 pow2 = n + lgup - precision;
2705 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2707 /* We could handle this with some effort, but this case is much better
2708 handled directly with a scc insn, so rely on the caller using that. */
2709 abort ();
2712 /* mlow = 2^(N + lgup)/d */
2713 if (pow >= HOST_BITS_PER_WIDE_INT)
2715 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2716 nl = 0;
2718 else
2720 nh = 0;
2721 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2723 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2724 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2726 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2727 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2728 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2729 else
2730 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2731 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2732 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2734 if (mhigh_hi && nh - d >= d)
2735 abort ();
2736 if (mhigh_hi > 1 || mlow_hi > 1)
2737 abort ();
2738 /* Assert that mlow < mhigh. */
2739 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2740 abort ();
2742 /* If precision == N, then mlow, mhigh exceed 2^N
2743 (but they do not exceed 2^(N+1)). */
2745 /* Reduce to lowest terms. */
2746 for (post_shift = lgup; post_shift > 0; post_shift--)
2748 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2749 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2750 if (ml_lo >= mh_lo)
2751 break;
2753 mlow_hi = 0;
2754 mlow_lo = ml_lo;
2755 mhigh_hi = 0;
2756 mhigh_lo = mh_lo;
2759 *post_shift_ptr = post_shift;
2760 *lgup_ptr = lgup;
2761 if (n < HOST_BITS_PER_WIDE_INT)
2763 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2764 *multiplier_ptr = mhigh_lo & mask;
2765 return mhigh_lo >= mask;
2767 else
2769 *multiplier_ptr = mhigh_lo;
2770 return mhigh_hi;
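
/* Illustrative sketch, not part of GCC (assumes a host with C99
   <stdint.h> types): for 32-bit unsigned division by 7,
   choose_multiplier (7, 32, 32) selects the 33-bit constant 0x124924925
   with post_shift 3; expand_divmod (below) turns the multiplier's 33rd
   bit into the add-back sequence shown here.  */

static uint32_t
udiv7_example (uint32_t n)
{
  uint32_t t1 = (uint32_t) (((uint64_t) n * 0x24924925u) >> 32);
  uint32_t t2 = (n - t1) >> 1;   /* recover the multiplier's 33rd bit */
  return (t1 + t2) >> 2;         /* post_shift - 1 == 2 */
}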
2774 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2775 congruent to 1 (mod 2**N). */
2777 static unsigned HOST_WIDE_INT
2778 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
2780 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2782 /* The algorithm notes that the choice y = x satisfies
2783 x*y == 1 mod 2^3, since x is assumed odd.
2784 Each iteration doubles the number of bits of significance in y. */
2786 unsigned HOST_WIDE_INT mask;
2787 unsigned HOST_WIDE_INT y = x;
2788 int nbit = 3;
2790 mask = (n == HOST_BITS_PER_WIDE_INT
2791 ? ~(unsigned HOST_WIDE_INT) 0
2792 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2794 while (nbit < n)
2796 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2797 nbit *= 2;
2799 return y;
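
/* Usage sketch, not part of GCC: invert_mod2n (3, 32) is 0xAAAAAAAB,
   since 3 * 0xAAAAAAAB == 1 (mod 2^32).  Multiplying by the inverse is
   how expand_divmod performs exact division by an odd constant
   (EXACT_DIV_EXPR); e.g., assuming <stdint.h>:  */

static uint32_t
exact_div3_example (uint32_t x)
{
  /* Correct only when x is a multiple of 3.  */
  return x * 0xAAAAAAABu;
}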
2802 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2803 performed with the wrong signedness flavor. ADJ_OPERAND is already the
2804 high half of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the
2805 signed product to become unsigned; if UNSIGNEDP is zero, adjust the
2806 unsigned product to become signed.
2808 The result is put in TARGET if that is convenient.
2810 MODE is the mode of operation. */
2812 rtx
2813 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
2814 rtx op1, rtx target, int unsignedp)
2816 rtx tem;
2817 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2819 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2820 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2821 NULL_RTX, 0);
2822 tem = expand_and (mode, tem, op1, NULL_RTX);
2823 adj_operand
2824 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2825 adj_operand);
2827 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2828 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2829 NULL_RTX, 0);
2830 tem = expand_and (mode, tem, op0, NULL_RTX);
2831 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2832 target);
2834 return target;
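
/* The adjustment implements the identity (mod 2^SIZE)
   highpart_unsigned (a, b)
     == highpart_signed (a, b) + (a < 0 ? b : 0) + (b < 0 ? a : 0),
   since reading a negative SIZE-bit operand as unsigned adds 2^SIZE to
   it; each arithmetic shift above materializes one of the (x < 0)
   masks.  */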
2837 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2838 in TARGET if that is convenient, and returning where the result is. If the
2839 operation cannot be performed, 0 is returned.
2841 MODE is the mode of operation and result.
2843 UNSIGNEDP nonzero means unsigned multiply.
2845 MAX_COST is the total allowed cost for the expanded RTL. */
2847 rtx
2848 expand_mult_highpart (enum machine_mode mode, rtx op0,
2849 unsigned HOST_WIDE_INT cnst1, rtx target,
2850 int unsignedp, int max_cost)
2852 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2853 optab mul_highpart_optab;
2854 optab moptab;
2855 rtx tem;
2856 int size = GET_MODE_BITSIZE (mode);
2857 rtx op1, wide_op1;
2859 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2860 if (size > HOST_BITS_PER_WIDE_INT)
2861 abort ();
2863 op1 = gen_int_mode (cnst1, mode);
2865 wide_op1
2866 = immed_double_const (cnst1,
2867 (unsignedp
2868 ? (HOST_WIDE_INT) 0
2869 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2870 wider_mode);
2872 /* expand_mult handles constant multiplication of word_mode
2873 or narrower. It does a poor job for large modes. */
2874 if (size < BITS_PER_WORD
2875 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2877 /* We have to do this, since expand_binop doesn't do conversion for
2878 multiply. Maybe change expand_binop to handle widening multiply? */
2879 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2881 /* We know that this can't have signed overflow, so pretend this is
2882 an unsigned multiply. */
2883 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2884 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2885 build_int_2 (size, 0), NULL_RTX, 1);
2886 return convert_modes (mode, wider_mode, tem, unsignedp);
2889 if (target == 0)
2890 target = gen_reg_rtx (mode);
2892 /* First, try using a multiplication insn that generates only the needed
2893 high part of the product, in the signedness flavor of UNSIGNEDP. */
2894 if (mul_highpart_cost[(int) mode] < max_cost)
2896 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2897 target = expand_binop (mode, mul_highpart_optab,
2898 op0, op1, target, unsignedp, OPTAB_DIRECT);
2899 if (target)
2900 return target;
2903 /* Second, the same as above, but using the signedness flavor opposite to
2904 UNSIGNEDP; we then need to adjust the result after the multiplication. */
2905 if (size - 1 < BITS_PER_WORD
2906 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2907 < max_cost))
2909 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2910 target = expand_binop (mode, mul_highpart_optab,
2911 op0, op1, target, unsignedp, OPTAB_DIRECT);
2912 if (target)
2913 /* We used the wrong signedness. Adjust the result. */
2914 return expand_mult_highpart_adjust (mode, target, op0,
2915 op1, target, unsignedp);
2918 /* Try widening multiplication. */
2919 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2920 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2921 && mul_widen_cost[(int) wider_mode] < max_cost)
2923 op1 = force_reg (mode, op1);
2924 goto try;
2927 /* Try widening the mode and performing a non-widening multiplication. */
2928 moptab = smul_optab;
2929 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2930 && size - 1 < BITS_PER_WORD
2931 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2933 op1 = wide_op1;
2934 goto try;
2937 /* Try widening multiplication of opposite signedness, and adjust. */
2938 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2939 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2940 && size - 1 < BITS_PER_WORD
2941 && (mul_widen_cost[(int) wider_mode]
2942 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2944 rtx regop1 = force_reg (mode, op1);
2945 tem = expand_binop (wider_mode, moptab, op0, regop1,
2946 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2947 if (tem != 0)
2949 /* Extract the high half of the just generated product. */
2950 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2951 build_int_2 (size, 0), NULL_RTX, 1);
2952 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2953 /* We used the wrong signedness. Adjust the result. */
2954 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2955 target, unsignedp);
2959 return 0;
2961 try:
2962 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2963 tem = expand_binop (wider_mode, moptab, op0, op1,
2964 NULL_RTX, unsignedp, OPTAB_WIDEN);
2965 if (tem == 0)
2966 return 0;
2968 /* Extract the high half of the just generated product. */
2969 if (mode == word_mode)
2971 return gen_highpart (mode, tem);
2973 else
2975 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2976 build_int_2 (size, 0), NULL_RTX, 1);
2977 return convert_modes (mode, wider_mode, tem, unsignedp);
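
/* The widening fallback above amounts to the following host-arithmetic
   sketch (illustrative only, assuming <stdint.h> and a 32-bit mode):  */

static uint32_t
mulhi_u32_example (uint32_t a, uint32_t b)
{
  /* High half of the widened 64-bit product.  */
  return (uint32_t) (((uint64_t) a * b) >> 32);
}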
2981 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2982 if that is convenient, and returning where the result is.
2983 You may request either the quotient or the remainder as the result;
2984 specify REM_FLAG nonzero to get the remainder.
2986 CODE is the expression code for which kind of division this is;
2987 it controls how rounding is done. MODE is the machine mode to use.
2988 UNSIGNEDP nonzero means do unsigned division. */
2990 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2991 and then correct it by or'ing in missing high bits
2992 if result of ANDI is nonzero.
2993 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2994 This could optimize to a bfexts instruction.
2995 But C doesn't use these operations, so their optimizations are
2996 left for later. */
2997 /* ??? For modulo, we don't actually need the highpart of the first product;
2998 the low part will do nicely. And for small divisors, the second multiply
2999 can also be a low-part only multiply or even be completely left out.
3000 E.g. to calculate the remainder of a division by 3 with a 32 bit
3001 multiply, multiply with 0x55555556 and extract the upper two bits;
3002 the result is exact for inputs up to 0x1fffffff.
3003 The input range can be reduced by using cross-sum rules.
3004 For odd divisors >= 3, the following table gives right shift counts
3005 so that if a number is shifted by an integer multiple of the given
3006 amount, the remainder stays the same:
3007 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3008 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3009 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3010 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3011 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3013 Cross-sum rules for even numbers can be derived by leaving as many bits
3014 to the right alone as the divisor has zeros to the right.
3015 E.g. if x is an unsigned 32 bit number:
3016 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3017 */
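
/* Illustrative sketch of the divide-by-3 remainder trick above, not part
   of GCC (assumes <stdint.h>; exact for inputs up to 0x1fffffff, as the
   comment states).  */

static uint32_t
rem3_example (uint32_t x)
{
  /* x * 0x55555556 == x * (2^32 + 2) / 3 (mod 2^32), so the top two bits
     of the low 32-bit half encode x mod 3 directly.  */
  return (uint32_t) (x * 0x55555556u) >> 30;
}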
3019 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
3021 rtx
3022 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3023 rtx op0, rtx op1, rtx target, int unsignedp)
3025 enum machine_mode compute_mode;
3026 rtx tquotient;
3027 rtx quotient = 0, remainder = 0;
3028 rtx last;
3029 int size;
3030 rtx insn, set;
3031 optab optab1, optab2;
3032 int op1_is_constant, op1_is_pow2 = 0;
3033 int max_cost, extra_cost;
3034 static HOST_WIDE_INT last_div_const = 0;
3035 static HOST_WIDE_INT ext_op1;
3037 op1_is_constant = GET_CODE (op1) == CONST_INT;
3038 if (op1_is_constant)
3040 ext_op1 = INTVAL (op1);
3041 if (unsignedp)
3042 ext_op1 &= GET_MODE_MASK (mode);
3043 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3044 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3047 /*
3048 This is the structure of expand_divmod:
3050 First comes code to fix up the operands so we can perform the operations
3051 correctly and efficiently.
3053 Second comes a switch statement with code specific for each rounding mode.
3054 For some special operands this code emits all RTL for the desired
3055 operation; for other cases, it generates only a quotient and stores it in
3056 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3057 to indicate that it has not done anything.
3059 Last comes code that finishes the operation. If QUOTIENT is set and
3060 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3061 QUOTIENT is not set, it is computed using trunc rounding.
3063 We try to generate special code for division and remainder when OP1 is a
3064 constant. If |OP1| = 2**n we can use shifts and some other fast
3065 operations. For other values of OP1, we compute a carefully selected
3066 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3067 by m.
3069 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3070 half of the product. Different strategies for generating the product are
3071 implemented in expand_mult_highpart.
3073 If what we actually want is the remainder, we generate that by another
3074 by-constant multiplication and a subtraction. */
3076 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3077 code below will malfunction if we are, so check here and handle
3078 the special case if so. */
3079 if (op1 == const1_rtx)
3080 return rem_flag ? const0_rtx : op0;
3082 /* When dividing by -1, we could get an overflow.
3083 negv_optab can handle overflows. */
3084 if (! unsignedp && op1 == constm1_rtx)
3086 if (rem_flag)
3087 return const0_rtx;
3088 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3089 ? negv_optab : neg_optab, op0, target, 0);
3092 if (target
3093 /* Don't use the function value register as a target
3094 since we have to read it as well as write it,
3095 and function-inlining gets confused by this. */
3096 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3097 /* Don't clobber an operand while doing a multi-step calculation. */
3098 || ((rem_flag || op1_is_constant)
3099 && (reg_mentioned_p (target, op0)
3100 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3101 || reg_mentioned_p (target, op1)
3102 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3103 target = 0;
3105 /* Get the mode in which to perform this computation. Normally it will
3106 be MODE, but sometimes we can't do the desired operation in MODE.
3107 If so, pick a wider mode in which we can do the operation. Convert
3108 to that mode at the start to avoid repeated conversions.
3110 First see what operations we need. These depend on the expression
3111 we are evaluating. (We assume that divxx3 insns exist under the
3112 same conditions as modxx3 insns and that these insns don't normally
3113 fail. If these assumptions are not correct, we may generate less
3114 efficient code in some cases.)
3116 Then see if we find a mode in which we can open-code that operation
3117 (either a division, modulus, or shift). Finally, check for the smallest
3118 mode for which we can do the operation with a library call. */
3120 /* We might want to refine this now that we have division-by-constant
3121 optimization. Since expand_mult_highpart tries so many variants, it is
3122 not straightforward to generalize this. Maybe we should make an array
3123 of possible modes in init_expmed? Save this for GCC 2.7. */
3125 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3126 ? (unsignedp ? lshr_optab : ashr_optab)
3127 : (unsignedp ? udiv_optab : sdiv_optab));
3128 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3129 ? optab1
3130 : (unsignedp ? udivmod_optab : sdivmod_optab));
3132 for (compute_mode = mode; compute_mode != VOIDmode;
3133 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3134 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3135 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3136 break;
3138 if (compute_mode == VOIDmode)
3139 for (compute_mode = mode; compute_mode != VOIDmode;
3140 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3141 if (optab1->handlers[(int) compute_mode].libfunc
3142 || optab2->handlers[(int) compute_mode].libfunc)
3143 break;
3145 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3146 in expand_binop. */
3147 if (compute_mode == VOIDmode)
3148 compute_mode = mode;
3150 if (target && GET_MODE (target) == compute_mode)
3151 tquotient = target;
3152 else
3153 tquotient = gen_reg_rtx (compute_mode);
3155 size = GET_MODE_BITSIZE (compute_mode);
3156 #if 0
3157 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3158 (mode), and thereby get better code when OP1 is a constant. Do that
3159 later. It will require going over all usages of SIZE below. */
3160 size = GET_MODE_BITSIZE (mode);
3161 #endif
3163 /* Only deduct something for a REM if the last divide done was
3164 for a different constant. Then set the constant of the last
3165 divide. */
3166 max_cost = div_cost[(int) compute_mode]
3167 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3168 && INTVAL (op1) == last_div_const)
3169 ? mul_cost[(int) compute_mode] + add_cost : 0);
3171 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3173 /* Now convert to the best mode to use. */
3174 if (compute_mode != mode)
3176 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3177 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3179 /* convert_modes may have placed op1 into a register, so we
3180 must recompute the following. */
3181 op1_is_constant = GET_CODE (op1) == CONST_INT;
3182 op1_is_pow2 = (op1_is_constant
3183 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3184 || (! unsignedp
3185 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3188 /* If one of the operands is a volatile MEM, copy it into a register. */
3190 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3191 op0 = force_reg (compute_mode, op0);
3192 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3193 op1 = force_reg (compute_mode, op1);
3195 /* If we need the remainder or if OP1 is constant, we need to
3196 put OP0 in a register in case it has any queued subexpressions. */
3197 if (rem_flag || op1_is_constant)
3198 op0 = force_reg (compute_mode, op0);
3200 last = get_last_insn ();
3202 /* Promote floor rounding to trunc rounding for unsigned operations. */
3203 if (unsignedp)
3205 if (code == FLOOR_DIV_EXPR)
3206 code = TRUNC_DIV_EXPR;
3207 if (code == FLOOR_MOD_EXPR)
3208 code = TRUNC_MOD_EXPR;
3209 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3210 code = TRUNC_DIV_EXPR;
3213 if (op1 != const0_rtx)
3214 switch (code)
3216 case TRUNC_MOD_EXPR:
3217 case TRUNC_DIV_EXPR:
3218 if (op1_is_constant)
3220 if (unsignedp)
3222 unsigned HOST_WIDE_INT mh, ml;
3223 int pre_shift, post_shift;
3224 int dummy;
3225 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3226 & GET_MODE_MASK (compute_mode));
3228 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3230 pre_shift = floor_log2 (d);
3231 if (rem_flag)
3233 remainder
3234 = expand_binop (compute_mode, and_optab, op0,
3235 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3236 remainder, 1,
3237 OPTAB_LIB_WIDEN);
3238 if (remainder)
3239 return gen_lowpart (mode, remainder);
3241 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3242 build_int_2 (pre_shift, 0),
3243 tquotient, 1);
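
 /* E.g. for d == 8: the remainder is op0 & 7 and the quotient is
    op0 >> 3.  */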
3245 else if (size <= HOST_BITS_PER_WIDE_INT)
3247 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3249 /* Most significant bit of divisor is set; emit an scc
3250 insn. */
3251 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3252 compute_mode, 1, 1);
3253 if (quotient == 0)
3254 goto fail1;
3256 else
3258 /* Find a suitable multiplier and right shift count
3259 instead of dividing by D. */
3261 mh = choose_multiplier (d, size, size,
3262 &ml, &post_shift, &dummy);
3264 /* If the suggested multiplier is more than SIZE bits,
3265 we can do better for even divisors, using an
3266 initial right shift. */
3267 if (mh != 0 && (d & 1) == 0)
3269 pre_shift = floor_log2 (d & -d);
3270 mh = choose_multiplier (d >> pre_shift, size,
3271 size - pre_shift,
3272 &ml, &post_shift, &dummy);
3273 if (mh)
3274 abort ();
3276 else
3277 pre_shift = 0;
3279 if (mh != 0)
3281 rtx t1, t2, t3, t4;
3283 if (post_shift - 1 >= BITS_PER_WORD)
3284 goto fail1;
3286 extra_cost = (shift_cost[post_shift - 1]
3287 + shift_cost[1] + 2 * add_cost);
3288 t1 = expand_mult_highpart (compute_mode, op0, ml,
3289 NULL_RTX, 1,
3290 max_cost - extra_cost);
3291 if (t1 == 0)
3292 goto fail1;
3293 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3294 op0, t1),
3295 NULL_RTX);
3296 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3297 build_int_2 (1, 0), NULL_RTX, 1);
3298 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3299 t1, t3),
3300 NULL_RTX);
3301 quotient
3302 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3303 build_int_2 (post_shift - 1, 0),
3304 tquotient, 1);
3306 else
3308 rtx t1, t2;
3310 if (pre_shift >= BITS_PER_WORD
3311 || post_shift >= BITS_PER_WORD)
3312 goto fail1;
3314 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3315 build_int_2 (pre_shift, 0),
3316 NULL_RTX, 1);
3317 extra_cost = (shift_cost[pre_shift]
3318 + shift_cost[post_shift]);
3319 t2 = expand_mult_highpart (compute_mode, t1, ml,
3320 NULL_RTX, 1,
3321 max_cost - extra_cost);
3322 if (t2 == 0)
3323 goto fail1;
3324 quotient
3325 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3326 build_int_2 (post_shift, 0),
3327 tquotient, 1);
3331 else /* Mode too wide to use the tricky code */
3332 break;
3334 insn = get_last_insn ();
3335 if (insn != last
3336 && (set = single_set (insn)) != 0
3337 && SET_DEST (set) == quotient)
3338 set_unique_reg_note (insn,
3339 REG_EQUAL,
3340 gen_rtx_UDIV (compute_mode, op0, op1));
3342 else /* TRUNC_DIV, signed */
3344 unsigned HOST_WIDE_INT ml;
3345 int lgup, post_shift;
3346 HOST_WIDE_INT d = INTVAL (op1);
3347 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3349 /* n rem d = n rem -d */
3350 if (rem_flag && d < 0)
3352 d = abs_d;
3353 op1 = gen_int_mode (abs_d, compute_mode);
3356 if (d == 1)
3357 quotient = op0;
3358 else if (d == -1)
3359 quotient = expand_unop (compute_mode, neg_optab, op0,
3360 tquotient, 0);
3361 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3363 /* This case is not handled correctly below. */
3364 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3365 compute_mode, 1, 1);
3366 if (quotient == 0)
3367 goto fail1;
3369 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3370 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3371 /* ??? The cheap metric is computed only for
3372 word_mode. If this operation is wider, this may
3373 not be so. Assume true if the optab has an
3374 expander for this mode. */
3375 && (((rem_flag ? smod_optab : sdiv_optab)
3376 ->handlers[(int) compute_mode].insn_code
3377 != CODE_FOR_nothing)
3378 || (sdivmod_optab->handlers[(int) compute_mode]
3379 .insn_code != CODE_FOR_nothing)))
3381 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3383 lgup = floor_log2 (abs_d);
3384 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3386 rtx label = gen_label_rtx ();
3387 rtx t1;
3389 t1 = copy_to_mode_reg (compute_mode, op0);
3390 do_cmp_and_jump (t1, const0_rtx, GE,
3391 compute_mode, label);
3392 expand_inc (t1, gen_int_mode (abs_d - 1,
3393 compute_mode));
3394 emit_label (label);
3395 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3396 build_int_2 (lgup, 0),
3397 tquotient, 0);
3399 else
3401 rtx t1, t2, t3;
3402 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3403 build_int_2 (size - 1, 0),
3404 NULL_RTX, 0);
3405 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3406 build_int_2 (size - lgup, 0),
3407 NULL_RTX, 1);
3408 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3409 op0, t2),
3410 NULL_RTX);
3411 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3412 build_int_2 (lgup, 0),
3413 tquotient, 0);
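
 /* The three shifts compute q = (op0 + (op0 < 0 ? abs_d - 1 : 0)) >> lgup
    without a branch: t1 is the sign mask, t2 extracts its low lgup bits
    (abs_d - 1 when op0 is negative, else 0), and t3 is the biased
    dividend.  */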
3416 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3417 the quotient. */
3418 if (d < 0)
3420 insn = get_last_insn ();
3421 if (insn != last
3422 && (set = single_set (insn)) != 0
3423 && SET_DEST (set) == quotient
3424 && abs_d < ((unsigned HOST_WIDE_INT) 1
3425 << (HOST_BITS_PER_WIDE_INT - 1)))
3426 set_unique_reg_note (insn,
3427 REG_EQUAL,
3428 gen_rtx_DIV (compute_mode,
3429 op0,
3430 GEN_INT
3431 (trunc_int_for_mode
3432 (abs_d,
3433 compute_mode))));
3435 quotient = expand_unop (compute_mode, neg_optab,
3436 quotient, quotient, 0);
3439 else if (size <= HOST_BITS_PER_WIDE_INT)
3441 choose_multiplier (abs_d, size, size - 1,
3442 &ml, &post_shift, &lgup);
3443 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3445 rtx t1, t2, t3;
3447 if (post_shift >= BITS_PER_WORD
3448 || size - 1 >= BITS_PER_WORD)
3449 goto fail1;
3451 extra_cost = (shift_cost[post_shift]
3452 + shift_cost[size - 1] + add_cost);
3453 t1 = expand_mult_highpart (compute_mode, op0, ml,
3454 NULL_RTX, 0,
3455 max_cost - extra_cost);
3456 if (t1 == 0)
3457 goto fail1;
3458 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3459 build_int_2 (post_shift, 0), NULL_RTX, 0);
3460 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3461 build_int_2 (size - 1, 0), NULL_RTX, 0);
3462 if (d < 0)
3463 quotient
3464 = force_operand (gen_rtx_MINUS (compute_mode,
3465 t3, t2),
3466 tquotient);
3467 else
3468 quotient
3469 = force_operand (gen_rtx_MINUS (compute_mode,
3470 t2, t3),
3471 tquotient);
3473 else
3475 rtx t1, t2, t3, t4;
3477 if (post_shift >= BITS_PER_WORD
3478 || size - 1 >= BITS_PER_WORD)
3479 goto fail1;
3481 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3482 extra_cost = (shift_cost[post_shift]
3483 + shift_cost[size - 1] + 2 * add_cost);
3484 t1 = expand_mult_highpart (compute_mode, op0, ml,
3485 NULL_RTX, 0,
3486 max_cost - extra_cost);
3487 if (t1 == 0)
3488 goto fail1;
3489 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3490 t1, op0),
3491 NULL_RTX);
3492 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3493 build_int_2 (post_shift, 0),
3494 NULL_RTX, 0);
3495 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3496 build_int_2 (size - 1, 0),
3497 NULL_RTX, 0);
3498 if (d < 0)
3499 quotient
3500 = force_operand (gen_rtx_MINUS (compute_mode,
3501 t4, t3),
3502 tquotient);
3503 else
3504 quotient
3505 = force_operand (gen_rtx_MINUS (compute_mode,
3506 t3, t4),
3507 tquotient);
3510 else /* Mode too wide to use the tricky code */
3511 break;
3513 insn = get_last_insn ();
3514 if (insn != last
3515 && (set = single_set (insn)) != 0
3516 && SET_DEST (set) == quotient)
3517 set_unique_reg_note (insn,
3518 REG_EQUAL,
3519 gen_rtx_DIV (compute_mode, op0, op1));
3521 break;
3523 fail1:
3524 delete_insns_since (last);
3525 break;
3527 case FLOOR_DIV_EXPR:
3528 case FLOOR_MOD_EXPR:
3529 /* We will come here only for signed operations. */
3530 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3532 unsigned HOST_WIDE_INT mh, ml;
3533 int pre_shift, lgup, post_shift;
3534 HOST_WIDE_INT d = INTVAL (op1);
3536 if (d > 0)
3538 /* We could just as easily deal with negative constants here,
3539 but it does not seem worth the trouble for GCC 2.6. */
3540 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3542 pre_shift = floor_log2 (d);
3543 if (rem_flag)
3545 remainder = expand_binop (compute_mode, and_optab, op0,
3546 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3547 remainder, 0, OPTAB_LIB_WIDEN);
3548 if (remainder)
3549 return gen_lowpart (mode, remainder);
3551 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3552 build_int_2 (pre_shift, 0),
3553 tquotient, 0);
3555 else
3557 rtx t1, t2, t3, t4;
3559 mh = choose_multiplier (d, size, size - 1,
3560 &ml, &post_shift, &lgup);
3561 if (mh)
3562 abort ();
3564 if (post_shift < BITS_PER_WORD
3565 && size - 1 < BITS_PER_WORD)
3567 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3568 build_int_2 (size - 1, 0),
3569 NULL_RTX, 0);
3570 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3571 NULL_RTX, 0, OPTAB_WIDEN);
3572 extra_cost = (shift_cost[post_shift]
3573 + shift_cost[size - 1] + 2 * add_cost);
3574 t3 = expand_mult_highpart (compute_mode, t2, ml,
3575 NULL_RTX, 1,
3576 max_cost - extra_cost);
3577 if (t3 != 0)
3579 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3580 build_int_2 (post_shift, 0),
3581 NULL_RTX, 1);
3582 quotient = expand_binop (compute_mode, xor_optab,
3583 t4, t1, tquotient, 0,
3584 OPTAB_WIDEN);
3589 else
3591 rtx nsign, t1, t2, t3, t4;
3592 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3593 op0, constm1_rtx), NULL_RTX);
3594 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3595 0, OPTAB_WIDEN);
3596 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3597 build_int_2 (size - 1, 0), NULL_RTX, 0);
3598 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3599 NULL_RTX);
3600 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3601 NULL_RTX, 0);
3602 if (t4)
3604 rtx t5;
3605 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3606 NULL_RTX, 0);
3607 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3608 t4, t5),
3609 tquotient);
3614 if (quotient != 0)
3615 break;
3616 delete_insns_since (last);
3618 /* Try using an instruction that produces both the quotient and
3619 remainder, using truncation. We can easily compensate the quotient
3620 or remainder to get floor rounding, once we have the remainder.
3621 Notice that we also compute the final remainder value here,
3622 and return the result right away. */
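/* A hedged worked example of the fixup below (illustrative values
   only): for -7 / 2, truncation gives q = -3, r = -1, while floor
   rounding wants q = -4, r = 1.  In outline:

       if (r != 0 && (x ^ y) < 0)     -- inexact and signs differ
         q -= 1, r += y;                                           */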
3623 if (target == 0 || GET_MODE (target) != compute_mode)
3624 target = gen_reg_rtx (compute_mode);
3626 if (rem_flag)
3628 remainder
3629 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3630 quotient = gen_reg_rtx (compute_mode);
3632 else
3634 quotient
3635 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3636 remainder = gen_reg_rtx (compute_mode);
3639 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3640 quotient, remainder, 0))
3642 /* This could be computed with a branch-less sequence.
3643 Save that for later. */
3644 rtx tem;
3645 rtx label = gen_label_rtx ();
3646 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3647 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3648 NULL_RTX, 0, OPTAB_WIDEN);
3649 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3650 expand_dec (quotient, const1_rtx);
3651 expand_inc (remainder, op1);
3652 emit_label (label);
3653 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3656 /* No luck with division elimination or divmod. Have to do it
3657 by conditionally adjusting op0 *and* the result. */
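/* A hedged sketch of the identity behind the five-label sequence
   below (illustrative only): with truncating division, when x and y
   have opposite signs,

       floor (x / y) = (x + (y > 0 ? 1 : -1)) / y - 1

   e.g. x = -7, y = 2 gives -6 / 2 - 1 = -4.  When the signs agree,
   the truncating quotient is already the floor quotient.  */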
3659 rtx label1, label2, label3, label4, label5;
3660 rtx adjusted_op0;
3661 rtx tem;
3663 quotient = gen_reg_rtx (compute_mode);
3664 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3665 label1 = gen_label_rtx ();
3666 label2 = gen_label_rtx ();
3667 label3 = gen_label_rtx ();
3668 label4 = gen_label_rtx ();
3669 label5 = gen_label_rtx ();
3670 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3671 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3672 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3673 quotient, 0, OPTAB_LIB_WIDEN);
3674 if (tem != quotient)
3675 emit_move_insn (quotient, tem);
3676 emit_jump_insn (gen_jump (label5));
3677 emit_barrier ();
3678 emit_label (label1);
3679 expand_inc (adjusted_op0, const1_rtx);
3680 emit_jump_insn (gen_jump (label4));
3681 emit_barrier ();
3682 emit_label (label2);
3683 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3684 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3685 quotient, 0, OPTAB_LIB_WIDEN);
3686 if (tem != quotient)
3687 emit_move_insn (quotient, tem);
3688 emit_jump_insn (gen_jump (label5));
3689 emit_barrier ();
3690 emit_label (label3);
3691 expand_dec (adjusted_op0, const1_rtx);
3692 emit_label (label4);
3693 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3694 quotient, 0, OPTAB_LIB_WIDEN);
3695 if (tem != quotient)
3696 emit_move_insn (quotient, tem);
3697 expand_dec (quotient, const1_rtx);
3698 emit_label (label5);
3700 break;
3702 case CEIL_DIV_EXPR:
3703 case CEIL_MOD_EXPR:
3704 if (unsignedp)
3706 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3708 rtx t1, t2, t3;
3709 unsigned HOST_WIDE_INT d = INTVAL (op1);
3710 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3711 build_int_2 (floor_log2 (d), 0),
3712 tquotient, 1);
3713 t2 = expand_binop (compute_mode, and_optab, op0,
3714 GEN_INT (d - 1),
3715 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3716 t3 = gen_reg_rtx (compute_mode);
3717 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3718 compute_mode, 1, 1);
3719 if (t3 == 0)
3721 rtx lab;
3722 lab = gen_label_rtx ();
3723 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3724 expand_inc (t1, const1_rtx);
3725 emit_label (lab);
3726 quotient = t1;
3728 else
3729 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3730 t1, t3),
3731 tquotient);
3732 break;
3735 /* Try using an instruction that produces both the quotient and
3736 remainder, using truncation. We can easily compensate the
3737 quotient or remainder to get ceiling rounding, once we have the
3738 remainder. Notice that we also compute the final remainder
3739 value here, and return the result right away. */
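/* A hedged worked example of the fixup below (illustrative values
   only): for unsigned 7 / 3, truncation gives q = 2, r = 1, while
   ceiling rounding wants q = 3, r = -2.  In outline:

       if (r != 0)
         q += 1, r -= y;                                           */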
3740 if (target == 0 || GET_MODE (target) != compute_mode)
3741 target = gen_reg_rtx (compute_mode);
3743 if (rem_flag)
3745 remainder = (GET_CODE (target) == REG
3746 ? target : gen_reg_rtx (compute_mode));
3747 quotient = gen_reg_rtx (compute_mode);
3749 else
3751 quotient = (GET_CODE (target) == REG
3752 ? target : gen_reg_rtx (compute_mode));
3753 remainder = gen_reg_rtx (compute_mode);
3756 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3757 remainder, 1))
3759 /* This could be computed with a branch-less sequence.
3760 Save that for later. */
3761 rtx label = gen_label_rtx ();
3762 do_cmp_and_jump (remainder, const0_rtx, EQ,
3763 compute_mode, label);
3764 expand_inc (quotient, const1_rtx);
3765 expand_dec (remainder, op1);
3766 emit_label (label);
3767 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3770 /* No luck with division elimination or divmod. Have to do it
3771 by conditionally adjusting op0 *and* the result. */
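/* A hedged sketch of the identity used below (illustrative only,
   assuming truncating unsigned division):

       ceil (x / y) = (x == 0 ? 0 : (x - 1) / y + 1)

   e.g. x = 7, y = 3 gives 6 / 3 + 1 = 3, and x = 6, y = 3 gives
   5 / 3 + 1 = 2.  */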
3773 rtx label1, label2;
3774 rtx adjusted_op0, tem;
3776 quotient = gen_reg_rtx (compute_mode);
3777 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3778 label1 = gen_label_rtx ();
3779 label2 = gen_label_rtx ();
3780 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3781 compute_mode, label1);
3782 emit_move_insn (quotient, const0_rtx);
3783 emit_jump_insn (gen_jump (label2));
3784 emit_barrier ();
3785 emit_label (label1);
3786 expand_dec (adjusted_op0, const1_rtx);
3787 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3788 quotient, 1, OPTAB_LIB_WIDEN);
3789 if (tem != quotient)
3790 emit_move_insn (quotient, tem);
3791 expand_inc (quotient, const1_rtx);
3792 emit_label (label2);
3795 else /* signed */
3797 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3798 && INTVAL (op1) >= 0)
3800 /* This is extremely similar to the code for the unsigned case
3801 above. For 2.7 we should merge these variants, but for
3802 2.6.1 I don't want to touch the code for unsigned since that
3803 gets used in C. The signed case will only be used by other
3804 languages (Ada). */
3806 rtx t1, t2, t3;
3807 unsigned HOST_WIDE_INT d = INTVAL (op1);
3808 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3809 build_int_2 (floor_log2 (d), 0),
3810 tquotient, 0);
3811 t2 = expand_binop (compute_mode, and_optab, op0,
3812 GEN_INT (d - 1),
3813 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3814 t3 = gen_reg_rtx (compute_mode);
3815 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3816 compute_mode, 1, 1);
3817 if (t3 == 0)
3819 rtx lab;
3820 lab = gen_label_rtx ();
3821 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3822 expand_inc (t1, const1_rtx);
3823 emit_label (lab);
3824 quotient = t1;
3826 else
3827 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3828 t1, t3),
3829 tquotient);
3830 break;
3833 /* Try using an instruction that produces both the quotient and
3834 remainder, using truncation. We can easily compensate the
3835 quotient or remainder to get ceiling rounding, once we have the
3836 remainder. Notice that we also compute the final remainder
3837 value here, and return the result right away. */
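/* A hedged worked example of the fixup below (illustrative values
   only): for signed 7 / 2, truncation gives q = 3, r = 1; the signs
   agree, so ceiling rounding wants q = 4, r = -1.  For -7 / 2 the
   signs differ and q = -3 is already the ceiling.  In outline:

       if (r != 0 && (x ^ y) >= 0)    -- inexact and signs agree
         q += 1, r -= y;                                           */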
3838 if (target == 0 || GET_MODE (target) != compute_mode)
3839 target = gen_reg_rtx (compute_mode);
3840 if (rem_flag)
3842 remainder = (GET_CODE (target) == REG
3843 ? target : gen_reg_rtx (compute_mode));
3844 quotient = gen_reg_rtx (compute_mode);
3846 else
3848 quotient = (GET_CODE (target) == REG
3849 ? target : gen_reg_rtx (compute_mode));
3850 remainder = gen_reg_rtx (compute_mode);
3853 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3854 remainder, 0))
3856 /* This could be computed with a branch-less sequence.
3857 Save that for later. */
3858 rtx tem;
3859 rtx label = gen_label_rtx ();
3860 do_cmp_and_jump (remainder, const0_rtx, EQ,
3861 compute_mode, label);
3862 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3863 NULL_RTX, 0, OPTAB_WIDEN);
3864 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3865 expand_inc (quotient, const1_rtx);
3866 expand_dec (remainder, op1);
3867 emit_label (label);
3868 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3871 /* No luck with division elimination or divmod. Have to do it
3872 by conditionally adjusting op0 *and* the result. */
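/* A hedged sketch of the identity behind the five-label sequence
   below (illustrative only): with truncating division, when x and y
   have the same sign,

       ceil (x / y) = (x - (y > 0 ? 1 : -1)) / y + 1

   e.g. x = 7, y = 2 gives 6 / 2 + 1 = 4.  When the signs differ,
   the truncating quotient is already the ceiling quotient.  */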
3874 rtx label1, label2, label3, label4, label5;
3875 rtx adjusted_op0;
3876 rtx tem;
3878 quotient = gen_reg_rtx (compute_mode);
3879 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3880 label1 = gen_label_rtx ();
3881 label2 = gen_label_rtx ();
3882 label3 = gen_label_rtx ();
3883 label4 = gen_label_rtx ();
3884 label5 = gen_label_rtx ();
3885 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3886 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3887 compute_mode, label1);
3888 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3889 quotient, 0, OPTAB_LIB_WIDEN);
3890 if (tem != quotient)
3891 emit_move_insn (quotient, tem);
3892 emit_jump_insn (gen_jump (label5));
3893 emit_barrier ();
3894 emit_label (label1);
3895 expand_dec (adjusted_op0, const1_rtx);
3896 emit_jump_insn (gen_jump (label4));
3897 emit_barrier ();
3898 emit_label (label2);
3899 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3900 compute_mode, label3);
3901 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3902 quotient, 0, OPTAB_LIB_WIDEN);
3903 if (tem != quotient)
3904 emit_move_insn (quotient, tem);
3905 emit_jump_insn (gen_jump (label5));
3906 emit_barrier ();
3907 emit_label (label3);
3908 expand_inc (adjusted_op0, const1_rtx);
3909 emit_label (label4);
3910 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3911 quotient, 0, OPTAB_LIB_WIDEN);
3912 if (tem != quotient)
3913 emit_move_insn (quotient, tem);
3914 expand_inc (quotient, const1_rtx);
3915 emit_label (label5);
3918 break;
3920 case EXACT_DIV_EXPR:
3921 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3923 HOST_WIDE_INT d = INTVAL (op1);
3924 unsigned HOST_WIDE_INT ml;
3925 int pre_shift;
3926 rtx t1;
3928 pre_shift = floor_log2 (d & -d);
3929 ml = invert_mod2n (d >> pre_shift, size);
3930 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3931 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3932 quotient = expand_mult (compute_mode, t1,
3933 gen_int_mode (ml, compute_mode),
3934 NULL_RTX, 1);
3936 insn = get_last_insn ();
3937 set_unique_reg_note (insn,
3938 REG_EQUAL,
3939 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3940 compute_mode,
3941 op0, op1));
3943 break;
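/* A hedged worked example of the exact-division trick above
   (illustrative values only): with size = 8 and d = 3, invert_mod2n
   returns ml = 171, since 3 * 171 = 513 == 1 (mod 256); then for
   x = 51, x * 171 = 8721 == 17 (mod 256), and indeed 51 / 3 = 17.
   An even d is first reduced by shifting out its trailing zeros.  */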
3945 case ROUND_DIV_EXPR:
3946 case ROUND_MOD_EXPR:
3947 if (unsignedp)
3949 rtx tem;
3950 rtx label;
3951 label = gen_label_rtx ();
3952 quotient = gen_reg_rtx (compute_mode);
3953 remainder = gen_reg_rtx (compute_mode);
3954 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3956 rtx tem;
3957 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3958 quotient, 1, OPTAB_LIB_WIDEN);
3959 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3960 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3961 remainder, 1, OPTAB_LIB_WIDEN);
3963 tem = plus_constant (op1, -1);
3964 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3965 build_int_2 (1, 0), NULL_RTX, 1);
3966 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3967 expand_inc (quotient, const1_rtx);
3968 expand_dec (remainder, op1);
3969 emit_label (label);
3971 else
3973 rtx abs_rem, abs_op1, tem, mask;
3974 rtx label;
3975 label = gen_label_rtx ();
3976 quotient = gen_reg_rtx (compute_mode);
3977 remainder = gen_reg_rtx (compute_mode);
3978 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3980 rtx tem;
3981 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3982 quotient, 0, OPTAB_LIB_WIDEN);
3983 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3984 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3985 remainder, 0, OPTAB_LIB_WIDEN);
3987 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3988 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3989 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3990 build_int_2 (1, 0), NULL_RTX, 1);
3991 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3992 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3993 NULL_RTX, 0, OPTAB_WIDEN);
3994 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3995 build_int_2 (size - 1, 0), NULL_RTX, 0);
3996 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3997 NULL_RTX, 0, OPTAB_WIDEN);
3998 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3999 NULL_RTX, 0, OPTAB_WIDEN);
4000 expand_inc (quotient, tem);
4001 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4002 NULL_RTX, 0, OPTAB_WIDEN);
4003 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4004 NULL_RTX, 0, OPTAB_WIDEN);
4005 expand_dec (remainder, tem);
4006 emit_label (label);
4008 return gen_lowpart (mode, rem_flag ? remainder : quotient);
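/* A hedged worked example of the round-to-nearest fixup in both
   branches above (illustrative values only): unsigned 10 / 4 gives
   q = 2, r = 2; since r > (y - 1) / 2 the quotient is bumped to 3
   and r becomes -2, so halves round away from zero.  The signed
   branch applies the same test to 2 * abs (r) versus abs (y) and
   takes the sign of the adjustment from x ^ y.  */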
4010 default:
4011 abort ();
4014 if (quotient == 0)
4016 if (target && GET_MODE (target) != compute_mode)
4017 target = 0;
4019 if (rem_flag)
4021 /* Try to produce the remainder without producing the quotient.
4022 If we seem to have a divmod pattern that does not require widening,
4023 don't try widening here. We should really have a WIDEN argument
4024 to expand_twoval_binop, since what we'd really like to do here is
4025 1) try a mod insn in compute_mode
4026 2) try a divmod insn in compute_mode
4027 3) try a div insn in compute_mode and multiply-subtract to get
4028 remainder
4029 4) try the same things with widening allowed. */
4030 remainder
4031 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4032 op0, op1, target,
4033 unsignedp,
4034 ((optab2->handlers[(int) compute_mode].insn_code
4035 != CODE_FOR_nothing)
4036 ? OPTAB_DIRECT : OPTAB_WIDEN));
4037 if (remainder == 0)
4039 /* No luck there. Can we do remainder and divide at once
4040 without a library call? */
4041 remainder = gen_reg_rtx (compute_mode);
4042 if (! expand_twoval_binop ((unsignedp
4043 ? udivmod_optab
4044 : sdivmod_optab),
4045 op0, op1,
4046 NULL_RTX, remainder, unsignedp))
4047 remainder = 0;
4050 if (remainder)
4051 return gen_lowpart (mode, remainder);
4054 /* Produce the quotient. Try a quotient insn, but not a library call.
4055 If we have a divmod in this mode, use it in preference to widening
4056 the div (for this test we assume it will not fail). Note that optab2
4057 is set to whichever of the two optabs the call below will use. */
4058 quotient
4059 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4060 op0, op1, rem_flag ? NULL_RTX : target,
4061 unsignedp,
4062 ((optab2->handlers[(int) compute_mode].insn_code
4063 != CODE_FOR_nothing)
4064 ? OPTAB_DIRECT : OPTAB_WIDEN));
4066 if (quotient == 0)
4068 /* No luck there. Try a quotient-and-remainder insn,
4069 keeping the quotient alone. */
4070 quotient = gen_reg_rtx (compute_mode);
4071 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4072 op0, op1,
4073 quotient, NULL_RTX, unsignedp))
4075 quotient = 0;
4076 if (! rem_flag)
4077 /* Still no luck. If we are not computing the remainder,
4078 use a library call for the quotient. */
4079 quotient = sign_expand_binop (compute_mode,
4080 udiv_optab, sdiv_optab,
4081 op0, op1, target,
4082 unsignedp, OPTAB_LIB_WIDEN);
4087 if (rem_flag)
4089 if (target && GET_MODE (target) != compute_mode)
4090 target = 0;
4092 if (quotient == 0)
4093 /* No divide instruction either. Use library for remainder. */
4094 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4095 op0, op1, target,
4096 unsignedp, OPTAB_LIB_WIDEN);
4097 else
4099 /* We divided. Now finish doing X - Y * (X / Y). */
4100 remainder = expand_mult (compute_mode, quotient, op1,
4101 NULL_RTX, unsignedp);
4102 remainder = expand_binop (compute_mode, sub_optab, op0,
4103 remainder, target, unsignedp,
4104 OPTAB_LIB_WIDEN);
4108 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4111 /* Return a tree node with data type TYPE, describing the value of X.
4112 Usually this is an RTL_EXPR, if there is no obvious better choice.
4113 X may be an expression; however, we only support the expressions
4114 generated by loop.c. */
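/* A hedged usage sketch (illustrative only): for a TYPE whose mode is
   SImode and X = (plus:SI (reg:SI 60) (const_int 4)), the result is a
   folded PLUS_EXPR of the subexpressions' trees; an RTL code with no
   tree equivalent falls through to the RTL_EXPR wrapper below.  */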
4116 tree
4117 make_tree (tree type, rtx x)
4119 tree t;
4121 switch (GET_CODE (x))
4123 case CONST_INT:
4124 t = build_int_2 (INTVAL (x),
4125 (TREE_UNSIGNED (type)
4126 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4127 || INTVAL (x) >= 0 ? 0 : -1);
4128 TREE_TYPE (t) = type;
4129 return t;
4131 case CONST_DOUBLE:
4132 if (GET_MODE (x) == VOIDmode)
4134 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4135 TREE_TYPE (t) = type;
4137 else
4139 REAL_VALUE_TYPE d;
4141 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4142 t = build_real (type, d);
4145 return t;
4147 case CONST_VECTOR:
4149 int i, units;
4150 rtx elt;
4151 tree t = NULL_TREE;
4153 units = CONST_VECTOR_NUNITS (x);
4155 /* Build a tree with vector elements. */
4156 for (i = units - 1; i >= 0; --i)
4158 elt = CONST_VECTOR_ELT (x, i);
4159 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4162 return build_vector (type, t);
4165 case PLUS:
4166 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4167 make_tree (type, XEXP (x, 1))));
4169 case MINUS:
4170 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4171 make_tree (type, XEXP (x, 1))));
4173 case NEG:
4174 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4176 case MULT:
4177 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4178 make_tree (type, XEXP (x, 1))));
4180 case ASHIFT:
4181 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4182 make_tree (type, XEXP (x, 1))));
4184 case LSHIFTRT:
4185 t = (*lang_hooks.types.unsigned_type) (type);
4186 return fold (convert (type,
4187 build (RSHIFT_EXPR, t,
4188 make_tree (t, XEXP (x, 0)),
4189 make_tree (type, XEXP (x, 1)))));
4191 case ASHIFTRT:
4192 t = (*lang_hooks.types.signed_type) (type);
4193 return fold (convert (type,
4194 build (RSHIFT_EXPR, t,
4195 make_tree (t, XEXP (x, 0)),
4196 make_tree (type, XEXP (x, 1)))));
4198 case DIV:
4199 if (TREE_CODE (type) != REAL_TYPE)
4200 t = (*lang_hooks.types.signed_type) (type);
4201 else
4202 t = type;
4204 return fold (convert (type,
4205 build (TRUNC_DIV_EXPR, t,
4206 make_tree (t, XEXP (x, 0)),
4207 make_tree (t, XEXP (x, 1)))));
4208 case UDIV:
4209 t = (*lang_hooks.types.unsigned_type) (type);
4210 return fold (convert (type,
4211 build (TRUNC_DIV_EXPR, t,
4212 make_tree (t, XEXP (x, 0)),
4213 make_tree (t, XEXP (x, 1)))));
4215 case SIGN_EXTEND:
4216 case ZERO_EXTEND:
4217 t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
4218 GET_CODE (x) == ZERO_EXTEND);
4219 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4221 default:
4222 t = make_node (RTL_EXPR);
4223 TREE_TYPE (t) = type;
4225 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4226 ptr_mode. So convert. */
4227 if (POINTER_TYPE_P (type))
4228 x = convert_memory_address (TYPE_MODE (type), x);
4230 RTL_EXPR_RTL (t) = x;
4231 /* There are no insns to be output
4232 when this rtl_expr is used. */
4233 RTL_EXPR_SEQUENCE (t) = 0;
4234 return t;
4238 /* Check whether the multiplication X * MULT + ADD overflows.
4239 X, MULT and ADD must be CONST_*.
4240 MODE is the machine mode for the computation.
4241 X and MULT must have mode MODE. ADD may have a different mode,
4242 and so may X (it defaults to MODE).
4243 UNSIGNEDP is nonzero to do unsigned multiplication. */
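/* A hedged worked example (illustrative values only): in QImode with
   UNSIGNEDP clear, X = 100, MULT = 2 and ADD = 0 fold to 200, which
   does not fit in a signed 8-bit value, so the folded constant is
   expected to carry TREE_CONSTANT_OVERFLOW and the result is true.  */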
4245 bool
4246 const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp)
4248 tree type, mult_type, add_type, result;
4250 type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4252 /* In order to get a proper overflow indication from an unsigned
4253 type, we have to pretend that it's a sizetype. */
4254 mult_type = type;
4255 if (unsignedp)
4257 mult_type = copy_node (type);
4258 TYPE_IS_SIZETYPE (mult_type) = 1;
4261 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4262 : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4264 result = fold (build (PLUS_EXPR, mult_type,
4265 fold (build (MULT_EXPR, mult_type,
4266 make_tree (mult_type, x),
4267 make_tree (mult_type, mult))),
4268 make_tree (add_type, add)));
4270 return TREE_CONSTANT_OVERFLOW (result);
4273 /* Return an rtx representing the value of X * MULT + ADD.
4274 TARGET is a suggestion for where to store the result (an rtx).
4275 MODE is the machine mode for the computation.
4276 X and MULT must have mode MODE. ADD may have a different mode,
4277 and so may X (it defaults to MODE).
4278 UNSIGNEDP is nonzero to do unsigned multiplication.
4279 This may emit insns. */
4282 expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
4283 int unsignedp)
4285 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4286 tree add_type = (GET_MODE (add) == VOIDmode
4287 ? type: (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4288 unsignedp));
4289 tree result = fold (build (PLUS_EXPR, type,
4290 fold (build (MULT_EXPR, type,
4291 make_tree (type, x),
4292 make_tree (type, mult))),
4293 make_tree (add_type, add)));
4295 return expand_expr (result, target, VOIDmode, 0);
4298 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4299 and returning TARGET.
4301 If TARGET is 0, a pseudo-register or constant is returned. */
4304 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4306 rtx tem = 0;
4308 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4309 tem = simplify_binary_operation (AND, mode, op0, op1);
4310 if (tem == 0)
4311 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4313 if (target == 0)
4314 target = tem;
4315 else if (tem != target)
4316 emit_move_insn (target, tem);
4317 return target;
4320 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4321 and storing in TARGET. Normally return TARGET.
4322 Return 0 if that cannot be done.
4324 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4325 it is VOIDmode, they cannot both be CONST_INT.
4327 UNSIGNEDP is for the case where we have to widen the operands
4328 to perform the operation. It says to use zero-extension.
4330 NORMALIZEP is 1 if we should convert the result to be either zero
4331 or one. NORMALIZEP is -1 if we should convert the result to be
4332 either zero or -1. If NORMALIZEP is zero, the result will be left
4333 "raw" out of the scc insn. */
4336 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
4337 enum machine_mode mode, int unsignedp, int normalizep)
4339 rtx subtarget;
4340 enum insn_code icode;
4341 enum machine_mode compare_mode;
4342 enum machine_mode target_mode = GET_MODE (target);
4343 rtx tem;
4344 rtx last = get_last_insn ();
4345 rtx pattern, comparison;
4347 /* ??? Ok to do this and then fail? */
4348 op0 = protect_from_queue (op0, 0);
4349 op1 = protect_from_queue (op1, 0);
4351 if (unsignedp)
4352 code = unsigned_condition (code);
4354 /* If one operand is constant, make it the second one. Only do this
4355 if the other operand is not constant as well. */
4357 if (swap_commutative_operands_p (op0, op1))
4359 tem = op0;
4360 op0 = op1;
4361 op1 = tem;
4362 code = swap_condition (code);
4365 if (mode == VOIDmode)
4366 mode = GET_MODE (op0);
4368 /* For some comparisons with 1 and -1, we can convert this to
4369 comparisons with zero. This will often produce more opportunities for
4370 store-flag insns. */
4372 switch (code)
4374 case LT:
4375 if (op1 == const1_rtx)
4376 op1 = const0_rtx, code = LE;
4377 break;
4378 case LE:
4379 if (op1 == constm1_rtx)
4380 op1 = const0_rtx, code = LT;
4381 break;
4382 case GE:
4383 if (op1 == const1_rtx)
4384 op1 = const0_rtx, code = GT;
4385 break;
4386 case GT:
4387 if (op1 == constm1_rtx)
4388 op1 = const0_rtx, code = GE;
4389 break;
4390 case GEU:
4391 if (op1 == const1_rtx)
4392 op1 = const0_rtx, code = NE;
4393 break;
4394 case LTU:
4395 if (op1 == const1_rtx)
4396 op1 = const0_rtx, code = EQ;
4397 break;
4398 default:
4399 break;
4402 /* If we are comparing a double-word integer with zero, we can convert
4403 the comparison into one involving a single word. */
4404 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4405 && GET_MODE_CLASS (mode) == MODE_INT
4406 && op1 == const0_rtx
4407 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4409 if (code == EQ || code == NE)
4411 rtx op00, op01, op0both;
4413 /* Do a logical OR of the two words and compare the result. */
4414 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4415 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4416 op0both = expand_binop (word_mode, ior_optab, op00, op01,
4417 NULL_RTX, unsignedp, OPTAB_DIRECT);
4418 if (op0both != 0)
4419 return emit_store_flag (target, code, op0both, op1, word_mode,
4420 unsignedp, normalizep);
4422 else if (code == LT || code == GE)
4424 rtx op0h;
4426 /* If testing the sign bit, we can just test the high word. */
4427 op0h = simplify_gen_subreg (word_mode, op0, mode,
4428 subreg_highpart_offset (word_mode, mode));
4429 return emit_store_flag (target, code, op0h, op1, word_mode,
4430 unsignedp, normalizep);
4434 /* From now on, we won't change CODE, so set ICODE now. */
4435 icode = setcc_gen_code[(int) code];
4437 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4438 complement of A (for GE) and shifting the sign bit to the low bit. */
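/* A hedged sketch in 32-bit terms (illustrative only):

       A < 0    becomes  (unsigned) A >> 31       -- 0 or 1
       A >= 0   becomes  (unsigned) ~A >> 31      -- 0 or 1

   using an arithmetic shift instead yields the 0 / -1 forms.  */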
4439 if (op1 == const0_rtx && (code == LT || code == GE)
4440 && GET_MODE_CLASS (mode) == MODE_INT
4441 && (normalizep || STORE_FLAG_VALUE == 1
4442 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4443 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4444 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4446 subtarget = target;
4448 /* If the result is to be wider than OP0, it is best to convert it
4449 first. If it is to be narrower, it is *incorrect* to convert it
4450 first. */
4451 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4453 op0 = protect_from_queue (op0, 0);
4454 op0 = convert_modes (target_mode, mode, op0, 0);
4455 mode = target_mode;
4458 if (target_mode != mode)
4459 subtarget = 0;
4461 if (code == GE)
4462 op0 = expand_unop (mode, one_cmpl_optab, op0,
4463 ((STORE_FLAG_VALUE == 1 || normalizep)
4464 ? 0 : subtarget), 0);
4466 if (STORE_FLAG_VALUE == 1 || normalizep)
4467 /* If we are supposed to produce a 0/1 value, we want to do
4468 a logical shift from the sign bit to the low-order bit; for
4469 a -1/0 value, we do an arithmetic shift. */
4470 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4471 size_int (GET_MODE_BITSIZE (mode) - 1),
4472 subtarget, normalizep != -1);
4474 if (mode != target_mode)
4475 op0 = convert_modes (target_mode, mode, op0, 0);
4477 return op0;
4480 if (icode != CODE_FOR_nothing)
4482 insn_operand_predicate_fn pred;
4484 /* We think we may be able to do this with a scc insn. Emit the
4485 comparison and then the scc insn.
4487 compare_from_rtx may call emit_queue, whose output would be deleted
4488 below if the scc insn fails. So call it ourselves before setting LAST.
4489 Likewise for do_pending_stack_adjust. */
4491 emit_queue ();
4492 do_pending_stack_adjust ();
4493 last = get_last_insn ();
4495 comparison
4496 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4497 if (GET_CODE (comparison) == CONST_INT)
4498 return (comparison == const0_rtx ? const0_rtx
4499 : normalizep == 1 ? const1_rtx
4500 : normalizep == -1 ? constm1_rtx
4501 : const_true_rtx);
4503 /* The code of COMPARISON may not match CODE if compare_from_rtx
4504 decided to swap its operands and reverse the original code.
4506 We know that compare_from_rtx returns either a CONST_INT or
4507 a new comparison code, so it is safe to just extract the
4508 code from COMPARISON. */
4509 code = GET_CODE (comparison);
4511 /* Get a reference to the target in the proper mode for this insn. */
4512 compare_mode = insn_data[(int) icode].operand[0].mode;
4513 subtarget = target;
4514 pred = insn_data[(int) icode].operand[0].predicate;
4515 if (preserve_subexpressions_p ()
4516 || ! (*pred) (subtarget, compare_mode))
4517 subtarget = gen_reg_rtx (compare_mode);
4519 pattern = GEN_FCN (icode) (subtarget);
4520 if (pattern)
4522 emit_insn (pattern);
4524 /* If we are converting to a wider mode, first convert to
4525 TARGET_MODE, then normalize. This produces better combining
4526 opportunities on machines that have a SIGN_EXTRACT when we are
4527 testing a single bit. This mostly benefits the 68k.
4529 If STORE_FLAG_VALUE does not have the sign bit set when
4530 interpreted in COMPARE_MODE, we can do this conversion as
4531 unsigned, which is usually more efficient. */
4532 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4534 convert_move (target, subtarget,
4535 (GET_MODE_BITSIZE (compare_mode)
4536 <= HOST_BITS_PER_WIDE_INT)
4537 && 0 == (STORE_FLAG_VALUE
4538 & ((HOST_WIDE_INT) 1
4539 << (GET_MODE_BITSIZE (compare_mode) -1))));
4540 op0 = target;
4541 compare_mode = target_mode;
4543 else
4544 op0 = subtarget;
4546 /* If we want to keep subexpressions around, don't reuse our
4547 last target. */
4549 if (preserve_subexpressions_p ())
4550 subtarget = 0;
4552 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4553 we don't have to do anything. */
4554 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4556 /* STORE_FLAG_VALUE might be the most negative number, so write
4557 the comparison this way to avoid a compile-time warning. */
4558 else if (- normalizep == STORE_FLAG_VALUE)
4559 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4561 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4562 makes it hard to use a value of just the sign bit due to
4563 ANSI integer constant typing rules. */
4564 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4565 && (STORE_FLAG_VALUE
4566 & ((HOST_WIDE_INT) 1
4567 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4568 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4569 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4570 subtarget, normalizep == 1);
4571 else if (STORE_FLAG_VALUE & 1)
4573 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4574 if (normalizep == -1)
4575 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4577 else
4578 abort ();
4580 /* If we were converting to a smaller mode, do the
4581 conversion now. */
4582 if (target_mode != compare_mode)
4584 convert_move (target, op0, 0);
4585 return target;
4587 else
4588 return op0;
4592 delete_insns_since (last);
4594 /* If expensive optimizations are enabled, use a different pseudo
4595 register for each insn instead of reusing the same one. This leads to
4596 better CSE, but slows down the compiler, since there are more pseudos. */
4597 subtarget = (!flag_expensive_optimizations
4598 && (target_mode == mode)) ? target : NULL_RTX;
4600 /* If we reached here, we can't do this with a scc insn. However, there
4601 are some comparisons that can be done directly. For example, if
4602 this is an equality comparison of integers, we can try to exclusive-or
4603 (or subtract) the two operands and use a recursive call to try the
4604 comparison with zero. Don't do any of these cases if branches are
4605 very cheap. */
4607 if (BRANCH_COST > 0
4608 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4609 && op1 != const0_rtx)
4611 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4612 OPTAB_WIDEN);
4614 if (tem == 0)
4615 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4616 OPTAB_WIDEN);
4617 if (tem != 0)
4618 tem = emit_store_flag (target, code, tem, const0_rtx,
4619 mode, unsignedp, normalizep);
4620 if (tem == 0)
4621 delete_insns_since (last);
4622 return tem;
4625 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4626 the constant zero. Reject all other comparisons at this point. Only
4627 do LE and GT if branches are expensive since they are expensive on
4628 2-operand machines. */
4630 if (BRANCH_COST == 0
4631 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4632 || (code != EQ && code != NE
4633 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4634 return 0;
4636 /* See what we need to return. We can only return 1, -1, or the
4637 sign bit. */
4639 if (normalizep == 0)
4641 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4642 normalizep = STORE_FLAG_VALUE;
4644 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4645 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4646 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4648 else
4649 return 0;
4652 /* Try to put the result of the comparison in the sign bit. Assume we can't
4653 do the necessary operation below. */
4655 tem = 0;
4657 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4658 the sign bit set. */
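/* Illustrative values only: A = 0 gives 0 | -1 = -1 (sign bit set);
   A = 3 gives 3 | 2 = 3 (clear); A = -2 gives -2 | -3 = -1 (set).  */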
4660 if (code == LE)
4662 /* This is destructive, so SUBTARGET can't be OP0. */
4663 if (rtx_equal_p (subtarget, op0))
4664 subtarget = 0;
4666 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4667 OPTAB_WIDEN);
4668 if (tem)
4669 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4670 OPTAB_WIDEN);
4673 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4674 number of bits in the mode of OP0, minus one. */
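/* Illustrative 32-bit values only: A = 5 gives (5 >> 31) - 5 = -5
   (sign bit set); A = 0 gives 0 - 0 = 0 (clear); A = -7 gives
   -1 - (-7) = 6 (clear).  */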
4676 if (code == GT)
4678 if (rtx_equal_p (subtarget, op0))
4679 subtarget = 0;
4681 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4682 size_int (GET_MODE_BITSIZE (mode) - 1),
4683 subtarget, 0);
4684 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4685 OPTAB_WIDEN);
4688 if (code == EQ || code == NE)
4690 /* For EQ or NE, one way to do the comparison is to apply an operation
4691 that converts the operand into a positive number if it is nonzero
4692 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4693 for NE we negate. This puts the result in the sign bit. Then we
4694 normalize with a shift, if needed.
4696 Two operations that can do the above actions are ABS and FFS, so try
4697 them. If that doesn't work, and MODE is smaller than a full word,
4698 we can use zero-extension to the wider mode (an unsigned conversion)
4699 as the operation. */
4701 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4702 that is compensated by the subsequent overflow when subtracting
4703 one / negating. */
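/* A hedged sketch (illustrative only): with tem = abs (A),

       EQ:  tem - 1  has the sign bit set iff A == 0
       NE:  -tem     has the sign bit set iff A != 0

   e.g. A = 0 yields -1 resp. 0, while A = 5 yields 4 resp. -5.  */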
4705 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4706 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4707 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4708 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4709 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4711 op0 = protect_from_queue (op0, 0);
4712 tem = convert_modes (word_mode, mode, op0, 1);
4713 mode = word_mode;
4716 if (tem != 0)
4718 if (code == EQ)
4719 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4720 0, OPTAB_WIDEN);
4721 else
4722 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4725 /* If we couldn't do it that way, for NE we can "or" the two's complement
4726 of the value with itself. For EQ, we take the one's complement of
4727 that "or", which is an extra insn, so we only handle EQ if branches
4728 are expensive. */
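/* Illustrative values only: A = 4 gives -4 | 4 = -4 (sign bit set,
   so A != 0); A = 0 gives 0 | 0 = 0 (clear).  For EQ the extra
   one's complement of that "or" is taken.  */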
4730 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4732 if (rtx_equal_p (subtarget, op0))
4733 subtarget = 0;
4735 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4736 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4737 OPTAB_WIDEN);
4739 if (tem && code == EQ)
4740 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4744 if (tem && normalizep)
4745 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4746 size_int (GET_MODE_BITSIZE (mode) - 1),
4747 subtarget, normalizep == 1);
4749 if (tem)
4751 if (GET_MODE (tem) != target_mode)
4753 convert_move (target, tem, 0);
4754 tem = target;
4756 else if (!subtarget)
4758 emit_move_insn (target, tem);
4759 tem = target;
4762 else
4763 delete_insns_since (last);
4765 return tem;
4768 /* Like emit_store_flag, but always succeeds. */
4771 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
4772 enum machine_mode mode, int unsignedp, int normalizep)
4774 rtx tem, label;
4776 /* First see if emit_store_flag can do the job. */
4777 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4778 if (tem != 0)
4779 return tem;
4781 if (normalizep == 0)
4782 normalizep = 1;
4784 /* If this failed, we have to do this with set/compare/jump/set code. */
4786 if (GET_CODE (target) != REG
4787 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4788 target = gen_reg_rtx (GET_MODE (target));
4790 emit_move_insn (target, const1_rtx);
4791 label = gen_label_rtx ();
4792 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4793 NULL_RTX, label);
4795 emit_move_insn (target, const0_rtx);
4796 emit_label (label);
4798 return target;
4801 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4802 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4804 The algorithm is based on the code in expr.c:do_jump.
4806 Note that this does not perform a general comparison. Only the variants
4807 generated within expmed.c are handled correctly; others abort (but could
4808 be handled if needed). */
4810 static void
4811 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
4812 rtx label)
4814 /* If this mode is an integer too wide to compare properly,
4815 compare word by word. Rely on cse to optimize constant cases. */
4817 if (GET_MODE_CLASS (mode) == MODE_INT
4818 && ! can_compare_p (op, mode, ccp_jump))
4820 rtx label2 = gen_label_rtx ();
4822 switch (op)
4824 case LTU:
4825 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4826 break;
4828 case LEU:
4829 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4830 break;
4832 case LT:
4833 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4834 break;
4836 case GT:
4837 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4838 break;
4840 case GE:
4841 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4842 break;
4844 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4845 those are the only equality operations we do. */
4846 case EQ:
4847 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4848 abort ();
4849 do_jump_by_parts_equality_rtx (arg1, label2, label);
4850 break;
4852 case NE:
4853 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4854 abort ();
4855 do_jump_by_parts_equality_rtx (arg1, label, label2);
4856 break;
4858 default:
4859 abort ();
4862 emit_label (label2);
4864 else
4865 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);