gcc/expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "tm_p.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "expr.h"
35 #include "optabs.h"
36 #include "real.h"
37 #include "recog.h"
38 #include "langhooks.h"
40 static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
41 unsigned HOST_WIDE_INT,
42 unsigned HOST_WIDE_INT, rtx));
43 static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
44 unsigned HOST_WIDE_INT, rtx));
45 static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
46 unsigned HOST_WIDE_INT,
47 unsigned HOST_WIDE_INT,
48 unsigned HOST_WIDE_INT,
49 rtx, int));
50 static rtx mask_rtx PARAMS ((enum machine_mode, int,
51 int, int));
52 static rtx lshift_value PARAMS ((enum machine_mode, rtx,
53 int, int));
54 static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
55 unsigned HOST_WIDE_INT, int));
56 static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
57 enum machine_mode, rtx));
59 /* Nonzero means divides or modulus operations are relatively cheap for
60 powers of two, so don't use branches; emit the operation instead.
61 Usually, this will mean that the MD file will emit non-branch
62 sequences. */
64 static int sdiv_pow2_cheap, smod_pow2_cheap;
66 #ifndef SLOW_UNALIGNED_ACCESS
67 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
68 #endif
70 /* For compilers that support multiple targets with different word sizes,
71 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
72 is the H8/300(H) compiler. */
74 #ifndef MAX_BITS_PER_WORD
75 #define MAX_BITS_PER_WORD BITS_PER_WORD
76 #endif
78 /* Reduce conditional compilation elsewhere. */
79 #ifndef HAVE_insv
80 #define HAVE_insv 0
81 #define CODE_FOR_insv CODE_FOR_nothing
82 #define gen_insv(a,b,c,d) NULL_RTX
83 #endif
84 #ifndef HAVE_extv
85 #define HAVE_extv 0
86 #define CODE_FOR_extv CODE_FOR_nothing
87 #define gen_extv(a,b,c,d) NULL_RTX
88 #endif
89 #ifndef HAVE_extzv
90 #define HAVE_extzv 0
91 #define CODE_FOR_extzv CODE_FOR_nothing
92 #define gen_extzv(a,b,c,d) NULL_RTX
93 #endif
95 /* Cost of various pieces of RTL. Note that some of these are indexed by
96 shift count and some by mode. */
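/* shift_cost, shiftadd_cost and shiftsub_cost are indexed by shift count;
   mul_cost, div_cost, mul_widen_cost and mul_highpart_cost are indexed by
   machine mode.  */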
97 static int add_cost, negate_cost, zero_cost;
98 static int shift_cost[MAX_BITS_PER_WORD];
99 static int shiftadd_cost[MAX_BITS_PER_WORD];
100 static int shiftsub_cost[MAX_BITS_PER_WORD];
101 static int mul_cost[NUM_MACHINE_MODES];
102 static int div_cost[NUM_MACHINE_MODES];
103 static int mul_widen_cost[NUM_MACHINE_MODES];
104 static int mul_highpart_cost[NUM_MACHINE_MODES];
106 void
107 init_expmed ()
109 rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
110 int dummy;
111 int m;
112 enum machine_mode mode, wider_mode;
114 start_sequence ();
116 /* This is "some random pseudo register" for purposes of calling recog
117 to see what insns exist. */
118 reg = gen_rtx_REG (word_mode, 10000);
120 zero_cost = rtx_cost (const0_rtx, 0);
121 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
123 shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
124 gen_rtx_ASHIFT (word_mode, reg,
125 const0_rtx)));
127 shiftadd_insn
128 = emit_insn (gen_rtx_SET (VOIDmode, reg,
129 gen_rtx_PLUS (word_mode,
130 gen_rtx_MULT (word_mode,
131 reg, const0_rtx),
132 reg)));
134 shiftsub_insn
135 = emit_insn (gen_rtx_SET (VOIDmode, reg,
136 gen_rtx_MINUS (word_mode,
137 gen_rtx_MULT (word_mode,
138 reg, const0_rtx),
139 reg)));
141 init_recog ();
143 shift_cost[0] = 0;
144 shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
146 for (m = 1; m < MAX_BITS_PER_WORD; m++)
148 rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
149 shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
151 XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
152 if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
153 shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
155 XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
156 if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
157 shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
159 XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
160 if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
161 shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
164 negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
166 sdiv_pow2_cheap
167 = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
168 <= 2 * add_cost);
169 smod_pow2_cheap
170 = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
171 <= 2 * add_cost);
173 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
174 mode != VOIDmode;
175 mode = GET_MODE_WIDER_MODE (mode))
177 reg = gen_rtx_REG (mode, 10000);
178 div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
179 mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
180 wider_mode = GET_MODE_WIDER_MODE (mode);
181 if (wider_mode != VOIDmode)
183 mul_widen_cost[(int) wider_mode]
184 = rtx_cost (gen_rtx_MULT (wider_mode,
185 gen_rtx_ZERO_EXTEND (wider_mode, reg),
186 gen_rtx_ZERO_EXTEND (wider_mode, reg)),
187 SET);
188 mul_highpart_cost[(int) mode]
189 = rtx_cost (gen_rtx_TRUNCATE
190 (mode,
191 gen_rtx_LSHIFTRT (wider_mode,
192 gen_rtx_MULT (wider_mode,
193 gen_rtx_ZERO_EXTEND
194 (wider_mode, reg),
195 gen_rtx_ZERO_EXTEND
196 (wider_mode, reg)),
197 GEN_INT (GET_MODE_BITSIZE (mode)))),
198 SET);
202 end_sequence ();
205 /* Return an rtx representing minus the value of X.
206 MODE is the intended mode of the result,
207 useful if X is a CONST_INT. */
209 rtx
210 negate_rtx (mode, x)
211 enum machine_mode mode;
212 rtx x;
214 rtx result = simplify_unary_operation (NEG, mode, x, mode);
216 if (result == 0)
217 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
219 return result;
222 /* Report on the availability of insv/extv/extzv and the desired mode
223 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
224 is false; else the mode of the specified operand. If OPNO is -1,
225 all the caller cares about is whether the insn is available. */
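/* Example: when the insv pattern is available, mode_for_extraction (EP_insv, 3)
   yields the mode of insv's operand 3 (falling back to word_mode if the
   pattern leaves that operand VOIDmode), while mode_for_extraction (EP_insv, -1)
   returns MAX_MACHINE_MODE only when HAVE_insv is zero, so a caller can test
   availability alone.  */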
226 enum machine_mode
227 mode_for_extraction (pattern, opno)
228 enum extraction_pattern pattern;
229 int opno;
231 const struct insn_data *data;
233 switch (pattern)
235 case EP_insv:
236 if (HAVE_insv)
238 data = &insn_data[CODE_FOR_insv];
239 break;
241 return MAX_MACHINE_MODE;
243 case EP_extv:
244 if (HAVE_extv)
246 data = &insn_data[CODE_FOR_extv];
247 break;
249 return MAX_MACHINE_MODE;
251 case EP_extzv:
252 if (HAVE_extzv)
254 data = &insn_data[CODE_FOR_extzv];
255 break;
257 return MAX_MACHINE_MODE;
259 default:
260 abort ();
263 if (opno == -1)
264 return VOIDmode;
266 /* Everyone who uses this function used to follow it with
267 if (result == VOIDmode) result = word_mode; */
268 if (data->operand[opno].mode == VOIDmode)
269 return word_mode;
270 return data->operand[opno].mode;
274 /* Generate code to store value from rtx VALUE
275 into a bit-field within structure STR_RTX
276 containing BITSIZE bits starting at bit BITNUM.
277 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
278 ALIGN is the alignment that STR_RTX is known to have.
279 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
281 /* ??? Note that there are two different ideas here for how
282 to determine the size to count bits within, for a register.
283 One is BITS_PER_WORD, and the other is the size of operand 3
284 of the insv pattern.
286 If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
287 otherwise, we use the mode of operand 3. */
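/* For example, storing a 3-bit field that starts at bit 5 of a byte-aligned
   MEM gives BITSIZE = 3 and BITNUM = 5; with BITS_PER_UNIT == 8 that is
   byte offset 0 and bit position 5 within the byte.  */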
289 rtx
290 store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
291 rtx str_rtx;
292 unsigned HOST_WIDE_INT bitsize;
293 unsigned HOST_WIDE_INT bitnum;
294 enum machine_mode fieldmode;
295 rtx value;
296 HOST_WIDE_INT total_size;
298 unsigned int unit
299 = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
300 unsigned HOST_WIDE_INT offset = bitnum / unit;
301 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
302 rtx op0 = str_rtx;
303 int byte_offset;
305 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
307 /* Discount the part of the structure before the desired byte.
308 We need to know how many bytes are safe to reference after it. */
309 if (total_size >= 0)
310 total_size -= (bitpos / BIGGEST_ALIGNMENT
311 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
313 while (GET_CODE (op0) == SUBREG)
315 /* The following line once was done only if WORDS_BIG_ENDIAN,
316 but I think that is a mistake. WORDS_BIG_ENDIAN is
317 meaningful at a much higher level; when structures are copied
318 between memory and regs, the higher-numbered regs
319 always get higher addresses. */
320 offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
321 /* We used to adjust BITPOS here, but now we do the whole adjustment
322 right after the loop. */
323 op0 = SUBREG_REG (op0);
326 value = protect_from_queue (value, 0);
328 if (flag_force_mem)
330 int old_generating_concat_p = generating_concat_p;
331 generating_concat_p = 0;
332 value = force_not_mem (value);
333 generating_concat_p = old_generating_concat_p;
336 /* If the target is a register, overwriting the entire object, or storing
337 a full-word or multi-word field can be done with just a SUBREG.
339 If the target is memory, storing any naturally aligned field can be
340 done with a simple store. For targets that support fast unaligned
341 memory, any naturally sized, unit aligned field can be done directly. */
343 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
344 + (offset * UNITS_PER_WORD);
346 if (bitpos == 0
347 && bitsize == GET_MODE_BITSIZE (fieldmode)
348 && (GET_CODE (op0) != MEM
349 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
350 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
351 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
352 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
353 || (offset * BITS_PER_UNIT % bitsize == 0
354 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
356 if (GET_MODE (op0) != fieldmode)
358 if (GET_CODE (op0) == SUBREG)
360 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
361 || GET_MODE_CLASS (fieldmode) == MODE_INT
362 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
363 op0 = SUBREG_REG (op0);
364 else
365 /* Else we've got some float mode source being extracted into
366 a different float mode destination -- this combination of
367 subregs results in Severe Tire Damage. */
368 abort ();
370 if (GET_CODE (op0) == REG)
371 op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
372 else
373 op0 = adjust_address (op0, fieldmode, offset);
375 emit_move_insn (op0, value);
376 return value;
379 /* Make sure we are playing with integral modes. Pun with subregs
380 if we aren't. This must come after the entire register case above,
381 since that case is valid for any mode. The following cases are only
382 valid for integral modes. */
384 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
385 if (imode != GET_MODE (op0))
387 if (GET_CODE (op0) == MEM)
388 op0 = adjust_address (op0, imode, 0);
389 else if (imode != BLKmode)
390 op0 = gen_lowpart (imode, op0);
391 else
392 abort ();
396 /* We may be accessing data outside the field, which means
397 we can alias adjacent data. */
398 if (GET_CODE (op0) == MEM)
400 op0 = shallow_copy_rtx (op0);
401 set_mem_alias_set (op0, 0);
402 set_mem_expr (op0, 0);
405 /* If OP0 is a register, BITPOS must count within a word.
406 But as we have it, it counts within whatever size OP0 now has.
407 On a bigendian machine, these are not the same, so convert. */
408 if (BYTES_BIG_ENDIAN
409 && GET_CODE (op0) != MEM
410 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
411 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
413 /* Storing an lsb-aligned field in a register
414 can be done with a movestrict instruction. */
416 if (GET_CODE (op0) != MEM
417 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
418 && bitsize == GET_MODE_BITSIZE (fieldmode)
419 && (movstrict_optab->handlers[(int) fieldmode].insn_code
420 != CODE_FOR_nothing))
422 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
424 /* Get appropriate low part of the value being stored. */
425 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
426 value = gen_lowpart (fieldmode, value);
427 else if (!(GET_CODE (value) == SYMBOL_REF
428 || GET_CODE (value) == LABEL_REF
429 || GET_CODE (value) == CONST))
430 value = convert_to_mode (fieldmode, value, 0);
432 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
433 value = copy_to_mode_reg (fieldmode, value);
435 if (GET_CODE (op0) == SUBREG)
437 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
438 || GET_MODE_CLASS (fieldmode) == MODE_INT
439 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
440 op0 = SUBREG_REG (op0);
441 else
442 /* Else we've got some float mode source being extracted into
443 a different float mode destination -- this combination of
444 subregs results in Severe Tire Damage. */
445 abort ();
448 emit_insn (GEN_FCN (icode)
449 (gen_rtx_SUBREG (fieldmode, op0,
450 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
451 + (offset * UNITS_PER_WORD)),
452 value));
454 return value;
457 /* Handle fields bigger than a word. */
459 if (bitsize > BITS_PER_WORD)
461 /* Here we transfer the words of the field
462 in the order least significant first.
463 This is because the most significant word is the one which may
464 be less than full.
465 However, only do that if the value is not BLKmode. */
467 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
468 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
469 unsigned int i;
471 /* This is the mode we must force value to, so that there will be enough
472 subwords to extract. Note that fieldmode will often (always?) be
473 VOIDmode, because that is what store_field uses to indicate that this
474 is a bit field, but passing VOIDmode to operand_subword_force will
475 result in an abort. */
476 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
478 for (i = 0; i < nwords; i++)
480 /* If I is 0, use the low-order word in both field and target;
481 if I is 1, use the next to lowest word; and so on. */
482 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
483 unsigned int bit_offset = (backwards
484 ? MAX ((int) bitsize - ((int) i + 1)
485 * BITS_PER_WORD,
486 0)
487 : (int) i * BITS_PER_WORD);
489 store_bit_field (op0, MIN (BITS_PER_WORD,
490 bitsize - i * BITS_PER_WORD),
491 bitnum + bit_offset, word_mode,
492 operand_subword_force (value, wordnum,
493 (GET_MODE (value) == VOIDmode
494 ? fieldmode
495 : GET_MODE (value))),
496 total_size);
498 return value;
501 /* From here on we can assume that the field to be stored fits within
502 a single word of the destination, since it is narrower than a word. */
504 /* OFFSET is the number of words or bytes (UNIT says which)
505 from STR_RTX to the first word or byte containing part of the field. */
507 if (GET_CODE (op0) != MEM)
509 if (offset != 0
510 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
512 if (GET_CODE (op0) != REG)
514 /* Since this is a destination (lvalue), we can't copy it to a
515 pseudo. We can trivially remove a SUBREG that does not
516 change the size of the operand. Such a SUBREG may have been
517 added above. Otherwise, abort. */
518 if (GET_CODE (op0) == SUBREG
519 && (GET_MODE_SIZE (GET_MODE (op0))
520 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
521 op0 = SUBREG_REG (op0);
522 else
523 abort ();
525 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
526 op0, (offset * UNITS_PER_WORD));
528 offset = 0;
530 else
531 op0 = protect_from_queue (op0, 1);
533 /* If VALUE is a floating-point mode, access it as an integer of the
534 corresponding size. This can occur on a machine with 64 bit registers
535 that uses SFmode for float. This can also occur for unaligned float
536 structure fields. */
537 if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
538 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
539 value = gen_lowpart ((GET_MODE (value) == VOIDmode
540 ? word_mode : int_mode_for_mode (GET_MODE (value))),
541 value);
543 /* Now OFFSET is nonzero only if OP0 is memory
544 and is therefore always measured in bytes. */
546 if (HAVE_insv
547 && GET_MODE (value) != BLKmode
548 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
549 /* Ensure insv's size is wide enough for this field. */
550 && (GET_MODE_BITSIZE (op_mode) >= bitsize)
551 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
552 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
554 int xbitpos = bitpos;
555 rtx value1;
556 rtx xop0 = op0;
557 rtx last = get_last_insn ();
558 rtx pat;
559 enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
560 int save_volatile_ok = volatile_ok;
562 volatile_ok = 1;
564 /* If this machine's insv can only insert into a register, copy OP0
565 into a register and save it back later. */
566 /* This used to check flag_force_mem, but that was a serious
567 de-optimization now that flag_force_mem is enabled by -O2. */
568 if (GET_CODE (op0) == MEM
569 && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
570 (op0, VOIDmode)))
572 rtx tempreg;
573 enum machine_mode bestmode;
575 /* Get the mode to use for inserting into this field. If OP0 is
576 BLKmode, get the smallest mode consistent with the alignment. If
577 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
578 mode. Otherwise, use the smallest mode containing the field. */
580 if (GET_MODE (op0) == BLKmode
581 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
582 bestmode
583 = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
584 MEM_VOLATILE_P (op0));
585 else
586 bestmode = GET_MODE (op0);
588 if (bestmode == VOIDmode
589 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
590 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
591 goto insv_loses;
593 /* Adjust address to point to the containing unit of that mode.
594 Compute offset as multiple of this unit, counting in bytes. */
595 unit = GET_MODE_BITSIZE (bestmode);
596 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
597 bitpos = bitnum % unit;
598 op0 = adjust_address (op0, bestmode, offset);
600 /* Fetch that unit, store the bitfield in it, then store
601 the unit. */
602 tempreg = copy_to_reg (op0);
603 store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
604 total_size);
605 emit_move_insn (op0, tempreg);
606 return value;
608 volatile_ok = save_volatile_ok;
610 /* Add OFFSET into OP0's address. */
611 if (GET_CODE (xop0) == MEM)
612 xop0 = adjust_address (xop0, byte_mode, offset);
614 /* If xop0 is a register, we need it in MAXMODE
615 to make it acceptable to the format of insv. */
616 if (GET_CODE (xop0) == SUBREG)
617 /* We can't just change the mode, because this might clobber op0,
618 and we will need the original value of op0 if insv fails. */
619 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
620 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
621 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
623 /* On big-endian machines, we count bits from the most significant.
624 If the bit field insn does not, we must invert. */
626 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
627 xbitpos = unit - bitsize - xbitpos;
629 /* We have been counting XBITPOS within UNIT.
630 Count instead within the size of the register. */
631 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
632 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
634 unit = GET_MODE_BITSIZE (maxmode);
636 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
637 value1 = value;
638 if (GET_MODE (value) != maxmode)
640 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
642 /* Optimization: Don't bother really extending VALUE
643 if it has all the bits we will actually use. However,
644 if we must narrow it, be sure we do it correctly. */
646 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
648 rtx tmp;
650 tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
651 if (! tmp)
652 tmp = simplify_gen_subreg (maxmode,
653 force_reg (GET_MODE (value),
654 value1),
655 GET_MODE (value), 0);
656 value1 = tmp;
658 else
659 value1 = gen_lowpart (maxmode, value1);
661 else if (GET_CODE (value) == CONST_INT)
662 value1 = gen_int_mode (INTVAL (value), maxmode);
663 else if (!CONSTANT_P (value))
664 /* Parse phase is supposed to make VALUE's data type
665 match that of the component reference, which is a type
666 at least as wide as the field; so VALUE should have
667 a mode that corresponds to that type. */
668 abort ();
671 /* If this machine's insv insists on a register,
672 get VALUE1 into a register. */
673 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
674 (value1, maxmode)))
675 value1 = force_reg (maxmode, value1);
677 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
678 if (pat)
679 emit_insn (pat);
680 else
682 delete_insns_since (last);
683 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
686 else
687 insv_loses:
688 /* Insv is not available; store using shifts and boolean ops. */
689 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
690 return value;
693 /* Use shifts and boolean operations to store VALUE
694 into a bit field of width BITSIZE
695 in a memory location specified by OP0 except offset by OFFSET bytes.
696 (OFFSET must be 0 if OP0 is a register.)
697 The field starts at position BITPOS within the byte.
698 (If OP0 is a register, it may be a full word or a narrower mode,
699 but BITPOS still counts within a full word,
700 which is significant on bigendian machines.)
702 Note that protect_from_queue has already been done on OP0 and VALUE. */
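/* The sequence below is a read-modify-write: fetch the containing word (or
   narrower unit), clear the BITSIZE bits at BITPOS by ANDing with the
   complemented mask from mask_rtx, shift VALUE into position, and OR it
   back in.  */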
704 static void
705 store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
706 rtx op0;
707 unsigned HOST_WIDE_INT offset, bitsize, bitpos;
708 rtx value;
710 enum machine_mode mode;
711 unsigned int total_bits = BITS_PER_WORD;
712 rtx subtarget, temp;
713 int all_zero = 0;
714 int all_one = 0;
716 /* There is a case not handled here:
717 a structure with a known alignment of just a halfword
718 and a field split across two aligned halfwords within the structure.
719 Or likewise a structure with a known alignment of just a byte
720 and a field split across two bytes.
721 Such cases are not supposed to be able to occur. */
723 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
725 if (offset != 0)
726 abort ();
727 /* Special treatment for a bit field split across two registers. */
728 if (bitsize + bitpos > BITS_PER_WORD)
730 store_split_bit_field (op0, bitsize, bitpos, value);
731 return;
734 else
736 /* Get the proper mode to use for this field. We want a mode that
737 includes the entire field. If such a mode would be larger than
738 a word, we won't be doing the extraction the normal way.
739 We don't want a mode bigger than the destination. */
741 mode = GET_MODE (op0);
742 if (GET_MODE_BITSIZE (mode) == 0
743 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
744 mode = word_mode;
745 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
746 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
748 if (mode == VOIDmode)
750 /* The only way this should occur is if the field spans word
751 boundaries. */
752 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
753 value);
754 return;
757 total_bits = GET_MODE_BITSIZE (mode);
759 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
760 be in the range 0 to total_bits-1, and put any excess bytes in
761 OFFSET. */
762 if (bitpos >= total_bits)
764 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
765 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
766 * BITS_PER_UNIT);
769 /* Get ref to an aligned byte, halfword, or word containing the field.
770 Adjust BITPOS to be position within a word,
771 and OFFSET to be the offset of that word.
772 Then alter OP0 to refer to that word. */
773 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
774 offset -= (offset % (total_bits / BITS_PER_UNIT));
775 op0 = adjust_address (op0, mode, offset);
778 mode = GET_MODE (op0);
780 /* Now MODE is either some integral mode for a MEM as OP0,
781 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
782 The bit field is contained entirely within OP0.
783 BITPOS is the starting bit number within OP0.
784 (OP0's mode may actually be narrower than MODE.) */
786 if (BYTES_BIG_ENDIAN)
787 /* BITPOS is the distance between our msb
788 and that of the containing datum.
789 Convert it to the distance from the lsb. */
790 bitpos = total_bits - bitsize - bitpos;
792 /* Now BITPOS is always the distance between our lsb
793 and that of OP0. */
795 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
796 we must first convert its mode to MODE. */
798 if (GET_CODE (value) == CONST_INT)
800 HOST_WIDE_INT v = INTVAL (value);
802 if (bitsize < HOST_BITS_PER_WIDE_INT)
803 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
805 if (v == 0)
806 all_zero = 1;
807 else if ((bitsize < HOST_BITS_PER_WIDE_INT
808 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
809 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
810 all_one = 1;
812 value = lshift_value (mode, value, bitpos, bitsize);
814 else
816 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
817 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
819 if (GET_MODE (value) != mode)
821 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
822 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
823 value = gen_lowpart (mode, value);
824 else
825 value = convert_to_mode (mode, value, 1);
828 if (must_and)
829 value = expand_binop (mode, and_optab, value,
830 mask_rtx (mode, 0, bitsize, 0),
831 NULL_RTX, 1, OPTAB_LIB_WIDEN);
832 if (bitpos > 0)
833 value = expand_shift (LSHIFT_EXPR, mode, value,
834 build_int_2 (bitpos, 0), NULL_RTX, 1);
837 /* Now clear the chosen bits in OP0,
838 except that if VALUE is -1 we need not bother. */
840 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
842 if (! all_one)
844 temp = expand_binop (mode, and_optab, op0,
845 mask_rtx (mode, bitpos, bitsize, 1),
846 subtarget, 1, OPTAB_LIB_WIDEN);
847 subtarget = temp;
849 else
850 temp = op0;
852 /* Now logical-or VALUE into OP0, unless it is zero. */
854 if (! all_zero)
855 temp = expand_binop (mode, ior_optab, temp, value,
856 subtarget, 1, OPTAB_LIB_WIDEN);
857 if (op0 != temp)
858 emit_move_insn (op0, temp);
861 /* Store a bit field that is split across multiple accessible memory objects.
863 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
864 BITSIZE is the field width; BITPOS the position of its first bit
865 (within the word).
866 VALUE is the value to store.
868 This does not yet handle fields wider than BITS_PER_WORD. */
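/* Example: a 10-bit field at BITPOS 28 in 32-bit words is stored as two
   pieces, 4 bits in the first word (bits 28..31) followed by 6 bits at the
   start of the next word.  */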
870 static void
871 store_split_bit_field (op0, bitsize, bitpos, value)
872 rtx op0;
873 unsigned HOST_WIDE_INT bitsize, bitpos;
874 rtx value;
876 unsigned int unit;
877 unsigned int bitsdone = 0;
879 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
880 much at a time. */
881 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
882 unit = BITS_PER_WORD;
883 else
884 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
886 /* If VALUE is a constant other than a CONST_INT, get it into a register in
887 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
888 that VALUE might be a floating-point constant. */
889 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
891 rtx word = gen_lowpart_common (word_mode, value);
893 if (word && (value != word))
894 value = word;
895 else
896 value = gen_lowpart_common (word_mode,
897 force_reg (GET_MODE (value) != VOIDmode
898 ? GET_MODE (value)
899 : word_mode, value));
901 else if (GET_CODE (value) == ADDRESSOF)
902 value = copy_to_reg (value);
904 while (bitsdone < bitsize)
906 unsigned HOST_WIDE_INT thissize;
907 rtx part, word;
908 unsigned HOST_WIDE_INT thispos;
909 unsigned HOST_WIDE_INT offset;
911 offset = (bitpos + bitsdone) / unit;
912 thispos = (bitpos + bitsdone) % unit;
914 /* THISSIZE must not overrun a word boundary. Otherwise,
915 store_fixed_bit_field will call us again, and we will mutually
916 recurse forever. */
917 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
918 thissize = MIN (thissize, unit - thispos);
920 if (BYTES_BIG_ENDIAN)
922 int total_bits;
924 /* We must do an endian conversion exactly the same way as it is
925 done in extract_bit_field, so that the two calls to
926 extract_fixed_bit_field will have comparable arguments. */
927 if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
928 total_bits = BITS_PER_WORD;
929 else
930 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
932 /* Fetch successively less significant portions. */
933 if (GET_CODE (value) == CONST_INT)
934 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
935 >> (bitsize - bitsdone - thissize))
936 & (((HOST_WIDE_INT) 1 << thissize) - 1));
937 else
938 /* The args are chosen so that the last part includes the
939 lsb. Give extract_bit_field the value it needs (with
940 endianness compensation) to fetch the piece we want. */
941 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
942 total_bits - bitsize + bitsdone,
943 NULL_RTX, 1);
945 else
947 /* Fetch successively more significant portions. */
948 if (GET_CODE (value) == CONST_INT)
949 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
950 >> bitsdone)
951 & (((HOST_WIDE_INT) 1 << thissize) - 1));
952 else
953 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
954 bitsdone, NULL_RTX, 1);
957 /* If OP0 is a register, then handle OFFSET here.
959 When handling multiword bitfields, extract_bit_field may pass
960 down a word_mode SUBREG of a larger REG for a bitfield that actually
961 crosses a word boundary. Thus, for a SUBREG, we must find
962 the current word starting from the base register. */
963 if (GET_CODE (op0) == SUBREG)
965 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
966 word = operand_subword_force (SUBREG_REG (op0), word_offset,
967 GET_MODE (SUBREG_REG (op0)));
968 offset = 0;
970 else if (GET_CODE (op0) == REG)
972 word = operand_subword_force (op0, offset, GET_MODE (op0));
973 offset = 0;
975 else
976 word = op0;
978 /* OFFSET is in UNITs, and UNIT is in bits.
979 store_fixed_bit_field wants offset in bytes. */
980 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
981 thispos, part);
982 bitsdone += thissize;
986 /* Generate code to extract a bit-field from STR_RTX
987 containing BITSIZE bits, starting at BITNUM,
988 and put it in TARGET if possible (if TARGET is nonzero).
989 Regardless of TARGET, we return the rtx for where the value is placed.
990 It may be a QUEUED.
992 STR_RTX is the structure containing the byte (a REG or MEM).
993 UNSIGNEDP is nonzero if this is an unsigned bit field.
994 MODE is the natural mode of the field value once extracted.
995 TMODE is the mode the caller would like the value to have;
996 but the value may be returned with type MODE instead.
998 TOTAL_SIZE is the size in bytes of the containing structure,
999 or -1 if varying.
1001 If a TARGET is specified and we can store in it at no extra cost,
1002 we do so, and return TARGET.
1003 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1004 if they are equally easy. */
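/* For example, extracting an unsigned 12-bit field starting at bit 4 of a
   SImode register uses BITSIZE = 12, BITNUM = 4 and UNSIGNEDP = 1; the
   result comes back zero-extended in MODE or TMODE, with TMODE preferred
   when it is no harder to produce.  */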
1006 rtx
1007 extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
1008 target, mode, tmode, total_size)
1009 rtx str_rtx;
1010 unsigned HOST_WIDE_INT bitsize;
1011 unsigned HOST_WIDE_INT bitnum;
1012 int unsignedp;
1013 rtx target;
1014 enum machine_mode mode, tmode;
1015 HOST_WIDE_INT total_size;
1017 unsigned int unit
1018 = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
1019 unsigned HOST_WIDE_INT offset = bitnum / unit;
1020 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
1021 rtx op0 = str_rtx;
1022 rtx spec_target = target;
1023 rtx spec_target_subreg = 0;
1024 enum machine_mode int_mode;
1025 enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
1026 enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
1027 enum machine_mode mode1;
1028 int byte_offset;
1030 /* Discount the part of the structure before the desired byte.
1031 We need to know how many bytes are safe to reference after it. */
1032 if (total_size >= 0)
1033 total_size -= (bitpos / BIGGEST_ALIGNMENT
1034 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
1036 if (tmode == VOIDmode)
1037 tmode = mode;
1039 while (GET_CODE (op0) == SUBREG)
1041 bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1042 if (bitpos > unit)
1044 offset += (bitpos / unit);
1045 bitpos %= unit;
1047 op0 = SUBREG_REG (op0);
1050 if (GET_CODE (op0) == REG
1051 && mode == GET_MODE (op0)
1052 && bitnum == 0
1053 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1055 /* We're trying to extract a full register from itself. */
1056 return op0;
1059 /* Make sure we are playing with integral modes. Pun with subregs
1060 if we aren't. */
1062 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1063 if (imode != GET_MODE (op0))
1065 if (GET_CODE (op0) == MEM)
1066 op0 = adjust_address (op0, imode, 0);
1067 else if (imode != BLKmode)
1068 op0 = gen_lowpart (imode, op0);
1069 else
1070 abort ();
1074 /* We may be accessing data outside the field, which means
1075 we can alias adjacent data. */
1076 if (GET_CODE (op0) == MEM)
1078 op0 = shallow_copy_rtx (op0);
1079 set_mem_alias_set (op0, 0);
1080 set_mem_expr (op0, 0);
1083 /* Extraction of a full-word or multi-word value from a structure
1084 in a register or aligned memory can be done with just a SUBREG.
1085 A subword value in the least significant part of a register
1086 can also be extracted with a SUBREG. For this, we need the
1087 byte offset of the value in op0. */
1089 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1091 /* If OP0 is a register, BITPOS must count within a word.
1092 But as we have it, it counts within whatever size OP0 now has.
1093 On a bigendian machine, these are not the same, so convert. */
1094 if (BYTES_BIG_ENDIAN
1095 && GET_CODE (op0) != MEM
1096 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1097 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1099 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1100 If that's wrong, the solution is to test for it and set TARGET to 0
1101 if needed. */
1103 mode1 = (VECTOR_MODE_P (tmode)
1104 ? mode
1105 : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));
1107 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1108 && bitpos % BITS_PER_WORD == 0)
1109 || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
1110 /* ??? The big endian test here is wrong. This is correct
1111 if the value is in a register, and if mode_for_size is not
1112 the same mode as op0. This causes us to get unnecessarily
1113 inefficient code from the Thumb port when -mbig-endian. */
1114 && (BYTES_BIG_ENDIAN
1115 ? bitpos + bitsize == BITS_PER_WORD
1116 : bitpos == 0)))
1117 && ((GET_CODE (op0) != MEM
1118 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1119 GET_MODE_BITSIZE (GET_MODE (op0)))
1120 && GET_MODE_SIZE (mode1) != 0
1121 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1122 || (GET_CODE (op0) == MEM
1123 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1124 || (offset * BITS_PER_UNIT % bitsize == 0
1125 && MEM_ALIGN (op0) % bitsize == 0)))))
1127 if (mode1 != GET_MODE (op0))
1129 if (GET_CODE (op0) == SUBREG)
1131 if (GET_MODE (SUBREG_REG (op0)) == mode1
1132 || GET_MODE_CLASS (mode1) == MODE_INT
1133 || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
1134 op0 = SUBREG_REG (op0);
1135 else
1136 /* Else we've got some float mode source being extracted into
1137 a different float mode destination -- this combination of
1138 subregs results in Severe Tire Damage. */
1139 goto no_subreg_mode_swap;
1141 if (GET_CODE (op0) == REG)
1142 op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
1143 else
1144 op0 = adjust_address (op0, mode1, offset);
1146 if (mode1 != mode)
1147 return convert_to_mode (tmode, op0, unsignedp);
1148 return op0;
1150 no_subreg_mode_swap:
1152 /* Handle fields bigger than a word. */
1154 if (bitsize > BITS_PER_WORD)
1156 /* Here we transfer the words of the field
1157 in the order least significant first.
1158 This is because the most significant word is the one which may
1159 be less than full. */
1161 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1162 unsigned int i;
1164 if (target == 0 || GET_CODE (target) != REG)
1165 target = gen_reg_rtx (mode);
1167 /* Indicate for flow that the entire target reg is being set. */
1168 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
1170 for (i = 0; i < nwords; i++)
1172 /* If I is 0, use the low-order word in both field and target;
1173 if I is 1, use the next to lowest word; and so on. */
1174 /* Word number in TARGET to use. */
1175 unsigned int wordnum
1176 = (WORDS_BIG_ENDIAN
1177 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1178 : i);
1179 /* Offset from start of field in OP0. */
1180 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1181 ? MAX (0, ((int) bitsize - ((int) i + 1)
1182 * (int) BITS_PER_WORD))
1183 : (int) i * BITS_PER_WORD);
1184 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1185 rtx result_part
1186 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1187 bitsize - i * BITS_PER_WORD),
1188 bitnum + bit_offset, 1, target_part, mode,
1189 word_mode, total_size);
1191 if (target_part == 0)
1192 abort ();
1194 if (result_part != target_part)
1195 emit_move_insn (target_part, result_part);
1198 if (unsignedp)
1200 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1201 need to be zero'd out. */
1202 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1204 unsigned int i, total_words;
1206 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1207 for (i = nwords; i < total_words; i++)
1208 emit_move_insn
1209 (operand_subword (target,
1210 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1211 1, VOIDmode),
1212 const0_rtx);
1214 return target;
1217 /* Signed bit field: sign-extend with two arithmetic shifts. */
1218 target = expand_shift (LSHIFT_EXPR, mode, target,
1219 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1220 NULL_RTX, 0);
1221 return expand_shift (RSHIFT_EXPR, mode, target,
1222 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1223 NULL_RTX, 0);
1226 /* From here on we know the desired field is smaller than a word. */
1228 /* Check if there is a correspondingly-sized integer field, so we can
1229 safely extract it as one size of integer, if necessary; then
1230 truncate or extend to the size that is wanted; then use SUBREGs or
1231 convert_to_mode to get one of the modes we really wanted. */
1233 int_mode = int_mode_for_mode (tmode);
1234 if (int_mode == BLKmode)
1235 int_mode = int_mode_for_mode (mode);
1236 if (int_mode == BLKmode)
1237 abort (); /* Should probably push op0 out to memory and then
1238 do a load. */
1240 /* OFFSET is the number of words or bytes (UNIT says which)
1241 from STR_RTX to the first word or byte containing part of the field. */
1243 if (GET_CODE (op0) != MEM)
1245 if (offset != 0
1246 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1248 if (GET_CODE (op0) != REG)
1249 op0 = copy_to_reg (op0);
1250 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1251 op0, (offset * UNITS_PER_WORD));
1253 offset = 0;
1255 else
1256 op0 = protect_from_queue (str_rtx, 1);
1258 /* Now OFFSET is nonzero only for memory operands. */
1260 if (unsignedp)
1262 if (HAVE_extzv
1263 && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
1264 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1265 && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
1267 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1268 rtx bitsize_rtx, bitpos_rtx;
1269 rtx last = get_last_insn ();
1270 rtx xop0 = op0;
1271 rtx xtarget = target;
1272 rtx xspec_target = spec_target;
1273 rtx xspec_target_subreg = spec_target_subreg;
1274 rtx pat;
1275 enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
1277 if (GET_CODE (xop0) == MEM)
1279 int save_volatile_ok = volatile_ok;
1280 volatile_ok = 1;
1282 /* Is the memory operand acceptable? */
1283 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
1284 (xop0, GET_MODE (xop0))))
1286 /* No, load into a reg and extract from there. */
1287 enum machine_mode bestmode;
1289 /* Get the mode to use for inserting into this field. If
1290 OP0 is BLKmode, get the smallest mode consistent with the
1291 alignment. If OP0 is a non-BLKmode object that is no
1292 wider than MAXMODE, use its mode. Otherwise, use the
1293 smallest mode containing the field. */
1295 if (GET_MODE (xop0) == BLKmode
1296 || (GET_MODE_SIZE (GET_MODE (op0))
1297 > GET_MODE_SIZE (maxmode)))
1298 bestmode = get_best_mode (bitsize, bitnum,
1299 MEM_ALIGN (xop0), maxmode,
1300 MEM_VOLATILE_P (xop0));
1301 else
1302 bestmode = GET_MODE (xop0);
1304 if (bestmode == VOIDmode
1305 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1306 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1307 goto extzv_loses;
1309 /* Compute offset as multiple of this unit,
1310 counting in bytes. */
1311 unit = GET_MODE_BITSIZE (bestmode);
1312 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1313 xbitpos = bitnum % unit;
1314 xop0 = adjust_address (xop0, bestmode, xoffset);
1316 /* Fetch it to a register in that size. */
1317 xop0 = force_reg (bestmode, xop0);
1319 /* XBITPOS counts within UNIT, which is what is expected. */
1321 else
1322 /* Get ref to first byte containing part of the field. */
1323 xop0 = adjust_address (xop0, byte_mode, xoffset);
1325 volatile_ok = save_volatile_ok;
1328 /* If op0 is a register, we need it in MAXMODE (which is usually
1329 SImode) to make it acceptable to the format of extzv. */
1330 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1331 goto extzv_loses;
1332 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1333 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1335 /* On big-endian machines, we count bits from the most significant.
1336 If the bit field insn does not, we must invert. */
1337 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1338 xbitpos = unit - bitsize - xbitpos;
1340 /* Now convert from counting within UNIT to counting in MAXMODE. */
1341 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1342 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1344 unit = GET_MODE_BITSIZE (maxmode);
1346 if (xtarget == 0
1347 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1348 xtarget = xspec_target = gen_reg_rtx (tmode);
1350 if (GET_MODE (xtarget) != maxmode)
1352 if (GET_CODE (xtarget) == REG)
1354 int wider = (GET_MODE_SIZE (maxmode)
1355 > GET_MODE_SIZE (GET_MODE (xtarget)));
1356 xtarget = gen_lowpart (maxmode, xtarget);
1357 if (wider)
1358 xspec_target_subreg = xtarget;
1360 else
1361 xtarget = gen_reg_rtx (maxmode);
1364 /* If this machine's extzv insists on a register target,
1365 make sure we have one. */
1366 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
1367 (xtarget, maxmode)))
1368 xtarget = gen_reg_rtx (maxmode);
1370 bitsize_rtx = GEN_INT (bitsize);
1371 bitpos_rtx = GEN_INT (xbitpos);
1373 pat = gen_extzv (protect_from_queue (xtarget, 1),
1374 xop0, bitsize_rtx, bitpos_rtx);
1375 if (pat)
1377 emit_insn (pat);
1378 target = xtarget;
1379 spec_target = xspec_target;
1380 spec_target_subreg = xspec_target_subreg;
1382 else
1384 delete_insns_since (last);
1385 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1386 bitpos, target, 1);
1389 else
1390 extzv_loses:
1391 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1392 bitpos, target, 1);
1394 else
1396 if (HAVE_extv
1397 && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
1398 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1399 && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
1401 int xbitpos = bitpos, xoffset = offset;
1402 rtx bitsize_rtx, bitpos_rtx;
1403 rtx last = get_last_insn ();
1404 rtx xop0 = op0, xtarget = target;
1405 rtx xspec_target = spec_target;
1406 rtx xspec_target_subreg = spec_target_subreg;
1407 rtx pat;
1408 enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
1410 if (GET_CODE (xop0) == MEM)
1412 /* Is the memory operand acceptable? */
1413 if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
1414 (xop0, GET_MODE (xop0))))
1416 /* No, load into a reg and extract from there. */
1417 enum machine_mode bestmode;
1419 /* Get the mode to use for inserting into this field. If
1420 OP0 is BLKmode, get the smallest mode consistent with the
1421 alignment. If OP0 is a non-BLKmode object that is no
1422 wider than MAXMODE, use its mode. Otherwise, use the
1423 smallest mode containing the field. */
1425 if (GET_MODE (xop0) == BLKmode
1426 || (GET_MODE_SIZE (GET_MODE (op0))
1427 > GET_MODE_SIZE (maxmode)))
1428 bestmode = get_best_mode (bitsize, bitnum,
1429 MEM_ALIGN (xop0), maxmode,
1430 MEM_VOLATILE_P (xop0));
1431 else
1432 bestmode = GET_MODE (xop0);
1434 if (bestmode == VOIDmode
1435 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1436 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1437 goto extv_loses;
1439 /* Compute offset as multiple of this unit,
1440 counting in bytes. */
1441 unit = GET_MODE_BITSIZE (bestmode);
1442 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1443 xbitpos = bitnum % unit;
1444 xop0 = adjust_address (xop0, bestmode, xoffset);
1446 /* Fetch it to a register in that size. */
1447 xop0 = force_reg (bestmode, xop0);
1449 /* XBITPOS counts within UNIT, which is what is expected. */
1451 else
1452 /* Get ref to first byte containing part of the field. */
1453 xop0 = adjust_address (xop0, byte_mode, xoffset);
1456 /* If op0 is a register, we need it in MAXMODE (which is usually
1457 SImode) to make it acceptable to the format of extv. */
1458 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1459 goto extv_loses;
1460 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1461 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1463 /* On big-endian machines, we count bits from the most significant.
1464 If the bit field insn does not, we must invert. */
1465 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1466 xbitpos = unit - bitsize - xbitpos;
1468 /* XBITPOS counts within a size of UNIT.
1469 Adjust to count within a size of MAXMODE. */
1470 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1471 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1473 unit = GET_MODE_BITSIZE (maxmode);
1475 if (xtarget == 0
1476 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1477 xtarget = xspec_target = gen_reg_rtx (tmode);
1479 if (GET_MODE (xtarget) != maxmode)
1481 if (GET_CODE (xtarget) == REG)
1483 int wider = (GET_MODE_SIZE (maxmode)
1484 > GET_MODE_SIZE (GET_MODE (xtarget)));
1485 xtarget = gen_lowpart (maxmode, xtarget);
1486 if (wider)
1487 xspec_target_subreg = xtarget;
1489 else
1490 xtarget = gen_reg_rtx (maxmode);
1493 /* If this machine's extv insists on a register target,
1494 make sure we have one. */
1495 if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
1496 (xtarget, maxmode)))
1497 xtarget = gen_reg_rtx (maxmode);
1499 bitsize_rtx = GEN_INT (bitsize);
1500 bitpos_rtx = GEN_INT (xbitpos);
1502 pat = gen_extv (protect_from_queue (xtarget, 1),
1503 xop0, bitsize_rtx, bitpos_rtx);
1504 if (pat)
1506 emit_insn (pat);
1507 target = xtarget;
1508 spec_target = xspec_target;
1509 spec_target_subreg = xspec_target_subreg;
1511 else
1513 delete_insns_since (last);
1514 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1515 bitpos, target, 0);
1518 else
1519 extv_loses:
1520 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1521 bitpos, target, 0);
1523 if (target == spec_target)
1524 return target;
1525 if (target == spec_target_subreg)
1526 return spec_target;
1527 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1529 /* If the target mode is floating-point, first convert to the
1530 integer mode of that size and then access it as a floating-point
1531 value via a SUBREG. */
1532 if (GET_MODE_CLASS (tmode) != MODE_INT
1533 && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
1535 target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
1536 MODE_INT, 0),
1537 target, unsignedp);
1538 return gen_lowpart (tmode, target);
1540 else
1541 return convert_to_mode (tmode, target, unsignedp);
1543 return target;
1546 /* Extract a bit field using shifts and boolean operations.
1547 Returns an rtx to represent the value.
1548 OP0 addresses a register (word) or memory (byte).
1549 BITPOS says which bit within the word or byte the bit field starts in.
1550 OFFSET says how many bytes farther the bit field starts;
1551 it is 0 if OP0 is a register.
1552 BITSIZE says how many bits long the bit field is.
1553 (If OP0 is a register, it may be narrower than a full word,
1554 but BITPOS still counts within a full word,
1555 which is significant on bigendian machines.)
1557 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1558 If TARGET is nonzero, attempts to store the value there
1559 and return TARGET, but this is not guaranteed.
1560 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
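/* For an unsigned field the code below reduces to a right shift by BITPOS
   followed by an AND with BITSIZE low-order one bits; for a signed field the
   value is shifted left until its msb reaches the msb of the containing mode
   and then arithmetic-shifted back down, which performs the sign extension.  */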
1562 static rtx
1563 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1564 target, unsignedp)
1565 enum machine_mode tmode;
1566 rtx op0, target;
1567 unsigned HOST_WIDE_INT offset, bitsize, bitpos;
1568 int unsignedp;
1570 unsigned int total_bits = BITS_PER_WORD;
1571 enum machine_mode mode;
1573 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1575 /* Special treatment for a bit field split across two registers. */
1576 if (bitsize + bitpos > BITS_PER_WORD)
1577 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1579 else
1581 /* Get the proper mode to use for this field. We want a mode that
1582 includes the entire field. If such a mode would be larger than
1583 a word, we won't be doing the extraction the normal way. */
1585 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1586 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1588 if (mode == VOIDmode)
1589 /* The only way this should occur is if the field spans word
1590 boundaries. */
1591 return extract_split_bit_field (op0, bitsize,
1592 bitpos + offset * BITS_PER_UNIT,
1593 unsignedp);
1595 total_bits = GET_MODE_BITSIZE (mode);
1597 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1598 be in the range 0 to total_bits-1, and put any excess bytes in
1599 OFFSET. */
1600 if (bitpos >= total_bits)
1602 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1603 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1604 * BITS_PER_UNIT);
1607 /* Get ref to an aligned byte, halfword, or word containing the field.
1608 Adjust BITPOS to be position within a word,
1609 and OFFSET to be the offset of that word.
1610 Then alter OP0 to refer to that word. */
1611 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1612 offset -= (offset % (total_bits / BITS_PER_UNIT));
1613 op0 = adjust_address (op0, mode, offset);
1616 mode = GET_MODE (op0);
1618 if (BYTES_BIG_ENDIAN)
1619 /* BITPOS is the distance between our msb and that of OP0.
1620 Convert it to the distance from the lsb. */
1621 bitpos = total_bits - bitsize - bitpos;
1623 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1624 We have reduced the big-endian case to the little-endian case. */
1626 if (unsignedp)
1628 if (bitpos)
1630 /* If the field does not already start at the lsb,
1631 shift it so it does. */
1632 tree amount = build_int_2 (bitpos, 0);
1633 /* Maybe propagate the target for the shift. */
1634 /* But not if we will return it--could confuse integrate.c. */
1635 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1636 && !REG_FUNCTION_VALUE_P (target)
1637 ? target : 0);
1638 if (tmode != mode) subtarget = 0;
1639 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1641 /* Convert the value to the desired mode. */
1642 if (mode != tmode)
1643 op0 = convert_to_mode (tmode, op0, 1);
1645 /* Unless the msb of the field used to be the msb when we shifted,
1646 mask out the upper bits. */
1648 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1649 return expand_binop (GET_MODE (op0), and_optab, op0,
1650 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1651 target, 1, OPTAB_LIB_WIDEN);
1652 return op0;
1655 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1656 then arithmetic-shift its lsb to the lsb of the word. */
1657 op0 = force_reg (mode, op0);
1658 if (mode != tmode)
1659 target = 0;
1661 /* Find the narrowest integer mode that contains the field. */
1663 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1664 mode = GET_MODE_WIDER_MODE (mode))
1665 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1667 op0 = convert_to_mode (mode, op0, 0);
1668 break;
1671 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1673 tree amount
1674 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1675 /* Maybe propagate the target for the shift. */
1676 /* But not if we will return the result--could confuse integrate.c. */
1677 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1678 && ! REG_FUNCTION_VALUE_P (target)
1679 ? target : 0);
1680 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1683 return expand_shift (RSHIFT_EXPR, mode, op0,
1684 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1685 target, 0);
1688 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1689 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1690 complement of that if COMPLEMENT. The mask is truncated if
1691 necessary to the width of mode MODE. The mask is zero-extended if
1692 BITSIZE+BITPOS is too small for MODE. */
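/* Example: in a 32-bit mode, BITPOS 8 and BITSIZE 4 give the mask 0x00000f00;
   with COMPLEMENT nonzero the result is 0xfffff0ff instead.  */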
1694 static rtx
1695 mask_rtx (mode, bitpos, bitsize, complement)
1696 enum machine_mode mode;
1697 int bitpos, bitsize, complement;
1699 HOST_WIDE_INT masklow, maskhigh;
1701 if (bitsize == 0)
1702 masklow = 0;
1703 else if (bitpos < HOST_BITS_PER_WIDE_INT)
1704 masklow = (HOST_WIDE_INT) -1 << bitpos;
1705 else
1706 masklow = 0;
1708 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1709 masklow &= ((unsigned HOST_WIDE_INT) -1
1710 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1712 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1713 maskhigh = -1;
1714 else
1715 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1717 if (bitsize == 0)
1718 maskhigh = 0;
1719 else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1720 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1721 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1722 else
1723 maskhigh = 0;
1725 if (complement)
1727 maskhigh = ~maskhigh;
1728 masklow = ~masklow;
1731 return immed_double_const (masklow, maskhigh, mode);
1734 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1735 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
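/* Example: VALUE 5, BITSIZE 3 and BITPOS 4 produce the constant 0x50
   (binary 101 shifted left four places).  */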
1737 static rtx
1738 lshift_value (mode, value, bitpos, bitsize)
1739 enum machine_mode mode;
1740 rtx value;
1741 int bitpos, bitsize;
1743 unsigned HOST_WIDE_INT v = INTVAL (value);
1744 HOST_WIDE_INT low, high;
1746 if (bitsize < HOST_BITS_PER_WIDE_INT)
1747 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1749 if (bitpos < HOST_BITS_PER_WIDE_INT)
1751 low = v << bitpos;
1752 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1754 else
1756 low = 0;
1757 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1760 return immed_double_const (low, high, mode);
1763 /* Extract a bit field that is split across two words
1764 and return an RTX for the result.
1766 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1767 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1768 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1770 static rtx
1771 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1772 rtx op0;
1773 unsigned HOST_WIDE_INT bitsize, bitpos;
1774 int unsignedp;
1776 unsigned int unit;
1777 unsigned int bitsdone = 0;
1778 rtx result = NULL_RTX;
1779 int first = 1;
1781 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1782 much at a time. */
1783 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1784 unit = BITS_PER_WORD;
1785 else
1786 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1788 while (bitsdone < bitsize)
1790 unsigned HOST_WIDE_INT thissize;
1791 rtx part, word;
1792 unsigned HOST_WIDE_INT thispos;
1793 unsigned HOST_WIDE_INT offset;
1795 offset = (bitpos + bitsdone) / unit;
1796 thispos = (bitpos + bitsdone) % unit;
1798 /* THISSIZE must not overrun a word boundary. Otherwise,
1799 extract_fixed_bit_field will call us again, and we will mutually
1800 recurse forever. */
1801 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1802 thissize = MIN (thissize, unit - thispos);
1804 /* If OP0 is a register, then handle OFFSET here.
1806 When handling multiword bitfields, extract_bit_field may pass
1807 down a word_mode SUBREG of a larger REG for a bitfield that actually
1808 crosses a word boundary. Thus, for a SUBREG, we must find
1809 the current word starting from the base register. */
1810 if (GET_CODE (op0) == SUBREG)
1812 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1813 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1814 GET_MODE (SUBREG_REG (op0)));
1815 offset = 0;
1817 else if (GET_CODE (op0) == REG)
1819 word = operand_subword_force (op0, offset, GET_MODE (op0));
1820 offset = 0;
1822 else
1823 word = op0;
1825 /* Extract the parts in bit-counting order,
1826 whose meaning is determined by BYTES_PER_UNIT.
1827 OFFSET is in UNITs, and UNIT is in bits.
1828 extract_fixed_bit_field wants offset in bytes. */
1829 part = extract_fixed_bit_field (word_mode, word,
1830 offset * unit / BITS_PER_UNIT,
1831 thissize, thispos, 0, 1);
1832 bitsdone += thissize;
1834 /* Shift this part into place for the result. */
1835 if (BYTES_BIG_ENDIAN)
1837 if (bitsize != bitsdone)
1838 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1839 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1841 else
1843 if (bitsdone != thissize)
1844 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1845 build_int_2 (bitsdone - thissize, 0), 0, 1);
1848 if (first)
1849 result = part;
1850 else
1851 /* Combine the parts with bitwise or. This works
1852 because we extracted each part as an unsigned bit field. */
1853 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1854 OPTAB_LIB_WIDEN);
1856 first = 0;
1859 /* Unsigned bit field: we are done. */
1860 if (unsignedp)
1861 return result;
1862 /* Signed bit field: sign-extend with two arithmetic shifts. */
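/* For instance, with BITS_PER_WORD == 32 and an 8-bit field this
   computes (result << 24) >> 24, where the right shift is arithmetic
   and copies the field's top bit into the upper 24 bits.  */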
1863 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1864 build_int_2 (BITS_PER_WORD - bitsize, 0),
1865 NULL_RTX, 0);
1866 return expand_shift (RSHIFT_EXPR, word_mode, result,
1867 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1870 /* Add INC into TARGET. */
1872 void
1873 expand_inc (target, inc)
1874 rtx target, inc;
1876 rtx value = expand_binop (GET_MODE (target), add_optab,
1877 target, inc,
1878 target, 0, OPTAB_LIB_WIDEN);
1879 if (value != target)
1880 emit_move_insn (target, value);
1883 /* Subtract DEC from TARGET. */
1885 void
1886 expand_dec (target, dec)
1887 rtx target, dec;
1889 rtx value = expand_binop (GET_MODE (target), sub_optab,
1890 target, dec,
1891 target, 0, OPTAB_LIB_WIDEN);
1892 if (value != target)
1893 emit_move_insn (target, value);
1896 /* Output a shift instruction for expression code CODE,
1897 with SHIFTED being the rtx for the value to shift,
1898 and AMOUNT the tree for the amount to shift by.
1899 Store the result in the rtx TARGET, if that is convenient.
1900 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1901 Return the rtx for where the value is. */
1904 expand_shift (code, mode, shifted, amount, target, unsignedp)
1905 enum tree_code code;
1906 enum machine_mode mode;
1907 rtx shifted;
1908 tree amount;
1909 rtx target;
1910 int unsignedp;
1912 rtx op1, temp = 0;
1913 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1914 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1915 int try;
1917 /* Previously detected shift-counts computed by NEGATE_EXPR
1918 and shifted in the other direction; but that does not work
1919 on all machines. */
1921 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1923 #ifdef SHIFT_COUNT_TRUNCATED
1924 if (SHIFT_COUNT_TRUNCATED)
1926 if (GET_CODE (op1) == CONST_INT
1927 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1928 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1929 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1930 % GET_MODE_BITSIZE (mode));
1931 else if (GET_CODE (op1) == SUBREG
1932 && subreg_lowpart_p (op1))
1933 op1 = SUBREG_REG (op1);
1935 #endif
1937 if (op1 == const0_rtx)
1938 return shifted;
1940 for (try = 0; temp == 0 && try < 3; try++)
1942 enum optab_methods methods;
1944 if (try == 0)
1945 methods = OPTAB_DIRECT;
1946 else if (try == 1)
1947 methods = OPTAB_WIDEN;
1948 else
1949 methods = OPTAB_LIB_WIDEN;
1951 if (rotate)
1953 /* Widening does not work for rotation. */
1954 if (methods == OPTAB_WIDEN)
1955 continue;
1956 else if (methods == OPTAB_LIB_WIDEN)
1958 /* If we have been unable to open-code this by a rotation,
1959 do it as the IOR of two shifts. I.e., to rotate A
1960 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1961 where C is the bitsize of A.
1963 It is theoretically possible that the target machine might
1964 not be able to perform either shift and hence we would
1965 be making two libcalls rather than just the one for the
1966 shift (similarly if IOR could not be done). We will allow
1967 this extremely unlikely lossage to avoid complicating the
1968 code below. */
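/* As a concrete instance of the identity above, rotating an 8-bit
   value A left by 3 computes (A << 3) | ((unsigned) A >> 5);
   e.g. 0xb4 (10110100) becomes 0xa5 (10100101).  */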
1970 rtx subtarget = target == shifted ? 0 : target;
1971 rtx temp1;
1972 tree type = TREE_TYPE (amount);
1973 tree new_amount = make_tree (type, op1);
1974 tree other_amount
1975 = fold (build (MINUS_EXPR, type,
1976 convert (type,
1977 build_int_2 (GET_MODE_BITSIZE (mode),
1978 0)),
1979 amount));
1981 shifted = force_reg (mode, shifted);
1983 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1984 mode, shifted, new_amount, subtarget, 1);
1985 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1986 mode, shifted, other_amount, 0, 1);
1987 return expand_binop (mode, ior_optab, temp, temp1, target,
1988 unsignedp, methods);
1991 temp = expand_binop (mode,
1992 left ? rotl_optab : rotr_optab,
1993 shifted, op1, target, unsignedp, methods);
1995 /* If we don't have the rotate, but we are rotating by a constant
1996 that is in range, try a rotate in the opposite direction. */
1998 if (temp == 0 && GET_CODE (op1) == CONST_INT
1999 && INTVAL (op1) > 0
2000 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2001 temp = expand_binop (mode,
2002 left ? rotr_optab : rotl_optab,
2003 shifted,
2004 GEN_INT (GET_MODE_BITSIZE (mode)
2005 - INTVAL (op1)),
2006 target, unsignedp, methods);
2008 else if (unsignedp)
2009 temp = expand_binop (mode,
2010 left ? ashl_optab : lshr_optab,
2011 shifted, op1, target, unsignedp, methods);
2013 /* Do arithmetic shifts.
2014 Also, if we are going to widen the operand, we can just as well
2015 use an arithmetic right-shift instead of a logical one. */
2016 if (temp == 0 && ! rotate
2017 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2019 enum optab_methods methods1 = methods;
2021 /* If trying to widen a log shift to an arithmetic shift,
2022 don't accept an arithmetic shift of the same size. */
2023 if (unsignedp)
2024 methods1 = OPTAB_MUST_WIDEN;
2026 /* Arithmetic shift */
2028 temp = expand_binop (mode,
2029 left ? ashl_optab : ashr_optab,
2030 shifted, op1, target, unsignedp, methods1);
2033 /* We used to try extzv here for logical right shifts, but that was
2034 only useful for one machine, the VAX, and caused poor code
2035 generation there for lshrdi3, so the code was deleted and a
2036 define_expand for lshrsi3 was added to vax.md. */
2039 if (temp == 0)
2040 abort ();
2041 return temp;
2044 enum alg_code { alg_zero, alg_m, alg_shift,
2045 alg_add_t_m2, alg_sub_t_m2,
2046 alg_add_factor, alg_sub_factor,
2047 alg_add_t2_m, alg_sub_t2_m,
2048 alg_add, alg_subtract, alg_factor, alg_shiftop };
2050 /* This structure records a sequence of operations.
2051 `ops' is the number of operations recorded.
2052 `cost' is their total cost.
2053 The operations are stored in `op' and the corresponding
2054 logarithms of the integer coefficients in `log'.
2056 These are the operations:
2057 alg_zero total := 0;
2058 alg_m total := multiplicand;
2059 alg_shift total := total * coeff;
2060 alg_add_t_m2 total := total + multiplicand * coeff;
2061 alg_sub_t_m2 total := total - multiplicand * coeff;
2062 alg_add_factor total := total * coeff + total;
2063 alg_sub_factor total := total * coeff - total;
2064 alg_add_t2_m total := total * coeff + multiplicand;
2065 alg_sub_t2_m total := total * coeff - multiplicand;
2067 The first operand must be either alg_zero or alg_m. */
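/* For example, one way to express a multiplication by 10 with these
   operations is

	alg_m			total := multiplicand
	alg_add_factor, log 2	total := total * 4 + total	(5x)
	alg_shift, log 1	total := total * 2		(10x)

   Which sequence synth_mult actually chooses depends on the add and
   shift costs recorded for the target.  */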
2069 struct algorithm
2071 short cost;
2072 short ops;
2073 /* The size of the OP and LOG fields are not directly related to the
2074 word size, but the worst-case algorithms will be if we have few
2075 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2076 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2077 in total wordsize operations. */
2078 enum alg_code op[MAX_BITS_PER_WORD];
2079 char log[MAX_BITS_PER_WORD];
2082 static void synth_mult PARAMS ((struct algorithm *,
2083 unsigned HOST_WIDE_INT,
2084 int));
2085 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2086 int, int,
2087 unsigned HOST_WIDE_INT *,
2088 int *, int *));
2089 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2090 int));
2091 /* Compute and return the best algorithm for multiplying by T.
2092 The algorithm must cost less than COST_LIMIT.
2093 If retval.cost >= COST_LIMIT, no algorithm was found and the
2094 other fields of the returned struct are undefined. */
2096 static void
2097 synth_mult (alg_out, t, cost_limit)
2098 struct algorithm *alg_out;
2099 unsigned HOST_WIDE_INT t;
2100 int cost_limit;
2102 int m;
2103 struct algorithm *alg_in, *best_alg;
2104 int cost;
2105 unsigned HOST_WIDE_INT q;
2107 /* Indicate that no algorithm is yet found. If no algorithm
2108 is found, this value will be returned and indicate failure. */
2109 alg_out->cost = cost_limit;
2111 if (cost_limit <= 0)
2112 return;
2114 /* t == 1 can be done at zero cost. */
2115 if (t == 1)
2117 alg_out->ops = 1;
2118 alg_out->cost = 0;
2119 alg_out->op[0] = alg_m;
2120 return;
2123 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2124 fail now. */
2125 if (t == 0)
2127 if (zero_cost >= cost_limit)
2128 return;
2129 else
2131 alg_out->ops = 1;
2132 alg_out->cost = zero_cost;
2133 alg_out->op[0] = alg_zero;
2134 return;
2138 /* We'll be needing a couple extra algorithm structures now. */
2140 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2141 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2143 /* If we have a group of zero bits at the low-order part of T, try
2144 multiplying by the remaining bits and then doing a shift. */
2146 if ((t & 1) == 0)
2148 m = floor_log2 (t & -t); /* m = number of low zero bits */
2149 if (m < BITS_PER_WORD)
2151 q = t >> m;
2152 cost = shift_cost[m];
2153 synth_mult (alg_in, q, cost_limit - cost);
2155 cost += alg_in->cost;
2156 if (cost < cost_limit)
2158 struct algorithm *x;
2159 x = alg_in, alg_in = best_alg, best_alg = x;
2160 best_alg->log[best_alg->ops] = m;
2161 best_alg->op[best_alg->ops] = alg_shift;
2162 cost_limit = cost;
2167 /* If we have an odd number, add or subtract one. */
2168 if ((t & 1) != 0)
2170 unsigned HOST_WIDE_INT w;
2172 for (w = 1; (w & t) != 0; w <<= 1)
2174 /* If T was -1, then W will be zero after the loop. This is another
2175 case where T ends with ...111. Handling this by multiplying by (T + 1)
2176 and subtracting 1 produces slightly better code and makes algorithm
2177 selection much faster than treating it like the ...0111 case
2178 below. */
2179 if (w == 0
2180 || (w > 2
2181 /* Reject the case where t is 3.
2182 Thus we prefer addition in that case. */
2183 && t != 3))
2185 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
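/* E.g. for T == 7 the recursion finds 8 == 1 << 3 and an alg_sub_t_m2
   step with log 0 is recorded, i.e. x * 7 == (x << 3) - x.  */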
2187 cost = add_cost;
2188 synth_mult (alg_in, t + 1, cost_limit - cost);
2190 cost += alg_in->cost;
2191 if (cost < cost_limit)
2193 struct algorithm *x;
2194 x = alg_in, alg_in = best_alg, best_alg = x;
2195 best_alg->log[best_alg->ops] = 0;
2196 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2197 cost_limit = cost;
2200 else
2202 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2204 cost = add_cost;
2205 synth_mult (alg_in, t - 1, cost_limit - cost);
2207 cost += alg_in->cost;
2208 if (cost < cost_limit)
2210 struct algorithm *x;
2211 x = alg_in, alg_in = best_alg, best_alg = x;
2212 best_alg->log[best_alg->ops] = 0;
2213 best_alg->op[best_alg->ops] = alg_add_t_m2;
2214 cost_limit = cost;
2219 /* Look for factors of t of the form
2220 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2221 If we find such a factor, we can multiply by t using an algorithm that
2222 multiplies by q, shift the result by m and add/subtract it to itself.
2224 We search for large factors first and loop down, even if large factors
2225 are less probable than small; if we find a large factor we will find a
2226 good sequence quickly, and therefore be able to prune (by decreasing
2227 COST_LIMIT) the search. */
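/* For example, T == 45 has the factor 9 == 2**3 + 1, so the search can
   recurse on 45 / 9 == 5 and append an alg_add_factor step with log 3:
   total := total * 8 + total turns 5x into 45x.  */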
2229 for (m = floor_log2 (t - 1); m >= 2; m--)
2231 unsigned HOST_WIDE_INT d;
2233 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2234 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2236 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2237 synth_mult (alg_in, t / d, cost_limit - cost);
2239 cost += alg_in->cost;
2240 if (cost < cost_limit)
2242 struct algorithm *x;
2243 x = alg_in, alg_in = best_alg, best_alg = x;
2244 best_alg->log[best_alg->ops] = m;
2245 best_alg->op[best_alg->ops] = alg_add_factor;
2246 cost_limit = cost;
2248 /* Other factors will have been taken care of in the recursion. */
2249 break;
2252 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2253 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2255 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2256 synth_mult (alg_in, t / d, cost_limit - cost);
2258 cost += alg_in->cost;
2259 if (cost < cost_limit)
2261 struct algorithm *x;
2262 x = alg_in, alg_in = best_alg, best_alg = x;
2263 best_alg->log[best_alg->ops] = m;
2264 best_alg->op[best_alg->ops] = alg_sub_factor;
2265 cost_limit = cost;
2267 break;
2271 /* Try shift-and-add (load effective address) instructions,
2272 i.e. do a*3, a*5, a*9. */
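/* E.g. for T == 9 this recurses on (9 - 1) >> 3 == 1 and records an
   alg_add_t2_m step with log 3: total := total * 8 + multiplicand.  */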
2273 if ((t & 1) != 0)
2275 q = t - 1;
2276 q = q & -q;
2277 m = exact_log2 (q);
2278 if (m >= 0 && m < BITS_PER_WORD)
2280 cost = shiftadd_cost[m];
2281 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2283 cost += alg_in->cost;
2284 if (cost < cost_limit)
2286 struct algorithm *x;
2287 x = alg_in, alg_in = best_alg, best_alg = x;
2288 best_alg->log[best_alg->ops] = m;
2289 best_alg->op[best_alg->ops] = alg_add_t2_m;
2290 cost_limit = cost;
2294 q = t + 1;
2295 q = q & -q;
2296 m = exact_log2 (q);
2297 if (m >= 0 && m < BITS_PER_WORD)
2299 cost = shiftsub_cost[m];
2300 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2302 cost += alg_in->cost;
2303 if (cost < cost_limit)
2305 struct algorithm *x;
2306 x = alg_in, alg_in = best_alg, best_alg = x;
2307 best_alg->log[best_alg->ops] = m;
2308 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2309 cost_limit = cost;
2314 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2315 we have not found any algorithm. */
2316 if (cost_limit == alg_out->cost)
2317 return;
2319 /* If we are getting a too long sequence for `struct algorithm'
2320 to record, make this search fail. */
2321 if (best_alg->ops == MAX_BITS_PER_WORD)
2322 return;
2324 /* Copy the algorithm from temporary space to the space at alg_out.
2325 We avoid using structure assignment because the majority of
2326 best_alg is normally undefined, and this is a critical function. */
2327 alg_out->ops = best_alg->ops + 1;
2328 alg_out->cost = cost_limit;
2329 memcpy (alg_out->op, best_alg->op,
2330 alg_out->ops * sizeof *alg_out->op);
2331 memcpy (alg_out->log, best_alg->log,
2332 alg_out->ops * sizeof *alg_out->log);
2335 /* Perform a multiplication and return an rtx for the result.
2336 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2337 TARGET is a suggestion for where to store the result (an rtx).
2339 We check specially for a constant integer as OP1.
2340 If you want this check for OP0 as well, then before calling
2341 you should swap the two operands if OP0 would be constant. */
2344 expand_mult (mode, op0, op1, target, unsignedp)
2345 enum machine_mode mode;
2346 rtx op0, op1, target;
2347 int unsignedp;
2349 rtx const_op1 = op1;
2351 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2352 less than or equal in size to `unsigned int' this doesn't matter.
2353 If the mode is larger than `unsigned int', then synth_mult works only
2354 if the constant value exactly fits in an `unsigned int' without any
2355 truncation. This means that multiplying by negative values does
2356 not work; results are off by 2^32 on a 32 bit machine. */
2358 /* If we are multiplying in DImode, it may still be a win
2359 to try to work with shifts and adds. */
2360 if (GET_CODE (op1) == CONST_DOUBLE
2361 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2362 && HOST_BITS_PER_INT >= BITS_PER_WORD
2363 && CONST_DOUBLE_HIGH (op1) == 0)
2364 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2365 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2366 && GET_CODE (op1) == CONST_INT
2367 && INTVAL (op1) < 0)
2368 const_op1 = 0;
2370 /* We used to test optimize here, on the grounds that it's better to
2371 produce a smaller program when -O is not used.
2372 But this causes such a terrible slowdown sometimes
2373 that it seems better to use synth_mult always. */
2375 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2376 && (unsignedp || ! flag_trapv))
2378 struct algorithm alg;
2379 struct algorithm alg2;
2380 HOST_WIDE_INT val = INTVAL (op1);
2381 HOST_WIDE_INT val_so_far;
2382 rtx insn;
2383 int mult_cost;
2384 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2386 /* op0 must be register to make mult_cost match the precomputed
2387 shiftadd_cost array. */
2388 op0 = force_reg (mode, op0);
2390 /* Try to do the computation three ways: multiply by the negative of OP1
2391 and then negate, do the multiplication directly, or do multiplication
2392 by OP1 - 1. */
2394 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2395 mult_cost = MIN (12 * add_cost, mult_cost);
2397 synth_mult (&alg, val, mult_cost);
2399 /* This works only if the inverted value actually fits in an
2400 `unsigned int' */
2401 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2403 synth_mult (&alg2, - val,
2404 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2405 if (alg2.cost + negate_cost < alg.cost)
2406 alg = alg2, variant = negate_variant;
2409 /* This proves very useful for division-by-constant. */
2410 synth_mult (&alg2, val - 1,
2411 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2412 if (alg2.cost + add_cost < alg.cost)
2413 alg = alg2, variant = add_variant;
2415 if (alg.cost < mult_cost)
2417 /* We found something cheaper than a multiply insn. */
2418 int opno;
2419 rtx accum, tem;
2420 enum machine_mode nmode;
2422 op0 = protect_from_queue (op0, 0);
2424 /* Avoid referencing memory over and over.
2425 For speed, but also for correctness when mem is volatile. */
2426 if (GET_CODE (op0) == MEM)
2427 op0 = force_reg (mode, op0);
2429 /* ACCUM starts out either as OP0 or as a zero, depending on
2430 the first operation. */
2432 if (alg.op[0] == alg_zero)
2434 accum = copy_to_mode_reg (mode, const0_rtx);
2435 val_so_far = 0;
2437 else if (alg.op[0] == alg_m)
2439 accum = copy_to_mode_reg (mode, op0);
2440 val_so_far = 1;
2442 else
2443 abort ();
2445 for (opno = 1; opno < alg.ops; opno++)
2447 int log = alg.log[opno];
2448 int preserve = preserve_subexpressions_p ();
2449 rtx shift_subtarget = preserve ? 0 : accum;
2450 rtx add_target
2451 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2452 && ! preserve)
2453 ? target : 0;
2454 rtx accum_target = preserve ? 0 : accum;
2456 switch (alg.op[opno])
2458 case alg_shift:
2459 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2460 build_int_2 (log, 0), NULL_RTX, 0);
2461 val_so_far <<= log;
2462 break;
2464 case alg_add_t_m2:
2465 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2466 build_int_2 (log, 0), NULL_RTX, 0);
2467 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2468 add_target
2469 ? add_target : accum_target);
2470 val_so_far += (HOST_WIDE_INT) 1 << log;
2471 break;
2473 case alg_sub_t_m2:
2474 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2475 build_int_2 (log, 0), NULL_RTX, 0);
2476 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2477 add_target
2478 ? add_target : accum_target);
2479 val_so_far -= (HOST_WIDE_INT) 1 << log;
2480 break;
2482 case alg_add_t2_m:
2483 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2484 build_int_2 (log, 0), shift_subtarget,
2486 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2487 add_target
2488 ? add_target : accum_target);
2489 val_so_far = (val_so_far << log) + 1;
2490 break;
2492 case alg_sub_t2_m:
2493 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2494 build_int_2 (log, 0), shift_subtarget,
2496 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2497 add_target
2498 ? add_target : accum_target);
2499 val_so_far = (val_so_far << log) - 1;
2500 break;
2502 case alg_add_factor:
2503 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2504 build_int_2 (log, 0), NULL_RTX, 0);
2505 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2506 add_target
2507 ? add_target : accum_target);
2508 val_so_far += val_so_far << log;
2509 break;
2511 case alg_sub_factor:
2512 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2513 build_int_2 (log, 0), NULL_RTX, 0);
2514 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2515 (add_target ? add_target
2516 : preserve ? 0 : tem));
2517 val_so_far = (val_so_far << log) - val_so_far;
2518 break;
2520 default:
2521 abort ();
2524 /* Write a REG_EQUAL note on the last insn so that we can cse
2525 multiplication sequences. Note that if ACCUM is a SUBREG,
2526 we've set the inner register and must properly indicate
2527 that. */
2529 tem = op0, nmode = mode;
2530 if (GET_CODE (accum) == SUBREG)
2532 nmode = GET_MODE (SUBREG_REG (accum));
2533 tem = gen_lowpart (nmode, op0);
2536 insn = get_last_insn ();
2537 set_unique_reg_note (insn,
2538 REG_EQUAL,
2539 gen_rtx_MULT (nmode, tem,
2540 GEN_INT (val_so_far)));
2543 if (variant == negate_variant)
2545 val_so_far = - val_so_far;
2546 accum = expand_unop (mode, neg_optab, accum, target, 0);
2548 else if (variant == add_variant)
2550 val_so_far = val_so_far + 1;
2551 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2554 if (val != val_so_far)
2555 abort ();
2557 return accum;
2561 /* This used to use umul_optab if unsigned, but for non-widening multiply
2562 there is no difference between signed and unsigned. */
2563 op0 = expand_binop (mode,
2564 ! unsignedp
2565 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2566 ? smulv_optab : smul_optab,
2567 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2568 if (op0 == 0)
2569 abort ();
2570 return op0;
2573 /* Return the smallest n such that 2**n >= X. */
2576 ceil_log2 (x)
2577 unsigned HOST_WIDE_INT x;
2579 return floor_log2 (x - 1) + 1;
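/* For example, ceil_log2 (5) == floor_log2 (4) + 1 == 3 and
   ceil_log2 (8) == floor_log2 (7) + 1 == 3.  */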
2582 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2583 replace division by D, and put the least significant N bits of the result
2584 in *MULTIPLIER_PTR and return the most significant bit.
2586 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2587 needed precision is in PRECISION (should be <= N).
2589 PRECISION should be as small as possible so this function can choose
2590 the multiplier more freely.
2592 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2593 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2595 Using this function, x/D will be equal to (x * m) >> N >> (*POST_SHIFT_PTR),
2596 where m is the full N + 1 bit multiplier. */
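/* For instance, with N == 32, D == 5 and PRECISION == 32 this chooses
   the multiplier 0xcccccccd with *POST_SHIFT_PTR == 2 and *LGUP_PTR == 3,
   so x / 5 == (x * 0xcccccccd) >> 32 >> 2 for every 32-bit unsigned x.  */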
2598 static
2599 unsigned HOST_WIDE_INT
2600 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2601 unsigned HOST_WIDE_INT d;
2602 int n;
2603 int precision;
2604 unsigned HOST_WIDE_INT *multiplier_ptr;
2605 int *post_shift_ptr;
2606 int *lgup_ptr;
2608 HOST_WIDE_INT mhigh_hi, mlow_hi;
2609 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2610 int lgup, post_shift;
2611 int pow, pow2;
2612 unsigned HOST_WIDE_INT nl, dummy1;
2613 HOST_WIDE_INT nh, dummy2;
2615 /* lgup = ceil(log2(divisor)); */
2616 lgup = ceil_log2 (d);
2618 if (lgup > n)
2619 abort ();
2621 pow = n + lgup;
2622 pow2 = n + lgup - precision;
2624 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2626 /* We could handle this with some effort, but this case is much better
2627 handled directly with a scc insn, so rely on caller using that. */
2628 abort ();
2631 /* mlow = 2^(N + lgup)/d */
2632 if (pow >= HOST_BITS_PER_WIDE_INT)
2634 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2635 nl = 0;
2637 else
2639 nh = 0;
2640 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2642 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2643 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2645 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2646 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2647 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2648 else
2649 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2650 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2651 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2653 if (mhigh_hi && nh - d >= d)
2654 abort ();
2655 if (mhigh_hi > 1 || mlow_hi > 1)
2656 abort ();
2657 /* assert that mlow < mhigh. */
2658 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2659 abort ();
2661 /* If precision == N, then mlow, mhigh exceed 2^N
2662 (but they do not exceed 2^(N+1)). */
2664 /* Reduce to lowest terms */
2665 for (post_shift = lgup; post_shift > 0; post_shift--)
2667 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2668 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2669 if (ml_lo >= mh_lo)
2670 break;
2672 mlow_hi = 0;
2673 mlow_lo = ml_lo;
2674 mhigh_hi = 0;
2675 mhigh_lo = mh_lo;
2678 *post_shift_ptr = post_shift;
2679 *lgup_ptr = lgup;
2680 if (n < HOST_BITS_PER_WIDE_INT)
2682 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2683 *multiplier_ptr = mhigh_lo & mask;
2684 return mhigh_lo >= mask;
2686 else
2688 *multiplier_ptr = mhigh_lo;
2689 return mhigh_hi;
2693 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2694 congruent to 1 (mod 2**N). */
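/* For example, invert_mod2n (3, 8) == 171, since 3 * 171 == 513
   == 2 * 256 + 1, which is congruent to 1 mod 2**8.  */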
2696 static unsigned HOST_WIDE_INT
2697 invert_mod2n (x, n)
2698 unsigned HOST_WIDE_INT x;
2699 int n;
2701 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2703 /* The algorithm notes that the choice y = x satisfies
2704 x*y == 1 mod 2^3, since x is assumed odd.
2705 Each iteration doubles the number of bits of significance in y. */
2707 unsigned HOST_WIDE_INT mask;
2708 unsigned HOST_WIDE_INT y = x;
2709 int nbit = 3;
2711 mask = (n == HOST_BITS_PER_WIDE_INT
2712 ? ~(unsigned HOST_WIDE_INT) 0
2713 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2715 while (nbit < n)
2717 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2718 nbit *= 2;
2720 return y;
2723 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2724 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2725 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2726 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2727 become signed.
2729 The result is put in TARGET if that is convenient.
2731 MODE is the mode of operation. */
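/* The adjustment below relies on the identity, taken modulo the width
   of MODE:

	unsigned highpart (x, y)
	  == signed highpart (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)

   where x and y are the same bit patterns read as signed values; the
   arithmetic shifts by GET_MODE_BITSIZE (mode) - 1 and the ANDs below
   compute the two conditional terms.  */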
2734 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2735 enum machine_mode mode;
2736 rtx adj_operand, op0, op1, target;
2737 int unsignedp;
2739 rtx tem;
2740 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2742 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2743 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2744 NULL_RTX, 0);
2745 tem = expand_and (mode, tem, op1, NULL_RTX);
2746 adj_operand
2747 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2748 adj_operand);
2750 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2751 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2752 NULL_RTX, 0);
2753 tem = expand_and (mode, tem, op0, NULL_RTX);
2754 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2755 target);
2757 return target;
2760 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2761 in TARGET if that is convenient, and return where the result is. If the
2762 operation can not be performed, 0 is returned.
2764 MODE is the mode of operation and result.
2766 UNSIGNEDP nonzero means unsigned multiply.
2768 MAX_COST is the total allowed cost for the expanded RTL. */
2771 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2772 enum machine_mode mode;
2773 rtx op0, target;
2774 unsigned HOST_WIDE_INT cnst1;
2775 int unsignedp;
2776 int max_cost;
2778 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2779 optab mul_highpart_optab;
2780 optab moptab;
2781 rtx tem;
2782 int size = GET_MODE_BITSIZE (mode);
2783 rtx op1, wide_op1;
2785 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2786 if (size > HOST_BITS_PER_WIDE_INT)
2787 abort ();
2789 op1 = gen_int_mode (cnst1, mode);
2791 wide_op1
2792 = immed_double_const (cnst1,
2793 (unsignedp
2794 ? (HOST_WIDE_INT) 0
2795 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2796 wider_mode);
2798 /* expand_mult handles constant multiplication of word_mode
2799 or narrower. It does a poor job for large modes. */
2800 if (size < BITS_PER_WORD
2801 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2803 /* We have to do this, since expand_binop doesn't do conversion for
2804 multiply. Maybe change expand_binop to handle widening multiply? */
2805 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2807 /* We know that this can't have signed overflow, so pretend this is
2808 an unsigned multiply. */
2809 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2810 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2811 build_int_2 (size, 0), NULL_RTX, 1);
2812 return convert_modes (mode, wider_mode, tem, unsignedp);
2815 if (target == 0)
2816 target = gen_reg_rtx (mode);
2818 /* Firstly, try using a multiplication insn that only generates the needed
2819 high part of the product, and in the sign flavor of unsignedp. */
2820 if (mul_highpart_cost[(int) mode] < max_cost)
2822 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2823 target = expand_binop (mode, mul_highpart_optab,
2824 op0, op1, target, unsignedp, OPTAB_DIRECT);
2825 if (target)
2826 return target;
2829 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2830 Need to adjust the result after the multiplication. */
2831 if (size - 1 < BITS_PER_WORD
2832 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2833 < max_cost))
2835 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2836 target = expand_binop (mode, mul_highpart_optab,
2837 op0, op1, target, unsignedp, OPTAB_DIRECT);
2838 if (target)
2839 /* We used the wrong signedness. Adjust the result. */
2840 return expand_mult_highpart_adjust (mode, target, op0,
2841 op1, target, unsignedp);
2844 /* Try widening multiplication. */
2845 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2846 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2847 && mul_widen_cost[(int) wider_mode] < max_cost)
2849 op1 = force_reg (mode, op1);
2850 goto try;
2853 /* Try widening the mode and perform a non-widening multiplication. */
2854 moptab = smul_optab;
2855 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2856 && size - 1 < BITS_PER_WORD
2857 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2859 op1 = wide_op1;
2860 goto try;
2863 /* Try widening multiplication of opposite signedness, and adjust. */
2864 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2865 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2866 && size - 1 < BITS_PER_WORD
2867 && (mul_widen_cost[(int) wider_mode]
2868 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2870 rtx regop1 = force_reg (mode, op1);
2871 tem = expand_binop (wider_mode, moptab, op0, regop1,
2872 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2873 if (tem != 0)
2875 /* Extract the high half of the just generated product. */
2876 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2877 build_int_2 (size, 0), NULL_RTX, 1);
2878 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2879 /* We used the wrong signedness. Adjust the result. */
2880 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2881 target, unsignedp);
2885 return 0;
2887 try:
2888 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2889 tem = expand_binop (wider_mode, moptab, op0, op1,
2890 NULL_RTX, unsignedp, OPTAB_WIDEN);
2891 if (tem == 0)
2892 return 0;
2894 /* Extract the high half of the just generated product. */
2895 if (mode == word_mode)
2897 return gen_highpart (mode, tem);
2899 else
2901 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2902 build_int_2 (size, 0), NULL_RTX, 1);
2903 return convert_modes (mode, wider_mode, tem, unsignedp);
2907 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2908 if that is convenient, and returning where the result is.
2909 You may request either the quotient or the remainder as the result;
2910 specify REM_FLAG nonzero to get the remainder.
2912 CODE is the expression code for which kind of division this is;
2913 it controls how rounding is done. MODE is the machine mode to use.
2914 UNSIGNEDP nonzero means do unsigned division. */
2916 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2917 and then correct it by or'ing in missing high bits
2918 if result of ANDI is nonzero.
2919 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2920 This could optimize to a bfexts instruction.
2921 But C doesn't use these operations, so their optimizations are
2922 left for later. */
2923 /* ??? For modulo, we don't actually need the highpart of the first product,
2924 the low part will do nicely. And for small divisors, the second multiply
2925 can also be a low-part only multiply or even be completely left out.
2926 E.g. to calculate the remainder of a division by 3 with a 32 bit
2927 multiply, multiply with 0x55555556 and extract the upper two bits;
2928 the result is exact for inputs up to 0x1fffffff.
2929 The input range can be reduced by using cross-sum rules.
2930 For odd divisors >= 3, the following table gives right shift counts
2931 so that if a number is shifted by an integer multiple of the given
2932 amount, the remainder stays the same:
2933 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2934 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2935 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2936 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2937 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2939 Cross-sum rules for even numbers can be derived by leaving as many bits
2940 to the right alone as the divisor has zeros to the right.
2941 E.g. if x is an unsigned 32 bit number:
2942 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2945 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
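/* x & (x - 1) clears the lowest set bit of x, so the expression is zero
   exactly when x has at most one bit set, i.e. when x is zero or a power
   of two; e.g. 8 & 7 == 0 but 12 & 11 == 8.  */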
2948 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2949 int rem_flag;
2950 enum tree_code code;
2951 enum machine_mode mode;
2952 rtx op0, op1, target;
2953 int unsignedp;
2955 enum machine_mode compute_mode;
2956 rtx tquotient;
2957 rtx quotient = 0, remainder = 0;
2958 rtx last;
2959 int size;
2960 rtx insn, set;
2961 optab optab1, optab2;
2962 int op1_is_constant, op1_is_pow2 = 0;
2963 int max_cost, extra_cost;
2964 static HOST_WIDE_INT last_div_const = 0;
2965 static HOST_WIDE_INT ext_op1;
2967 op1_is_constant = GET_CODE (op1) == CONST_INT;
2968 if (op1_is_constant)
2970 ext_op1 = INTVAL (op1);
2971 if (unsignedp)
2972 ext_op1 &= GET_MODE_MASK (mode);
2973 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
2974 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
2978 This is the structure of expand_divmod:
2980 First comes code to fix up the operands so we can perform the operations
2981 correctly and efficiently.
2983 Second comes a switch statement with code specific for each rounding mode.
2984 For some special operands this code emits all RTL for the desired
2985 operation, for other cases, it generates only a quotient and stores it in
2986 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2987 to indicate that it has not done anything.
2989 Last comes code that finishes the operation. If QUOTIENT is set and
2990 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2991 QUOTIENT is not set, it is computed using trunc rounding.
2993 We try to generate special code for division and remainder when OP1 is a
2994 constant. If |OP1| = 2**n we can use shifts and some other fast
2995 operations. For other values of OP1, we compute a carefully selected
2996 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2997 by m.
2999 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3000 half of the product. Different strategies for generating the product are
3001 implemented in expand_mult_highpart.
3003 If what we actually want is the remainder, we generate that by another
3004 by-constant multiplication and a subtraction. */
3006 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3007 code below will malfunction if we are, so check here and handle
3008 the special case if so. */
3009 if (op1 == const1_rtx)
3010 return rem_flag ? const0_rtx : op0;
3012 /* When dividing by -1, we could get an overflow.
3013 negv_optab can handle overflows. */
3014 if (! unsignedp && op1 == constm1_rtx)
3016 if (rem_flag)
3017 return const0_rtx;
3018 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3019 ? negv_optab : neg_optab, op0, target, 0);
3022 if (target
3023 /* Don't use the function value register as a target
3024 since we have to read it as well as write it,
3025 and function-inlining gets confused by this. */
3026 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3027 /* Don't clobber an operand while doing a multi-step calculation. */
3028 || ((rem_flag || op1_is_constant)
3029 && (reg_mentioned_p (target, op0)
3030 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3031 || reg_mentioned_p (target, op1)
3032 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3033 target = 0;
3035 /* Get the mode in which to perform this computation. Normally it will
3036 be MODE, but sometimes we can't do the desired operation in MODE.
3037 If so, pick a wider mode in which we can do the operation. Convert
3038 to that mode at the start to avoid repeated conversions.
3040 First see what operations we need. These depend on the expression
3041 we are evaluating. (We assume that divxx3 insns exist under the
3042 same conditions that modxx3 insns and that these insns don't normally
3043 fail. If these assumptions are not correct, we may generate less
3044 efficient code in some cases.)
3046 Then see if we find a mode in which we can open-code that operation
3047 (either a division, modulus, or shift). Finally, check for the smallest
3048 mode for which we can do the operation with a library call. */
3050 /* We might want to refine this now that we have division-by-constant
3051 optimization. Since expand_mult_highpart tries so many variants, it is
3052 not straightforward to generalize this. Maybe we should make an array
3053 of possible modes in init_expmed? Save this for GCC 2.7. */
3055 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3056 ? (unsignedp ? lshr_optab : ashr_optab)
3057 : (unsignedp ? udiv_optab : sdiv_optab));
3058 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3059 ? optab1
3060 : (unsignedp ? udivmod_optab : sdivmod_optab));
3062 for (compute_mode = mode; compute_mode != VOIDmode;
3063 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3064 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3065 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3066 break;
3068 if (compute_mode == VOIDmode)
3069 for (compute_mode = mode; compute_mode != VOIDmode;
3070 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3071 if (optab1->handlers[(int) compute_mode].libfunc
3072 || optab2->handlers[(int) compute_mode].libfunc)
3073 break;
3075 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3076 in expand_binop. */
3077 if (compute_mode == VOIDmode)
3078 compute_mode = mode;
3080 if (target && GET_MODE (target) == compute_mode)
3081 tquotient = target;
3082 else
3083 tquotient = gen_reg_rtx (compute_mode);
3085 size = GET_MODE_BITSIZE (compute_mode);
3086 #if 0
3087 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3088 (mode), and thereby get better code when OP1 is a constant. Do that
3089 later. It will require going over all usages of SIZE below. */
3090 size = GET_MODE_BITSIZE (mode);
3091 #endif
3093 /* Only deduct something for a REM if the last divide done was
3094 for a different constant. Then set the constant of the last
3095 divide. */
3096 max_cost = div_cost[(int) compute_mode]
3097 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3098 && INTVAL (op1) == last_div_const)
3099 ? mul_cost[(int) compute_mode] + add_cost : 0);
3101 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3103 /* Now convert to the best mode to use. */
3104 if (compute_mode != mode)
3106 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3107 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3109 /* convert_modes may have placed op1 into a register, so we
3110 must recompute the following. */
3111 op1_is_constant = GET_CODE (op1) == CONST_INT;
3112 op1_is_pow2 = (op1_is_constant
3113 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3114 || (! unsignedp
3115 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3118 /* If one of the operands is a volatile MEM, copy it into a register. */
3120 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3121 op0 = force_reg (compute_mode, op0);
3122 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3123 op1 = force_reg (compute_mode, op1);
3125 /* If we need the remainder or if OP1 is constant, we need to
3126 put OP0 in a register in case it has any queued subexpressions. */
3127 if (rem_flag || op1_is_constant)
3128 op0 = force_reg (compute_mode, op0);
3130 last = get_last_insn ();
3132 /* Promote floor rounding to trunc rounding for unsigned operations. */
3133 if (unsignedp)
3135 if (code == FLOOR_DIV_EXPR)
3136 code = TRUNC_DIV_EXPR;
3137 if (code == FLOOR_MOD_EXPR)
3138 code = TRUNC_MOD_EXPR;
3139 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3140 code = TRUNC_DIV_EXPR;
3143 if (op1 != const0_rtx)
3144 switch (code)
3146 case TRUNC_MOD_EXPR:
3147 case TRUNC_DIV_EXPR:
3148 if (op1_is_constant)
3150 if (unsignedp)
3152 unsigned HOST_WIDE_INT mh, ml;
3153 int pre_shift, post_shift;
3154 int dummy;
3155 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3156 & GET_MODE_MASK (compute_mode));
3158 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3160 pre_shift = floor_log2 (d);
3161 if (rem_flag)
3163 remainder
3164 = expand_binop (compute_mode, and_optab, op0,
3165 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3166 remainder, 1,
3167 OPTAB_LIB_WIDEN);
3168 if (remainder)
3169 return gen_lowpart (mode, remainder);
3171 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3172 build_int_2 (pre_shift, 0),
3173 tquotient, 1);
3175 else if (size <= HOST_BITS_PER_WIDE_INT)
3177 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3179 /* Most significant bit of divisor is set; emit an scc
3180 insn. */
3181 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3182 compute_mode, 1, 1);
3183 if (quotient == 0)
3184 goto fail1;
3186 else
3188 /* Find a suitable multiplier and right shift count
3189 instead of multiplying with D. */
3191 mh = choose_multiplier (d, size, size,
3192 &ml, &post_shift, &dummy);
3194 /* If the suggested multiplier is more than SIZE bits,
3195 we can do better for even divisors, using an
3196 initial right shift. */
3197 if (mh != 0 && (d & 1) == 0)
3199 pre_shift = floor_log2 (d & -d);
3200 mh = choose_multiplier (d >> pre_shift, size,
3201 size - pre_shift,
3202 &ml, &post_shift, &dummy);
3203 if (mh)
3204 abort ();
3206 else
3207 pre_shift = 0;
3209 if (mh != 0)
3211 rtx t1, t2, t3, t4;
3213 if (post_shift - 1 >= BITS_PER_WORD)
3214 goto fail1;
3216 extra_cost = (shift_cost[post_shift - 1]
3217 + shift_cost[1] + 2 * add_cost);
3218 t1 = expand_mult_highpart (compute_mode, op0, ml,
3219 NULL_RTX, 1,
3220 max_cost - extra_cost);
3221 if (t1 == 0)
3222 goto fail1;
3223 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3224 op0, t1),
3225 NULL_RTX);
3226 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3227 build_int_2 (1, 0), NULL_RTX,1);
3228 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3229 t1, t3),
3230 NULL_RTX);
3231 quotient
3232 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3233 build_int_2 (post_shift - 1, 0),
3234 tquotient, 1);
3236 else
3238 rtx t1, t2;
3240 if (pre_shift >= BITS_PER_WORD
3241 || post_shift >= BITS_PER_WORD)
3242 goto fail1;
3244 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3245 build_int_2 (pre_shift, 0),
3246 NULL_RTX, 1);
3247 extra_cost = (shift_cost[pre_shift]
3248 + shift_cost[post_shift]);
3249 t2 = expand_mult_highpart (compute_mode, t1, ml,
3250 NULL_RTX, 1,
3251 max_cost - extra_cost);
3252 if (t2 == 0)
3253 goto fail1;
3254 quotient
3255 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3256 build_int_2 (post_shift, 0),
3257 tquotient, 1);
3261 else /* Too wide mode to use tricky code */
3262 break;
3264 insn = get_last_insn ();
3265 if (insn != last
3266 && (set = single_set (insn)) != 0
3267 && SET_DEST (set) == quotient)
3268 set_unique_reg_note (insn,
3269 REG_EQUAL,
3270 gen_rtx_UDIV (compute_mode, op0, op1));
3272 else /* TRUNC_DIV, signed */
3274 unsigned HOST_WIDE_INT ml;
3275 int lgup, post_shift;
3276 HOST_WIDE_INT d = INTVAL (op1);
3277 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3279 /* n rem d = n rem -d */
3280 if (rem_flag && d < 0)
3282 d = abs_d;
3283 op1 = gen_int_mode (abs_d, compute_mode);
3286 if (d == 1)
3287 quotient = op0;
3288 else if (d == -1)
3289 quotient = expand_unop (compute_mode, neg_optab, op0,
3290 tquotient, 0);
3291 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3293 /* This case is not handled correctly below. */
3294 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3295 compute_mode, 1, 1);
3296 if (quotient == 0)
3297 goto fail1;
3299 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3300 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3301 /* ??? The cheap metric is computed only for
3302 word_mode. If this operation is wider, this may
3303 not be so. Assume true if the optab has an
3304 expander for this mode. */
3305 && (((rem_flag ? smod_optab : sdiv_optab)
3306 ->handlers[(int) compute_mode].insn_code
3307 != CODE_FOR_nothing)
3308 || (sdivmod_optab->handlers[(int) compute_mode]
3309 .insn_code != CODE_FOR_nothing)))
3311 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3313 lgup = floor_log2 (abs_d);
3314 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3316 rtx label = gen_label_rtx ();
3317 rtx t1;
3319 t1 = copy_to_mode_reg (compute_mode, op0);
3320 do_cmp_and_jump (t1, const0_rtx, GE,
3321 compute_mode, label);
3322 expand_inc (t1, gen_int_mode (abs_d - 1,
3323 compute_mode));
3324 emit_label (label);
3325 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3326 build_int_2 (lgup, 0),
3327 tquotient, 0);
3329 else
3331 rtx t1, t2, t3;
3332 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3333 build_int_2 (size - 1, 0),
3334 NULL_RTX, 0);
3335 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3336 build_int_2 (size - lgup, 0),
3337 NULL_RTX, 1);
3338 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3339 op0, t2),
3340 NULL_RTX);
3341 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3342 build_int_2 (lgup, 0),
3343 tquotient, 0);
3346 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3347 the quotient. */
3348 if (d < 0)
3350 insn = get_last_insn ();
3351 if (insn != last
3352 && (set = single_set (insn)) != 0
3353 && SET_DEST (set) == quotient
3354 && abs_d < ((unsigned HOST_WIDE_INT) 1
3355 << (HOST_BITS_PER_WIDE_INT - 1)))
3356 set_unique_reg_note (insn,
3357 REG_EQUAL,
3358 gen_rtx_DIV (compute_mode,
3359 op0,
3360 GEN_INT
3361 (trunc_int_for_mode
3362 (abs_d,
3363 compute_mode))));
3365 quotient = expand_unop (compute_mode, neg_optab,
3366 quotient, quotient, 0);
3369 else if (size <= HOST_BITS_PER_WIDE_INT)
3371 choose_multiplier (abs_d, size, size - 1,
3372 &ml, &post_shift, &lgup);
3373 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3375 rtx t1, t2, t3;
3377 if (post_shift >= BITS_PER_WORD
3378 || size - 1 >= BITS_PER_WORD)
3379 goto fail1;
3381 extra_cost = (shift_cost[post_shift]
3382 + shift_cost[size - 1] + add_cost);
3383 t1 = expand_mult_highpart (compute_mode, op0, ml,
3384 NULL_RTX, 0,
3385 max_cost - extra_cost);
3386 if (t1 == 0)
3387 goto fail1;
3388 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3389 build_int_2 (post_shift, 0), NULL_RTX, 0);
3390 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3391 build_int_2 (size - 1, 0), NULL_RTX, 0);
3392 if (d < 0)
3393 quotient
3394 = force_operand (gen_rtx_MINUS (compute_mode,
3395 t3, t2),
3396 tquotient);
3397 else
3398 quotient
3399 = force_operand (gen_rtx_MINUS (compute_mode,
3400 t2, t3),
3401 tquotient);
3403 else
3405 rtx t1, t2, t3, t4;
3407 if (post_shift >= BITS_PER_WORD
3408 || size - 1 >= BITS_PER_WORD)
3409 goto fail1;
3411 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3412 extra_cost = (shift_cost[post_shift]
3413 + shift_cost[size - 1] + 2 * add_cost);
3414 t1 = expand_mult_highpart (compute_mode, op0, ml,
3415 NULL_RTX, 0,
3416 max_cost - extra_cost);
3417 if (t1 == 0)
3418 goto fail1;
3419 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3420 t1, op0),
3421 NULL_RTX);
3422 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3423 build_int_2 (post_shift, 0),
3424 NULL_RTX, 0);
3425 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3426 build_int_2 (size - 1, 0),
3427 NULL_RTX, 0);
3428 if (d < 0)
3429 quotient
3430 = force_operand (gen_rtx_MINUS (compute_mode,
3431 t4, t3),
3432 tquotient);
3433 else
3434 quotient
3435 = force_operand (gen_rtx_MINUS (compute_mode,
3436 t3, t4),
3437 tquotient);
3440 else /* Too wide mode to use tricky code */
3441 break;
3443 insn = get_last_insn ();
3444 if (insn != last
3445 && (set = single_set (insn)) != 0
3446 && SET_DEST (set) == quotient)
3447 set_unique_reg_note (insn,
3448 REG_EQUAL,
3449 gen_rtx_DIV (compute_mode, op0, op1));
3451 break;
3453 fail1:
3454 delete_insns_since (last);
3455 break;
3457 case FLOOR_DIV_EXPR:
3458 case FLOOR_MOD_EXPR:
3459 /* We will come here only for signed operations. */
3460 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3462 unsigned HOST_WIDE_INT mh, ml;
3463 int pre_shift, lgup, post_shift;
3464 HOST_WIDE_INT d = INTVAL (op1);
3466 if (d > 0)
3468 /* We could just as easily deal with negative constants here,
3469 but it does not seem worth the trouble for GCC 2.6. */
3470 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3472 pre_shift = floor_log2 (d);
3473 if (rem_flag)
3475 remainder = expand_binop (compute_mode, and_optab, op0,
3476 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3477 remainder, 0, OPTAB_LIB_WIDEN);
3478 if (remainder)
3479 return gen_lowpart (mode, remainder);
3481 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3482 build_int_2 (pre_shift, 0),
3483 tquotient, 0);
3485 else
3487 rtx t1, t2, t3, t4;
3489 mh = choose_multiplier (d, size, size - 1,
3490 &ml, &post_shift, &lgup);
3491 if (mh)
3492 abort ();
3494 if (post_shift < BITS_PER_WORD
3495 && size - 1 < BITS_PER_WORD)
3497 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3498 build_int_2 (size - 1, 0),
3499 NULL_RTX, 0);
3500 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3501 NULL_RTX, 0, OPTAB_WIDEN);
3502 extra_cost = (shift_cost[post_shift]
3503 + shift_cost[size - 1] + 2 * add_cost);
3504 t3 = expand_mult_highpart (compute_mode, t2, ml,
3505 NULL_RTX, 1,
3506 max_cost - extra_cost);
3507 if (t3 != 0)
3509 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3510 build_int_2 (post_shift, 0),
3511 NULL_RTX, 1);
3512 quotient = expand_binop (compute_mode, xor_optab,
3513 t4, t1, tquotient, 0,
3514 OPTAB_WIDEN);
3519 else
3521 rtx nsign, t1, t2, t3, t4;
3522 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3523 op0, constm1_rtx), NULL_RTX);
3524 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3525 0, OPTAB_WIDEN);
3526 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3527 build_int_2 (size - 1, 0), NULL_RTX, 0);
3528 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3529 NULL_RTX);
3530 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3531 NULL_RTX, 0);
3532 if (t4)
3534 rtx t5;
3535 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3536 NULL_RTX, 0);
3537 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3538 t4, t5),
3539 tquotient);
3544 if (quotient != 0)
3545 break;
3546 delete_insns_since (last);
3548 /* Try using an instruction that produces both the quotient and
3549 remainder, using truncation. We can easily compensate the quotient
3550 or remainder to get floor rounding, once we have the remainder.
3551 Notice that we compute also the final remainder value here,
3552 and return the result right away. */
3553 if (target == 0 || GET_MODE (target) != compute_mode)
3554 target = gen_reg_rtx (compute_mode);
3556 if (rem_flag)
3558 remainder
3559 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3560 quotient = gen_reg_rtx (compute_mode);
3562 else
3564 quotient
3565 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3566 remainder = gen_reg_rtx (compute_mode);
3569 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3570 quotient, remainder, 0))
3572 /* This could be computed with a branch-less sequence.
3573 Save that for later. */
3574 rtx tem;
3575 rtx label = gen_label_rtx ();
3576 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3577 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3578 NULL_RTX, 0, OPTAB_WIDEN);
3579 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3580 expand_dec (quotient, const1_rtx);
3581 expand_inc (remainder, op1);
3582 emit_label (label);
3583 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3586 /* No luck with division elimination or divmod. Have to do it
3587 by conditionally adjusting op0 *and* the result. */
3589 rtx label1, label2, label3, label4, label5;
3590 rtx adjusted_op0;
3591 rtx tem;
3593 quotient = gen_reg_rtx (compute_mode);
3594 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3595 label1 = gen_label_rtx ();
3596 label2 = gen_label_rtx ();
3597 label3 = gen_label_rtx ();
3598 label4 = gen_label_rtx ();
3599 label5 = gen_label_rtx ();
3600 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3601 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3602 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3603 quotient, 0, OPTAB_LIB_WIDEN);
3604 if (tem != quotient)
3605 emit_move_insn (quotient, tem);
3606 emit_jump_insn (gen_jump (label5));
3607 emit_barrier ();
3608 emit_label (label1);
3609 expand_inc (adjusted_op0, const1_rtx);
3610 emit_jump_insn (gen_jump (label4));
3611 emit_barrier ();
3612 emit_label (label2);
3613 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3614 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3615 quotient, 0, OPTAB_LIB_WIDEN);
3616 if (tem != quotient)
3617 emit_move_insn (quotient, tem);
3618 emit_jump_insn (gen_jump (label5));
3619 emit_barrier ();
3620 emit_label (label3);
3621 expand_dec (adjusted_op0, const1_rtx);
3622 emit_label (label4);
3623 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3624 quotient, 0, OPTAB_LIB_WIDEN);
3625 if (tem != quotient)
3626 emit_move_insn (quotient, tem);
3627 expand_dec (quotient, const1_rtx);
3628 emit_label (label5);
3630 break;
3632 case CEIL_DIV_EXPR:
3633 case CEIL_MOD_EXPR:
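/* For the power-of-two case handled first below (example values only):
   with op0 = 13 and op1 = 4, op0 >> 2 gives 3 and op0 & 3 is nonzero,
   so 1 is added, producing the ceiling quotient 4.  */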
3634 if (unsignedp)
3636 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3638 rtx t1, t2, t3;
3639 unsigned HOST_WIDE_INT d = INTVAL (op1);
3640 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3641 build_int_2 (floor_log2 (d), 0),
3642 tquotient, 1);
3643 t2 = expand_binop (compute_mode, and_optab, op0,
3644 GEN_INT (d - 1),
3645 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3646 t3 = gen_reg_rtx (compute_mode);
3647 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3648 compute_mode, 1, 1);
3649 if (t3 == 0)
3651 rtx lab;
3652 lab = gen_label_rtx ();
3653 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3654 expand_inc (t1, const1_rtx);
3655 emit_label (lab);
3656 quotient = t1;
3658 else
3659 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3660 t1, t3),
3661 tquotient);
3662 break;
3665 /* Try using an instruction that produces both the quotient and
3666 remainder, using truncation. We can easily compensate the
3667 quotient or remainder to get ceiling rounding, once we have the
3668 remainder. Notice that we also compute the final remainder
3669 value here, and return the result right away. */
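/* Example of the compensation below: for op0 = 7 and op1 = 2 the
   truncating divmod gives quotient 3 and remainder 1; the nonzero
   remainder bumps the quotient to 4 and subtracts op1 from the
   remainder, so 7 = 2 * 4 + (-1).  */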
3670 if (target == 0 || GET_MODE (target) != compute_mode)
3671 target = gen_reg_rtx (compute_mode);
3673 if (rem_flag)
3675 remainder = (GET_CODE (target) == REG
3676 ? target : gen_reg_rtx (compute_mode));
3677 quotient = gen_reg_rtx (compute_mode);
3679 else
3681 quotient = (GET_CODE (target) == REG
3682 ? target : gen_reg_rtx (compute_mode));
3683 remainder = gen_reg_rtx (compute_mode);
3686 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3687 remainder, 1))
3689 /* This could be computed with a branch-less sequence.
3690 Save that for later. */
3691 rtx label = gen_label_rtx ();
3692 do_cmp_and_jump (remainder, const0_rtx, EQ,
3693 compute_mode, label);
3694 expand_inc (quotient, const1_rtx);
3695 expand_dec (remainder, op1);
3696 emit_label (label);
3697 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3700 /* No luck with division elimination or divmod. Have to do it
3701 by conditionally adjusting op0 *and* the result. */
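/* The sequence below computes ceil (a / b) as (a - 1) / b + 1 for
   nonzero a, with a == 0 handled separately; e.g. op0 = 9 and op1 = 4
   give 8 / 4 + 1 = 3.  */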
3703 rtx label1, label2;
3704 rtx adjusted_op0, tem;
3706 quotient = gen_reg_rtx (compute_mode);
3707 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3708 label1 = gen_label_rtx ();
3709 label2 = gen_label_rtx ();
3710 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3711 compute_mode, label1);
3712 emit_move_insn (quotient, const0_rtx);
3713 emit_jump_insn (gen_jump (label2));
3714 emit_barrier ();
3715 emit_label (label1);
3716 expand_dec (adjusted_op0, const1_rtx);
3717 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3718 quotient, 1, OPTAB_LIB_WIDEN);
3719 if (tem != quotient)
3720 emit_move_insn (quotient, tem);
3721 expand_inc (quotient, const1_rtx);
3722 emit_label (label2);
3725 else /* signed */
3727 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3728 && INTVAL (op1) >= 0)
3730 /* This is extremely similar to the code for the unsigned case
3731 above. For 2.7 we should merge these variants, but for
3732 2.6.1 I don't want to touch the code for unsigned since that
3733 gets used in C. The signed case will only be used by other
3734 languages (Ada). */
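/* Here the arithmetic shift yields floor (op0 / d) and the low bits
   decide whether to round up; e.g. op0 = -13 and op1 = 4: -13 >> 2 is
   -4, -13 & 3 is nonzero, so the ceiling quotient is -3.  */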
3736 rtx t1, t2, t3;
3737 unsigned HOST_WIDE_INT d = INTVAL (op1);
3738 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3739 build_int_2 (floor_log2 (d), 0),
3740 tquotient, 0);
3741 t2 = expand_binop (compute_mode, and_optab, op0,
3742 GEN_INT (d - 1),
3743 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3744 t3 = gen_reg_rtx (compute_mode);
3745 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3746 compute_mode, 1, 1);
3747 if (t3 == 0)
3749 rtx lab;
3750 lab = gen_label_rtx ();
3751 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3752 expand_inc (t1, const1_rtx);
3753 emit_label (lab);
3754 quotient = t1;
3756 else
3757 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3758 t1, t3),
3759 tquotient);
3760 break;
3763 /* Try using an instruction that produces both the quotient and
3764 remainder, using truncation. We can easily compensate the
3765 quotient or remainder to get ceiling rounding, once we have the
3766 remainder. Notice that we also compute the final remainder
3767 value here, and return the result right away. */
3768 if (target == 0 || GET_MODE (target) != compute_mode)
3769 target = gen_reg_rtx (compute_mode);
3770 if (rem_flag)
3772 remainder = (GET_CODE (target) == REG
3773 ? target : gen_reg_rtx (compute_mode));
3774 quotient = gen_reg_rtx (compute_mode);
3776 else
3778 quotient = (GET_CODE (target) == REG
3779 ? target : gen_reg_rtx (compute_mode));
3780 remainder = gen_reg_rtx (compute_mode);
3783 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3784 remainder, 0))
3786 /* This could be computed with a branch-less sequence.
3787 Save that for later. */
3788 rtx tem;
3789 rtx label = gen_label_rtx ();
3790 do_cmp_and_jump (remainder, const0_rtx, EQ,
3791 compute_mode, label);
3792 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3793 NULL_RTX, 0, OPTAB_WIDEN);
3794 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3795 expand_inc (quotient, const1_rtx);
3796 expand_dec (remainder, op1);
3797 emit_label (label);
3798 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3801 /* No luck with division elimination or divmod. Have to do it
3802 by conditionally adjusting op0 *and* the result. */
3804 rtx label1, label2, label3, label4, label5;
3805 rtx adjusted_op0;
3806 rtx tem;
3808 quotient = gen_reg_rtx (compute_mode);
3809 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3810 label1 = gen_label_rtx ();
3811 label2 = gen_label_rtx ();
3812 label3 = gen_label_rtx ();
3813 label4 = gen_label_rtx ();
3814 label5 = gen_label_rtx ();
3815 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3816 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3817 compute_mode, label1);
3818 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3819 quotient, 0, OPTAB_LIB_WIDEN);
3820 if (tem != quotient)
3821 emit_move_insn (quotient, tem);
3822 emit_jump_insn (gen_jump (label5));
3823 emit_barrier ();
3824 emit_label (label1);
3825 expand_dec (adjusted_op0, const1_rtx);
3826 emit_jump_insn (gen_jump (label4));
3827 emit_barrier ();
3828 emit_label (label2);
3829 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3830 compute_mode, label3);
3831 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3832 quotient, 0, OPTAB_LIB_WIDEN);
3833 if (tem != quotient)
3834 emit_move_insn (quotient, tem);
3835 emit_jump_insn (gen_jump (label5));
3836 emit_barrier ();
3837 emit_label (label3);
3838 expand_inc (adjusted_op0, const1_rtx);
3839 emit_label (label4);
3840 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3841 quotient, 0, OPTAB_LIB_WIDEN);
3842 if (tem != quotient)
3843 emit_move_insn (quotient, tem);
3844 expand_inc (quotient, const1_rtx);
3845 emit_label (label5);
3848 break;
3850 case EXACT_DIV_EXPR:
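/* When the division is known to be exact, dividing by a constant
   reduces to a shift and a multiply, because the odd part of the
   divisor has a multiplicative inverse modulo 2**size. For instance,
   in an 8-bit mode with op1 = 10 and op0 = 70: pre_shift is 1, the odd
   part 5 has inverse 205 (5 * 205 = 1025 = 4 * 256 + 1), and
   (70 >> 1) * 205 = 7175, which is 7 modulo 256 -- the exact quotient.  */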
3851 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3853 HOST_WIDE_INT d = INTVAL (op1);
3854 unsigned HOST_WIDE_INT ml;
3855 int pre_shift;
3856 rtx t1;
3858 pre_shift = floor_log2 (d & -d);
3859 ml = invert_mod2n (d >> pre_shift, size);
3860 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3861 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3862 quotient = expand_mult (compute_mode, t1,
3863 gen_int_mode (ml, compute_mode),
3864 NULL_RTX, 0);
3866 insn = get_last_insn ();
3867 set_unique_reg_note (insn,
3868 REG_EQUAL,
3869 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3870 compute_mode,
3871 op0, op1));
3873 break;
3875 case ROUND_DIV_EXPR:
3876 case ROUND_MOD_EXPR:
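/* Round-to-nearest is derived from a truncating divmod: the quotient
   is adjusted by one, away from zero, whenever twice the remainder's
   magnitude reaches the divisor's magnitude. E.g. for unsigned op0 = 10
   and op1 = 4, truncation gives 2 remainder 2, and 2 > (4 - 1) / 2, so
   the rounded quotient is 3 and the remainder becomes -2.  */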
3877 if (unsignedp)
3879 rtx tem;
3880 rtx label;
3881 label = gen_label_rtx ();
3882 quotient = gen_reg_rtx (compute_mode);
3883 remainder = gen_reg_rtx (compute_mode);
3884 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3886 rtx tem;
3887 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3888 quotient, 1, OPTAB_LIB_WIDEN);
3889 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3890 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3891 remainder, 1, OPTAB_LIB_WIDEN);
3893 tem = plus_constant (op1, -1);
3894 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3895 build_int_2 (1, 0), NULL_RTX, 1);
3896 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3897 expand_inc (quotient, const1_rtx);
3898 expand_dec (remainder, op1);
3899 emit_label (label);
3901 else
3903 rtx abs_rem, abs_op1, tem, mask;
3904 rtx label;
3905 label = gen_label_rtx ();
3906 quotient = gen_reg_rtx (compute_mode);
3907 remainder = gen_reg_rtx (compute_mode);
3908 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3910 rtx tem;
3911 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3912 quotient, 0, OPTAB_LIB_WIDEN);
3913 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3914 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3915 remainder, 0, OPTAB_LIB_WIDEN);
3917 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3918 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3919 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3920 build_int_2 (1, 0), NULL_RTX, 1);
3921 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3922 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3923 NULL_RTX, 0, OPTAB_WIDEN);
3924 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3925 build_int_2 (size - 1, 0), NULL_RTX, 0);
3926 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3927 NULL_RTX, 0, OPTAB_WIDEN);
3928 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3929 NULL_RTX, 0, OPTAB_WIDEN);
3930 expand_inc (quotient, tem);
3931 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3932 NULL_RTX, 0, OPTAB_WIDEN);
3933 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3934 NULL_RTX, 0, OPTAB_WIDEN);
3935 expand_dec (remainder, tem);
3936 emit_label (label);
3938 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3940 default:
3941 abort ();
3944 if (quotient == 0)
3946 if (target && GET_MODE (target) != compute_mode)
3947 target = 0;
3949 if (rem_flag)
3951 /* Try to produce the remainder without producing the quotient.
3952 If we seem to have a divmod pattern that does not require widening,
3953 don't try widening here. We should really have a WIDEN argument
3954 to expand_twoval_binop, since what we'd really like to do here is
3955 1) try a mod insn in compute_mode
3956 2) try a divmod insn in compute_mode
3957 3) try a div insn in compute_mode and multiply-subtract to get
3958 remainder
3959 4) try the same things with widening allowed. */
3960 remainder
3961 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3962 op0, op1, target,
3963 unsignedp,
3964 ((optab2->handlers[(int) compute_mode].insn_code
3965 != CODE_FOR_nothing)
3966 ? OPTAB_DIRECT : OPTAB_WIDEN));
3967 if (remainder == 0)
3969 /* No luck there. Can we do remainder and divide at once
3970 without a library call? */
3971 remainder = gen_reg_rtx (compute_mode);
3972 if (! expand_twoval_binop ((unsignedp
3973 ? udivmod_optab
3974 : sdivmod_optab),
3975 op0, op1,
3976 NULL_RTX, remainder, unsignedp))
3977 remainder = 0;
3980 if (remainder)
3981 return gen_lowpart (mode, remainder);
3984 /* Produce the quotient. Try a quotient insn, but not a library call.
3985 If we have a divmod in this mode, use it in preference to widening
3986 the div (for this test we assume it will not fail). Note that optab2
3987 is set to whichever of the two optabs the call below will use. */
3988 quotient
3989 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3990 op0, op1, rem_flag ? NULL_RTX : target,
3991 unsignedp,
3992 ((optab2->handlers[(int) compute_mode].insn_code
3993 != CODE_FOR_nothing)
3994 ? OPTAB_DIRECT : OPTAB_WIDEN));
3996 if (quotient == 0)
3998 /* No luck there. Try a quotient-and-remainder insn,
3999 keeping the quotient alone. */
4000 quotient = gen_reg_rtx (compute_mode);
4001 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4002 op0, op1,
4003 quotient, NULL_RTX, unsignedp))
4005 quotient = 0;
4006 if (! rem_flag)
4007 /* Still no luck. If we are not computing the remainder,
4008 use a library call for the quotient. */
4009 quotient = sign_expand_binop (compute_mode,
4010 udiv_optab, sdiv_optab,
4011 op0, op1, target,
4012 unsignedp, OPTAB_LIB_WIDEN);
4017 if (rem_flag)
4019 if (target && GET_MODE (target) != compute_mode)
4020 target = 0;
4022 if (quotient == 0)
4023 /* No divide instruction either. Use library for remainder. */
4024 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4025 op0, op1, target,
4026 unsignedp, OPTAB_LIB_WIDEN);
4027 else
4029 /* We divided. Now finish doing X - Y * (X / Y). */
4030 remainder = expand_mult (compute_mode, quotient, op1,
4031 NULL_RTX, unsignedp);
4032 remainder = expand_binop (compute_mode, sub_optab, op0,
4033 remainder, target, unsignedp,
4034 OPTAB_LIB_WIDEN);
4038 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4041 /* Return a tree node with data type TYPE, describing the value of X.
4042 Usually this is an RTL_EXPR, if there is no obvious better choice.
4043 X may be an expression; however, we only support those expressions
4044 generated by loop.c. */
4046 tree
4047 make_tree (type, x)
4048 tree type;
4049 rtx x;
4051 tree t;
4053 switch (GET_CODE (x))
4055 case CONST_INT:
4056 t = build_int_2 (INTVAL (x),
4057 (TREE_UNSIGNED (type)
4058 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4059 || INTVAL (x) >= 0 ? 0 : -1);
4060 TREE_TYPE (t) = type;
4061 return t;
4063 case CONST_DOUBLE:
4064 if (GET_MODE (x) == VOIDmode)
4066 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4067 TREE_TYPE (t) = type;
4069 else
4071 REAL_VALUE_TYPE d;
4073 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4074 t = build_real (type, d);
4077 return t;
4079 case CONST_VECTOR:
4081 int i, units;
4082 rtx elt;
4083 tree t = NULL_TREE;
4085 units = CONST_VECTOR_NUNITS (x);
4087 /* Build a tree with vector elements. */
4088 for (i = units - 1; i >= 0; --i)
4090 elt = CONST_VECTOR_ELT (x, i);
4091 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4094 return build_vector (type, t);
4097 case PLUS:
4098 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4099 make_tree (type, XEXP (x, 1))));
4101 case MINUS:
4102 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4103 make_tree (type, XEXP (x, 1))));
4105 case NEG:
4106 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4108 case MULT:
4109 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4110 make_tree (type, XEXP (x, 1))));
4112 case ASHIFT:
4113 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4114 make_tree (type, XEXP (x, 1))));
4116 case LSHIFTRT:
4117 t = (*lang_hooks.types.unsigned_type) (type);
4118 return fold (convert (type,
4119 build (RSHIFT_EXPR, t,
4120 make_tree (t, XEXP (x, 0)),
4121 make_tree (type, XEXP (x, 1)))));
4123 case ASHIFTRT:
4124 t = (*lang_hooks.types.signed_type) (type);
4125 return fold (convert (type,
4126 build (RSHIFT_EXPR, t,
4127 make_tree (t, XEXP (x, 0)),
4128 make_tree (type, XEXP (x, 1)))));
4130 case DIV:
4131 if (TREE_CODE (type) != REAL_TYPE)
4132 t = (*lang_hooks.types.signed_type) (type);
4133 else
4134 t = type;
4136 return fold (convert (type,
4137 build (TRUNC_DIV_EXPR, t,
4138 make_tree (t, XEXP (x, 0)),
4139 make_tree (t, XEXP (x, 1)))));
4140 case UDIV:
4141 t = (*lang_hooks.types.unsigned_type) (type);
4142 return fold (convert (type,
4143 build (TRUNC_DIV_EXPR, t,
4144 make_tree (t, XEXP (x, 0)),
4145 make_tree (t, XEXP (x, 1)))));
4147 case SIGN_EXTEND:
4148 case ZERO_EXTEND:
4149 t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
4150 GET_CODE (x) == ZERO_EXTEND);
4151 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4153 default:
4154 t = make_node (RTL_EXPR);
4155 TREE_TYPE (t) = type;
4157 #ifdef POINTERS_EXTEND_UNSIGNED
4158 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4159 ptr_mode. So convert. */
4160 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4161 x = convert_memory_address (TYPE_MODE (type), x);
4162 #endif
4164 RTL_EXPR_RTL (t) = x;
4165 /* There are no insns to be output
4166 when this rtl_expr is used. */
4167 RTL_EXPR_SEQUENCE (t) = 0;
4168 return t;
4172 /* Check whether the multiplication X * MULT + ADD overflows.
4173 X, MULT and ADD must be CONST_*.
4174 MODE is the machine mode for the computation.
4175 X and MULT must have mode MODE. ADD may have a different mode.
4176 So can X (defaults to same as MODE).
4177 UNSIGNEDP is nonzero to do unsigned multiplication. */
4179 bool
4180 const_mult_add_overflow_p (x, mult, add, mode, unsignedp)
4181 rtx x, mult, add;
4182 enum machine_mode mode;
4183 int unsignedp;
4185 tree type, mult_type, add_type, result;
4187 type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4189 /* In order to get a proper overflow indication from an unsigned
4190 type, we have to pretend that it's a sizetype. */
4191 mult_type = type;
4192 if (unsignedp)
4194 mult_type = copy_node (type);
4195 TYPE_IS_SIZETYPE (mult_type) = 1;
4198 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4199 : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4201 result = fold (build (PLUS_EXPR, mult_type,
4202 fold (build (MULT_EXPR, mult_type,
4203 make_tree (mult_type, x),
4204 make_tree (mult_type, mult))),
4205 make_tree (add_type, add)));
4207 return TREE_CONSTANT_OVERFLOW (result);
4210 /* Return an rtx representing the value of X * MULT + ADD.
4211 TARGET is a suggestion for where to store the result (an rtx).
4212 MODE is the machine mode for the computation.
4213 X and MULT must have mode MODE. ADD may have a different mode.
4214 So can X (defaults to same as MODE).
4215 UNSIGNEDP is nonzero to do unsigned multiplication.
4216 This may emit insns. */
4219 expand_mult_add (x, target, mult, add, mode, unsignedp)
4220 rtx x, target, mult, add;
4221 enum machine_mode mode;
4222 int unsignedp;
4224 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4225 tree add_type = (GET_MODE (add) == VOIDmode
4226 ? type: (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4227 unsignedp));
4228 tree result = fold (build (PLUS_EXPR, type,
4229 fold (build (MULT_EXPR, type,
4230 make_tree (type, x),
4231 make_tree (type, mult))),
4232 make_tree (add_type, add)));
4234 return expand_expr (result, target, VOIDmode, 0);
4237 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4238 and returning TARGET.
4240 If TARGET is 0, a pseudo-register or constant is returned. */
4243 expand_and (mode, op0, op1, target)
4244 enum machine_mode mode;
4245 rtx op0, op1, target;
4247 rtx tem = 0;
4249 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4250 tem = simplify_binary_operation (AND, mode, op0, op1);
4251 if (tem == 0)
4252 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4254 if (target == 0)
4255 target = tem;
4256 else if (tem != target)
4257 emit_move_insn (target, tem);
4258 return target;
4261 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4262 and storing in TARGET. Normally return TARGET.
4263 Return 0 if that cannot be done.
4265 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4266 it is VOIDmode, they cannot both be CONST_INT.
4268 UNSIGNEDP is for the case where we have to widen the operands
4269 to perform the operation. It says to use zero-extension.
4271 NORMALIZEP is 1 if we should convert the result to be either zero
4272 or one. NORMALIZEP is -1 if we should convert the result to be
4273 either zero or -1. If NORMALIZEP is zero, the result will be left
4274 "raw" out of the scc insn. */
4277 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4278 rtx target;
4279 enum rtx_code code;
4280 rtx op0, op1;
4281 enum machine_mode mode;
4282 int unsignedp;
4283 int normalizep;
4285 rtx subtarget;
4286 enum insn_code icode;
4287 enum machine_mode compare_mode;
4288 enum machine_mode target_mode = GET_MODE (target);
4289 rtx tem;
4290 rtx last = get_last_insn ();
4291 rtx pattern, comparison;
4293 /* ??? Ok to do this and then fail? */
4294 op0 = protect_from_queue (op0, 0);
4295 op1 = protect_from_queue (op1, 0);
4297 if (unsignedp)
4298 code = unsigned_condition (code);
4300 /* If one operand is constant, make it the second one. Only do this
4301 if the other operand is not constant as well. */
4303 if (swap_commutative_operands_p (op0, op1))
4305 tem = op0;
4306 op0 = op1;
4307 op1 = tem;
4308 code = swap_condition (code);
4311 if (mode == VOIDmode)
4312 mode = GET_MODE (op0);
4314 /* For some comparisons with 1 and -1, we can convert this to
4315 comparisons with zero. This will often produce more opportunities for
4316 store-flag insns. */
4318 switch (code)
4320 case LT:
4321 if (op1 == const1_rtx)
4322 op1 = const0_rtx, code = LE;
4323 break;
4324 case LE:
4325 if (op1 == constm1_rtx)
4326 op1 = const0_rtx, code = LT;
4327 break;
4328 case GE:
4329 if (op1 == const1_rtx)
4330 op1 = const0_rtx, code = GT;
4331 break;
4332 case GT:
4333 if (op1 == constm1_rtx)
4334 op1 = const0_rtx, code = GE;
4335 break;
4336 case GEU:
4337 if (op1 == const1_rtx)
4338 op1 = const0_rtx, code = NE;
4339 break;
4340 case LTU:
4341 if (op1 == const1_rtx)
4342 op1 = const0_rtx, code = EQ;
4343 break;
4344 default:
4345 break;
4348 /* If we are comparing a double-word integer with zero, we can convert
4349 the comparison into one involving a single word. */
4350 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4351 && GET_MODE_CLASS (mode) == MODE_INT
4352 && op1 == const0_rtx
4353 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4355 if (code == EQ || code == NE)
4357 /* Do a logical OR of the two words and compare the result. */
4358 rtx op0h = gen_highpart (word_mode, op0);
4359 rtx op0l = gen_lowpart (word_mode, op0);
4360 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4361 NULL_RTX, unsignedp, OPTAB_DIRECT);
4362 if (op0both != 0)
4363 return emit_store_flag (target, code, op0both, op1, word_mode,
4364 unsignedp, normalizep);
4366 else if (code == LT || code == GE)
4367 /* If testing the sign bit, can just test on high word. */
4368 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4369 op1, word_mode, unsignedp, normalizep);
4372 /* From now on, we won't change CODE, so set ICODE now. */
4373 icode = setcc_gen_code[(int) code];
4375 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4376 complement of A (for GE) and shifting the sign bit to the low bit. */
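/* E.g. in an 8-bit mode, A = -5 is 0xfb and a logical shift right by 7
   leaves 1, the value of A < 0; for A >= 0 the complement is taken
   first, so A = 3 becomes 0xfc and the same shift again leaves 1.  */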
4377 if (op1 == const0_rtx && (code == LT || code == GE)
4378 && GET_MODE_CLASS (mode) == MODE_INT
4379 && (normalizep || STORE_FLAG_VALUE == 1
4380 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4381 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4382 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4384 subtarget = target;
4386 /* If the result is to be wider than OP0, it is best to convert it
4387 first. If it is to be narrower, it is *incorrect* to convert it
4388 first. */
4389 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4391 op0 = protect_from_queue (op0, 0);
4392 op0 = convert_modes (target_mode, mode, op0, 0);
4393 mode = target_mode;
4396 if (target_mode != mode)
4397 subtarget = 0;
4399 if (code == GE)
4400 op0 = expand_unop (mode, one_cmpl_optab, op0,
4401 ((STORE_FLAG_VALUE == 1 || normalizep)
4402 ? 0 : subtarget), 0);
4404 if (STORE_FLAG_VALUE == 1 || normalizep)
4405 /* If we are supposed to produce a 0/1 value, we want to do
4406 a logical shift from the sign bit to the low-order bit; for
4407 a -1/0 value, we do an arithmetic shift. */
4408 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4409 size_int (GET_MODE_BITSIZE (mode) - 1),
4410 subtarget, normalizep != -1);
4412 if (mode != target_mode)
4413 op0 = convert_modes (target_mode, mode, op0, 0);
4415 return op0;
4418 if (icode != CODE_FOR_nothing)
4420 insn_operand_predicate_fn pred;
4422 /* We think we may be able to do this with a scc insn. Emit the
4423 comparison and then the scc insn.
4425 compare_from_rtx may call emit_queue, which would be deleted below
4426 if the scc insn fails. So call it ourselves before setting LAST.
4427 Likewise for do_pending_stack_adjust. */
4429 emit_queue ();
4430 do_pending_stack_adjust ();
4431 last = get_last_insn ();
4433 comparison
4434 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4435 if (GET_CODE (comparison) == CONST_INT)
4436 return (comparison == const0_rtx ? const0_rtx
4437 : normalizep == 1 ? const1_rtx
4438 : normalizep == -1 ? constm1_rtx
4439 : const_true_rtx);
4441 /* The code of COMPARISON may not match CODE if compare_from_rtx
4442 decided to swap its operands and reverse the original code.
4444 We know that compare_from_rtx returns either a CONST_INT or
4445 a new comparison code, so it is safe to just extract the
4446 code from COMPARISON. */
4447 code = GET_CODE (comparison);
4449 /* Get a reference to the target in the proper mode for this insn. */
4450 compare_mode = insn_data[(int) icode].operand[0].mode;
4451 subtarget = target;
4452 pred = insn_data[(int) icode].operand[0].predicate;
4453 if (preserve_subexpressions_p ()
4454 || ! (*pred) (subtarget, compare_mode))
4455 subtarget = gen_reg_rtx (compare_mode);
4457 pattern = GEN_FCN (icode) (subtarget);
4458 if (pattern)
4460 emit_insn (pattern);
4462 /* If we are converting to a wider mode, first convert to
4463 TARGET_MODE, then normalize. This produces better combining
4464 opportunities on machines that have a SIGN_EXTRACT when we are
4465 testing a single bit. This mostly benefits the 68k.
4467 If STORE_FLAG_VALUE does not have the sign bit set when
4468 interpreted in COMPARE_MODE, we can do this conversion as
4469 unsigned, which is usually more efficient. */
4470 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4472 convert_move (target, subtarget,
4473 (GET_MODE_BITSIZE (compare_mode)
4474 <= HOST_BITS_PER_WIDE_INT)
4475 && 0 == (STORE_FLAG_VALUE
4476 & ((HOST_WIDE_INT) 1
4477 << (GET_MODE_BITSIZE (compare_mode) -1))));
4478 op0 = target;
4479 compare_mode = target_mode;
4481 else
4482 op0 = subtarget;
4484 /* If we want to keep subexpressions around, don't reuse our
4485 last target. */
4487 if (preserve_subexpressions_p ())
4488 subtarget = 0;
4490 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4491 we don't have to do anything. */
4492 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4494 /* STORE_FLAG_VALUE might be the most negative number, so write
4495 the comparison this way to avoid a compile-time warning. */
4496 else if (- normalizep == STORE_FLAG_VALUE)
4497 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4499 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4500 makes it hard to use a value of just the sign bit due to
4501 ANSI integer constant typing rules. */
4502 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4503 && (STORE_FLAG_VALUE
4504 & ((HOST_WIDE_INT) 1
4505 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4506 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4507 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4508 subtarget, normalizep == 1);
4509 else if (STORE_FLAG_VALUE & 1)
4511 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4512 if (normalizep == -1)
4513 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4515 else
4516 abort ();
4518 /* If we were converting to a smaller mode, do the
4519 conversion now. */
4520 if (target_mode != compare_mode)
4522 convert_move (target, op0, 0);
4523 return target;
4525 else
4526 return op0;
4530 delete_insns_since (last);
4532 /* If expensive optimizations are enabled, use different pseudo registers
4533 for each insn, instead of reusing the same pseudo. This leads to better
4534 CSE, but slows down the compiler, since there are more pseudos. */
4535 subtarget = (!flag_expensive_optimizations
4536 && (target_mode == mode)) ? target : NULL_RTX;
4538 /* If we reached here, we can't do this with a scc insn. However, there
4539 are some comparisons that can be done directly. For example, if
4540 this is an equality comparison of integers, we can try to exclusive-or
4541 (or subtract) the two operands and use a recursive call to try the
4542 comparison with zero. Don't do any of these cases if branches are
4543 very cheap. */
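/* This relies on the identity that A == B exactly when A ^ B == 0
   (or A - B == 0), turning a two-operand equality test into a
   comparison against zero that the code above may handle directly.  */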
4545 if (BRANCH_COST > 0
4546 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4547 && op1 != const0_rtx)
4549 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4550 OPTAB_WIDEN);
4552 if (tem == 0)
4553 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4554 OPTAB_WIDEN);
4555 if (tem != 0)
4556 tem = emit_store_flag (target, code, tem, const0_rtx,
4557 mode, unsignedp, normalizep);
4558 if (tem == 0)
4559 delete_insns_since (last);
4560 return tem;
4563 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4564 the constant zero. Reject all other comparisons at this point. Only
4565 do LE and GT if branches are expensive since they are expensive on
4566 2-operand machines. */
4568 if (BRANCH_COST == 0
4569 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4570 || (code != EQ && code != NE
4571 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4572 return 0;
4574 /* See what we need to return. We can only return a 1, -1, or the
4575 sign bit. */
4577 if (normalizep == 0)
4579 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4580 normalizep = STORE_FLAG_VALUE;
4582 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4583 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4584 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4586 else
4587 return 0;
4590 /* Try to put the result of the comparison in the sign bit. Assume we can't
4591 do the necessary operation below. */
4593 tem = 0;
4595 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4596 the sign bit set. */
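/* E.g. A = 0 gives 0 | -1 = -1 (sign bit set, so A <= 0 holds);
   A = 3 gives 3 | 2 = 3 (sign bit clear); A = -2 gives -2 | -3 = -1,
   so the sign bit is set again.  */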
4598 if (code == LE)
4600 /* This is destructive, so SUBTARGET can't be OP0. */
4601 if (rtx_equal_p (subtarget, op0))
4602 subtarget = 0;
4604 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4605 OPTAB_WIDEN);
4606 if (tem)
4607 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4608 OPTAB_WIDEN);
4611 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4612 number of bits in the mode of OP0, minus one. */
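/* E.g. A = 3: the arithmetic shift gives 0 and 0 - 3 = -3, whose sign
   bit is set, so A > 0 holds; A = -2 gives -1 - (-2) = 1 and A = 0
   gives 0, both with the sign bit clear.  */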
4614 if (code == GT)
4616 if (rtx_equal_p (subtarget, op0))
4617 subtarget = 0;
4619 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4620 size_int (GET_MODE_BITSIZE (mode) - 1),
4621 subtarget, 0);
4622 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4623 OPTAB_WIDEN);
4626 if (code == EQ || code == NE)
4628 /* For EQ or NE, one way to do the comparison is to apply an operation
4629 that converts the operand into a positive number if it is nonzero
4630 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4631 for NE we negate. This puts the result in the sign bit. Then we
4632 normalize with a shift, if needed.
4634 Two operations that can do the above actions are ABS and FFS, so try
4635 them. If that doesn't work, and MODE is smaller than a full word,
4636 we can use zero-extension to the wider mode (an unsigned conversion)
4637 as the operation. */
4639 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4640 that is compensated by the subsequent overflow when subtracting
4641 one / negating. */
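/* E.g. with ABS and op0 = 5: for NE, negating 5 gives -5, whose sign
   bit is set (nonzero); for EQ, 5 - 1 = 4 leaves it clear. With
   op0 = 0, the EQ path computes 0 - 1 = -1 (sign bit set) and the NE
   path computes 0 (sign bit clear).  */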
4643 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4644 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4645 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4646 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4647 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4649 op0 = protect_from_queue (op0, 0);
4650 tem = convert_modes (word_mode, mode, op0, 1);
4651 mode = word_mode;
4654 if (tem != 0)
4656 if (code == EQ)
4657 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4658 0, OPTAB_WIDEN);
4659 else
4660 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4663 /* If we couldn't do it that way, for NE we can "or" the two's complement
4664 of the value with itself. For EQ, we take the one's complement of
4665 that "or", which is an extra insn, so we only handle EQ if branches
4666 are expensive. */
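/* The OR of A with its negation has the sign bit set exactly when A is
   nonzero (for A = 0 both operands are zero), which after the
   normalizing shift below yields NE; complementing the OR first yields
   EQ instead.  */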
4668 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4670 if (rtx_equal_p (subtarget, op0))
4671 subtarget = 0;
4673 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4674 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4675 OPTAB_WIDEN);
4677 if (tem && code == EQ)
4678 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4682 if (tem && normalizep)
4683 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4684 size_int (GET_MODE_BITSIZE (mode) - 1),
4685 subtarget, normalizep == 1);
4687 if (tem)
4689 if (GET_MODE (tem) != target_mode)
4691 convert_move (target, tem, 0);
4692 tem = target;
4694 else if (!subtarget)
4696 emit_move_insn (target, tem);
4697 tem = target;
4700 else
4701 delete_insns_since (last);
4703 return tem;
4706 /* Like emit_store_flag, but always succeeds. */
4709 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4710 rtx target;
4711 enum rtx_code code;
4712 rtx op0, op1;
4713 enum machine_mode mode;
4714 int unsignedp;
4715 int normalizep;
4717 rtx tem, label;
4719 /* First see if emit_store_flag can do the job. */
4720 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4721 if (tem != 0)
4722 return tem;
4724 if (normalizep == 0)
4725 normalizep = 1;
4727 /* If this failed, we have to do this with set/compare/jump/set code. */
4729 if (GET_CODE (target) != REG
4730 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4731 target = gen_reg_rtx (GET_MODE (target));
4733 emit_move_insn (target, const1_rtx);
4734 label = gen_label_rtx ();
4735 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4736 NULL_RTX, label);
4738 emit_move_insn (target, const0_rtx);
4739 emit_label (label);
4741 return target;
4744 /* Perform possibly multi-word comparison and conditional jump to LABEL
4745 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4747 The algorithm is based on the code in expr.c:do_jump.
4749 Note that this does not perform a general comparison. Only variants
4750 generated within expmed.c are correctly handled; others abort (but could
4751 be handled if needed). */
4753 static void
4754 do_cmp_and_jump (arg1, arg2, op, mode, label)
4755 rtx arg1, arg2, label;
4756 enum rtx_code op;
4757 enum machine_mode mode;
4759 /* If this mode is an integer too wide to compare properly,
4760 compare word by word. Rely on cse to optimize constant cases. */
4762 if (GET_MODE_CLASS (mode) == MODE_INT
4763 && ! can_compare_p (op, mode, ccp_jump))
4765 rtx label2 = gen_label_rtx ();
4767 switch (op)
4769 case LTU:
4770 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4771 break;
4773 case LEU:
4774 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4775 break;
4777 case LT:
4778 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4779 break;
4781 case GT:
4782 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4783 break;
4785 case GE:
4786 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4787 break;
4789 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4790 those are the only equality operations we do. */
4791 case EQ:
4792 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4793 abort ();
4794 do_jump_by_parts_equality_rtx (arg1, label2, label);
4795 break;
4797 case NE:
4798 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4799 abort ();
4800 do_jump_by_parts_equality_rtx (arg1, label, label2);
4801 break;
4803 default:
4804 abort ();
4807 emit_label (label2);
4809 else
4810 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);