gcc/expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "toplev.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "expr.h"
33 #include "optabs.h"
34 #include "real.h"
35 #include "recog.h"
36 #include "langhooks.h"
38 static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
39 unsigned HOST_WIDE_INT,
40 unsigned HOST_WIDE_INT, rtx));
41 static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
42 unsigned HOST_WIDE_INT, rtx));
43 static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
44 unsigned HOST_WIDE_INT,
45 unsigned HOST_WIDE_INT,
46 unsigned HOST_WIDE_INT,
47 rtx, int));
48 static rtx mask_rtx PARAMS ((enum machine_mode, int,
49 int, int));
50 static rtx lshift_value PARAMS ((enum machine_mode, rtx,
51 int, int));
52 static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
53 unsigned HOST_WIDE_INT, int));
54 static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
55 enum machine_mode, rtx));
57 /* Non-zero means divides or modulus operations are relatively cheap for
58 powers of two, so don't use branches; emit the operation instead.
59 Usually, this will mean that the MD file will emit non-branch
60 sequences. */
62 static int sdiv_pow2_cheap, smod_pow2_cheap;
64 #ifndef SLOW_UNALIGNED_ACCESS
65 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
66 #endif
68 /* For compilers that support multiple targets with different word sizes,
69 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
70 is the H8/300(H) compiler. */
72 #ifndef MAX_BITS_PER_WORD
73 #define MAX_BITS_PER_WORD BITS_PER_WORD
74 #endif
76 /* Reduce conditional compilation elsewhere. */
77 #ifndef HAVE_insv
78 #define HAVE_insv 0
79 #define CODE_FOR_insv CODE_FOR_nothing
80 #define gen_insv(a,b,c,d) NULL_RTX
81 #endif
82 #ifndef HAVE_extv
83 #define HAVE_extv 0
84 #define CODE_FOR_extv CODE_FOR_nothing
85 #define gen_extv(a,b,c,d) NULL_RTX
86 #endif
87 #ifndef HAVE_extzv
88 #define HAVE_extzv 0
89 #define CODE_FOR_extzv CODE_FOR_nothing
90 #define gen_extzv(a,b,c,d) NULL_RTX
91 #endif
93 /* Cost of various pieces of RTL. Note that some of these are indexed by
94 shift count and some by mode. */
95 static int add_cost, negate_cost, zero_cost;
96 static int shift_cost[MAX_BITS_PER_WORD];
97 static int shiftadd_cost[MAX_BITS_PER_WORD];
98 static int shiftsub_cost[MAX_BITS_PER_WORD];
99 static int mul_cost[NUM_MACHINE_MODES];
100 static int div_cost[NUM_MACHINE_MODES];
101 static int mul_widen_cost[NUM_MACHINE_MODES];
102 static int mul_highpart_cost[NUM_MACHINE_MODES];
104 void
105 init_expmed ()
107 rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
108 int dummy;
109 int m;
110 enum machine_mode mode, wider_mode;
112 start_sequence ();
114 /* This is "some random pseudo register" for purposes of calling recog
115 to see what insns exist. */
116 reg = gen_rtx_REG (word_mode, 10000);
118 zero_cost = rtx_cost (const0_rtx, 0);
119 add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);
121 shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
122 gen_rtx_ASHIFT (word_mode, reg,
123 const0_rtx)));
125 shiftadd_insn
126 = emit_insn (gen_rtx_SET (VOIDmode, reg,
127 gen_rtx_PLUS (word_mode,
128 gen_rtx_MULT (word_mode,
129 reg, const0_rtx),
130 reg)));
132 shiftsub_insn
133 = emit_insn (gen_rtx_SET (VOIDmode, reg,
134 gen_rtx_MINUS (word_mode,
135 gen_rtx_MULT (word_mode,
136 reg, const0_rtx),
137 reg)));
139 init_recog ();
141 shift_cost[0] = 0;
142 shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
144 for (m = 1; m < MAX_BITS_PER_WORD; m++)
146 rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
147 shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
149 XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
150 if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
151 shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
153 XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
154 if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
155 shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
157 XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
158 if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
159 shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
162 negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);
164 sdiv_pow2_cheap
165 = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
166 <= 2 * add_cost);
167 smod_pow2_cheap
168 = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
169 <= 2 * add_cost);
171 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
172 mode != VOIDmode;
173 mode = GET_MODE_WIDER_MODE (mode))
175 reg = gen_rtx_REG (mode, 10000);
176 div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
177 mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
178 wider_mode = GET_MODE_WIDER_MODE (mode);
179 if (wider_mode != VOIDmode)
181 mul_widen_cost[(int) wider_mode]
182 = rtx_cost (gen_rtx_MULT (wider_mode,
183 gen_rtx_ZERO_EXTEND (wider_mode, reg),
184 gen_rtx_ZERO_EXTEND (wider_mode, reg)),
185 SET);
186 mul_highpart_cost[(int) mode]
187 = rtx_cost (gen_rtx_TRUNCATE
188 (mode,
189 gen_rtx_LSHIFTRT (wider_mode,
190 gen_rtx_MULT (wider_mode,
191 gen_rtx_ZERO_EXTEND
192 (wider_mode, reg),
193 gen_rtx_ZERO_EXTEND
194 (wider_mode, reg)),
195 GEN_INT (GET_MODE_BITSIZE (mode)))),
196 SET);
200 end_sequence ();
203 /* Return an rtx representing minus the value of X.
204 MODE is the intended mode of the result,
205 useful if X is a CONST_INT. */
208 negate_rtx (mode, x)
209 enum machine_mode mode;
210 rtx x;
212 rtx result = simplify_unary_operation (NEG, mode, x, mode);
214 if (result == 0)
215 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
217 return result;
220 /* Report on the availability of insv/extv/extzv and the desired mode
221 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
222 is false; else the mode of the specified operand. If OPNO is -1,
223 all the caller cares about is whether the insn is available. */
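/* Editorial illustration (not part of the original file): a typical
   call is

       enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   which yields the mode the insv pattern wants for its value operand,
   or MAX_MACHINE_MODE when the target defines no insv pattern;
   store_bit_field below uses exactly this call.  */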
224 enum machine_mode
225 mode_for_extraction (pattern, opno)
226 enum extraction_pattern pattern;
227 int opno;
229 const struct insn_data *data;
231 switch (pattern)
233 case EP_insv:
234 if (HAVE_insv)
236 data = &insn_data[CODE_FOR_insv];
237 break;
239 return MAX_MACHINE_MODE;
241 case EP_extv:
242 if (HAVE_extv)
244 data = &insn_data[CODE_FOR_extv];
245 break;
247 return MAX_MACHINE_MODE;
249 case EP_extzv:
250 if (HAVE_extzv)
252 data = &insn_data[CODE_FOR_extzv];
253 break;
255 return MAX_MACHINE_MODE;
257 default:
258 abort ();
261 if (opno == -1)
262 return VOIDmode;
264 /* Everyone who uses this function used to follow it with
265 if (result == VOIDmode) result = word_mode; */
266 if (data->operand[opno].mode == VOIDmode)
267 return word_mode;
268 return data->operand[opno].mode;
272 /* Generate code to store value from rtx VALUE
273 into a bit-field within structure STR_RTX
274 containing BITSIZE bits starting at bit BITNUM.
275 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
276 ALIGN is the alignment that STR_RTX is known to have.
277 TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
279 /* ??? Note that there are two different ideas here for how
280 to determine the size to count bits within, for a register.
281 One is BITS_PER_WORD, and the other is the size of operand 3
282 of the insv pattern.
284 If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD;
285 else, we use the mode of operand 3. */
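/* Editorial illustration (hypothetical operands, not part of the
   original file): storing an 8-bit field that starts at bit 5 of the
   object REG, where the field's FIELD_DECL has mode QImode and the
   size of the containing structure is unknown, would look like

       store_bit_field (reg, 8, 5, QImode, value, -1);

   with -1 meaning "total size varies", as documented above.  */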
288 store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
289 rtx str_rtx;
290 unsigned HOST_WIDE_INT bitsize;
291 unsigned HOST_WIDE_INT bitnum;
292 enum machine_mode fieldmode;
293 rtx value;
294 HOST_WIDE_INT total_size;
296 unsigned int unit
297 = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
298 unsigned HOST_WIDE_INT offset = bitnum / unit;
299 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
300 rtx op0 = str_rtx;
301 int byte_offset;
303 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
305 /* Discount the part of the structure before the desired byte.
306 We need to know how many bytes are safe to reference after it. */
307 if (total_size >= 0)
308 total_size -= (bitpos / BIGGEST_ALIGNMENT
309 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
311 while (GET_CODE (op0) == SUBREG)
313 /* The following line once was done only if WORDS_BIG_ENDIAN,
314 but I think that is a mistake. WORDS_BIG_ENDIAN is
315 meaningful at a much higher level; when structures are copied
316 between memory and regs, the higher-numbered regs
317 always get higher addresses. */
318 offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
319 /* We used to adjust BITPOS here, but now we do the whole adjustment
320 right after the loop. */
321 op0 = SUBREG_REG (op0);
324 value = protect_from_queue (value, 0);
326 if (flag_force_mem)
328 int old_generating_concat_p = generating_concat_p;
329 generating_concat_p = 0;
330 value = force_not_mem (value);
331 generating_concat_p = old_generating_concat_p;
334 /* If the target is a register, overwriting the entire object, or storing
335 a full-word or multi-word field can be done with just a SUBREG.
337 If the target is memory, storing any naturally aligned field can be
338 done with a simple store. For targets that support fast unaligned
339 memory, any naturally sized, unit aligned field can be done directly. */
341 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
342 + (offset * UNITS_PER_WORD);
344 if (bitpos == 0
345 && bitsize == GET_MODE_BITSIZE (fieldmode)
346 && (GET_CODE (op0) != MEM
347 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
348 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
349 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
350 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
351 || (offset * BITS_PER_UNIT % bitsize == 0
352 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
354 if (GET_MODE (op0) != fieldmode)
356 if (GET_CODE (op0) == SUBREG)
358 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
359 || GET_MODE_CLASS (fieldmode) == MODE_INT
360 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
361 op0 = SUBREG_REG (op0);
362 else
363 /* Else we've got some float mode source being extracted into
364 a different float mode destination -- this combination of
365 subregs results in Severe Tire Damage. */
366 abort ();
368 if (GET_CODE (op0) == REG)
369 op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
370 else
371 op0 = adjust_address (op0, fieldmode, offset);
373 emit_move_insn (op0, value);
374 return value;
377 /* Make sure we are playing with integral modes. Pun with subregs
378 if we aren't. This must come after the entire register case above,
379 since that case is valid for any mode. The following cases are only
380 valid for integral modes. */
382 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
383 if (imode != GET_MODE (op0))
385 if (GET_CODE (op0) == MEM)
386 op0 = adjust_address (op0, imode, 0);
387 else if (imode != BLKmode)
388 op0 = gen_lowpart (imode, op0);
389 else
390 abort ();
394 /* We may be accessing data outside the field, which means
395 we can alias adjacent data. */
396 if (GET_CODE (op0) == MEM)
398 op0 = shallow_copy_rtx (op0);
399 set_mem_alias_set (op0, 0);
400 set_mem_expr (op0, 0);
403 /* If OP0 is a register, BITPOS must count within a word.
404 But as we have it, it counts within whatever size OP0 now has.
405 On a bigendian machine, these are not the same, so convert. */
406 if (BYTES_BIG_ENDIAN
407 && GET_CODE (op0) != MEM
408 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
409 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
411 /* Storing an lsb-aligned field in a register
412 can be done with a movestrict instruction. */
414 if (GET_CODE (op0) != MEM
415 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
416 && bitsize == GET_MODE_BITSIZE (fieldmode)
417 && (movstrict_optab->handlers[(int) fieldmode].insn_code
418 != CODE_FOR_nothing))
420 int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
422 /* Get appropriate low part of the value being stored. */
423 if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
424 value = gen_lowpart (fieldmode, value);
425 else if (!(GET_CODE (value) == SYMBOL_REF
426 || GET_CODE (value) == LABEL_REF
427 || GET_CODE (value) == CONST))
428 value = convert_to_mode (fieldmode, value, 0);
430 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
431 value = copy_to_mode_reg (fieldmode, value);
433 if (GET_CODE (op0) == SUBREG)
435 if (GET_MODE (SUBREG_REG (op0)) == fieldmode
436 || GET_MODE_CLASS (fieldmode) == MODE_INT
437 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
438 op0 = SUBREG_REG (op0);
439 else
440 /* Else we've got some float mode source being extracted into
441 a different float mode destination -- this combination of
442 subregs results in Severe Tire Damage. */
443 abort ();
446 emit_insn (GEN_FCN (icode)
447 (gen_rtx_SUBREG (fieldmode, op0,
448 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
449 + (offset * UNITS_PER_WORD)),
450 value));
452 return value;
455 /* Handle fields bigger than a word. */
457 if (bitsize > BITS_PER_WORD)
459 /* Here we transfer the words of the field
460 in the order least significant first.
461 This is because the most significant word is the one which may
462 be less than full.
463 However, only do that if the value is not BLKmode. */
465 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
466 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
467 unsigned int i;
469 /* This is the mode we must force value to, so that there will be enough
470 subwords to extract. Note that fieldmode will often (always?) be
471 VOIDmode, because that is what store_field uses to indicate that this
472 is a bit field, but passing VOIDmode to operand_subword_force will
473 result in an abort. */
474 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
476 for (i = 0; i < nwords; i++)
478 /* If I is 0, use the low-order word in both field and target;
479 if I is 1, use the next to lowest word; and so on. */
480 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
481 unsigned int bit_offset = (backwards
482 ? MAX ((int) bitsize - ((int) i + 1)
483 * BITS_PER_WORD,
484 0)
485 : (int) i * BITS_PER_WORD);
487 store_bit_field (op0, MIN (BITS_PER_WORD,
488 bitsize - i * BITS_PER_WORD),
489 bitnum + bit_offset, word_mode,
490 operand_subword_force (value, wordnum,
491 (GET_MODE (value) == VOIDmode
492 ? fieldmode
493 : GET_MODE (value))),
494 total_size);
496 return value;
499 /* From here on we can assume that the field to be stored in fits
500 within a single word, since it is shorter than a word. */
502 /* OFFSET is the number of words or bytes (UNIT says which)
503 from STR_RTX to the first word or byte containing part of the field. */
505 if (GET_CODE (op0) != MEM)
507 if (offset != 0
508 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
510 if (GET_CODE (op0) != REG)
512 /* Since this is a destination (lvalue), we can't copy it to a
513 pseudo. We can trivially remove a SUBREG that does not
514 change the size of the operand. Such a SUBREG may have been
515 added above. Otherwise, abort. */
516 if (GET_CODE (op0) == SUBREG
517 && (GET_MODE_SIZE (GET_MODE (op0))
518 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
519 op0 = SUBREG_REG (op0);
520 else
521 abort ();
523 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
524 op0, (offset * UNITS_PER_WORD));
526 offset = 0;
528 else
529 op0 = protect_from_queue (op0, 1);
531 /* If VALUE is a floating-point mode, access it as an integer of the
532 corresponding size. This can occur on a machine with 64 bit registers
533 that uses SFmode for float. This can also occur for unaligned float
534 structure fields. */
535 if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
536 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
537 value = gen_lowpart (word_mode, value);
539 /* Now OFFSET is nonzero only if OP0 is memory
540 and is therefore always measured in bytes. */
542 if (HAVE_insv
543 && GET_MODE (value) != BLKmode
544 && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
545 /* Ensure insv's size is wide enough for this field. */
546 && (GET_MODE_BITSIZE (op_mode) >= bitsize)
547 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
548 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
550 int xbitpos = bitpos;
551 rtx value1;
552 rtx xop0 = op0;
553 rtx last = get_last_insn ();
554 rtx pat;
555 enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
556 int save_volatile_ok = volatile_ok;
558 volatile_ok = 1;
560 /* If this machine's insv can only insert into a register, copy OP0
561 into a register and save it back later. */
562 /* This used to check flag_force_mem, but that was a serious
563 de-optimization now that flag_force_mem is enabled by -O2. */
564 if (GET_CODE (op0) == MEM
565 && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
566 (op0, VOIDmode)))
568 rtx tempreg;
569 enum machine_mode bestmode;
571 /* Get the mode to use for inserting into this field. If OP0 is
572 BLKmode, get the smallest mode consistent with the alignment. If
573 OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
574 mode. Otherwise, use the smallest mode containing the field. */
576 if (GET_MODE (op0) == BLKmode
577 || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
578 bestmode
579 = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
580 MEM_VOLATILE_P (op0));
581 else
582 bestmode = GET_MODE (op0);
584 if (bestmode == VOIDmode
585 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
586 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
587 goto insv_loses;
589 /* Adjust address to point to the containing unit of that mode.
590 Compute offset as multiple of this unit, counting in bytes. */
591 unit = GET_MODE_BITSIZE (bestmode);
592 offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
593 bitpos = bitnum % unit;
594 op0 = adjust_address (op0, bestmode, offset);
596 /* Fetch that unit, store the bitfield in it, then store
597 the unit. */
598 tempreg = copy_to_reg (op0);
599 store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
600 total_size);
601 emit_move_insn (op0, tempreg);
602 return value;
604 volatile_ok = save_volatile_ok;
606 /* Add OFFSET into OP0's address. */
607 if (GET_CODE (xop0) == MEM)
608 xop0 = adjust_address (xop0, byte_mode, offset);
610 /* If xop0 is a register, we need it in MAXMODE
611 to make it acceptable to the format of insv. */
612 if (GET_CODE (xop0) == SUBREG)
613 /* We can't just change the mode, because this might clobber op0,
614 and we will need the original value of op0 if insv fails. */
615 xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
616 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
617 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
619 /* On big-endian machines, we count bits from the most significant.
620 If the bit field insn does not, we must invert. */
622 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
623 xbitpos = unit - bitsize - xbitpos;
625 /* We have been counting XBITPOS within UNIT.
626 Count instead within the size of the register. */
627 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
628 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
630 unit = GET_MODE_BITSIZE (maxmode);
632 /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
633 value1 = value;
634 if (GET_MODE (value) != maxmode)
636 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
638 /* Optimization: Don't bother really extending VALUE
639 if it has all the bits we will actually use. However,
640 if we must narrow it, be sure we do it correctly. */
642 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
644 rtx tmp;
646 tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
647 if (! tmp)
648 tmp = simplify_gen_subreg (maxmode,
649 force_reg (GET_MODE (value),
650 value1),
651 GET_MODE (value), 0);
652 value1 = tmp;
654 else
655 value1 = gen_lowpart (maxmode, value1);
657 else if (GET_CODE (value) == CONST_INT)
658 value1 = gen_int_mode (INTVAL (value), maxmode);
659 else if (!CONSTANT_P (value))
660 /* Parse phase is supposed to make VALUE's data type
661 match that of the component reference, which is a type
662 at least as wide as the field; so VALUE should have
663 a mode that corresponds to that type. */
664 abort ();
667 /* If this machine's insv insists on a register,
668 get VALUE1 into a register. */
669 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
670 (value1, maxmode)))
671 value1 = force_reg (maxmode, value1);
673 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
674 if (pat)
675 emit_insn (pat);
676 else
678 delete_insns_since (last);
679 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
682 else
683 insv_loses:
684 /* Insv is not available; store using shifts and boolean ops. */
685 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
686 return value;
689 /* Use shifts and boolean operations to store VALUE
690 into a bit field of width BITSIZE
691 in a memory location specified by OP0 except offset by OFFSET bytes.
692 (OFFSET must be 0 if OP0 is a register.)
693 The field starts at position BITPOS within the byte.
694 (If OP0 is a register, it may be a full word or a narrower mode,
695 but BITPOS still counts within a full word,
696 which is significant on bigendian machines.)
698 Note that protect_from_queue has already been done on OP0 and VALUE. */
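/* Editorial illustration (not part of the original file): with BITPOS
   already counted from the least significant bit, storing the value 5
   into a 3-bit field at bit 4 of a byte amounts to

       op0 = (op0 & ~(7 << 4)) | (5 << 4);

   i.e. AND with the complemented mask from mask_rtx, then IOR in the
   shifted value, which is the sequence emitted below.  */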
700 static void
701 store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
702 rtx op0;
703 unsigned HOST_WIDE_INT offset, bitsize, bitpos;
704 rtx value;
706 enum machine_mode mode;
707 unsigned int total_bits = BITS_PER_WORD;
708 rtx subtarget, temp;
709 int all_zero = 0;
710 int all_one = 0;
712 /* There is a case not handled here:
713 a structure with a known alignment of just a halfword
714 and a field split across two aligned halfwords within the structure.
715 Or likewise a structure with a known alignment of just a byte
716 and a field split across two bytes.
717 Such cases are not supposed to be able to occur. */
719 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
721 if (offset != 0)
722 abort ();
723 /* Special treatment for a bit field split across two registers. */
724 if (bitsize + bitpos > BITS_PER_WORD)
726 store_split_bit_field (op0, bitsize, bitpos, value);
727 return;
730 else
732 /* Get the proper mode to use for this field. We want a mode that
733 includes the entire field. If such a mode would be larger than
734 a word, we won't be doing the extraction the normal way.
735 We don't want a mode bigger than the destination. */
737 mode = GET_MODE (op0);
738 if (GET_MODE_BITSIZE (mode) == 0
739 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
740 mode = word_mode;
741 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
742 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
744 if (mode == VOIDmode)
746 /* The only way this should occur is if the field spans word
747 boundaries. */
748 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
749 value);
750 return;
753 total_bits = GET_MODE_BITSIZE (mode);
755 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
756 be in the range 0 to total_bits-1, and put any excess bytes in
757 OFFSET. */
758 if (bitpos >= total_bits)
760 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
761 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
762 * BITS_PER_UNIT);
765 /* Get ref to an aligned byte, halfword, or word containing the field.
766 Adjust BITPOS to be position within a word,
767 and OFFSET to be the offset of that word.
768 Then alter OP0 to refer to that word. */
769 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
770 offset -= (offset % (total_bits / BITS_PER_UNIT));
771 op0 = adjust_address (op0, mode, offset);
774 mode = GET_MODE (op0);
776 /* Now MODE is either some integral mode for a MEM as OP0,
777 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
778 The bit field is contained entirely within OP0.
779 BITPOS is the starting bit number within OP0.
780 (OP0's mode may actually be narrower than MODE.) */
782 if (BYTES_BIG_ENDIAN)
783 /* BITPOS is the distance between our msb
784 and that of the containing datum.
785 Convert it to the distance from the lsb. */
786 bitpos = total_bits - bitsize - bitpos;
788 /* Now BITPOS is always the distance between our lsb
789 and that of OP0. */
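/* Editorial illustration (not part of the original file): with
   TOTAL_BITS == 32, BITSIZE == 8 and a big-endian BITPOS of 4 (counted
   from the msb), the conversion above yields 32 - 8 - 4 == 20, the
   same field position counted from the lsb.  */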
791 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
792 we must first convert its mode to MODE. */
794 if (GET_CODE (value) == CONST_INT)
796 HOST_WIDE_INT v = INTVAL (value);
798 if (bitsize < HOST_BITS_PER_WIDE_INT)
799 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
801 if (v == 0)
802 all_zero = 1;
803 else if ((bitsize < HOST_BITS_PER_WIDE_INT
804 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
805 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
806 all_one = 1;
808 value = lshift_value (mode, value, bitpos, bitsize);
810 else
812 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
813 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
815 if (GET_MODE (value) != mode)
817 if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
818 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
819 value = gen_lowpart (mode, value);
820 else
821 value = convert_to_mode (mode, value, 1);
824 if (must_and)
825 value = expand_binop (mode, and_optab, value,
826 mask_rtx (mode, 0, bitsize, 0),
827 NULL_RTX, 1, OPTAB_LIB_WIDEN);
828 if (bitpos > 0)
829 value = expand_shift (LSHIFT_EXPR, mode, value,
830 build_int_2 (bitpos, 0), NULL_RTX, 1);
833 /* Now clear the chosen bits in OP0,
834 except that if VALUE is -1 we need not bother. */
836 subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
838 if (! all_one)
840 temp = expand_binop (mode, and_optab, op0,
841 mask_rtx (mode, bitpos, bitsize, 1),
842 subtarget, 1, OPTAB_LIB_WIDEN);
843 subtarget = temp;
845 else
846 temp = op0;
848 /* Now logical-or VALUE into OP0, unless it is zero. */
850 if (! all_zero)
851 temp = expand_binop (mode, ior_optab, temp, value,
852 subtarget, 1, OPTAB_LIB_WIDEN);
853 if (op0 != temp)
854 emit_move_insn (op0, temp);
857 /* Store a bit field that is split across multiple accessible memory objects.
859 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
860 BITSIZE is the field width; BITPOS the position of its first bit
861 (within the word).
862 VALUE is the value to store.
864 This does not yet handle fields wider than BITS_PER_WORD. */
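/* Editorial illustration (not part of the original file), assuming
   32-bit words and a little-endian target: storing a 12-bit field at
   BITPOS 26 takes two passes of the loop below.  The first pass stores
   the 6 least significant bits of VALUE at bits 26..31 of word 0; the
   second stores the next 6 bits at bits 0..5 of word 1.  */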
866 static void
867 store_split_bit_field (op0, bitsize, bitpos, value)
868 rtx op0;
869 unsigned HOST_WIDE_INT bitsize, bitpos;
870 rtx value;
872 unsigned int unit;
873 unsigned int bitsdone = 0;
875 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
876 much at a time. */
877 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
878 unit = BITS_PER_WORD;
879 else
880 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
882 /* If VALUE is a constant other than a CONST_INT, get it into a register in
883 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
884 that VALUE might be a floating-point constant. */
885 if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
887 rtx word = gen_lowpart_common (word_mode, value);
889 if (word && (value != word))
890 value = word;
891 else
892 value = gen_lowpart_common (word_mode,
893 force_reg (GET_MODE (value) != VOIDmode
894 ? GET_MODE (value)
895 : word_mode, value));
897 else if (GET_CODE (value) == ADDRESSOF)
898 value = copy_to_reg (value);
900 while (bitsdone < bitsize)
902 unsigned HOST_WIDE_INT thissize;
903 rtx part, word;
904 unsigned HOST_WIDE_INT thispos;
905 unsigned HOST_WIDE_INT offset;
907 offset = (bitpos + bitsdone) / unit;
908 thispos = (bitpos + bitsdone) % unit;
910 /* THISSIZE must not overrun a word boundary. Otherwise,
911 store_fixed_bit_field will call us again, and we will mutually
912 recurse forever. */
913 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
914 thissize = MIN (thissize, unit - thispos);
916 if (BYTES_BIG_ENDIAN)
918 int total_bits;
920 /* We must do an endian conversion exactly the same way as it is
921 done in extract_bit_field, so that the two calls to
922 extract_fixed_bit_field will have comparable arguments. */
923 if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
924 total_bits = BITS_PER_WORD;
925 else
926 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
928 /* Fetch successively less significant portions. */
929 if (GET_CODE (value) == CONST_INT)
930 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
931 >> (bitsize - bitsdone - thissize))
932 & (((HOST_WIDE_INT) 1 << thissize) - 1));
933 else
934 /* The args are chosen so that the last part includes the
935 lsb. Give extract_bit_field the value it needs (with
936 endianness compensation) to fetch the piece we want. */
937 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
938 total_bits - bitsize + bitsdone,
939 NULL_RTX, 1);
941 else
943 /* Fetch successively more significant portions. */
944 if (GET_CODE (value) == CONST_INT)
945 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
946 >> bitsdone)
947 & (((HOST_WIDE_INT) 1 << thissize) - 1));
948 else
949 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
950 bitsdone, NULL_RTX, 1);
953 /* If OP0 is a register, then handle OFFSET here.
955 When handling multiword bitfields, extract_bit_field may pass
956 down a word_mode SUBREG of a larger REG for a bitfield that actually
957 crosses a word boundary. Thus, for a SUBREG, we must find
958 the current word starting from the base register. */
959 if (GET_CODE (op0) == SUBREG)
961 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
962 word = operand_subword_force (SUBREG_REG (op0), word_offset,
963 GET_MODE (SUBREG_REG (op0)));
964 offset = 0;
966 else if (GET_CODE (op0) == REG)
968 word = operand_subword_force (op0, offset, GET_MODE (op0));
969 offset = 0;
971 else
972 word = op0;
974 /* OFFSET is in UNITs, and UNIT is in bits.
975 store_fixed_bit_field wants offset in bytes. */
976 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
977 thispos, part);
978 bitsdone += thissize;
982 /* Generate code to extract a bit-field from STR_RTX
983 containing BITSIZE bits, starting at BITNUM,
984 and put it in TARGET if possible (if TARGET is nonzero).
985 Regardless of TARGET, we return the rtx for where the value is placed.
986 It may be a QUEUED.
988 STR_RTX is the structure containing the byte (a REG or MEM).
989 UNSIGNEDP is nonzero if this is an unsigned bit field.
990 MODE is the natural mode of the field value once extracted.
991 TMODE is the mode the caller would like the value to have;
992 but the value may be returned with type MODE instead.
994 TOTAL_SIZE is the size in bytes of the containing structure,
995 or -1 if varying.
997 If a TARGET is specified and we can store in it at no extra cost,
998 we do so, and return TARGET.
999 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1000 if they are equally easy. */
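/* Editorial illustration (hypothetical operands, not part of the
   original file): fetching an unsigned 8-bit field that starts at bit
   5 of MEM, where QImode is the field's natural mode and the caller
   would prefer an SImode result, might look like

       rtx x = extract_bit_field (mem, 8, 5, 1, NULL_RTX, QImode, SImode, -1);

   and X need not be the TARGET that was passed in (here NULL_RTX).  */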
1003 extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
1004 target, mode, tmode, total_size)
1005 rtx str_rtx;
1006 unsigned HOST_WIDE_INT bitsize;
1007 unsigned HOST_WIDE_INT bitnum;
1008 int unsignedp;
1009 rtx target;
1010 enum machine_mode mode, tmode;
1011 HOST_WIDE_INT total_size;
1013 unsigned int unit
1014 = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
1015 unsigned HOST_WIDE_INT offset = bitnum / unit;
1016 unsigned HOST_WIDE_INT bitpos = bitnum % unit;
1017 rtx op0 = str_rtx;
1018 rtx spec_target = target;
1019 rtx spec_target_subreg = 0;
1020 enum machine_mode int_mode;
1021 enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
1022 enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
1023 enum machine_mode mode1;
1024 int byte_offset;
1026 /* Discount the part of the structure before the desired byte.
1027 We need to know how many bytes are safe to reference after it. */
1028 if (total_size >= 0)
1029 total_size -= (bitpos / BIGGEST_ALIGNMENT
1030 * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
1032 if (tmode == VOIDmode)
1033 tmode = mode;
1034 while (GET_CODE (op0) == SUBREG)
1036 int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
1037 int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));
1039 offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;
1041 inner_size = MIN (inner_size, BITS_PER_WORD);
1043 if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
1045 bitpos += inner_size - outer_size;
1046 if (bitpos > unit)
1048 offset += (bitpos / unit);
1049 bitpos %= unit;
1053 op0 = SUBREG_REG (op0);
1056 if (GET_CODE (op0) == REG
1057 && mode == GET_MODE (op0)
1058 && bitnum == 0
1059 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1061 /* We're trying to extract a full register from itself. */
1062 return op0;
1065 /* Make sure we are playing with integral modes. Pun with subregs
1066 if we aren't. */
1068 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1069 if (imode != GET_MODE (op0))
1071 if (GET_CODE (op0) == MEM)
1072 op0 = adjust_address (op0, imode, 0);
1073 else if (imode != BLKmode)
1074 op0 = gen_lowpart (imode, op0);
1075 else
1076 abort ();
1080 /* We may be accessing data outside the field, which means
1081 we can alias adjacent data. */
1082 if (GET_CODE (op0) == MEM)
1084 op0 = shallow_copy_rtx (op0);
1085 set_mem_alias_set (op0, 0);
1086 set_mem_expr (op0, 0);
1089 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1090 If that's wrong, the solution is to test for it and set TARGET to 0
1091 if needed. */
1093 /* If OP0 is a register, BITPOS must count within a word.
1094 But as we have it, it counts within whatever size OP0 now has.
1095 On a bigendian machine, these are not the same, so convert. */
1096 if (BYTES_BIG_ENDIAN
1097 && GET_CODE (op0) != MEM
1098 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1099 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1101 /* Extracting a full-word or multi-word value
1102 from a structure in a register or aligned memory.
1103 This can be done with just SUBREG.
1104 So too extracting a subword value in
1105 the least significant part of the register. */
1107 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
1108 + (offset * UNITS_PER_WORD);
1110 mode1 = (VECTOR_MODE_P (tmode)
1111 ? mode
1112 : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));
1114 if (((GET_CODE (op0) != MEM
1115 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1116 GET_MODE_BITSIZE (GET_MODE (op0)))
1117 && GET_MODE_SIZE (mode1) != 0
1118 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1119 || (GET_CODE (op0) == MEM
1120 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1121 || (offset * BITS_PER_UNIT % bitsize == 0
1122 && MEM_ALIGN (op0) % bitsize == 0))))
1123 && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1124 && bitpos % BITS_PER_WORD == 0)
1125 || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
1126 /* ??? The big endian test here is wrong. This is correct
1127 if the value is in a register, and if mode_for_size is not
1128 the same mode as op0. This causes us to get unnecessarily
1129 inefficient code from the Thumb port when -mbig-endian. */
1130 && (BYTES_BIG_ENDIAN
1131 ? bitpos + bitsize == BITS_PER_WORD
1132 : bitpos == 0))))
1134 if (mode1 != GET_MODE (op0))
1136 if (GET_CODE (op0) == SUBREG)
1138 if (GET_MODE (SUBREG_REG (op0)) == mode1
1139 || GET_MODE_CLASS (mode1) == MODE_INT
1140 || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
1141 op0 = SUBREG_REG (op0);
1142 else
1143 /* Else we've got some float mode source being extracted into
1144 a different float mode destination -- this combination of
1145 subregs results in Severe Tire Damage. */
1146 goto no_subreg_mode_swap;
1148 if (GET_CODE (op0) == REG)
1149 op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
1150 else
1151 op0 = adjust_address (op0, mode1, offset);
1153 if (mode1 != mode)
1154 return convert_to_mode (tmode, op0, unsignedp);
1155 return op0;
1157 no_subreg_mode_swap:
1159 /* Handle fields bigger than a word. */
1161 if (bitsize > BITS_PER_WORD)
1163 /* Here we transfer the words of the field
1164 in the order least significant first.
1165 This is because the most significant word is the one which may
1166 be less than full. */
1168 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1169 unsigned int i;
1171 if (target == 0 || GET_CODE (target) != REG)
1172 target = gen_reg_rtx (mode);
1174 /* Indicate for flow that the entire target reg is being set. */
1175 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
1177 for (i = 0; i < nwords; i++)
1179 /* If I is 0, use the low-order word in both field and target;
1180 if I is 1, use the next to lowest word; and so on. */
1181 /* Word number in TARGET to use. */
1182 unsigned int wordnum
1183 = (WORDS_BIG_ENDIAN
1184 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1185 : i);
1186 /* Offset from start of field in OP0. */
1187 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1188 ? MAX (0, ((int) bitsize - ((int) i + 1)
1189 * (int) BITS_PER_WORD))
1190 : (int) i * BITS_PER_WORD);
1191 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1192 rtx result_part
1193 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1194 bitsize - i * BITS_PER_WORD),
1195 bitnum + bit_offset, 1, target_part, mode,
1196 word_mode, total_size);
1198 if (target_part == 0)
1199 abort ();
1201 if (result_part != target_part)
1202 emit_move_insn (target_part, result_part);
1205 if (unsignedp)
1207 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1208 need to be zero'd out. */
1209 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1211 unsigned int i, total_words;
1213 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1214 for (i = nwords; i < total_words; i++)
1215 emit_move_insn
1216 (operand_subword (target,
1217 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1218 1, VOIDmode),
1219 const0_rtx);
1221 return target;
1224 /* Signed bit field: sign-extend with two arithmetic shifts. */
1225 target = expand_shift (LSHIFT_EXPR, mode, target,
1226 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1227 NULL_RTX, 0);
1228 return expand_shift (RSHIFT_EXPR, mode, target,
1229 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1230 NULL_RTX, 0);
1233 /* From here on we know the desired field is smaller than a word. */
1235 /* Check if there is a correspondingly-sized integer field, so we can
1236 safely extract it as one size of integer, if necessary; then
1237 truncate or extend to the size that is wanted; then use SUBREGs or
1238 convert_to_mode to get one of the modes we really wanted. */
1240 int_mode = int_mode_for_mode (tmode);
1241 if (int_mode == BLKmode)
1242 int_mode = int_mode_for_mode (mode);
1243 if (int_mode == BLKmode)
1244 abort (); /* Should probably push op0 out to memory and then
1245 do a load. */
1247 /* OFFSET is the number of words or bytes (UNIT says which)
1248 from STR_RTX to the first word or byte containing part of the field. */
1250 if (GET_CODE (op0) != MEM)
1252 if (offset != 0
1253 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1255 if (GET_CODE (op0) != REG)
1256 op0 = copy_to_reg (op0);
1257 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1258 op0, (offset * UNITS_PER_WORD));
1260 offset = 0;
1262 else
1263 op0 = protect_from_queue (str_rtx, 1);
1265 /* Now OFFSET is nonzero only for memory operands. */
1267 if (unsignedp)
1269 if (HAVE_extzv
1270 && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
1271 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1272 && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
1274 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1275 rtx bitsize_rtx, bitpos_rtx;
1276 rtx last = get_last_insn ();
1277 rtx xop0 = op0;
1278 rtx xtarget = target;
1279 rtx xspec_target = spec_target;
1280 rtx xspec_target_subreg = spec_target_subreg;
1281 rtx pat;
1282 enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);
1284 if (GET_CODE (xop0) == MEM)
1286 int save_volatile_ok = volatile_ok;
1287 volatile_ok = 1;
1289 /* Is the memory operand acceptable? */
1290 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
1291 (xop0, GET_MODE (xop0))))
1293 /* No, load into a reg and extract from there. */
1294 enum machine_mode bestmode;
1296 /* Get the mode to use for inserting into this field. If
1297 OP0 is BLKmode, get the smallest mode consistent with the
1298 alignment. If OP0 is a non-BLKmode object that is no
1299 wider than MAXMODE, use its mode. Otherwise, use the
1300 smallest mode containing the field. */
1302 if (GET_MODE (xop0) == BLKmode
1303 || (GET_MODE_SIZE (GET_MODE (op0))
1304 > GET_MODE_SIZE (maxmode)))
1305 bestmode = get_best_mode (bitsize, bitnum,
1306 MEM_ALIGN (xop0), maxmode,
1307 MEM_VOLATILE_P (xop0));
1308 else
1309 bestmode = GET_MODE (xop0);
1311 if (bestmode == VOIDmode
1312 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1313 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1314 goto extzv_loses;
1316 /* Compute offset as multiple of this unit,
1317 counting in bytes. */
1318 unit = GET_MODE_BITSIZE (bestmode);
1319 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1320 xbitpos = bitnum % unit;
1321 xop0 = adjust_address (xop0, bestmode, xoffset);
1323 /* Fetch it to a register in that size. */
1324 xop0 = force_reg (bestmode, xop0);
1326 /* XBITPOS counts within UNIT, which is what is expected. */
1328 else
1329 /* Get ref to first byte containing part of the field. */
1330 xop0 = adjust_address (xop0, byte_mode, xoffset);
1332 volatile_ok = save_volatile_ok;
1335 /* If op0 is a register, we need it in MAXMODE (which is usually
1336 SImode) to make it acceptable to the format of extzv. */
1337 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1338 goto extzv_loses;
1339 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1340 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1342 /* On big-endian machines, we count bits from the most significant.
1343 If the bit field insn does not, we must invert. */
1344 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1345 xbitpos = unit - bitsize - xbitpos;
1347 /* Now convert from counting within UNIT to counting in MAXMODE. */
1348 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1349 xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
1351 unit = GET_MODE_BITSIZE (maxmode);
1353 if (xtarget == 0
1354 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1355 xtarget = xspec_target = gen_reg_rtx (tmode);
1357 if (GET_MODE (xtarget) != maxmode)
1359 if (GET_CODE (xtarget) == REG)
1361 int wider = (GET_MODE_SIZE (maxmode)
1362 > GET_MODE_SIZE (GET_MODE (xtarget)));
1363 xtarget = gen_lowpart (maxmode, xtarget);
1364 if (wider)
1365 xspec_target_subreg = xtarget;
1367 else
1368 xtarget = gen_reg_rtx (maxmode);
1371 /* If this machine's extzv insists on a register target,
1372 make sure we have one. */
1373 if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
1374 (xtarget, maxmode)))
1375 xtarget = gen_reg_rtx (maxmode);
1377 bitsize_rtx = GEN_INT (bitsize);
1378 bitpos_rtx = GEN_INT (xbitpos);
1380 pat = gen_extzv (protect_from_queue (xtarget, 1),
1381 xop0, bitsize_rtx, bitpos_rtx);
1382 if (pat)
1384 emit_insn (pat);
1385 target = xtarget;
1386 spec_target = xspec_target;
1387 spec_target_subreg = xspec_target_subreg;
1389 else
1391 delete_insns_since (last);
1392 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1393 bitpos, target, 1);
1396 else
1397 extzv_loses:
1398 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1399 bitpos, target, 1);
1401 else
1403 if (HAVE_extv
1404 && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
1405 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1406 && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
1408 int xbitpos = bitpos, xoffset = offset;
1409 rtx bitsize_rtx, bitpos_rtx;
1410 rtx last = get_last_insn ();
1411 rtx xop0 = op0, xtarget = target;
1412 rtx xspec_target = spec_target;
1413 rtx xspec_target_subreg = spec_target_subreg;
1414 rtx pat;
1415 enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
1417 if (GET_CODE (xop0) == MEM)
1419 /* Is the memory operand acceptable? */
1420 if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
1421 (xop0, GET_MODE (xop0))))
1423 /* No, load into a reg and extract from there. */
1424 enum machine_mode bestmode;
1426 /* Get the mode to use for inserting into this field. If
1427 OP0 is BLKmode, get the smallest mode consistent with the
1428 alignment. If OP0 is a non-BLKmode object that is no
1429 wider than MAXMODE, use its mode. Otherwise, use the
1430 smallest mode containing the field. */
1432 if (GET_MODE (xop0) == BLKmode
1433 || (GET_MODE_SIZE (GET_MODE (op0))
1434 > GET_MODE_SIZE (maxmode)))
1435 bestmode = get_best_mode (bitsize, bitnum,
1436 MEM_ALIGN (xop0), maxmode,
1437 MEM_VOLATILE_P (xop0));
1438 else
1439 bestmode = GET_MODE (xop0);
1441 if (bestmode == VOIDmode
1442 || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
1443 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
1444 goto extv_loses;
1446 /* Compute offset as multiple of this unit,
1447 counting in bytes. */
1448 unit = GET_MODE_BITSIZE (bestmode);
1449 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1450 xbitpos = bitnum % unit;
1451 xop0 = adjust_address (xop0, bestmode, xoffset);
1453 /* Fetch it to a register in that size. */
1454 xop0 = force_reg (bestmode, xop0);
1456 /* XBITPOS counts within UNIT, which is what is expected. */
1458 else
1459 /* Get ref to first byte containing part of the field. */
1460 xop0 = adjust_address (xop0, byte_mode, xoffset);
1463 /* If op0 is a register, we need it in MAXMODE (which is usually
1464 SImode) to make it acceptable to the format of extv. */
1465 if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
1466 goto extv_loses;
1467 if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
1468 xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);
1470 /* On big-endian machines, we count bits from the most significant.
1471 If the bit field insn does not, we must invert. */
1472 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1473 xbitpos = unit - bitsize - xbitpos;
1475 /* XBITPOS counts within a size of UNIT.
1476 Adjust to count within a size of MAXMODE. */
1477 if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
1478 xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
1480 unit = GET_MODE_BITSIZE (maxmode);
1482 if (xtarget == 0
1483 || (flag_force_mem && GET_CODE (xtarget) == MEM))
1484 xtarget = xspec_target = gen_reg_rtx (tmode);
1486 if (GET_MODE (xtarget) != maxmode)
1488 if (GET_CODE (xtarget) == REG)
1490 int wider = (GET_MODE_SIZE (maxmode)
1491 > GET_MODE_SIZE (GET_MODE (xtarget)));
1492 xtarget = gen_lowpart (maxmode, xtarget);
1493 if (wider)
1494 xspec_target_subreg = xtarget;
1496 else
1497 xtarget = gen_reg_rtx (maxmode);
1500 /* If this machine's extv insists on a register target,
1501 make sure we have one. */
1502 if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
1503 (xtarget, maxmode)))
1504 xtarget = gen_reg_rtx (maxmode);
1506 bitsize_rtx = GEN_INT (bitsize);
1507 bitpos_rtx = GEN_INT (xbitpos);
1509 pat = gen_extv (protect_from_queue (xtarget, 1),
1510 xop0, bitsize_rtx, bitpos_rtx);
1511 if (pat)
1513 emit_insn (pat);
1514 target = xtarget;
1515 spec_target = xspec_target;
1516 spec_target_subreg = xspec_target_subreg;
1518 else
1520 delete_insns_since (last);
1521 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1522 bitpos, target, 0);
1525 else
1526 extv_loses:
1527 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1528 bitpos, target, 0);
1530 if (target == spec_target)
1531 return target;
1532 if (target == spec_target_subreg)
1533 return spec_target;
1534 if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
1536 /* If the target mode is floating-point, first convert to the
1537 integer mode of that size and then access it as a floating-point
1538 value via a SUBREG. */
1539 if (GET_MODE_CLASS (tmode) != MODE_INT
1540 && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
1542 target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
1543 MODE_INT, 0),
1544 target, unsignedp);
1545 return gen_lowpart (tmode, target);
1547 else
1548 return convert_to_mode (tmode, target, unsignedp);
1550 return target;
1553 /* Extract a bit field using shifts and boolean operations
1554 Returns an rtx to represent the value.
1555 OP0 addresses a register (word) or memory (byte).
1556 BITPOS says which bit within the word or byte the bit field starts in.
1557 OFFSET says how many bytes farther the bit field starts;
1558 it is 0 if OP0 is a register.
1559 BITSIZE says how many bits long the bit field is.
1560 (If OP0 is a register, it may be narrower than a full word,
1561 but BITPOS still counts within a full word,
1562 which is significant on bigendian machines.)
1564 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1565 If TARGET is nonzero, attempts to store the value there
1566 and return TARGET, but this is not guaranteed.
1567 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
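/* Editorial illustration (not part of the original file): once BITPOS
   is counted from the lsb, an unsigned 5-bit field at bit 3 of a
   32-bit word is simply (op0 >> 3) & 0x1f.  For the signed case the
   code below instead shifts the field's msb up to the msb of the
   chosen mode and then arithmetic-shifts it back down, so that the
   sign bit is replicated into the upper bits.  */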
1569 static rtx
1570 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1571 target, unsignedp)
1572 enum machine_mode tmode;
1573 rtx op0, target;
1574 unsigned HOST_WIDE_INT offset, bitsize, bitpos;
1575 int unsignedp;
1577 unsigned int total_bits = BITS_PER_WORD;
1578 enum machine_mode mode;
1580 if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1582 /* Special treatment for a bit field split across two registers. */
1583 if (bitsize + bitpos > BITS_PER_WORD)
1584 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1586 else
1588 /* Get the proper mode to use for this field. We want a mode that
1589 includes the entire field. If such a mode would be larger than
1590 a word, we won't be doing the extraction the normal way. */
1592 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1593 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1595 if (mode == VOIDmode)
1596 /* The only way this should occur is if the field spans word
1597 boundaries. */
1598 return extract_split_bit_field (op0, bitsize,
1599 bitpos + offset * BITS_PER_UNIT,
1600 unsignedp);
1602 total_bits = GET_MODE_BITSIZE (mode);
1604 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1605 be in the range 0 to total_bits-1, and put any excess bytes in
1606 OFFSET. */
1607 if (bitpos >= total_bits)
1609 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1610 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1611 * BITS_PER_UNIT);
1614 /* Get ref to an aligned byte, halfword, or word containing the field.
1615 Adjust BITPOS to be position within a word,
1616 and OFFSET to be the offset of that word.
1617 Then alter OP0 to refer to that word. */
1618 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1619 offset -= (offset % (total_bits / BITS_PER_UNIT));
1620 op0 = adjust_address (op0, mode, offset);
1623 mode = GET_MODE (op0);
1625 if (BYTES_BIG_ENDIAN)
1626 /* BITPOS is the distance between our msb and that of OP0.
1627 Convert it to the distance from the lsb. */
1628 bitpos = total_bits - bitsize - bitpos;
1630 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1631 We have reduced the big-endian case to the little-endian case. */
1633 if (unsignedp)
1635 if (bitpos)
1637 /* If the field does not already start at the lsb,
1638 shift it so it does. */
1639 tree amount = build_int_2 (bitpos, 0);
1640 /* Maybe propagate the target for the shift. */
1641 /* But not if we will return it--could confuse integrate.c. */
1642 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1643 && !REG_FUNCTION_VALUE_P (target)
1644 ? target : 0);
1645 if (tmode != mode) subtarget = 0;
1646 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1648 /* Convert the value to the desired mode. */
1649 if (mode != tmode)
1650 op0 = convert_to_mode (tmode, op0, 1);
1652 /* Unless the msb of the field used to be the msb when we shifted,
1653 mask out the upper bits. */
1655 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1656 return expand_binop (GET_MODE (op0), and_optab, op0,
1657 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1658 target, 1, OPTAB_LIB_WIDEN);
1659 return op0;
1662 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1663 then arithmetic-shift its lsb to the lsb of the word. */
1664 op0 = force_reg (mode, op0);
1665 if (mode != tmode)
1666 target = 0;
1668 /* Find the narrowest integer mode that contains the field. */
1670 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1671 mode = GET_MODE_WIDER_MODE (mode))
1672 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1674 op0 = convert_to_mode (mode, op0, 0);
1675 break;
1678 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1680 tree amount
1681 = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1682 /* Maybe propagate the target for the shift. */
1683 /* But not if we will return the result--could confuse integrate.c. */
1684 rtx subtarget = (target != 0 && GET_CODE (target) == REG
1685 && ! REG_FUNCTION_VALUE_P (target)
1686 ? target : 0);
1687 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1690 return expand_shift (RSHIFT_EXPR, mode, op0,
1691 build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1692 target, 0);
1695 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1696 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1697 complement of that if COMPLEMENT. The mask is truncated if
1698 necessary to the width of mode MODE. The mask is zero-extended if
1699 BITSIZE+BITPOS is too small for MODE. */
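/* Editorial illustration (not part of the original file): for SImode
   with BITPOS == 4 and BITSIZE == 8 the mask is 0xff0 (eight ones
   followed by four zeros); with COMPLEMENT nonzero it is the SImode
   truncation of the bitwise inverse, 0xfffff00f.  */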
1701 static rtx
1702 mask_rtx (mode, bitpos, bitsize, complement)
1703 enum machine_mode mode;
1704 int bitpos, bitsize, complement;
1706 HOST_WIDE_INT masklow, maskhigh;
1708 if (bitpos < HOST_BITS_PER_WIDE_INT)
1709 masklow = (HOST_WIDE_INT) -1 << bitpos;
1710 else
1711 masklow = 0;
1713 if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1714 masklow &= ((unsigned HOST_WIDE_INT) -1
1715 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1717 if (bitpos <= HOST_BITS_PER_WIDE_INT)
1718 maskhigh = -1;
1719 else
1720 maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1722 if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1723 maskhigh &= ((unsigned HOST_WIDE_INT) -1
1724 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1725 else
1726 maskhigh = 0;
1728 if (complement)
1730 maskhigh = ~maskhigh;
1731 masklow = ~masklow;
1734 return immed_double_const (masklow, maskhigh, mode);
1737 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1738 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
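/* Editorial illustration (not part of the original file): with
   VALUE == 0x12a, BITSIZE == 8 and BITPOS == 8, the value is first
   truncated to 0x2a and then shifted, giving the constant 0x2a00.  */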
1740 static rtx
1741 lshift_value (mode, value, bitpos, bitsize)
1742 enum machine_mode mode;
1743 rtx value;
1744 int bitpos, bitsize;
1746 unsigned HOST_WIDE_INT v = INTVAL (value);
1747 HOST_WIDE_INT low, high;
1749 if (bitsize < HOST_BITS_PER_WIDE_INT)
1750 v &= ~((HOST_WIDE_INT) -1 << bitsize);
1752 if (bitpos < HOST_BITS_PER_WIDE_INT)
1754 low = v << bitpos;
1755 high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1757 else
1759 low = 0;
1760 high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1763 return immed_double_const (low, high, mode);
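/* Illustrative sketch, not part of GCC: the single-word equivalent of
   lshift_value -- truncate VALUE to BITSIZE bits, then shift it up by
   BITPOS.  The helper name is hypothetical; assumes <stdint.h>.  */
#if 0
static uint64_t
example_lshift_value (uint64_t value, int bitpos, int bitsize)
{
  if (bitsize < 64)
    value &= ((uint64_t) 1 << bitsize) - 1;   /* keep only BITSIZE bits */
  return value << bitpos;
}
/* example_lshift_value (0xff, 4, 3) == 0x70: only three bits survive.  */
#endif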
1766 /* Extract a bit field that is split across two words
1767 and return an RTX for the result.
1769 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1770 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1771 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1773 static rtx
1774 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1775 rtx op0;
1776 unsigned HOST_WIDE_INT bitsize, bitpos;
1777 int unsignedp;
1779 unsigned int unit;
1780 unsigned int bitsdone = 0;
1781 rtx result = NULL_RTX;
1782 int first = 1;
1784 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1785 much at a time. */

1786 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1787 unit = BITS_PER_WORD;
1788 else
1789 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1791 while (bitsdone < bitsize)
1793 unsigned HOST_WIDE_INT thissize;
1794 rtx part, word;
1795 unsigned HOST_WIDE_INT thispos;
1796 unsigned HOST_WIDE_INT offset;
1798 offset = (bitpos + bitsdone) / unit;
1799 thispos = (bitpos + bitsdone) % unit;
1801 /* THISSIZE must not overrun a word boundary. Otherwise,
1802 extract_fixed_bit_field will call us again, and we will mutually
1803 recurse forever. */
1804 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1805 thissize = MIN (thissize, unit - thispos);
1807 /* If OP0 is a register, then handle OFFSET here.
1809 When handling multiword bitfields, extract_bit_field may pass
1810 down a word_mode SUBREG of a larger REG for a bitfield that actually
1811 crosses a word boundary. Thus, for a SUBREG, we must find
1812 the current word starting from the base register. */
1813 if (GET_CODE (op0) == SUBREG)
1815 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1816 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1817 GET_MODE (SUBREG_REG (op0)));
1818 offset = 0;
1820 else if (GET_CODE (op0) == REG)
1822 word = operand_subword_force (op0, offset, GET_MODE (op0));
1823 offset = 0;
1825 else
1826 word = op0;
1828 /* Extract the parts in bit-counting order,
1829 whose meaning is determined by BYTES_PER_UNIT.
1830 OFFSET is in UNITs, and UNIT is in bits.
1831 extract_fixed_bit_field wants offset in bytes. */
1832 part = extract_fixed_bit_field (word_mode, word,
1833 offset * unit / BITS_PER_UNIT,
1834 thissize, thispos, 0, 1);
1835 bitsdone += thissize;
1837 /* Shift this part into place for the result. */
1838 if (BYTES_BIG_ENDIAN)
1840 if (bitsize != bitsdone)
1841 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1842 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1844 else
1846 if (bitsdone != thissize)
1847 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1848 build_int_2 (bitsdone - thissize, 0), 0, 1);
1851 if (first)
1852 result = part;
1853 else
1854 /* Combine the parts with bitwise or. This works
1855 because we extracted each part as an unsigned bit field. */
1856 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1857 OPTAB_LIB_WIDEN);
1859 first = 0;
1862 /* Unsigned bit field: we are done. */
1863 if (unsignedp)
1864 return result;
1865 /* Signed bit field: sign-extend with two arithmetic shifts. */
1866 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1867 build_int_2 (BITS_PER_WORD - bitsize, 0),
1868 NULL_RTX, 0);
1869 return expand_shift (RSHIFT_EXPR, word_mode, result,
1870 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
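/* Illustrative sketch, not GCC code: how a field that straddles a 32-bit
   word boundary is assembled, mirroring the loop above -- two unsigned
   part extractions OR'd together, then two shifts for the sign extension.
   Little-endian bit numbering; assumes <stdint.h>, 0 < bitpos < 32,
   bitsize <= 32, bitpos + bitsize > 32, and the usual two's complement
   arithmetic right shift.  The helper name is hypothetical.  */
#if 0
static uint32_t
example_extract_split (uint32_t lo_word, uint32_t hi_word,
                       int bitpos, int bitsize, int unsignedp)
{
  int first = 32 - bitpos;                 /* bits taken from LO_WORD */
  uint32_t part0 = lo_word >> bitpos;      /* low part, already in place */
  uint32_t part1 = hi_word & (((uint32_t) 1 << (bitsize - first)) - 1);
  uint32_t result = part0 | (part1 << first);
  if (unsignedp)
    return result;
  /* Signed: shift the field's msb up to bit 31, then arithmetic-shift back.  */
  return (uint32_t) ((int32_t) (result << (32 - bitsize)) >> (32 - bitsize));
}
#endif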
1873 /* Add INC into TARGET. */
1875 void
1876 expand_inc (target, inc)
1877 rtx target, inc;
1879 rtx value = expand_binop (GET_MODE (target), add_optab,
1880 target, inc,
1881 target, 0, OPTAB_LIB_WIDEN);
1882 if (value != target)
1883 emit_move_insn (target, value);
1886 /* Subtract DEC from TARGET. */
1888 void
1889 expand_dec (target, dec)
1890 rtx target, dec;
1892 rtx value = expand_binop (GET_MODE (target), sub_optab,
1893 target, dec,
1894 target, 0, OPTAB_LIB_WIDEN);
1895 if (value != target)
1896 emit_move_insn (target, value);
1899 /* Output a shift instruction for expression code CODE,
1900 with SHIFTED being the rtx for the value to shift,
1901 and AMOUNT the tree for the amount to shift by.
1902 Store the result in the rtx TARGET, if that is convenient.
1903 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1904 Return the rtx for where the value is. */
1906 rtx
1907 expand_shift (code, mode, shifted, amount, target, unsignedp)
1908 enum tree_code code;
1909 enum machine_mode mode;
1910 rtx shifted;
1911 tree amount;
1912 rtx target;
1913 int unsignedp;
1915 rtx op1, temp = 0;
1916 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1917 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1918 int try;
1920 /* We used to detect shift counts computed by NEGATE_EXPR
1921 and shift in the other direction, but that does not work
1922 on all machines. */
1924 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1926 #ifdef SHIFT_COUNT_TRUNCATED
1927 if (SHIFT_COUNT_TRUNCATED)
1929 if (GET_CODE (op1) == CONST_INT
1930 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1931 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1932 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1933 % GET_MODE_BITSIZE (mode));
1934 else if (GET_CODE (op1) == SUBREG
1935 && subreg_lowpart_p (op1))
1936 op1 = SUBREG_REG (op1);
1938 #endif
1940 if (op1 == const0_rtx)
1941 return shifted;
1943 for (try = 0; temp == 0 && try < 3; try++)
1945 enum optab_methods methods;
1947 if (try == 0)
1948 methods = OPTAB_DIRECT;
1949 else if (try == 1)
1950 methods = OPTAB_WIDEN;
1951 else
1952 methods = OPTAB_LIB_WIDEN;
1954 if (rotate)
1956 /* Widening does not work for rotation. */
1957 if (methods == OPTAB_WIDEN)
1958 continue;
1959 else if (methods == OPTAB_LIB_WIDEN)
1961 /* If we have been unable to open-code this by a rotation,
1962 do it as the IOR of two shifts. I.e., to rotate A
1963 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1964 where C is the bitsize of A.
1966 It is theoretically possible that the target machine might
1967 not be able to perform either shift and hence we would
1968 be making two libcalls rather than just the one for the
1969 shift (similarly if IOR could not be done). We will allow
1970 this extremely unlikely lossage to avoid complicating the
1971 code below. */
1973 rtx subtarget = target == shifted ? 0 : target;
1974 rtx temp1;
1975 tree type = TREE_TYPE (amount);
1976 tree new_amount = make_tree (type, op1);
1977 tree other_amount
1978 = fold (build (MINUS_EXPR, type,
1979 convert (type,
1980 build_int_2 (GET_MODE_BITSIZE (mode),
1981 0)),
1982 amount));
1984 shifted = force_reg (mode, shifted);
1986 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1987 mode, shifted, new_amount, subtarget, 1);
1988 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1989 mode, shifted, other_amount, 0, 1);
1990 return expand_binop (mode, ior_optab, temp, temp1, target,
1991 unsignedp, methods);
1994 temp = expand_binop (mode,
1995 left ? rotl_optab : rotr_optab,
1996 shifted, op1, target, unsignedp, methods);
1998 /* If we don't have the rotate, but we are rotating by a constant
1999 that is in range, try a rotate in the opposite direction. */
2001 if (temp == 0 && GET_CODE (op1) == CONST_INT
2002 && INTVAL (op1) > 0
2003 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2004 temp = expand_binop (mode,
2005 left ? rotr_optab : rotl_optab,
2006 shifted,
2007 GEN_INT (GET_MODE_BITSIZE (mode)
2008 - INTVAL (op1)),
2009 target, unsignedp, methods);
2011 else if (unsignedp)
2012 temp = expand_binop (mode,
2013 left ? ashl_optab : lshr_optab,
2014 shifted, op1, target, unsignedp, methods);
2016 /* Do arithmetic shifts.
2017 Also, if we are going to widen the operand, we can just as well
2018 use an arithmetic right-shift instead of a logical one. */
2019 if (temp == 0 && ! rotate
2020 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2022 enum optab_methods methods1 = methods;
2024 /* If trying to widen a log shift to an arithmetic shift,
2025 don't accept an arithmetic shift of the same size. */
2026 if (unsignedp)
2027 methods1 = OPTAB_MUST_WIDEN;
2029 /* Arithmetic shift */
2031 temp = expand_binop (mode,
2032 left ? ashl_optab : ashr_optab,
2033 shifted, op1, target, unsignedp, methods1);
2036 /* We used to try extzv here for logical right shifts, but that was
2037 only useful for one machine, the VAX, and caused poor code
2038 generation there for lshrdi3, so the code was deleted and a
2039 define_expand for lshrsi3 was added to vax.md. */
2042 if (temp == 0)
2043 abort ();
2044 return temp;
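/* Illustrative sketch, not GCC code: the rotate fallback above computes
   (A << N) | ((unsigned) A >> (C - N)), where C is the bit width of A.
   The helper below is hypothetical; assumes <stdint.h> and 0 < n < 32.  */
#if 0
static uint32_t
example_rotate_left (uint32_t a, int n)
{
  return (a << n) | (a >> (32 - n));
}
/* example_rotate_left (0x80000001, 1) == 0x00000003.  */
#endif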
2047 enum alg_code { alg_zero, alg_m, alg_shift,
2048 alg_add_t_m2, alg_sub_t_m2,
2049 alg_add_factor, alg_sub_factor,
2050 alg_add_t2_m, alg_sub_t2_m,
2051 alg_add, alg_subtract, alg_factor, alg_shiftop };
2053 /* This structure records a sequence of operations.
2054 `ops' is the number of operations recorded.
2055 `cost' is their total cost.
2056 The operations are stored in `op' and the corresponding
2057 logarithms of the integer coefficients in `log'.
2059 These are the operations:
2060 alg_zero total := 0;
2061 alg_m total := multiplicand;
2062 alg_shift total := total * coeff
2063 alg_add_t_m2 total := total + multiplicand * coeff;
2064 alg_sub_t_m2 total := total - multiplicand * coeff;
2065 alg_add_factor total := total * coeff + total;
2066 alg_sub_factor total := total * coeff - total;
2067 alg_add_t2_m total := total * coeff + multiplicand;
2068 alg_sub_t2_m total := total * coeff - multiplicand;
2070 The first operand must be either alg_zero or alg_m. */
2072 struct algorithm
2074 short cost;
2075 short ops;
2076 /* The sizes of the OP and LOG fields are not directly related to the
2077 word size, but the worst-case algorithms arise when we have few
2078 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2079 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2080 in total wordsize operations. */
2081 enum alg_code op[MAX_BITS_PER_WORD];
2082 char log[MAX_BITS_PER_WORD];
2085 static void synth_mult PARAMS ((struct algorithm *,
2086 unsigned HOST_WIDE_INT,
2087 int));
2088 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2089 int, int,
2090 unsigned HOST_WIDE_INT *,
2091 int *, int *));
2092 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2093 int));
2094 /* Compute and return the best algorithm for multiplying by T.
2095 The algorithm must cost less than COST_LIMIT.
2096 If retval.cost >= COST_LIMIT, no algorithm was found and all
2097 other fields of the returned struct are undefined. */
2099 static void
2100 synth_mult (alg_out, t, cost_limit)
2101 struct algorithm *alg_out;
2102 unsigned HOST_WIDE_INT t;
2103 int cost_limit;
2105 int m;
2106 struct algorithm *alg_in, *best_alg;
2107 int cost;
2108 unsigned HOST_WIDE_INT q;
2110 /* Indicate that no algorithm is yet found. If no algorithm
2111 is found, this value will be returned and will indicate failure. */
2112 alg_out->cost = cost_limit;
2114 if (cost_limit <= 0)
2115 return;
2117 /* t == 1 can be done in zero cost. */
2118 if (t == 1)
2120 alg_out->ops = 1;
2121 alg_out->cost = 0;
2122 alg_out->op[0] = alg_m;
2123 return;
2126 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2127 fail now. */
2128 if (t == 0)
2130 if (zero_cost >= cost_limit)
2131 return;
2132 else
2134 alg_out->ops = 1;
2135 alg_out->cost = zero_cost;
2136 alg_out->op[0] = alg_zero;
2137 return;
2141 /* We'll be needing a couple extra algorithm structures now. */
2143 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2144 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2146 /* If we have a group of zero bits at the low-order part of T, try
2147 multiplying by the remaining bits and then doing a shift. */
2149 if ((t & 1) == 0)
2151 m = floor_log2 (t & -t); /* m = number of low zero bits */
2152 if (m < BITS_PER_WORD)
2154 q = t >> m;
2155 cost = shift_cost[m];
2156 synth_mult (alg_in, q, cost_limit - cost);
2158 cost += alg_in->cost;
2159 if (cost < cost_limit)
2161 struct algorithm *x;
2162 x = alg_in, alg_in = best_alg, best_alg = x;
2163 best_alg->log[best_alg->ops] = m;
2164 best_alg->op[best_alg->ops] = alg_shift;
2165 cost_limit = cost;
2170 /* If we have an odd number, add or subtract one. */
2171 if ((t & 1) != 0)
2173 unsigned HOST_WIDE_INT w;
2175 for (w = 1; (w & t) != 0; w <<= 1)
2177 /* If T was -1, then W will be zero after the loop. This is another
2178 case where T ends with ...111. Handling it by multiplying by (T + 1)
2179 and subtracting 1 produces slightly better code and makes algorithm
2180 selection much faster than treating it like the ...0111 case
2181 below. */
2182 if (w == 0
2183 || (w > 2
2184 /* Reject the case where t is 3.
2185 Thus we prefer addition in that case. */
2186 && t != 3))
2188 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2190 cost = add_cost;
2191 synth_mult (alg_in, t + 1, cost_limit - cost);
2193 cost += alg_in->cost;
2194 if (cost < cost_limit)
2196 struct algorithm *x;
2197 x = alg_in, alg_in = best_alg, best_alg = x;
2198 best_alg->log[best_alg->ops] = 0;
2199 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2200 cost_limit = cost;
2203 else
2205 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2207 cost = add_cost;
2208 synth_mult (alg_in, t - 1, cost_limit - cost);
2210 cost += alg_in->cost;
2211 if (cost < cost_limit)
2213 struct algorithm *x;
2214 x = alg_in, alg_in = best_alg, best_alg = x;
2215 best_alg->log[best_alg->ops] = 0;
2216 best_alg->op[best_alg->ops] = alg_add_t_m2;
2217 cost_limit = cost;
2222 /* Look for factors of t of the form
2223 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2224 If we find such a factor, we can multiply by t using an algorithm that
2225 multiplies by q, shifts the result by m, and adds/subtracts it to/from itself.
2227 We search for large factors first and loop down, even if large factors
2228 are less probable than small; if we find a large factor we will find a
2229 good sequence quickly, and therefore be able to prune (by decreasing
2230 COST_LIMIT) the search. */
2232 for (m = floor_log2 (t - 1); m >= 2; m--)
2234 unsigned HOST_WIDE_INT d;
2236 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2237 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2239 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2240 synth_mult (alg_in, t / d, cost_limit - cost);
2242 cost += alg_in->cost;
2243 if (cost < cost_limit)
2245 struct algorithm *x;
2246 x = alg_in, alg_in = best_alg, best_alg = x;
2247 best_alg->log[best_alg->ops] = m;
2248 best_alg->op[best_alg->ops] = alg_add_factor;
2249 cost_limit = cost;
2251 /* Other factors will have been taken care of in the recursion. */
2252 break;
2255 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2256 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2258 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2259 synth_mult (alg_in, t / d, cost_limit - cost);
2261 cost += alg_in->cost;
2262 if (cost < cost_limit)
2264 struct algorithm *x;
2265 x = alg_in, alg_in = best_alg, best_alg = x;
2266 best_alg->log[best_alg->ops] = m;
2267 best_alg->op[best_alg->ops] = alg_sub_factor;
2268 cost_limit = cost;
2270 break;
2274 /* Try shift-and-add (load effective address) instructions,
2275 i.e. do a*3, a*5, a*9. */
2276 if ((t & 1) != 0)
2278 q = t - 1;
2279 q = q & -q;
2280 m = exact_log2 (q);
2281 if (m >= 0 && m < BITS_PER_WORD)
2283 cost = shiftadd_cost[m];
2284 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2286 cost += alg_in->cost;
2287 if (cost < cost_limit)
2289 struct algorithm *x;
2290 x = alg_in, alg_in = best_alg, best_alg = x;
2291 best_alg->log[best_alg->ops] = m;
2292 best_alg->op[best_alg->ops] = alg_add_t2_m;
2293 cost_limit = cost;
2297 q = t + 1;
2298 q = q & -q;
2299 m = exact_log2 (q);
2300 if (m >= 0 && m < BITS_PER_WORD)
2302 cost = shiftsub_cost[m];
2303 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2305 cost += alg_in->cost;
2306 if (cost < cost_limit)
2308 struct algorithm *x;
2309 x = alg_in, alg_in = best_alg, best_alg = x;
2310 best_alg->log[best_alg->ops] = m;
2311 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2312 cost_limit = cost;
2317 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2318 we have not found any algorithm. */
2319 if (cost_limit == alg_out->cost)
2320 return;
2322 /* If the sequence is too long for `struct algorithm'
2323 to record, make this search fail. */
2324 if (best_alg->ops == MAX_BITS_PER_WORD)
2325 return;
2327 /* Copy the algorithm from temporary space to the space at alg_out.
2328 We avoid using structure assignment because the majority of
2329 best_alg is normally undefined, and this is a critical function. */
2330 alg_out->ops = best_alg->ops + 1;
2331 alg_out->cost = cost_limit;
2332 memcpy (alg_out->op, best_alg->op,
2333 alg_out->ops * sizeof *alg_out->op);
2334 memcpy (alg_out->log, best_alg->log,
2335 alg_out->ops * sizeof *alg_out->log);
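/* Illustrative sketch, not GCC code: one decomposition of the kind
   synth_mult searches for.  Multiplying by 45 can be done with two
   shift-add steps instead of a multiply insn:
     t = (x << 2) + x;     x * 5    (alg_add_t2_m, log = 2)
     t = (t << 3) + t;     x * 45   (alg_add_factor, log = 3)
   Whether synth_mult actually picks this depends on the cost arrays set
   up in init_expmed.  The helper name below is hypothetical.  */
#if 0
static unsigned int
example_mult_by_45 (unsigned int x)
{
  unsigned int t = (x << 2) + x;   /* x * 5 */
  return (t << 3) + t;             /* x * 45 */
}
#endif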
2338 /* Perform a multiplication and return an rtx for the result.
2339 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2340 TARGET is a suggestion for where to store the result (an rtx).
2342 We check specially for a constant integer as OP1.
2343 If you want this check for OP0 as well, then before calling
2344 you should swap the two operands if OP0 would be constant. */
2346 rtx
2347 expand_mult (mode, op0, op1, target, unsignedp)
2348 enum machine_mode mode;
2349 rtx op0, op1, target;
2350 int unsignedp;
2352 rtx const_op1 = op1;
2354 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2355 less than or equal in size to `unsigned int' this doesn't matter.
2356 If the mode is larger than `unsigned int', then synth_mult works only
2357 if the constant value exactly fits in an `unsigned int' without any
2358 truncation. This means that multiplying by negative values does
2359 not work; results are off by 2^32 on a 32 bit machine. */
2361 /* If we are multiplying in DImode, it may still be a win
2362 to try to work with shifts and adds. */
2363 if (GET_CODE (op1) == CONST_DOUBLE
2364 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2365 && HOST_BITS_PER_INT >= BITS_PER_WORD
2366 && CONST_DOUBLE_HIGH (op1) == 0)
2367 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2368 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2369 && GET_CODE (op1) == CONST_INT
2370 && INTVAL (op1) < 0)
2371 const_op1 = 0;
2373 /* We used to test optimize here, on the grounds that it's better to
2374 produce a smaller program when -O is not used.
2375 But this causes such a terrible slowdown sometimes
2376 that it seems better to use synth_mult always. */
2378 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2379 && (unsignedp || ! flag_trapv))
2381 struct algorithm alg;
2382 struct algorithm alg2;
2383 HOST_WIDE_INT val = INTVAL (op1);
2384 HOST_WIDE_INT val_so_far;
2385 rtx insn;
2386 int mult_cost;
2387 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2389 /* op0 must be a register to make mult_cost match the precomputed
2390 shiftadd_cost array. */
2391 op0 = force_reg (mode, op0);
2393 /* Try to do the computation three ways: multiply by the negative of OP1
2394 and then negate, do the multiplication directly, or do multiplication
2395 by OP1 - 1. */
2397 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2398 mult_cost = MIN (12 * add_cost, mult_cost);
2400 synth_mult (&alg, val, mult_cost);
2402 /* This works only if the inverted value actually fits in an
2403 `unsigned int'. */
2404 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2406 synth_mult (&alg2, - val,
2407 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2408 if (alg2.cost + negate_cost < alg.cost)
2409 alg = alg2, variant = negate_variant;
2412 /* This proves very useful for division-by-constant. */
2413 synth_mult (&alg2, val - 1,
2414 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2415 if (alg2.cost + add_cost < alg.cost)
2416 alg = alg2, variant = add_variant;
2418 if (alg.cost < mult_cost)
2420 /* We found something cheaper than a multiply insn. */
2421 int opno;
2422 rtx accum, tem;
2423 enum machine_mode nmode;
2425 op0 = protect_from_queue (op0, 0);
2427 /* Avoid referencing memory over and over.
2428 For speed, but also for correctness when mem is volatile. */
2429 if (GET_CODE (op0) == MEM)
2430 op0 = force_reg (mode, op0);
2432 /* ACCUM starts out either as OP0 or as a zero, depending on
2433 the first operation. */
2435 if (alg.op[0] == alg_zero)
2437 accum = copy_to_mode_reg (mode, const0_rtx);
2438 val_so_far = 0;
2440 else if (alg.op[0] == alg_m)
2442 accum = copy_to_mode_reg (mode, op0);
2443 val_so_far = 1;
2445 else
2446 abort ();
2448 for (opno = 1; opno < alg.ops; opno++)
2450 int log = alg.log[opno];
2451 int preserve = preserve_subexpressions_p ();
2452 rtx shift_subtarget = preserve ? 0 : accum;
2453 rtx add_target
2454 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2455 && ! preserve)
2456 ? target : 0;
2457 rtx accum_target = preserve ? 0 : accum;
2459 switch (alg.op[opno])
2461 case alg_shift:
2462 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2463 build_int_2 (log, 0), NULL_RTX, 0);
2464 val_so_far <<= log;
2465 break;
2467 case alg_add_t_m2:
2468 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2469 build_int_2 (log, 0), NULL_RTX, 0);
2470 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2471 add_target
2472 ? add_target : accum_target);
2473 val_so_far += (HOST_WIDE_INT) 1 << log;
2474 break;
2476 case alg_sub_t_m2:
2477 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2478 build_int_2 (log, 0), NULL_RTX, 0);
2479 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2480 add_target
2481 ? add_target : accum_target);
2482 val_so_far -= (HOST_WIDE_INT) 1 << log;
2483 break;
2485 case alg_add_t2_m:
2486 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2487 build_int_2 (log, 0), shift_subtarget,
2488 0);
2489 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2490 add_target
2491 ? add_target : accum_target);
2492 val_so_far = (val_so_far << log) + 1;
2493 break;
2495 case alg_sub_t2_m:
2496 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2497 build_int_2 (log, 0), shift_subtarget,
2498 0);
2499 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2500 add_target
2501 ? add_target : accum_target);
2502 val_so_far = (val_so_far << log) - 1;
2503 break;
2505 case alg_add_factor:
2506 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2507 build_int_2 (log, 0), NULL_RTX, 0);
2508 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2509 add_target
2510 ? add_target : accum_target);
2511 val_so_far += val_so_far << log;
2512 break;
2514 case alg_sub_factor:
2515 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2516 build_int_2 (log, 0), NULL_RTX, 0);
2517 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2518 (add_target ? add_target
2519 : preserve ? 0 : tem));
2520 val_so_far = (val_so_far << log) - val_so_far;
2521 break;
2523 default:
2524 abort ();
2527 /* Write a REG_EQUAL note on the last insn so that we can cse
2528 multiplication sequences. Note that if ACCUM is a SUBREG,
2529 we've set the inner register and must properly indicate
2530 that. */
2532 tem = op0, nmode = mode;
2533 if (GET_CODE (accum) == SUBREG)
2535 nmode = GET_MODE (SUBREG_REG (accum));
2536 tem = gen_lowpart (nmode, op0);
2539 insn = get_last_insn ();
2540 set_unique_reg_note (insn,
2541 REG_EQUAL,
2542 gen_rtx_MULT (nmode, tem,
2543 GEN_INT (val_so_far)));
2546 if (variant == negate_variant)
2548 val_so_far = - val_so_far;
2549 accum = expand_unop (mode, neg_optab, accum, target, 0);
2551 else if (variant == add_variant)
2553 val_so_far = val_so_far + 1;
2554 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2557 if (val != val_so_far)
2558 abort ();
2560 return accum;
2564 /* This used to use umul_optab if unsigned, but for non-widening multiply
2565 there is no difference between signed and unsigned. */
2566 op0 = expand_binop (mode,
2567 ! unsignedp
2568 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2569 ? smulv_optab : smul_optab,
2570 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2571 if (op0 == 0)
2572 abort ();
2573 return op0;
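/* Illustrative sketch, not GCC code: the two variants tried above.
   negate_variant multiplies by -OP1 with shift-adds and then negates;
   add_variant multiplies by OP1 - 1 and then adds the multiplicand.
   The helper names are hypothetical.  */
#if 0
static int
example_mult_by_minus_3 (int x)     /* negate_variant */
{
  return -((x << 1) + x);           /* -(x * 3) */
}

static int
example_mult_by_6 (int x)           /* add_variant */
{
  int t = (x << 2) + x;             /* x * 5  ==  x * (6 - 1) */
  return t + x;                     /* x * 6 */
}
#endif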
2576 /* Return the smallest n such that 2**n >= X. */
2578 int
2579 ceil_log2 (x)
2580 unsigned HOST_WIDE_INT x;
2582 return floor_log2 (x - 1) + 1;
2585 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2586 replace division by D, and put the least significant N bits of the result
2587 in *MULTIPLIER_PTR and return the most significant bit.
2589 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2590 needed precision is in PRECISION (should be <= N).
2592 PRECISION should be as small as possible so this function can choose
2593 the multiplier more freely.
2595 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2596 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2598 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2599 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2601 static
2602 unsigned HOST_WIDE_INT
2603 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2604 unsigned HOST_WIDE_INT d;
2605 int n;
2606 int precision;
2607 unsigned HOST_WIDE_INT *multiplier_ptr;
2608 int *post_shift_ptr;
2609 int *lgup_ptr;
2611 HOST_WIDE_INT mhigh_hi, mlow_hi;
2612 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2613 int lgup, post_shift;
2614 int pow, pow2;
2615 unsigned HOST_WIDE_INT nl, dummy1;
2616 HOST_WIDE_INT nh, dummy2;
2618 /* lgup = ceil(log2(divisor)); */
2619 lgup = ceil_log2 (d);
2621 if (lgup > n)
2622 abort ();
2624 pow = n + lgup;
2625 pow2 = n + lgup - precision;
2627 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2629 /* We could handle this with some effort, but this case is much better
2630 handled directly with a scc insn, so rely on the caller using that. */
2631 abort ();
2634 /* mlow = 2^(N + lgup)/d */
2635 if (pow >= HOST_BITS_PER_WIDE_INT)
2637 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2638 nl = 0;
2640 else
2642 nh = 0;
2643 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2645 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2646 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2648 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2649 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2650 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2651 else
2652 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2653 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2654 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2656 if (mhigh_hi && nh - d >= d)
2657 abort ();
2658 if (mhigh_hi > 1 || mlow_hi > 1)
2659 abort ();
2660 /* assert that mlow < mhigh. */
2661 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2662 abort ();
2664 /* If precision == N, then mlow, mhigh exceed 2^N
2665 (but they do not exceed 2^(N+1)). */
2667 /* Reduce to lowest terms */
2668 for (post_shift = lgup; post_shift > 0; post_shift--)
2670 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2671 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2672 if (ml_lo >= mh_lo)
2673 break;
2675 mlow_hi = 0;
2676 mlow_lo = ml_lo;
2677 mhigh_hi = 0;
2678 mhigh_lo = mh_lo;
2681 *post_shift_ptr = post_shift;
2682 *lgup_ptr = lgup;
2683 if (n < HOST_BITS_PER_WIDE_INT)
2685 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2686 *multiplier_ptr = mhigh_lo & mask;
2687 return mhigh_lo >= mask;
2689 else
2691 *multiplier_ptr = mhigh_lo;
2692 return mhigh_hi;
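/* Illustrative sketch, not GCC code: the kind of sequence the multiplier
   chosen above enables.  For a 32-bit unsigned divide by 3 the multiplier
   is 0xAAAAAAAB (= ceil(2^33 / 3)), so the divide becomes a widening
   multiply plus a right shift of the 64-bit product by 33.  The helper
   name is hypothetical; assumes <stdint.h>.  */
#if 0
static uint32_t
example_udiv_by_3 (uint32_t x)
{
  return (uint32_t) (((uint64_t) x * 0xAAAAAAABu) >> 33);
}
#endif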
2696 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2697 congruent to 1 (mod 2**N). */
2699 static unsigned HOST_WIDE_INT
2700 invert_mod2n (x, n)
2701 unsigned HOST_WIDE_INT x;
2702 int n;
2704 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2706 /* The algorithm notes that the choice y = x satisfies
2707 x*y == 1 mod 2^3, since x is assumed odd.
2708 Each iteration doubles the number of bits of significance in y. */
2710 unsigned HOST_WIDE_INT mask;
2711 unsigned HOST_WIDE_INT y = x;
2712 int nbit = 3;
2714 mask = (n == HOST_BITS_PER_WIDE_INT
2715 ? ~(unsigned HOST_WIDE_INT) 0
2716 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2718 while (nbit < n)
2720 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2721 nbit *= 2;
2723 return y;
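/* Illustrative sketch, not GCC code: invert_mod2n as plain 32-bit host
   arithmetic, plus the exact-division use it enables.  The inverse of 3
   mod 2^32 is 0xAAAAAAAB, since 3 * 0xAAAAAAAB == 2^33 + 1 == 1 (mod 2^32),
   so dividing a known multiple of 3 reduces to one multiply.  The helper
   name is hypothetical; assumes <stdint.h>.  */
#if 0
static uint32_t
example_invert_mod2_32 (uint32_t x)        /* x must be odd */
{
  uint32_t y = x;                          /* correct to 3 low bits */
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);                   /* each pass doubles the correct bits */
  return y;
}
/* example_invert_mod2_32 (3) == 0xAAAAAAAB;
   (uint32_t) (27 * 0xAAAAAAABu) == 9, i.e. an exact divide by 3.  */
#endif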
2726 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2727 performed with the wrong signedness. ADJ_OPERAND is already the high half
2728 of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2729 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2730 become signed.
2732 The result is put in TARGET if that is convenient.
2734 MODE is the mode of operation. */
2736 rtx
2737 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2738 enum machine_mode mode;
2739 rtx adj_operand, op0, op1, target;
2740 int unsignedp;
2742 rtx tem;
2743 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2745 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2746 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2747 NULL_RTX, 0);
2748 tem = expand_and (mode, tem, op1, NULL_RTX);
2749 adj_operand
2750 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2751 adj_operand);
2753 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2754 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2755 NULL_RTX, 0);
2756 tem = expand_and (mode, tem, op0, NULL_RTX);
2757 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2758 target);
2760 return target;
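/* Illustrative sketch, not GCC code: the identity applied above, shown for
   16-bit operands using 32-bit host arithmetic.  Given the high half of the
   signed product, the unsigned high half is obtained by adding each operand
   wherever the other is negative (the subtracting flavor goes the other way).
   Assumes <stdint.h>, two's complement conversions, and arithmetic right
   shift of negative values; the helper name is hypothetical.  */
#if 0
static uint16_t
example_umulhi_from_smulhi (uint16_t x, uint16_t y)
{
  int32_t sprod = (int32_t) (int16_t) x * (int16_t) y;
  uint16_t adj = (uint16_t) (sprod >> 16);      /* signed high half */
  if ((int16_t) x < 0)
    adj += y;                                   /* tem = (x >> 15) & y */
  if ((int16_t) y < 0)
    adj += x;                                   /* tem = (y >> 15) & x */
  return adj;           /* == (uint16_t) (((uint32_t) x * y) >> 16) */
}
#endif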
2763 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2764 in TARGET if that is convenient, and return where the result is. If the
2765 operation cannot be performed, 0 is returned.
2767 MODE is the mode of operation and result.
2769 UNSIGNEDP nonzero means unsigned multiply.
2771 MAX_COST is the total allowed cost for the expanded RTL. */
2773 rtx
2774 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2775 enum machine_mode mode;
2776 rtx op0, target;
2777 unsigned HOST_WIDE_INT cnst1;
2778 int unsignedp;
2779 int max_cost;
2781 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2782 optab mul_highpart_optab;
2783 optab moptab;
2784 rtx tem;
2785 int size = GET_MODE_BITSIZE (mode);
2786 rtx op1, wide_op1;
2788 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2789 if (size > HOST_BITS_PER_WIDE_INT)
2790 abort ();
2792 op1 = gen_int_mode (cnst1, mode);
2794 wide_op1
2795 = immed_double_const (cnst1,
2796 (unsignedp
2797 ? (HOST_WIDE_INT) 0
2798 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2799 wider_mode);
2801 /* expand_mult handles constant multiplication of word_mode
2802 or narrower. It does a poor job for large modes. */
2803 if (size < BITS_PER_WORD
2804 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2806 /* We have to do this, since expand_binop doesn't do conversion for
2807 multiply. Maybe change expand_binop to handle widening multiply? */
2808 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2810 /* We know that this can't have signed overflow, so pretend this is
2811 an unsigned multiply. */
2812 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2813 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2814 build_int_2 (size, 0), NULL_RTX, 1);
2815 return convert_modes (mode, wider_mode, tem, unsignedp);
2818 if (target == 0)
2819 target = gen_reg_rtx (mode);
2821 /* First, try using a multiplication insn that generates only the needed
2822 high part of the product, in the signedness given by UNSIGNEDP. */
2823 if (mul_highpart_cost[(int) mode] < max_cost)
2825 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2826 target = expand_binop (mode, mul_highpart_optab,
2827 op0, op1, target, unsignedp, OPTAB_DIRECT);
2828 if (target)
2829 return target;
2832 /* Second, same as above, but use the signedness opposite of UNSIGNEDP.
2833 We need to adjust the result after the multiplication. */
2834 if (size - 1 < BITS_PER_WORD
2835 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2836 < max_cost))
2838 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2839 target = expand_binop (mode, mul_highpart_optab,
2840 op0, op1, target, unsignedp, OPTAB_DIRECT);
2841 if (target)
2842 /* We used the wrong signedness. Adjust the result. */
2843 return expand_mult_highpart_adjust (mode, target, op0,
2844 op1, target, unsignedp);
2847 /* Try widening multiplication. */
2848 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2849 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2850 && mul_widen_cost[(int) wider_mode] < max_cost)
2852 op1 = force_reg (mode, op1);
2853 goto try;
2856 /* Try widening the mode and perform a non-widening multiplication. */
2857 moptab = smul_optab;
2858 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2859 && size - 1 < BITS_PER_WORD
2860 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2862 op1 = wide_op1;
2863 goto try;
2866 /* Try widening multiplication of opposite signedness, and adjust. */
2867 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2868 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2869 && size - 1 < BITS_PER_WORD
2870 && (mul_widen_cost[(int) wider_mode]
2871 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2873 rtx regop1 = force_reg (mode, op1);
2874 tem = expand_binop (wider_mode, moptab, op0, regop1,
2875 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2876 if (tem != 0)
2878 /* Extract the high half of the just generated product. */
2879 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2880 build_int_2 (size, 0), NULL_RTX, 1);
2881 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2882 /* We used the wrong signedness. Adjust the result. */
2883 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2884 target, unsignedp);
2888 return 0;
2890 try:
2891 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2892 tem = expand_binop (wider_mode, moptab, op0, op1,
2893 NULL_RTX, unsignedp, OPTAB_WIDEN);
2894 if (tem == 0)
2895 return 0;
2897 /* Extract the high half of the just generated product. */
2898 if (mode == word_mode)
2900 return gen_highpart (mode, tem);
2902 else
2904 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2905 build_int_2 (size, 0), NULL_RTX, 1);
2906 return convert_modes (mode, wider_mode, tem, unsignedp);
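/* Illustrative sketch, not GCC code: the widening-multiply fallback used
   above, in plain host arithmetic -- multiply in a mode twice as wide and
   keep the upper half.  The helper name is hypothetical; assumes
   <stdint.h>.  */
#if 0
static uint32_t
example_mulhi_u32 (uint32_t a, uint32_t b)
{
  return (uint32_t) (((uint64_t) a * b) >> 32);
}
#endif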
2910 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2911 if that is convenient, and returning where the result is.
2912 You may request either the quotient or the remainder as the result;
2913 specify REM_FLAG nonzero to get the remainder.
2915 CODE is the expression code for which kind of division this is;
2916 it controls how rounding is done. MODE is the machine mode to use.
2917 UNSIGNEDP nonzero means do unsigned division. */
2919 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2920 and then correct it by or'ing in missing high bits
2921 if result of ANDI is nonzero.
2922 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2923 This could optimize to a bfexts instruction.
2924 But C doesn't use these operations, so their optimizations are
2925 left for later. */
2926 /* ??? For modulo, we don't actually need the highpart of the first product,
2927 the low part will do nicely. And for small divisors, the second multiply
2928 can also be a low-part only multiply or even be completely left out.
2929 E.g. to calculate the remainder of a division by 3 with a 32 bit
2930 multiply, multiply with 0x55555556 and extract the upper two bits;
2931 the result is exact for inputs up to 0x1fffffff.
2932 The input range can be reduced by using cross-sum rules.
2933 For odd divisors >= 3, the following table gives right shift counts
2934 so that if a number is shifted by an integer multiple of the given
2935 amount, the remainder stays the same:
2936 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2937 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2938 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2939 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2940 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2942 Cross-sum rules for even numbers can be derived by leaving as many bits
2943 to the right alone as the divisor has zeros to the right.
2944 E.g. if x is an unsigned 32 bit number:
2945 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2946 */
2948 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
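/* Illustrative sketch, not GCC code: the mod-3 trick from the comment
   above.  Multiplying by 0x55555556 (about 2^32 / 3) puts x/3 in the high
   word; the top two bits of the low word are the scaled fraction, which
   equals x mod 3 for inputs up to 0x1fffffff.  The helper name is
   hypothetical; assumes <stdint.h>.  */
#if 0
static unsigned int
example_mod_3 (uint32_t x)          /* valid for x <= 0x1fffffff */
{
  uint64_t prod = (uint64_t) x * 0x55555556u;
  return (uint32_t) prod >> 30;     /* top two bits of the low word */
}
/* example_mod_3 (5) == 2; example_mod_3 (0x1fffffff) == 1.  */
#endif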
2950 rtx
2951 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2952 int rem_flag;
2953 enum tree_code code;
2954 enum machine_mode mode;
2955 rtx op0, op1, target;
2956 int unsignedp;
2958 enum machine_mode compute_mode;
2959 rtx tquotient;
2960 rtx quotient = 0, remainder = 0;
2961 rtx last;
2962 int size;
2963 rtx insn, set;
2964 optab optab1, optab2;
2965 int op1_is_constant, op1_is_pow2;
2966 int max_cost, extra_cost;
2967 static HOST_WIDE_INT last_div_const = 0;
2969 op1_is_constant = GET_CODE (op1) == CONST_INT;
2970 op1_is_pow2 = (op1_is_constant
2971 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2972 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2974 /*
2975 This is the structure of expand_divmod:
2977 First comes code to fix up the operands so we can perform the operations
2978 correctly and efficiently.
2980 Second comes a switch statement with code specific for each rounding mode.
2981 For some special operands this code emits all RTL for the desired
2982 operation, for other cases, it generates only a quotient and stores it in
2983 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2984 to indicate that it has not done anything.
2986 Last comes code that finishes the operation. If QUOTIENT is set and
2987 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2988 QUOTIENT is not set, it is computed using trunc rounding.
2990 We try to generate special code for division and remainder when OP1 is a
2991 constant. If |OP1| = 2**n we can use shifts and some other fast
2992 operations. For other values of OP1, we compute a carefully selected
2993 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2994 by m.
2996 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2997 half of the product. Different strategies for generating the product are
2998 implemented in expand_mult_highpart.
3000 If what we actually want is the remainder, we generate that by another
3001 by-constant multiplication and a subtraction. */
3003 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3004 code below will malfunction if we are, so check here and handle
3005 the special case if so. */
3006 if (op1 == const1_rtx)
3007 return rem_flag ? const0_rtx : op0;
3009 /* When dividing by -1, we could get an overflow.
3010 negv_optab can handle overflows. */
3011 if (! unsignedp && op1 == constm1_rtx)
3013 if (rem_flag)
3014 return const0_rtx;
3015 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3016 ? negv_optab : neg_optab, op0, target, 0);
3019 if (target
3020 /* Don't use the function value register as a target
3021 since we have to read it as well as write it,
3022 and function-inlining gets confused by this. */
3023 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3024 /* Don't clobber an operand while doing a multi-step calculation. */
3025 || ((rem_flag || op1_is_constant)
3026 && (reg_mentioned_p (target, op0)
3027 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3028 || reg_mentioned_p (target, op1)
3029 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3030 target = 0;
3032 /* Get the mode in which to perform this computation. Normally it will
3033 be MODE, but sometimes we can't do the desired operation in MODE.
3034 If so, pick a wider mode in which we can do the operation. Convert
3035 to that mode at the start to avoid repeated conversions.
3037 First see what operations we need. These depend on the expression
3038 we are evaluating. (We assume that divxx3 insns exist under the
3039 same conditions that modxx3 insns and that these insns don't normally
3040 fail. If these assumptions are not correct, we may generate less
3041 efficient code in some cases.)
3043 Then see if we find a mode in which we can open-code that operation
3044 (either a division, modulus, or shift). Finally, check for the smallest
3045 mode for which we can do the operation with a library call. */
3047 /* We might want to refine this now that we have division-by-constant
3048 optimization. Since expand_mult_highpart tries so many variants, it is
3049 not straightforward to generalize this. Maybe we should make an array
3050 of possible modes in init_expmed? Save this for GCC 2.7. */
3052 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3053 : (unsignedp ? udiv_optab : sdiv_optab));
3054 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3056 for (compute_mode = mode; compute_mode != VOIDmode;
3057 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3058 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3059 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3060 break;
3062 if (compute_mode == VOIDmode)
3063 for (compute_mode = mode; compute_mode != VOIDmode;
3064 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3065 if (optab1->handlers[(int) compute_mode].libfunc
3066 || optab2->handlers[(int) compute_mode].libfunc)
3067 break;
3069 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3070 in expand_binop. */
3071 if (compute_mode == VOIDmode)
3072 compute_mode = mode;
3074 if (target && GET_MODE (target) == compute_mode)
3075 tquotient = target;
3076 else
3077 tquotient = gen_reg_rtx (compute_mode);
3079 size = GET_MODE_BITSIZE (compute_mode);
3080 #if 0
3081 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3082 (mode), and thereby get better code when OP1 is a constant. Do that
3083 later. It will require going over all usages of SIZE below. */
3084 size = GET_MODE_BITSIZE (mode);
3085 #endif
3087 /* Only deduct something for a REM if the last divide done was
3088 for a different constant. Then set the constant of the last
3089 divide. */
3090 max_cost = div_cost[(int) compute_mode]
3091 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3092 && INTVAL (op1) == last_div_const)
3093 ? mul_cost[(int) compute_mode] + add_cost : 0);
3095 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3097 /* Now convert to the best mode to use. */
3098 if (compute_mode != mode)
3100 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3101 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3103 /* convert_modes may have placed op1 into a register, so we
3104 must recompute the following. */
3105 op1_is_constant = GET_CODE (op1) == CONST_INT;
3106 op1_is_pow2 = (op1_is_constant
3107 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3108 || (! unsignedp
3109 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3112 /* If one of the operands is a volatile MEM, copy it into a register. */
3114 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3115 op0 = force_reg (compute_mode, op0);
3116 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3117 op1 = force_reg (compute_mode, op1);
3119 /* If we need the remainder or if OP1 is constant, we need to
3120 put OP0 in a register in case it has any queued subexpressions. */
3121 if (rem_flag || op1_is_constant)
3122 op0 = force_reg (compute_mode, op0);
3124 last = get_last_insn ();
3126 /* Promote floor rounding to trunc rounding for unsigned operations. */
3127 if (unsignedp)
3129 if (code == FLOOR_DIV_EXPR)
3130 code = TRUNC_DIV_EXPR;
3131 if (code == FLOOR_MOD_EXPR)
3132 code = TRUNC_MOD_EXPR;
3133 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3134 code = TRUNC_DIV_EXPR;
3137 if (op1 != const0_rtx)
3138 switch (code)
3140 case TRUNC_MOD_EXPR:
3141 case TRUNC_DIV_EXPR:
3142 if (op1_is_constant)
3144 if (unsignedp)
3146 unsigned HOST_WIDE_INT mh, ml;
3147 int pre_shift, post_shift;
3148 int dummy;
3149 unsigned HOST_WIDE_INT d = INTVAL (op1);
3151 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3153 pre_shift = floor_log2 (d);
3154 if (rem_flag)
3156 remainder
3157 = expand_binop (compute_mode, and_optab, op0,
3158 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3159 remainder, 1,
3160 OPTAB_LIB_WIDEN);
3161 if (remainder)
3162 return gen_lowpart (mode, remainder);
3164 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3165 build_int_2 (pre_shift, 0),
3166 tquotient, 1);
3168 else if (size <= HOST_BITS_PER_WIDE_INT)
3170 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3172 /* Most significant bit of divisor is set; emit an scc
3173 insn. */
3174 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3175 compute_mode, 1, 1);
3176 if (quotient == 0)
3177 goto fail1;
3179 else
3181 /* Find a suitable multiplier and right shift count
3182 instead of multiplying with D. */
3184 mh = choose_multiplier (d, size, size,
3185 &ml, &post_shift, &dummy);
3187 /* If the suggested multiplier is more than SIZE bits,
3188 we can do better for even divisors, using an
3189 initial right shift. */
3190 if (mh != 0 && (d & 1) == 0)
3192 pre_shift = floor_log2 (d & -d);
3193 mh = choose_multiplier (d >> pre_shift, size,
3194 size - pre_shift,
3195 &ml, &post_shift, &dummy);
3196 if (mh)
3197 abort ();
3199 else
3200 pre_shift = 0;
3202 if (mh != 0)
3204 rtx t1, t2, t3, t4;
3206 if (post_shift - 1 >= BITS_PER_WORD)
3207 goto fail1;
3209 extra_cost = (shift_cost[post_shift - 1]
3210 + shift_cost[1] + 2 * add_cost);
3211 t1 = expand_mult_highpart (compute_mode, op0, ml,
3212 NULL_RTX, 1,
3213 max_cost - extra_cost);
3214 if (t1 == 0)
3215 goto fail1;
3216 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3217 op0, t1),
3218 NULL_RTX);
3219 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3220 build_int_2 (1, 0), NULL_RTX,1);
3221 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3222 t1, t3),
3223 NULL_RTX);
3224 quotient
3225 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3226 build_int_2 (post_shift - 1, 0),
3227 tquotient, 1);
3229 else
3231 rtx t1, t2;
3233 if (pre_shift >= BITS_PER_WORD
3234 || post_shift >= BITS_PER_WORD)
3235 goto fail1;
3237 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3238 build_int_2 (pre_shift, 0),
3239 NULL_RTX, 1);
3240 extra_cost = (shift_cost[pre_shift]
3241 + shift_cost[post_shift]);
3242 t2 = expand_mult_highpart (compute_mode, t1, ml,
3243 NULL_RTX, 1,
3244 max_cost - extra_cost);
3245 if (t2 == 0)
3246 goto fail1;
3247 quotient
3248 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3249 build_int_2 (post_shift, 0),
3250 tquotient, 1);
3254 else /* Too wide mode to use tricky code */
3255 break;
3257 insn = get_last_insn ();
3258 if (insn != last
3259 && (set = single_set (insn)) != 0
3260 && SET_DEST (set) == quotient)
3261 set_unique_reg_note (insn,
3262 REG_EQUAL,
3263 gen_rtx_UDIV (compute_mode, op0, op1));
3265 else /* TRUNC_DIV, signed */
3267 unsigned HOST_WIDE_INT ml;
3268 int lgup, post_shift;
3269 HOST_WIDE_INT d = INTVAL (op1);
3270 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3272 /* n rem d = n rem -d */
3273 if (rem_flag && d < 0)
3275 d = abs_d;
3276 op1 = gen_int_mode (abs_d, compute_mode);
3279 if (d == 1)
3280 quotient = op0;
3281 else if (d == -1)
3282 quotient = expand_unop (compute_mode, neg_optab, op0,
3283 tquotient, 0);
3284 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3286 /* This case is not handled correctly below. */
3287 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3288 compute_mode, 1, 1);
3289 if (quotient == 0)
3290 goto fail1;
3292 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3293 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3294 /* ??? The cheap metric is computed only for
3295 word_mode. If this operation is wider, this may
3296 not be so. Assume true if the optab has an
3297 expander for this mode. */
3298 && (((rem_flag ? smod_optab : sdiv_optab)
3299 ->handlers[(int) compute_mode].insn_code
3300 != CODE_FOR_nothing)
3301 || (sdivmod_optab->handlers[(int) compute_mode]
3302 .insn_code != CODE_FOR_nothing)))
3303 ;
3304 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3306 lgup = floor_log2 (abs_d);
3307 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3309 rtx label = gen_label_rtx ();
3310 rtx t1;
3312 t1 = copy_to_mode_reg (compute_mode, op0);
3313 do_cmp_and_jump (t1, const0_rtx, GE,
3314 compute_mode, label);
3315 expand_inc (t1, gen_int_mode (abs_d - 1,
3316 compute_mode));
3317 emit_label (label);
3318 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3319 build_int_2 (lgup, 0),
3320 tquotient, 0);
3322 else
3324 rtx t1, t2, t3;
3325 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3326 build_int_2 (size - 1, 0),
3327 NULL_RTX, 0);
3328 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3329 build_int_2 (size - lgup, 0),
3330 NULL_RTX, 1);
3331 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3332 op0, t2),
3333 NULL_RTX);
3334 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3335 build_int_2 (lgup, 0),
3336 tquotient, 0);
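/* Illustrative sketch, not GCC code: the branch-free sequence built from
   t1, t2, t3 above, for a 32-bit signed divide by 2**k rounding toward
   zero -- add (2**k - 1) to negative operands via sign-bit arithmetic,
   then arithmetic-shift.  Assumes <stdint.h>, two's complement wraparound,
   arithmetic >> of negative ints, and 0 < k < 31; the helper name is
   hypothetical.  */
#if 0
static int32_t
example_sdiv_pow2 (int32_t x, int k)
{
  int32_t sign = x >> 31;                          /* t1: 0 or -1 */
  uint32_t bias = (uint32_t) sign >> (32 - k);     /* t2: 0 or 2**k - 1 */
  return (int32_t) ((uint32_t) x + bias) >> k;     /* t3 >> k */
}
/* example_sdiv_pow2 (-7, 2) == -1, matching truncating division.  */
#endif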
3339 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3340 the quotient. */
3341 if (d < 0)
3343 insn = get_last_insn ();
3344 if (insn != last
3345 && (set = single_set (insn)) != 0
3346 && SET_DEST (set) == quotient
3347 && abs_d < ((unsigned HOST_WIDE_INT) 1
3348 << (HOST_BITS_PER_WIDE_INT - 1)))
3349 set_unique_reg_note (insn,
3350 REG_EQUAL,
3351 gen_rtx_DIV (compute_mode,
3352 op0,
3353 GEN_INT
3354 (trunc_int_for_mode
3355 (abs_d,
3356 compute_mode))));
3358 quotient = expand_unop (compute_mode, neg_optab,
3359 quotient, quotient, 0);
3362 else if (size <= HOST_BITS_PER_WIDE_INT)
3364 choose_multiplier (abs_d, size, size - 1,
3365 &ml, &post_shift, &lgup);
3366 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3368 rtx t1, t2, t3;
3370 if (post_shift >= BITS_PER_WORD
3371 || size - 1 >= BITS_PER_WORD)
3372 goto fail1;
3374 extra_cost = (shift_cost[post_shift]
3375 + shift_cost[size - 1] + add_cost);
3376 t1 = expand_mult_highpart (compute_mode, op0, ml,
3377 NULL_RTX, 0,
3378 max_cost - extra_cost);
3379 if (t1 == 0)
3380 goto fail1;
3381 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3382 build_int_2 (post_shift, 0), NULL_RTX, 0);
3383 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3384 build_int_2 (size - 1, 0), NULL_RTX, 0);
3385 if (d < 0)
3386 quotient
3387 = force_operand (gen_rtx_MINUS (compute_mode,
3388 t3, t2),
3389 tquotient);
3390 else
3391 quotient
3392 = force_operand (gen_rtx_MINUS (compute_mode,
3393 t2, t3),
3394 tquotient);
3396 else
3398 rtx t1, t2, t3, t4;
3400 if (post_shift >= BITS_PER_WORD
3401 || size - 1 >= BITS_PER_WORD)
3402 goto fail1;
3404 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3405 extra_cost = (shift_cost[post_shift]
3406 + shift_cost[size - 1] + 2 * add_cost);
3407 t1 = expand_mult_highpart (compute_mode, op0, ml,
3408 NULL_RTX, 0,
3409 max_cost - extra_cost);
3410 if (t1 == 0)
3411 goto fail1;
3412 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3413 t1, op0),
3414 NULL_RTX);
3415 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3416 build_int_2 (post_shift, 0),
3417 NULL_RTX, 0);
3418 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3419 build_int_2 (size - 1, 0),
3420 NULL_RTX, 0);
3421 if (d < 0)
3422 quotient
3423 = force_operand (gen_rtx_MINUS (compute_mode,
3424 t4, t3),
3425 tquotient);
3426 else
3427 quotient
3428 = force_operand (gen_rtx_MINUS (compute_mode,
3429 t3, t4),
3430 tquotient);
3433 else /* Too wide mode to use tricky code */
3434 break;
3436 insn = get_last_insn ();
3437 if (insn != last
3438 && (set = single_set (insn)) != 0
3439 && SET_DEST (set) == quotient)
3440 set_unique_reg_note (insn,
3441 REG_EQUAL,
3442 gen_rtx_DIV (compute_mode, op0, op1));
3444 break;
3446 fail1:
3447 delete_insns_since (last);
3448 break;
3450 case FLOOR_DIV_EXPR:
3451 case FLOOR_MOD_EXPR:
3452 /* We will come here only for signed operations. */
3453 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3455 unsigned HOST_WIDE_INT mh, ml;
3456 int pre_shift, lgup, post_shift;
3457 HOST_WIDE_INT d = INTVAL (op1);
3459 if (d > 0)
3461 /* We could just as easily deal with negative constants here,
3462 but it does not seem worth the trouble for GCC 2.6. */
3463 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3465 pre_shift = floor_log2 (d);
3466 if (rem_flag)
3468 remainder = expand_binop (compute_mode, and_optab, op0,
3469 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3470 remainder, 0, OPTAB_LIB_WIDEN);
3471 if (remainder)
3472 return gen_lowpart (mode, remainder);
3474 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3475 build_int_2 (pre_shift, 0),
3476 tquotient, 0);
3478 else
3480 rtx t1, t2, t3, t4;
3482 mh = choose_multiplier (d, size, size - 1,
3483 &ml, &post_shift, &lgup);
3484 if (mh)
3485 abort ();
3487 if (post_shift < BITS_PER_WORD
3488 && size - 1 < BITS_PER_WORD)
3490 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3491 build_int_2 (size - 1, 0),
3492 NULL_RTX, 0);
3493 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3494 NULL_RTX, 0, OPTAB_WIDEN);
3495 extra_cost = (shift_cost[post_shift]
3496 + shift_cost[size - 1] + 2 * add_cost);
3497 t3 = expand_mult_highpart (compute_mode, t2, ml,
3498 NULL_RTX, 1,
3499 max_cost - extra_cost);
3500 if (t3 != 0)
3502 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3503 build_int_2 (post_shift, 0),
3504 NULL_RTX, 1);
3505 quotient = expand_binop (compute_mode, xor_optab,
3506 t4, t1, tquotient, 0,
3507 OPTAB_WIDEN);
3512 else
3514 rtx nsign, t1, t2, t3, t4;
3515 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3516 op0, constm1_rtx), NULL_RTX);
3517 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3518 0, OPTAB_WIDEN);
3519 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3520 build_int_2 (size - 1, 0), NULL_RTX, 0);
3521 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3522 NULL_RTX);
3523 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3524 NULL_RTX, 0);
3525 if (t4)
3527 rtx t5;
3528 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3529 NULL_RTX, 0);
3530 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3531 t4, t5),
3532 tquotient);
3537 if (quotient != 0)
3538 break;
3539 delete_insns_since (last);
3541 /* Try using an instruction that produces both the quotient and
3542 remainder, using truncation. We can easily compensate the quotient
3543 or remainder to get floor rounding, once we have the remainder.
3544 Notice that we also compute the final remainder value here,
3545 and return the result right away. */
3546 if (target == 0 || GET_MODE (target) != compute_mode)
3547 target = gen_reg_rtx (compute_mode);
3549 if (rem_flag)
3551 remainder
3552 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3553 quotient = gen_reg_rtx (compute_mode);
3555 else
3557 quotient
3558 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3559 remainder = gen_reg_rtx (compute_mode);
3562 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3563 quotient, remainder, 0))
3565 /* This could be computed with a branch-less sequence.
3566 Save that for later. */
3567 rtx tem;
3568 rtx label = gen_label_rtx ();
3569 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3570 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3571 NULL_RTX, 0, OPTAB_WIDEN);
3572 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3573 expand_dec (quotient, const1_rtx);
3574 expand_inc (remainder, op1);
3575 emit_label (label);
3576 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3579 /* No luck with division elimination or divmod. Have to do it
3580 by conditionally adjusting op0 *and* the result. */
3582 rtx label1, label2, label3, label4, label5;
3583 rtx adjusted_op0;
3584 rtx tem;
3586 quotient = gen_reg_rtx (compute_mode);
3587 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3588 label1 = gen_label_rtx ();
3589 label2 = gen_label_rtx ();
3590 label3 = gen_label_rtx ();
3591 label4 = gen_label_rtx ();
3592 label5 = gen_label_rtx ();
3593 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3594 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3595 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3596 quotient, 0, OPTAB_LIB_WIDEN);
3597 if (tem != quotient)
3598 emit_move_insn (quotient, tem);
3599 emit_jump_insn (gen_jump (label5));
3600 emit_barrier ();
3601 emit_label (label1);
3602 expand_inc (adjusted_op0, const1_rtx);
3603 emit_jump_insn (gen_jump (label4));
3604 emit_barrier ();
3605 emit_label (label2);
3606 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3607 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3608 quotient, 0, OPTAB_LIB_WIDEN);
3609 if (tem != quotient)
3610 emit_move_insn (quotient, tem);
3611 emit_jump_insn (gen_jump (label5));
3612 emit_barrier ();
3613 emit_label (label3);
3614 expand_dec (adjusted_op0, const1_rtx);
3615 emit_label (label4);
3616 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3617 quotient, 0, OPTAB_LIB_WIDEN);
3618 if (tem != quotient)
3619 emit_move_insn (quotient, tem);
3620 expand_dec (quotient, const1_rtx);
3621 emit_label (label5);
3623 break;
3625 case CEIL_DIV_EXPR:
3626 case CEIL_MOD_EXPR:
3627 if (unsignedp)
3629 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3631 rtx t1, t2, t3;
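/* Ceiling division by a power of 2: shift right by log2 (d) and add one
   whenever any of the bits shifted out were nonzero.  emit_store_flag
   yields that extra bit directly when it can; otherwise fall back to a
   compare and conditional increment.  */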
3632 unsigned HOST_WIDE_INT d = INTVAL (op1);
3633 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3634 build_int_2 (floor_log2 (d), 0),
3635 tquotient, 1);
3636 t2 = expand_binop (compute_mode, and_optab, op0,
3637 GEN_INT (d - 1),
3638 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3639 t3 = gen_reg_rtx (compute_mode);
3640 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3641 compute_mode, 1, 1);
3642 if (t3 == 0)
3644 rtx lab;
3645 lab = gen_label_rtx ();
3646 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3647 expand_inc (t1, const1_rtx);
3648 emit_label (lab);
3649 quotient = t1;
3651 else
3652 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3653 t1, t3),
3654 tquotient);
3655 break;
3658 /* Try using an instruction that produces both the quotient and
3659 remainder, using truncation. We can easily compensate the
3660 quotient or remainder to get ceiling rounding, once we have the
3661 remainder. Notice that we also compute the final remainder
3662 value here, and return the result right away. */
3663 if (target == 0 || GET_MODE (target) != compute_mode)
3664 target = gen_reg_rtx (compute_mode);
3666 if (rem_flag)
3668 remainder = (GET_CODE (target) == REG
3669 ? target : gen_reg_rtx (compute_mode));
3670 quotient = gen_reg_rtx (compute_mode);
3672 else
3674 quotient = (GET_CODE (target) == REG
3675 ? target : gen_reg_rtx (compute_mode));
3676 remainder = gen_reg_rtx (compute_mode);
3679 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3680 remainder, 1))
3682 /* This could be computed with a branch-less sequence.
3683 Save that for later. */
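/* For unsigned operands, a nonzero remainder always means the truncating
   quotient must be incremented (and op1 subtracted from the remainder) to
   get ceiling rounding.  */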
3684 rtx label = gen_label_rtx ();
3685 do_cmp_and_jump (remainder, const0_rtx, EQ,
3686 compute_mode, label);
3687 expand_inc (quotient, const1_rtx);
3688 expand_dec (remainder, op1);
3689 emit_label (label);
3690 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3693 /* No luck with division elimination or divmod. Have to do it
3694 by conditionally adjusting op0 *and* the result. */
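/* Unsigned ceiling: the quotient is 0 when op0 is 0, and
   (op0 - 1) / op1 + 1 otherwise.  */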
3696 rtx label1, label2;
3697 rtx adjusted_op0, tem;
3699 quotient = gen_reg_rtx (compute_mode);
3700 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3701 label1 = gen_label_rtx ();
3702 label2 = gen_label_rtx ();
3703 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3704 compute_mode, label1);
3705 emit_move_insn (quotient, const0_rtx);
3706 emit_jump_insn (gen_jump (label2));
3707 emit_barrier ();
3708 emit_label (label1);
3709 expand_dec (adjusted_op0, const1_rtx);
3710 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3711 quotient, 1, OPTAB_LIB_WIDEN);
3712 if (tem != quotient)
3713 emit_move_insn (quotient, tem);
3714 expand_inc (quotient, const1_rtx);
3715 emit_label (label2);
3718 else /* signed */
3720 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3721 && INTVAL (op1) >= 0)
3723 /* This is extremely similar to the code for the unsigned case
3724 above. For 2.7 we should merge these variants, but for
3725 2.6.1 I don't want to touch the code for unsigned since that
3726 gets used in C. The signed case will only be used by other
3727 languages (Ada). */
3729 rtx t1, t2, t3;
3730 unsigned HOST_WIDE_INT d = INTVAL (op1);
3731 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3732 build_int_2 (floor_log2 (d), 0),
3733 tquotient, 0);
3734 t2 = expand_binop (compute_mode, and_optab, op0,
3735 GEN_INT (d - 1),
3736 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3737 t3 = gen_reg_rtx (compute_mode);
3738 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3739 compute_mode, 1, 1);
3740 if (t3 == 0)
3742 rtx lab;
3743 lab = gen_label_rtx ();
3744 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3745 expand_inc (t1, const1_rtx);
3746 emit_label (lab);
3747 quotient = t1;
3749 else
3750 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3751 t1, t3),
3752 tquotient);
3753 break;
3756 /* Try using an instruction that produces both the quotient and
3757 remainder, using truncation. We can easily compensate the
3758 quotient or remainder to get ceiling rounding, once we have the
3759 remainder. Notice that we also compute the final remainder
3760 value here, and return the result right away. */
3761 if (target == 0 || GET_MODE (target) != compute_mode)
3762 target = gen_reg_rtx (compute_mode);
3763 if (rem_flag)
3765 remainder = (GET_CODE (target) == REG
3766 ? target : gen_reg_rtx (compute_mode));
3767 quotient = gen_reg_rtx (compute_mode);
3769 else
3771 quotient = (GET_CODE (target) == REG
3772 ? target : gen_reg_rtx (compute_mode));
3773 remainder = gen_reg_rtx (compute_mode);
3776 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3777 remainder, 0))
3779 /* This could be computed with a branch-less sequence.
3780 Save that for later. */
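/* The truncating quotient needs no adjustment when the remainder is zero
   or when op0 and op1 have opposite signs; otherwise ceiling rounding
   requires quotient + 1 and remainder - op1.  */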
3781 rtx tem;
3782 rtx label = gen_label_rtx ();
3783 do_cmp_and_jump (remainder, const0_rtx, EQ,
3784 compute_mode, label);
3785 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3786 NULL_RTX, 0, OPTAB_WIDEN);
3787 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3788 expand_inc (quotient, const1_rtx);
3789 expand_dec (remainder, op1);
3790 emit_label (label);
3791 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3794 /* No luck with division elimination or divmod. Have to do it
3795 by conditionally adjusting op0 *and* the result. */
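/* When op0 and op1 have opposite signs, a plain truncating division already
   gives the ceiling quotient.  Otherwise op0 is first moved one unit toward
   zero and the truncating quotient is incremented afterwards.  */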
3797 rtx label1, label2, label3, label4, label5;
3798 rtx adjusted_op0;
3799 rtx tem;
3801 quotient = gen_reg_rtx (compute_mode);
3802 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3803 label1 = gen_label_rtx ();
3804 label2 = gen_label_rtx ();
3805 label3 = gen_label_rtx ();
3806 label4 = gen_label_rtx ();
3807 label5 = gen_label_rtx ();
3808 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3809 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3810 compute_mode, label1);
3811 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3812 quotient, 0, OPTAB_LIB_WIDEN);
3813 if (tem != quotient)
3814 emit_move_insn (quotient, tem);
3815 emit_jump_insn (gen_jump (label5));
3816 emit_barrier ();
3817 emit_label (label1);
3818 expand_dec (adjusted_op0, const1_rtx);
3819 emit_jump_insn (gen_jump (label4));
3820 emit_barrier ();
3821 emit_label (label2);
3822 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3823 compute_mode, label3);
3824 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3825 quotient, 0, OPTAB_LIB_WIDEN);
3826 if (tem != quotient)
3827 emit_move_insn (quotient, tem);
3828 emit_jump_insn (gen_jump (label5));
3829 emit_barrier ();
3830 emit_label (label3);
3831 expand_inc (adjusted_op0, const1_rtx);
3832 emit_label (label4);
3833 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3834 quotient, 0, OPTAB_LIB_WIDEN);
3835 if (tem != quotient)
3836 emit_move_insn (quotient, tem);
3837 expand_inc (quotient, const1_rtx);
3838 emit_label (label5);
3841 break;
3843 case EXACT_DIV_EXPR:
3844 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3846 HOST_WIDE_INT d = INTVAL (op1);
3847 unsigned HOST_WIDE_INT ml;
3848 int pre_shift;
3849 rtx t1;
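/* An exact division by d is done by shifting out d's power-of-2 factor and
   multiplying by the multiplicative inverse, modulo 2**size, of the
   remaining odd factor; invert_mod2n supplies that inverse.  */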
3851 pre_shift = floor_log2 (d & -d);
3852 ml = invert_mod2n (d >> pre_shift, size);
3853 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3854 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3855 quotient = expand_mult (compute_mode, t1,
3856 gen_int_mode (ml, compute_mode),
3857 NULL_RTX, 0);
3859 insn = get_last_insn ();
3860 set_unique_reg_note (insn,
3861 REG_EQUAL,
3862 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3863 compute_mode,
3864 op0, op1));
3866 break;
3868 case ROUND_DIV_EXPR:
3869 case ROUND_MOD_EXPR:
3870 if (unsignedp)
3872 rtx tem;
3873 rtx label;
3874 label = gen_label_rtx ();
3875 quotient = gen_reg_rtx (compute_mode);
3876 remainder = gen_reg_rtx (compute_mode);
3877 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3879 rtx tem;
3880 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3881 quotient, 1, OPTAB_LIB_WIDEN);
3882 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3883 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3884 remainder, 1, OPTAB_LIB_WIDEN);
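/* Round the quotient to nearest: when remainder > (op1 - 1) / 2, that is,
   when 2 * remainder >= op1, increment the quotient and subtract op1 from
   the remainder; halves round the quotient up.  */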
3886 tem = plus_constant (op1, -1);
3887 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3888 build_int_2 (1, 0), NULL_RTX, 1);
3889 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3890 expand_inc (quotient, const1_rtx);
3891 expand_dec (remainder, op1);
3892 emit_label (label);
3894 else
3896 rtx abs_rem, abs_op1, tem, mask;
3897 rtx label;
3898 label = gen_label_rtx ();
3899 quotient = gen_reg_rtx (compute_mode);
3900 remainder = gen_reg_rtx (compute_mode);
3901 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3903 rtx tem;
3904 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3905 quotient, 0, OPTAB_LIB_WIDEN);
3906 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3907 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3908 remainder, 0, OPTAB_LIB_WIDEN);
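/* Round to nearest: adjust when 2 * abs (remainder) >= abs (op1).  MASK is
   -1 when op0 and op1 have opposite signs and 0 otherwise; since
   (x ^ mask) - mask negates x exactly when mask is -1, the quotient is
   moved one unit away from zero and the remainder is changed by op1 to
   compensate.  */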
3910 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3911 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3912 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3913 build_int_2 (1, 0), NULL_RTX, 1);
3914 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3915 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3916 NULL_RTX, 0, OPTAB_WIDEN);
3917 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3918 build_int_2 (size - 1, 0), NULL_RTX, 0);
3919 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3920 NULL_RTX, 0, OPTAB_WIDEN);
3921 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3922 NULL_RTX, 0, OPTAB_WIDEN);
3923 expand_inc (quotient, tem);
3924 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3925 NULL_RTX, 0, OPTAB_WIDEN);
3926 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3927 NULL_RTX, 0, OPTAB_WIDEN);
3928 expand_dec (remainder, tem);
3929 emit_label (label);
3931 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3933 default:
3934 abort ();
3937 if (quotient == 0)
3939 if (target && GET_MODE (target) != compute_mode)
3940 target = 0;
3942 if (rem_flag)
3944 /* Try to produce the remainder without producing the quotient.
3945 If we seem to have a divmod pattern that does not require widening,
3946 don't try widening here. We should really have a WIDEN argument
3947 to expand_twoval_binop, since what we'd really like to do here is
3948 1) try a mod insn in compute_mode
3949 2) try a divmod insn in compute_mode
3950 3) try a div insn in compute_mode and multiply-subtract to get
3951 remainder
3952 4) try the same things with widening allowed. */
3953 remainder
3954 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3955 op0, op1, target,
3956 unsignedp,
3957 ((optab2->handlers[(int) compute_mode].insn_code
3958 != CODE_FOR_nothing)
3959 ? OPTAB_DIRECT : OPTAB_WIDEN));
3960 if (remainder == 0)
3962 /* No luck there. Can we do remainder and divide at once
3963 without a library call? */
3964 remainder = gen_reg_rtx (compute_mode);
3965 if (! expand_twoval_binop ((unsignedp
3966 ? udivmod_optab
3967 : sdivmod_optab),
3968 op0, op1,
3969 NULL_RTX, remainder, unsignedp))
3970 remainder = 0;
3973 if (remainder)
3974 return gen_lowpart (mode, remainder);
3977 /* Produce the quotient. Try a quotient insn, but not a library call.
3978 If we have a divmod in this mode, use it in preference to widening
3979 the div (for this test we assume it will not fail). Note that optab2
3980 is set to whichever of the two optabs the call below will use. */
3981 quotient
3982 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3983 op0, op1, rem_flag ? NULL_RTX : target,
3984 unsignedp,
3985 ((optab2->handlers[(int) compute_mode].insn_code
3986 != CODE_FOR_nothing)
3987 ? OPTAB_DIRECT : OPTAB_WIDEN));
3989 if (quotient == 0)
3991 /* No luck there. Try a quotient-and-remainder insn,
3992 keeping the quotient alone. */
3993 quotient = gen_reg_rtx (compute_mode);
3994 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3995 op0, op1,
3996 quotient, NULL_RTX, unsignedp))
3998 quotient = 0;
3999 if (! rem_flag)
4000 /* Still no luck. If we are not computing the remainder,
4001 use a library call for the quotient. */
4002 quotient = sign_expand_binop (compute_mode,
4003 udiv_optab, sdiv_optab,
4004 op0, op1, target,
4005 unsignedp, OPTAB_LIB_WIDEN);
4010 if (rem_flag)
4012 if (target && GET_MODE (target) != compute_mode)
4013 target = 0;
4015 if (quotient == 0)
4016 /* No divide instruction either. Use library for remainder. */
4017 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4018 op0, op1, target,
4019 unsignedp, OPTAB_LIB_WIDEN);
4020 else
4022 /* We divided. Now finish doing X - Y * (X / Y). */
4023 remainder = expand_mult (compute_mode, quotient, op1,
4024 NULL_RTX, unsignedp);
4025 remainder = expand_binop (compute_mode, sub_optab, op0,
4026 remainder, target, unsignedp,
4027 OPTAB_LIB_WIDEN);
4031 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4034 /* Return a tree node with data type TYPE, describing the value of X.
4035 Usually this is an RTL_EXPR, if there is no obvious better choice.
4036 X may be an expression; however, we only support those expressions
4037 generated by loop.c. */
4039 tree
4040 make_tree (type, x)
4041 tree type;
4042 rtx x;
4044 tree t;
4046 switch (GET_CODE (x))
4048 case CONST_INT:
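/* The low half of the constant is INTVAL (x).  The high half is zero
   unless the value is negative and the type is either signed or at least
   HOST_BITS_PER_WIDE_INT wide, in which case it is sign-extended to -1.  */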
4049 t = build_int_2 (INTVAL (x),
4050 (TREE_UNSIGNED (type)
4051 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4052 || INTVAL (x) >= 0 ? 0 : -1);
4053 TREE_TYPE (t) = type;
4054 return t;
4056 case CONST_DOUBLE:
4057 if (GET_MODE (x) == VOIDmode)
4059 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4060 TREE_TYPE (t) = type;
4062 else
4064 REAL_VALUE_TYPE d;
4066 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4067 t = build_real (type, d);
4070 return t;
4072 case CONST_VECTOR:
4074 int i, units;
4075 rtx elt;
4076 tree t = NULL_TREE;
4078 units = CONST_VECTOR_NUNITS (x);
4080 /* Build a tree with vector elements. */
4081 for (i = units - 1; i >= 0; --i)
4083 elt = CONST_VECTOR_ELT (x, i);
4084 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4087 return build_vector (type, t);
4090 case PLUS:
4091 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4092 make_tree (type, XEXP (x, 1))));
4094 case MINUS:
4095 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4096 make_tree (type, XEXP (x, 1))));
4098 case NEG:
4099 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4101 case MULT:
4102 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4103 make_tree (type, XEXP (x, 1))));
4105 case ASHIFT:
4106 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4107 make_tree (type, XEXP (x, 1))));
4109 case LSHIFTRT:
4110 t = (*lang_hooks.types.unsigned_type) (type);
4111 return fold (convert (type,
4112 build (RSHIFT_EXPR, t,
4113 make_tree (t, XEXP (x, 0)),
4114 make_tree (type, XEXP (x, 1)))));
4116 case ASHIFTRT:
4117 t = (*lang_hooks.types.signed_type) (type);
4118 return fold (convert (type,
4119 build (RSHIFT_EXPR, t,
4120 make_tree (t, XEXP (x, 0)),
4121 make_tree (type, XEXP (x, 1)))));
4123 case DIV:
4124 if (TREE_CODE (type) != REAL_TYPE)
4125 t = (*lang_hooks.types.signed_type) (type);
4126 else
4127 t = type;
4129 return fold (convert (type,
4130 build (TRUNC_DIV_EXPR, t,
4131 make_tree (t, XEXP (x, 0)),
4132 make_tree (t, XEXP (x, 1)))));
4133 case UDIV:
4134 t = (*lang_hooks.types.unsigned_type) (type);
4135 return fold (convert (type,
4136 build (TRUNC_DIV_EXPR, t,
4137 make_tree (t, XEXP (x, 0)),
4138 make_tree (t, XEXP (x, 1)))));
4140 case SIGN_EXTEND:
4141 case ZERO_EXTEND:
4142 t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
4143 GET_CODE (x) == ZERO_EXTEND);
4144 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4146 default:
4147 t = make_node (RTL_EXPR);
4148 TREE_TYPE (t) = type;
4150 #ifdef POINTERS_EXTEND_UNSIGNED
4151 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4152 ptr_mode. So convert. */
4153 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4154 x = convert_memory_address (TYPE_MODE (type), x);
4155 #endif
4157 RTL_EXPR_RTL (t) = x;
4158 /* There are no insns to be output
4159 when this rtl_expr is used. */
4160 RTL_EXPR_SEQUENCE (t) = 0;
4161 return t;
4165 /* Check whether the multiplication X * MULT + ADD overflows.
4166 X, MULT and ADD must be CONST_*.
4167 MODE is the machine mode for the computation.
4168 X and MULT must have mode MODE. ADD may have a different mode
4169 (a VOIDmode ADD is treated as having mode MODE).
4170 UNSIGNEDP is non-zero to do unsigned multiplication. */
4172 bool
4173 const_mult_add_overflow_p (x, mult, add, mode, unsignedp)
4174 rtx x, mult, add;
4175 enum machine_mode mode;
4176 int unsignedp;
4178 tree type, mult_type, add_type, result;
4180 type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4182 /* In order to get a proper overflow indication from an unsigned
4183 type, we have to pretend that it's a sizetype. */
4184 mult_type = type;
4185 if (unsignedp)
4187 mult_type = copy_node (type);
4188 TYPE_IS_SIZETYPE (mult_type) = 1;
4191 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4192 : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4194 result = fold (build (PLUS_EXPR, mult_type,
4195 fold (build (MULT_EXPR, mult_type,
4196 make_tree (mult_type, x),
4197 make_tree (mult_type, mult))),
4198 make_tree (add_type, add)));
4200 return TREE_CONSTANT_OVERFLOW (result);
4203 /* Return an rtx representing the value of X * MULT + ADD.
4204 TARGET is a suggestion for where to store the result (an rtx).
4205 MODE is the machine mode for the computation.
4206 X and MULT must have mode MODE. ADD may have a different mode
4207 (a VOIDmode ADD is treated as having mode MODE).
4208 UNSIGNEDP is non-zero to do unsigned multiplication.
4209 This may emit insns. */
4211 rtx
4212 expand_mult_add (x, target, mult, add, mode, unsignedp)
4213 rtx x, target, mult, add;
4214 enum machine_mode mode;
4215 int unsignedp;
4217 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4218 tree add_type = (GET_MODE (add) == VOIDmode
4219 ? type: (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4220 unsignedp));
4221 tree result = fold (build (PLUS_EXPR, type,
4222 fold (build (MULT_EXPR, type,
4223 make_tree (type, x),
4224 make_tree (type, mult))),
4225 make_tree (add_type, add)));
4227 return expand_expr (result, target, VOIDmode, 0);
4230 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4231 and returning TARGET.
4233 If TARGET is 0, a pseudo-register or constant is returned. */
4235 rtx
4236 expand_and (mode, op0, op1, target)
4237 enum machine_mode mode;
4238 rtx op0, op1, target;
4240 rtx tem = 0;
4242 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4243 tem = simplify_binary_operation (AND, mode, op0, op1);
4244 if (tem == 0)
4245 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4247 if (target == 0)
4248 target = tem;
4249 else if (tem != target)
4250 emit_move_insn (target, tem);
4251 return target;
4254 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4255 and storing in TARGET. Normally return TARGET.
4256 Return 0 if that cannot be done.
4258 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4259 it is VOIDmode, they cannot both be CONST_INT.
4261 UNSIGNEDP is for the case where we have to widen the operands
4262 to perform the operation. It says to use zero-extension.
4264 NORMALIZEP is 1 if we should convert the result to be either zero
4265 or one. NORMALIZEP is -1 if we should convert the result to be
4266 either zero or -1. If NORMALIZEP is zero, the result will be left
4267 "raw" out of the scc insn. */
4269 rtx
4270 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4271 rtx target;
4272 enum rtx_code code;
4273 rtx op0, op1;
4274 enum machine_mode mode;
4275 int unsignedp;
4276 int normalizep;
4278 rtx subtarget;
4279 enum insn_code icode;
4280 enum machine_mode compare_mode;
4281 enum machine_mode target_mode = GET_MODE (target);
4282 rtx tem;
4283 rtx last = get_last_insn ();
4284 rtx pattern, comparison;
4286 /* ??? Ok to do this and then fail? */
4287 op0 = protect_from_queue (op0, 0);
4288 op1 = protect_from_queue (op1, 0);
4290 if (unsignedp)
4291 code = unsigned_condition (code);
4293 /* If one operand is constant, make it the second one. Only do this
4294 if the other operand is not constant as well. */
4296 if (swap_commutative_operands_p (op0, op1))
4298 tem = op0;
4299 op0 = op1;
4300 op1 = tem;
4301 code = swap_condition (code);
4304 if (mode == VOIDmode)
4305 mode = GET_MODE (op0);
4307 /* For some comparisons with 1 and -1, we can convert this to
4308 comparisons with zero. This will often produce more opportunities for
4309 store-flag insns. */
4311 switch (code)
4313 case LT:
4314 if (op1 == const1_rtx)
4315 op1 = const0_rtx, code = LE;
4316 break;
4317 case LE:
4318 if (op1 == constm1_rtx)
4319 op1 = const0_rtx, code = LT;
4320 break;
4321 case GE:
4322 if (op1 == const1_rtx)
4323 op1 = const0_rtx, code = GT;
4324 break;
4325 case GT:
4326 if (op1 == constm1_rtx)
4327 op1 = const0_rtx, code = GE;
4328 break;
4329 case GEU:
4330 if (op1 == const1_rtx)
4331 op1 = const0_rtx, code = NE;
4332 break;
4333 case LTU:
4334 if (op1 == const1_rtx)
4335 op1 = const0_rtx, code = EQ;
4336 break;
4337 default:
4338 break;
4341 /* If we are comparing a double-word integer with zero, we can convert
4342 the comparison into one involving a single word. */
4343 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4344 && GET_MODE_CLASS (mode) == MODE_INT
4345 && op1 == const0_rtx
4346 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4348 if (code == EQ || code == NE)
4350 /* Do a logical OR of the two words and compare the result. */
4351 rtx op0h = gen_highpart (word_mode, op0);
4352 rtx op0l = gen_lowpart (word_mode, op0);
4353 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4354 NULL_RTX, unsignedp, OPTAB_DIRECT);
4355 if (op0both != 0)
4356 return emit_store_flag (target, code, op0both, op1, word_mode,
4357 unsignedp, normalizep);
4359 else if (code == LT || code == GE)
4360 /* If testing the sign bit, can just test on high word. */
4361 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4362 op1, word_mode, unsignedp, normalizep);
4365 /* From now on, we won't change CODE, so set ICODE now. */
4366 icode = setcc_gen_code[(int) code];
4368 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4369 complement of A (for GE) and shifting the sign bit to the low bit. */
4370 if (op1 == const0_rtx && (code == LT || code == GE)
4371 && GET_MODE_CLASS (mode) == MODE_INT
4372 && (normalizep || STORE_FLAG_VALUE == 1
4373 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4374 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4375 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4377 subtarget = target;
4379 /* If the result is to be wider than OP0, it is best to convert it
4380 first. If it is to be narrower, it is *incorrect* to convert it
4381 first. */
4382 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4384 op0 = protect_from_queue (op0, 0);
4385 op0 = convert_modes (target_mode, mode, op0, 0);
4386 mode = target_mode;
4389 if (target_mode != mode)
4390 subtarget = 0;
4392 if (code == GE)
4393 op0 = expand_unop (mode, one_cmpl_optab, op0,
4394 ((STORE_FLAG_VALUE == 1 || normalizep)
4395 ? 0 : subtarget), 0);
4397 if (STORE_FLAG_VALUE == 1 || normalizep)
4398 /* If we are supposed to produce a 0/1 value, we want to do
4399 a logical shift from the sign bit to the low-order bit; for
4400 a -1/0 value, we do an arithmetic shift. */
4401 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4402 size_int (GET_MODE_BITSIZE (mode) - 1),
4403 subtarget, normalizep != -1);
4405 if (mode != target_mode)
4406 op0 = convert_modes (target_mode, mode, op0, 0);
4408 return op0;
4411 if (icode != CODE_FOR_nothing)
4413 insn_operand_predicate_fn pred;
4415 /* We think we may be able to do this with a scc insn. Emit the
4416 comparison and then the scc insn.
4418 compare_from_rtx may call emit_queue, which would be deleted below
4419 if the scc insn fails. So call it ourselves before setting LAST.
4420 Likewise for do_pending_stack_adjust. */
4422 emit_queue ();
4423 do_pending_stack_adjust ();
4424 last = get_last_insn ();
4426 comparison
4427 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4428 if (GET_CODE (comparison) == CONST_INT)
4429 return (comparison == const0_rtx ? const0_rtx
4430 : normalizep == 1 ? const1_rtx
4431 : normalizep == -1 ? constm1_rtx
4432 : const_true_rtx);
4434 /* The code of COMPARISON may not match CODE if compare_from_rtx
4435 decided to swap its operands and reverse the original code.
4437 We know that compare_from_rtx returns either a CONST_INT or
4438 a new comparison code, so it is safe to just extract the
4439 code from COMPARISON. */
4440 code = GET_CODE (comparison);
4442 /* Get a reference to the target in the proper mode for this insn. */
4443 compare_mode = insn_data[(int) icode].operand[0].mode;
4444 subtarget = target;
4445 pred = insn_data[(int) icode].operand[0].predicate;
4446 if (preserve_subexpressions_p ()
4447 || ! (*pred) (subtarget, compare_mode))
4448 subtarget = gen_reg_rtx (compare_mode);
4450 pattern = GEN_FCN (icode) (subtarget);
4451 if (pattern)
4453 emit_insn (pattern);
4455 /* If we are converting to a wider mode, first convert to
4456 TARGET_MODE, then normalize. This produces better combining
4457 opportunities on machines that have a SIGN_EXTRACT when we are
4458 testing a single bit. This mostly benefits the 68k.
4460 If STORE_FLAG_VALUE does not have the sign bit set when
4461 interpreted in COMPARE_MODE, we can do this conversion as
4462 unsigned, which is usually more efficient. */
4463 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4465 convert_move (target, subtarget,
4466 (GET_MODE_BITSIZE (compare_mode)
4467 <= HOST_BITS_PER_WIDE_INT)
4468 && 0 == (STORE_FLAG_VALUE
4469 & ((HOST_WIDE_INT) 1
4470 << (GET_MODE_BITSIZE (compare_mode) -1))));
4471 op0 = target;
4472 compare_mode = target_mode;
4474 else
4475 op0 = subtarget;
4477 /* If we want to keep subexpressions around, don't reuse our
4478 last target. */
4480 if (preserve_subexpressions_p ())
4481 subtarget = 0;
4483 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4484 we don't have to do anything. */
4485 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4487 /* STORE_FLAG_VALUE might be the most negative number, so write
4488 the comparison this way to avoid a compile-time warning. */
4489 else if (- normalizep == STORE_FLAG_VALUE)
4490 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4492 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4493 makes it hard to use a value of just the sign bit due to
4494 ANSI integer constant typing rules. */
4495 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4496 && (STORE_FLAG_VALUE
4497 & ((HOST_WIDE_INT) 1
4498 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4499 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4500 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4501 subtarget, normalizep == 1);
4502 else if (STORE_FLAG_VALUE & 1)
4504 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4505 if (normalizep == -1)
4506 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4508 else
4509 abort ();
4511 /* If we were converting to a smaller mode, do the
4512 conversion now. */
4513 if (target_mode != compare_mode)
4515 convert_move (target, op0, 0);
4516 return target;
4518 else
4519 return op0;
4523 delete_insns_since (last);
4525 /* If expensive optimizations are enabled, use a different pseudo register
4526 for each insn, instead of reusing the same pseudo. This leads to better
4527 CSE, but slows down the compiler, since there are more pseudos. */
4528 subtarget = (!flag_expensive_optimizations
4529 && (target_mode == mode)) ? target : NULL_RTX;
4531 /* If we reached here, we can't do this with a scc insn. However, there
4532 are some comparisons that can be done directly. For example, if
4533 this is an equality comparison of integers, we can try to exclusive-or
4534 (or subtract) the two operands and use a recursive call to try the
4535 comparison with zero. Don't do any of these cases if branches are
4536 very cheap. */
4538 if (BRANCH_COST > 0
4539 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4540 && op1 != const0_rtx)
4542 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4543 OPTAB_WIDEN);
4545 if (tem == 0)
4546 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4547 OPTAB_WIDEN);
4548 if (tem != 0)
4549 tem = emit_store_flag (target, code, tem, const0_rtx,
4550 mode, unsignedp, normalizep);
4551 if (tem == 0)
4552 delete_insns_since (last);
4553 return tem;
4556 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4557 the constant zero. Reject all other comparisons at this point. Only
4558 do LE and GT if branches are expensive since they are expensive on
4559 2-operand machines. */
4561 if (BRANCH_COST == 0
4562 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4563 || (code != EQ && code != NE
4564 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4565 return 0;
4567 /* See what we need to return. We can only return a 1, -1, or the
4568 sign bit. */
4570 if (normalizep == 0)
4572 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4573 normalizep = STORE_FLAG_VALUE;
4575 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4576 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4577 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4579 else
4580 return 0;
4583 /* Try to put the result of the comparison in the sign bit. Assume we can't
4584 do the necessary operation below. */
4586 tem = 0;
4588 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4589 the sign bit set. */
4591 if (code == LE)
4593 /* This is destructive, so SUBTARGET can't be OP0. */
4594 if (rtx_equal_p (subtarget, op0))
4595 subtarget = 0;
4597 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4598 OPTAB_WIDEN);
4599 if (tem)
4600 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4601 OPTAB_WIDEN);
4604 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4605 number of bits in the mode of OP0, minus one. */
4607 if (code == GT)
4609 if (rtx_equal_p (subtarget, op0))
4610 subtarget = 0;
4612 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4613 size_int (GET_MODE_BITSIZE (mode) - 1),
4614 subtarget, 0);
4615 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4616 OPTAB_WIDEN);
4619 if (code == EQ || code == NE)
4621 /* For EQ or NE, one way to do the comparison is to apply an operation
4622 that converts the operand into a positive number if it is non-zero
4623 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4624 for NE we negate. This puts the result in the sign bit. Then we
4625 normalize with a shift, if needed.
4627 Two operations that can do the above actions are ABS and FFS, so try
4628 them. If that doesn't work, and MODE is smaller than a full word,
4629 we can use zero-extension to the wider mode (an unsigned conversion)
4630 as the operation. */
4632 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4633 that is compensated by the subsequent overflow when subtracting
4634 one / negating. */
4636 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4637 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4638 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4639 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4640 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4642 op0 = protect_from_queue (op0, 0);
4643 tem = convert_modes (word_mode, mode, op0, 1);
4644 mode = word_mode;
4647 if (tem != 0)
4649 if (code == EQ)
4650 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4651 0, OPTAB_WIDEN);
4652 else
4653 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4656 /* If we couldn't do it that way, for NE we can "or" the two's complement
4657 of the value with itself. For EQ, we take the one's complement of
4658 that "or", which is an extra insn, so we only handle EQ if branches
4659 are expensive. */
4661 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4663 if (rtx_equal_p (subtarget, op0))
4664 subtarget = 0;
4666 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4667 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4668 OPTAB_WIDEN);
4670 if (tem && code == EQ)
4671 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4675 if (tem && normalizep)
4676 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4677 size_int (GET_MODE_BITSIZE (mode) - 1),
4678 subtarget, normalizep == 1);
4680 if (tem)
4682 if (GET_MODE (tem) != target_mode)
4684 convert_move (target, tem, 0);
4685 tem = target;
4687 else if (!subtarget)
4689 emit_move_insn (target, tem);
4690 tem = target;
4693 else
4694 delete_insns_since (last);
4696 return tem;
4699 /* Like emit_store_flag, but always succeeds. */
4701 rtx
4702 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4703 rtx target;
4704 enum rtx_code code;
4705 rtx op0, op1;
4706 enum machine_mode mode;
4707 int unsignedp;
4708 int normalizep;
4710 rtx tem, label;
4712 /* First see if emit_store_flag can do the job. */
4713 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4714 if (tem != 0)
4715 return tem;
4717 if (normalizep == 0)
4718 normalizep = 1;
4720 /* If this failed, we have to do this with set/compare/jump/set code. */
4722 if (GET_CODE (target) != REG
4723 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4724 target = gen_reg_rtx (GET_MODE (target));
4726 emit_move_insn (target, const1_rtx);
4727 label = gen_label_rtx ();
4728 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4729 NULL_RTX, label);
4731 emit_move_insn (target, const0_rtx);
4732 emit_label (label);
4734 return target;
4737 /* Perform possibly multi-word comparison and conditional jump to LABEL
4738 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4740 The algorithm is based on the code in expr.c:do_jump.
4742 Note that this does not perform a general comparison. Only variants
4743 generated within expmed.c are correctly handled; others abort (but could
4744 be handled if needed). */
4746 static void
4747 do_cmp_and_jump (arg1, arg2, op, mode, label)
4748 rtx arg1, arg2, label;
4749 enum rtx_code op;
4750 enum machine_mode mode;
4752 /* If this mode is an integer too wide to compare properly,
4753 compare word by word. Rely on cse to optimize constant cases. */
4755 if (GET_MODE_CLASS (mode) == MODE_INT
4756 && ! can_compare_p (op, mode, ccp_jump))
4758 rtx label2 = gen_label_rtx ();
4760 switch (op)
4762 case LTU:
4763 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4764 break;
4766 case LEU:
4767 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4768 break;
4770 case LT:
4771 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4772 break;
4774 case GT:
4775 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4776 break;
4778 case GE:
4779 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4780 break;
4782 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4783 those are the only equality operations we do. */
4784 case EQ:
4785 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4786 abort ();
4787 do_jump_by_parts_equality_rtx (arg1, label2, label);
4788 break;
4790 case NE:
4791 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4792 abort ();
4793 do_jump_by_parts_equality_rtx (arg1, label, label2);
4794 break;
4796 default:
4797 abort ();
4800 emit_label (label2);
4802 else
4803 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);