gcc/expmed.c
/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"
static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx));
static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            rtx, int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
                             int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
                                 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT, int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
                                     enum machine_mode, rtx));
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}
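/* For example, on a target whose shift pattern matches every count,
   shift_cost[m] ends up as the rtx_cost of (ashift reg (const_int m)),
   while a count the pattern rejects keeps the sentinel cost 32000 and
   is therefore never chosen when synthesizing multiplications from
   shifts and adds later in this file.  */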
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
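/* For example, negate_rtx (SImode, const1_rtx) folds at expand time to
   (const_int -1) via simplify_unary_operation; only when no folding is
   possible (say, X is a pseudo register) is an actual NEG emitted
   through expand_unop.  */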
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (pattern, opno)
     enum extraction_pattern pattern;
     int opno;
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
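/* For example, on a port whose insv pattern declares operand 3 in
   SImode, mode_for_extraction (EP_insv, 3) returns SImode; if the
   operand is declared without a mode (VOIDmode), word_mode is returned
   instead, and on a port with no insv at all the result is
   MAX_MACHINE_MODE.  */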
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           total_size);
        }
      return value;
    }
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart (word_mode, value);
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
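/* A worked example of the simple-store fast path near the top of
   store_bit_field: storing an SImode value at bit 0 of a 32-bit-aligned
   SImode MEM satisfies the bitpos == 0 && bitsize == GET_MODE_BITSIZE
   test, so the whole operation reduces to a single emit_move_insn and
   none of the insv or shift-and-mask machinery is used.  */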
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     rtx value;
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
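/* A worked example, assuming a little-endian target: storing the 3-bit
   constant 5 at BITPOS 4 of a QImode MEM computes
   lshift_value (QImode, 5, 4, 3) == 0x50 and
   mask_rtx (QImode, 4, 3, 1) == 0x8f, so the RTL emitted amounts to
   op0 = (op0 & 0x8f) | 0x50.  */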
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
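/* A worked example, assuming 32-bit words: a 10-bit field starting at
   bit 28 of a word-aligned object is handled by two trips through the
   loop above, 4 bits at THISPOS 28 of word 0, then the remaining
   6 bits at THISPOS 0 of word 1; each piece goes back through
   store_fixed_bit_field, which no longer needs to split it.  */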
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  mode1 = (VECTOR_MODE_P (tmode)
           ? mode
           : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0)))
        && GET_MODE_SIZE (mode1) != 0
        && byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
                   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */
  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
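/* A worked example of the SUBREG fast path above: extracting an
   aligned 32-bit field from a DImode pseudo (assuming 32-bit words)
   needs no shifting or masking at all; the field is referenced
   directly as (subreg:SI (reg:DI ...)).  Only narrower or misaligned
   fields fall through to the extv/extzv patterns or to the
   shift-and-mask fallback in extract_fixed_bit_field.  */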
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp)
     enum machine_mode tmode;
     rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
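/* A worked example of the unsigned case, assuming a little-endian
   target: extracting a 4-bit unsigned field at BITPOS 8 of an SImode
   register emits a logical right shift by 8 followed by an AND with
   mask_rtx (SImode, 0, 4, 0) == 0xf.  The signed case instead shifts
   the field's msb up to the mode's msb and arithmetic-shifts it back
   down, obtaining sign extension for free.  */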
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
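/* For example, mask_rtx (SImode, 8, 4, 0) yields the constant
   0x00000f00 (four ones above eight zeros), and the same call with
   COMPLEMENT nonzero yields 0xfffff0ff.  MASKHIGH only matters when
   BITPOS + BITSIZE exceeds HOST_BITS_PER_WIDE_INT and a CONST_DOUBLE
   must be built.  */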
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
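/* For example, lshift_value (SImode, GEN_INT (0x25), 8, 4) truncates
   0x25 to its low 4 bits (0x5) and shifts left by 8, giving
   (const_int 0x500); HIGH receives whatever bits cross the
   HOST_WIDE_INT boundary in wider modes.  */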
1755 /* Extract a bit field that is split across two words
1756 and return an RTX for the result.
1758 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1759 BITSIZE is the field width; BITPOS is the position of its first bit within the word.
1760 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend. */
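/* For example (illustrative): with 32-bit words, a 12-bit field at
BITPOS 26 is extracted as two parts: 6 bits at position 26 of the
first word, then 6 bits at position 0 of the second. The loop below
shifts each part into place and combines them with IOR. */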
1762 static rtx
1763 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1764 rtx op0;
1765 unsigned HOST_WIDE_INT bitsize, bitpos;
1766 int unsignedp;
1768 unsigned int unit;
1769 unsigned int bitsdone = 0;
1770 rtx result = NULL_RTX;
1771 int first = 1;
1773 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1774 much at a time. */
1775 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1776 unit = BITS_PER_WORD;
1777 else
1778 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1780 while (bitsdone < bitsize)
1782 unsigned HOST_WIDE_INT thissize;
1783 rtx part, word;
1784 unsigned HOST_WIDE_INT thispos;
1785 unsigned HOST_WIDE_INT offset;
1787 offset = (bitpos + bitsdone) / unit;
1788 thispos = (bitpos + bitsdone) % unit;
1790 /* THISSIZE must not overrun a word boundary. Otherwise,
1791 extract_fixed_bit_field will call us again, and we will mutually
1792 recurse forever. */
1793 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1794 thissize = MIN (thissize, unit - thispos);
1796 /* If OP0 is a register, then handle OFFSET here.
1798 When handling multiword bitfields, extract_bit_field may pass
1799 down a word_mode SUBREG of a larger REG for a bitfield that actually
1800 crosses a word boundary. Thus, for a SUBREG, we must find
1801 the current word starting from the base register. */
1802 if (GET_CODE (op0) == SUBREG)
1804 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1805 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1806 GET_MODE (SUBREG_REG (op0)));
1807 offset = 0;
1809 else if (GET_CODE (op0) == REG)
1811 word = operand_subword_force (op0, offset, GET_MODE (op0));
1812 offset = 0;
1814 else
1815 word = op0;
1817 /* Extract the parts in bit-counting order,
1818 whose meaning is determined by BYTES_PER_UNIT.
1819 OFFSET is in UNITs, and UNIT is in bits.
1820 extract_fixed_bit_field wants offset in bytes. */
1821 part = extract_fixed_bit_field (word_mode, word,
1822 offset * unit / BITS_PER_UNIT,
1823 thissize, thispos, 0, 1);
1824 bitsdone += thissize;
1826 /* Shift this part into place for the result. */
1827 if (BYTES_BIG_ENDIAN)
1829 if (bitsize != bitsdone)
1830 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1831 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1833 else
1835 if (bitsdone != thissize)
1836 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1837 build_int_2 (bitsdone - thissize, 0), 0, 1);
1840 if (first)
1841 result = part;
1842 else
1843 /* Combine the parts with bitwise or. This works
1844 because we extracted each part as an unsigned bit field. */
1845 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1846 OPTAB_LIB_WIDEN);
1848 first = 0;
1851 /* Unsigned bit field: we are done. */
1852 if (unsignedp)
1853 return result;
1854 /* Signed bit field: sign-extend with two arithmetic shifts. */
1855 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1856 build_int_2 (BITS_PER_WORD - bitsize, 0),
1857 NULL_RTX, 0);
1858 return expand_shift (RSHIFT_EXPR, word_mode, result,
1859 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1862 /* Add INC into TARGET. */
1864 void
1865 expand_inc (target, inc)
1866 rtx target, inc;
1868 rtx value = expand_binop (GET_MODE (target), add_optab,
1869 target, inc,
1870 target, 0, OPTAB_LIB_WIDEN);
1871 if (value != target)
1872 emit_move_insn (target, value);
1875 /* Subtract DEC from TARGET. */
1877 void
1878 expand_dec (target, dec)
1879 rtx target, dec;
1881 rtx value = expand_binop (GET_MODE (target), sub_optab,
1882 target, dec,
1883 target, 0, OPTAB_LIB_WIDEN);
1884 if (value != target)
1885 emit_move_insn (target, value);
1888 /* Output a shift instruction for expression code CODE,
1889 with SHIFTED being the rtx for the value to shift,
1890 and AMOUNT the tree for the amount to shift by.
1891 Store the result in the rtx TARGET, if that is convenient.
1892 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1893 Return the rtx for where the value is. */
1895 rtx
1896 expand_shift (code, mode, shifted, amount, target, unsignedp)
1897 enum tree_code code;
1898 enum machine_mode mode;
1899 rtx shifted;
1900 tree amount;
1901 rtx target;
1902 int unsignedp;
1904 rtx op1, temp = 0;
1905 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1906 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1907 int try;
1909 /* We used to detect shift counts computed by NEGATE_EXPR
1910 and shift in the other direction, but that does not work
1911 on all machines. */
1913 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1915 #ifdef SHIFT_COUNT_TRUNCATED
1916 if (SHIFT_COUNT_TRUNCATED)
1918 if (GET_CODE (op1) == CONST_INT
1919 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1920 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1921 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1922 % GET_MODE_BITSIZE (mode));
1923 else if (GET_CODE (op1) == SUBREG
1924 && subreg_lowpart_p (op1))
1925 op1 = SUBREG_REG (op1);
1927 #endif
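/* For example, on a SHIFT_COUNT_TRUNCATED target the truncation above
reduces a constant SImode shift count of 33 to 33 % 32 == 1, matching
what such hardware would do with the full count. */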
1929 if (op1 == const0_rtx)
1930 return shifted;
1932 for (try = 0; temp == 0 && try < 3; try++)
1934 enum optab_methods methods;
1936 if (try == 0)
1937 methods = OPTAB_DIRECT;
1938 else if (try == 1)
1939 methods = OPTAB_WIDEN;
1940 else
1941 methods = OPTAB_LIB_WIDEN;
1943 if (rotate)
1945 /* Widening does not work for rotation. */
1946 if (methods == OPTAB_WIDEN)
1947 continue;
1948 else if (methods == OPTAB_LIB_WIDEN)
1950 /* If we have been unable to open-code this by a rotation,
1951 do it as the IOR of two shifts. I.e., to rotate A
1952 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1953 where C is the bitsize of A.
1955 It is theoretically possible that the target machine might
1956 not be able to perform either shift and hence we would
1957 be making two libcalls rather than just the one for the
1958 shift (similarly if IOR could not be done). We will allow
1959 this extremely unlikely lossage to avoid complicating the
1960 code below. */
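/* For example, rotating A left by 10 in SImode is emitted as
(A << 10) | ((unsigned) A >> 22). */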
1962 rtx subtarget = target == shifted ? 0 : target;
1963 rtx temp1;
1964 tree type = TREE_TYPE (amount);
1965 tree new_amount = make_tree (type, op1);
1966 tree other_amount
1967 = fold (build (MINUS_EXPR, type,
1968 convert (type,
1969 build_int_2 (GET_MODE_BITSIZE (mode),
1970 0)),
1971 amount));
1973 shifted = force_reg (mode, shifted);
1975 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1976 mode, shifted, new_amount, subtarget, 1);
1977 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1978 mode, shifted, other_amount, 0, 1);
1979 return expand_binop (mode, ior_optab, temp, temp1, target,
1980 unsignedp, methods);
1983 temp = expand_binop (mode,
1984 left ? rotl_optab : rotr_optab,
1985 shifted, op1, target, unsignedp, methods);
1987 /* If we don't have the rotate, but we are rotating by a constant
1988 that is in range, try a rotate in the opposite direction. */
1990 if (temp == 0 && GET_CODE (op1) == CONST_INT
1991 && INTVAL (op1) > 0
1992 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1993 temp = expand_binop (mode,
1994 left ? rotr_optab : rotl_optab,
1995 shifted,
1996 GEN_INT (GET_MODE_BITSIZE (mode)
1997 - INTVAL (op1)),
1998 target, unsignedp, methods);
2000 else if (unsignedp)
2001 temp = expand_binop (mode,
2002 left ? ashl_optab : lshr_optab,
2003 shifted, op1, target, unsignedp, methods);
2005 /* Do arithmetic shifts.
2006 Also, if we are going to widen the operand, we can just as well
2007 use an arithmetic right-shift instead of a logical one. */
2008 if (temp == 0 && ! rotate
2009 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2011 enum optab_methods methods1 = methods;
2013 /* If trying to widen a logical shift to an arithmetic shift,
2014 don't accept an arithmetic shift of the same size. */
2015 if (unsignedp)
2016 methods1 = OPTAB_MUST_WIDEN;
2018 /* Arithmetic shift */
2020 temp = expand_binop (mode,
2021 left ? ashl_optab : ashr_optab,
2022 shifted, op1, target, unsignedp, methods1);
2025 /* We used to try extzv here for logical right shifts, but that was
2026 only useful for one machine, the VAX, and caused poor code
2027 generation there for lshrdi3, so the code was deleted and a
2028 define_expand for lshrsi3 was added to vax.md. */
2031 if (temp == 0)
2032 abort ();
2033 return temp;
2036 enum alg_code { alg_zero, alg_m, alg_shift,
2037 alg_add_t_m2, alg_sub_t_m2,
2038 alg_add_factor, alg_sub_factor,
2039 alg_add_t2_m, alg_sub_t2_m,
2040 alg_add, alg_subtract, alg_factor, alg_shiftop };
2042 /* This structure records a sequence of operations.
2043 `ops' is the number of operations recorded.
2044 `cost' is their total cost.
2045 The operations are stored in `op' and the corresponding
2046 logarithms of the integer coefficients in `log'.
2048 These are the operations:
2049 alg_zero total := 0;
2050 alg_m total := multiplicand;
2051 alg_shift total := total * coeff
2052 alg_add_t_m2 total := total + multiplicand * coeff;
2053 alg_sub_t_m2 total := total - multiplicand * coeff;
2054 alg_add_factor total := total * coeff + total;
2055 alg_sub_factor total := total * coeff - total;
2056 alg_add_t2_m total := total * coeff + multiplicand;
2057 alg_sub_t2_m total := total * coeff - multiplicand;
2059 The first operation must be either alg_zero or alg_m. */
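/* For example, multiplying by 10 can be recorded as the sequence below
(one possible decomposition, shown for illustration only; synth_mult
picks whatever is cheapest on the actual target):
alg_m        total := multiplicand;                  (total = x)
alg_add_t2_m total := total*4 + multiplicand;        (total = 5x, log = 2)
alg_shift    total := total*2;                       (total = 10x, log = 1) */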
2061 struct algorithm
2063 short cost;
2064 short ops;
2065 /* The size of the OP and LOG fields is not directly related to the
2066 word size, but the worst-case algorithms arise when we have few
2067 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2068 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2069 wordsize operations in total. */
2070 enum alg_code op[MAX_BITS_PER_WORD];
2071 char log[MAX_BITS_PER_WORD];
2074 static void synth_mult PARAMS ((struct algorithm *,
2075 unsigned HOST_WIDE_INT,
2076 int));
2077 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2078 int, int,
2079 unsigned HOST_WIDE_INT *,
2080 int *, int *));
2081 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2082 int));
2083 /* Compute the best algorithm for multiplying by T and store it in *ALG_OUT.
2084 The algorithm must cost less than COST_LIMIT.
2085 If alg_out->cost >= COST_LIMIT, no algorithm was found and all
2086 other fields of *ALG_OUT are undefined. */
2088 static void
2089 synth_mult (alg_out, t, cost_limit)
2090 struct algorithm *alg_out;
2091 unsigned HOST_WIDE_INT t;
2092 int cost_limit;
2094 int m;
2095 struct algorithm *alg_in, *best_alg;
2096 int cost;
2097 unsigned HOST_WIDE_INT q;
2099 /* Indicate that no algorithm is yet found. If no algorithm
2100 is found, this value will be returned and will indicate failure. */
2101 alg_out->cost = cost_limit;
2103 if (cost_limit <= 0)
2104 return;
2106 /* t == 1 can be done at zero cost. */
2107 if (t == 1)
2109 alg_out->ops = 1;
2110 alg_out->cost = 0;
2111 alg_out->op[0] = alg_m;
2112 return;
2115 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2116 fail now. */
2117 if (t == 0)
2119 if (zero_cost >= cost_limit)
2120 return;
2121 else
2123 alg_out->ops = 1;
2124 alg_out->cost = zero_cost;
2125 alg_out->op[0] = alg_zero;
2126 return;
2130 /* We'll be needing a couple extra algorithm structures now. */
2132 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2133 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2135 /* If we have a group of zero bits at the low-order part of T, try
2136 multiplying by the remaining bits and then doing a shift. */
2138 if ((t & 1) == 0)
2140 m = floor_log2 (t & -t); /* m = number of low zero bits */
2141 if (m < BITS_PER_WORD)
2143 q = t >> m;
2144 cost = shift_cost[m];
2145 synth_mult (alg_in, q, cost_limit - cost);
2147 cost += alg_in->cost;
2148 if (cost < cost_limit)
2150 struct algorithm *x;
2151 x = alg_in, alg_in = best_alg, best_alg = x;
2152 best_alg->log[best_alg->ops] = m;
2153 best_alg->op[best_alg->ops] = alg_shift;
2154 cost_limit = cost;
2159 /* If we have an odd number, add or subtract one. */
2160 if ((t & 1) != 0)
2162 unsigned HOST_WIDE_INT w;
2164 for (w = 1; (w & t) != 0; w <<= 1)
2166 /* If T was -1, then W will be zero after the loop. This is another
2167 case where T ends with ...111. Handling it by multiplying by (T + 1)
2168 and subtracting 1 produces slightly better code, and selects an
2169 algorithm much faster, than treating it like the ...0111 case
2170 below. */
2171 if (w == 0
2172 || (w > 2
2173 /* Reject the case where t is 3;
2174 we prefer addition in that case. */
2175 && t != 3))
2177 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2179 cost = add_cost;
2180 synth_mult (alg_in, t + 1, cost_limit - cost);
2182 cost += alg_in->cost;
2183 if (cost < cost_limit)
2185 struct algorithm *x;
2186 x = alg_in, alg_in = best_alg, best_alg = x;
2187 best_alg->log[best_alg->ops] = 0;
2188 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2189 cost_limit = cost;
2192 else
2194 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2196 cost = add_cost;
2197 synth_mult (alg_in, t - 1, cost_limit - cost);
2199 cost += alg_in->cost;
2200 if (cost < cost_limit)
2202 struct algorithm *x;
2203 x = alg_in, alg_in = best_alg, best_alg = x;
2204 best_alg->log[best_alg->ops] = 0;
2205 best_alg->op[best_alg->ops] = alg_add_t_m2;
2206 cost_limit = cost;
2211 /* Look for factors of t of the form
2212 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2213 If we find such a factor, we can multiply by t using an algorithm that
2214 multiplies by q, shift the result by m and add/subtract it to itself.
2216 We search for large factors first and loop down, even if large factors
2217 are less probable than small; if we find a large factor we will find a
2218 good sequence quickly, and therefore be able to prune (by decreasing
2219 COST_LIMIT) the search. */
2221 for (m = floor_log2 (t - 1); m >= 2; m--)
2223 unsigned HOST_WIDE_INT d;
2225 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2226 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2228 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2229 synth_mult (alg_in, t / d, cost_limit - cost);
2231 cost += alg_in->cost;
2232 if (cost < cost_limit)
2234 struct algorithm *x;
2235 x = alg_in, alg_in = best_alg, best_alg = x;
2236 best_alg->log[best_alg->ops] = m;
2237 best_alg->op[best_alg->ops] = alg_add_factor;
2238 cost_limit = cost;
2240 /* Other factors will have been taken care of in the recursion. */
2241 break;
2244 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2245 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2247 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2248 synth_mult (alg_in, t / d, cost_limit - cost);
2250 cost += alg_in->cost;
2251 if (cost < cost_limit)
2253 struct algorithm *x;
2254 x = alg_in, alg_in = best_alg, best_alg = x;
2255 best_alg->log[best_alg->ops] = m;
2256 best_alg->op[best_alg->ops] = alg_sub_factor;
2257 cost_limit = cost;
2259 break;
2263 /* Try shift-and-add (load effective address) instructions,
2264 i.e. do a*3, a*5, a*9. */
2265 if ((t & 1) != 0)
2267 q = t - 1;
2268 q = q & -q;
2269 m = exact_log2 (q);
2270 if (m >= 0 && m < BITS_PER_WORD)
2272 cost = shiftadd_cost[m];
2273 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2275 cost += alg_in->cost;
2276 if (cost < cost_limit)
2278 struct algorithm *x;
2279 x = alg_in, alg_in = best_alg, best_alg = x;
2280 best_alg->log[best_alg->ops] = m;
2281 best_alg->op[best_alg->ops] = alg_add_t2_m;
2282 cost_limit = cost;
2286 q = t + 1;
2287 q = q & -q;
2288 m = exact_log2 (q);
2289 if (m >= 0 && m < BITS_PER_WORD)
2291 cost = shiftsub_cost[m];
2292 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2294 cost += alg_in->cost;
2295 if (cost < cost_limit)
2297 struct algorithm *x;
2298 x = alg_in, alg_in = best_alg, best_alg = x;
2299 best_alg->log[best_alg->ops] = m;
2300 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2301 cost_limit = cost;
2306 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2307 we have not found any algorithm. */
2308 if (cost_limit == alg_out->cost)
2309 return;
2311 /* If the sequence we found is too long for `struct algorithm'
2312 to record, make this search fail. */
2313 if (best_alg->ops == MAX_BITS_PER_WORD)
2314 return;
2316 /* Copy the algorithm from temporary space to the space at alg_out.
2317 We avoid using structure assignment because the majority of
2318 best_alg is normally undefined, and this is a critical function. */
2319 alg_out->ops = best_alg->ops + 1;
2320 alg_out->cost = cost_limit;
2321 memcpy (alg_out->op, best_alg->op,
2322 alg_out->ops * sizeof *alg_out->op);
2323 memcpy (alg_out->log, best_alg->log,
2324 alg_out->ops * sizeof *alg_out->log);
2327 /* Perform a multiplication and return an rtx for the result.
2328 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2329 TARGET is a suggestion for where to store the result (an rtx).
2331 We check specially for a constant integer as OP1.
2332 If you want this check for OP0 as well, then before calling
2333 you should swap the two operands if OP0 would be constant. */
2335 rtx
2336 expand_mult (mode, op0, op1, target, unsignedp)
2337 enum machine_mode mode;
2338 rtx op0, op1, target;
2339 int unsignedp;
2341 rtx const_op1 = op1;
2343 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2344 less than or equal in size to `unsigned int', this doesn't matter.
2345 If the mode is larger than `unsigned int', then synth_mult works only
2346 if the constant value exactly fits in an `unsigned int' without any
2347 truncation. This means that multiplying by negative values does
2348 not work; results are off by 2^32 on a 32-bit machine. */
2350 /* If we are multiplying in DImode, it may still be a win
2351 to try to work with shifts and adds. */
2352 if (GET_CODE (op1) == CONST_DOUBLE
2353 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2354 && HOST_BITS_PER_INT >= BITS_PER_WORD
2355 && CONST_DOUBLE_HIGH (op1) == 0)
2356 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2357 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2358 && GET_CODE (op1) == CONST_INT
2359 && INTVAL (op1) < 0)
2360 const_op1 = 0;
2362 /* We used to test optimize here, on the grounds that it's better to
2363 produce a smaller program when -O is not used.
2364 But this causes such a terrible slowdown sometimes
2365 that it seems better to use synth_mult always. */
2367 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2368 && (unsignedp || ! flag_trapv))
2370 struct algorithm alg;
2371 struct algorithm alg2;
2372 HOST_WIDE_INT val = INTVAL (op1);
2373 HOST_WIDE_INT val_so_far;
2374 rtx insn;
2375 int mult_cost;
2376 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2378 /* op0 must be a register to make mult_cost match the precomputed
2379 shiftadd_cost array. */
2380 op0 = force_reg (mode, op0);
2382 /* Try to do the computation three ways: multiply by the negative of OP1
2383 and then negate, do the multiplication directly, or do multiplication
2384 by OP1 - 1. */
2386 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2387 mult_cost = MIN (12 * add_cost, mult_cost);
2389 synth_mult (&alg, val, mult_cost);
2391 /* This works only if the inverted value actually fits in an
2392 `unsigned int'. */
2393 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2395 synth_mult (&alg2, - val,
2396 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2397 if (alg2.cost + negate_cost < alg.cost)
2398 alg = alg2, variant = negate_variant;
2401 /* This proves very useful for division-by-constant. */
2402 synth_mult (&alg2, val - 1,
2403 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2404 if (alg2.cost + add_cost < alg.cost)
2405 alg = alg2, variant = add_variant;
2407 if (alg.cost < mult_cost)
2409 /* We found something cheaper than a multiply insn. */
2410 int opno;
2411 rtx accum, tem;
2412 enum machine_mode nmode;
2414 op0 = protect_from_queue (op0, 0);
2416 /* Avoid referencing memory over and over.
2417 For speed, but also for correctness when mem is volatile. */
2418 if (GET_CODE (op0) == MEM)
2419 op0 = force_reg (mode, op0);
2421 /* ACCUM starts out either as OP0 or as a zero, depending on
2422 the first operation. */
2424 if (alg.op[0] == alg_zero)
2426 accum = copy_to_mode_reg (mode, const0_rtx);
2427 val_so_far = 0;
2429 else if (alg.op[0] == alg_m)
2431 accum = copy_to_mode_reg (mode, op0);
2432 val_so_far = 1;
2434 else
2435 abort ();
2437 for (opno = 1; opno < alg.ops; opno++)
2439 int log = alg.log[opno];
2440 int preserve = preserve_subexpressions_p ();
2441 rtx shift_subtarget = preserve ? 0 : accum;
2442 rtx add_target
2443 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2444 && ! preserve)
2445 ? target : 0;
2446 rtx accum_target = preserve ? 0 : accum;
2448 switch (alg.op[opno])
2450 case alg_shift:
2451 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2452 build_int_2 (log, 0), NULL_RTX, 0);
2453 val_so_far <<= log;
2454 break;
2456 case alg_add_t_m2:
2457 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2458 build_int_2 (log, 0), NULL_RTX, 0);
2459 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2460 add_target
2461 ? add_target : accum_target);
2462 val_so_far += (HOST_WIDE_INT) 1 << log;
2463 break;
2465 case alg_sub_t_m2:
2466 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2467 build_int_2 (log, 0), NULL_RTX, 0);
2468 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2469 add_target
2470 ? add_target : accum_target);
2471 val_so_far -= (HOST_WIDE_INT) 1 << log;
2472 break;
2474 case alg_add_t2_m:
2475 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2476 build_int_2 (log, 0), shift_subtarget,
2477 0);
2478 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2479 add_target
2480 ? add_target : accum_target);
2481 val_so_far = (val_so_far << log) + 1;
2482 break;
2484 case alg_sub_t2_m:
2485 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2486 build_int_2 (log, 0), shift_subtarget,
2487 0);
2488 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2489 add_target
2490 ? add_target : accum_target);
2491 val_so_far = (val_so_far << log) - 1;
2492 break;
2494 case alg_add_factor:
2495 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2496 build_int_2 (log, 0), NULL_RTX, 0);
2497 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2498 add_target
2499 ? add_target : accum_target);
2500 val_so_far += val_so_far << log;
2501 break;
2503 case alg_sub_factor:
2504 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2505 build_int_2 (log, 0), NULL_RTX, 0);
2506 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2507 (add_target ? add_target
2508 : preserve ? 0 : tem));
2509 val_so_far = (val_so_far << log) - val_so_far;
2510 break;
2512 default:
2513 abort ();
2516 /* Write a REG_EQUAL note on the last insn so that we can cse
2517 multiplication sequences. Note that if ACCUM is a SUBREG,
2518 we've set the inner register and must properly indicate
2519 that. */
2521 tem = op0, nmode = mode;
2522 if (GET_CODE (accum) == SUBREG)
2524 nmode = GET_MODE (SUBREG_REG (accum));
2525 tem = gen_lowpart (nmode, op0);
2528 insn = get_last_insn ();
2529 set_unique_reg_note (insn,
2530 REG_EQUAL,
2531 gen_rtx_MULT (nmode, tem,
2532 GEN_INT (val_so_far)));
2535 if (variant == negate_variant)
2537 val_so_far = - val_so_far;
2538 accum = expand_unop (mode, neg_optab, accum, target, 0);
2540 else if (variant == add_variant)
2542 val_so_far = val_so_far + 1;
2543 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2546 if (val != val_so_far)
2547 abort ();
2549 return accum;
2553 /* This used to use umul_optab if unsigned, but for non-widening multiply
2554 there is no difference between signed and unsigned. */
2555 op0 = expand_binop (mode,
2556 ! unsignedp
2557 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2558 ? smulv_optab : smul_optab,
2559 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2560 if (op0 == 0)
2561 abort ();
2562 return op0;
2565 /* Return the smallest n such that 2**n >= X. */
2567 int
2568 ceil_log2 (x)
2569 unsigned HOST_WIDE_INT x;
2571 return floor_log2 (x - 1) + 1;
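/* E.g. ceil_log2 (4) == 2 and ceil_log2 (5) == 3. */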
2574 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2575 replace division by D, and put the least significant N bits of the result
2576 in *MULTIPLIER_PTR and return the most significant bit.
2578 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2579 needed precision is in PRECISION (should be <= N).
2581 PRECISION should be as small as possible so this function can choose
2582 the multiplier more freely.
2584 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2585 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2587 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2588 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
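/* A worked instance of the above (constants derived from the
algorithm, shown for illustration): for D == 3 with N == PRECISION ==
32, this chooses multiplier 0xaaaaaaab with *POST_SHIFT_PTR == 1, so
x / 3 == ((x * 0xaaaaaaab) >> 32) >> 1 for all 32-bit unsigned x. */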
2590 static
2591 unsigned HOST_WIDE_INT
2592 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2593 unsigned HOST_WIDE_INT d;
2594 int n;
2595 int precision;
2596 unsigned HOST_WIDE_INT *multiplier_ptr;
2597 int *post_shift_ptr;
2598 int *lgup_ptr;
2600 HOST_WIDE_INT mhigh_hi, mlow_hi;
2601 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2602 int lgup, post_shift;
2603 int pow, pow2;
2604 unsigned HOST_WIDE_INT nl, dummy1;
2605 HOST_WIDE_INT nh, dummy2;
2607 /* lgup = ceil(log2(divisor)); */
2608 lgup = ceil_log2 (d);
2610 if (lgup > n)
2611 abort ();
2613 pow = n + lgup;
2614 pow2 = n + lgup - precision;
2616 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2618 /* We could handle this with some effort, but this case is much better
2619 handled directly with a scc insn, so rely on the caller using that. */
2620 abort ();
2623 /* mlow = 2^(N + lgup)/d */
2624 if (pow >= HOST_BITS_PER_WIDE_INT)
2626 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2627 nl = 0;
2629 else
2631 nh = 0;
2632 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2634 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2635 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2637 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2638 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2639 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2640 else
2641 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2642 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2643 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2645 if (mhigh_hi && nh - d >= d)
2646 abort ();
2647 if (mhigh_hi > 1 || mlow_hi > 1)
2648 abort ();
2649 /* assert that mlow < mhigh. */
2650 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2651 abort ();
2653 /* If precision == N, then mlow, mhigh exceed 2^N
2654 (but they do not exceed 2^(N+1)). */
2656 /* Reduce to lowest terms */
2657 for (post_shift = lgup; post_shift > 0; post_shift--)
2659 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2660 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2661 if (ml_lo >= mh_lo)
2662 break;
2664 mlow_hi = 0;
2665 mlow_lo = ml_lo;
2666 mhigh_hi = 0;
2667 mhigh_lo = mh_lo;
2670 *post_shift_ptr = post_shift;
2671 *lgup_ptr = lgup;
2672 if (n < HOST_BITS_PER_WIDE_INT)
2674 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2675 *multiplier_ptr = mhigh_lo & mask;
2676 return mhigh_lo >= mask;
2678 else
2680 *multiplier_ptr = mhigh_lo;
2681 return mhigh_hi;
2685 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2686 congruent to 1 (mod 2**N). */
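/* For example, invert_mod2n (3, 8) == 171, since
3 * 171 == 513 == 2 * 256 + 1. */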
2688 static unsigned HOST_WIDE_INT
2689 invert_mod2n (x, n)
2690 unsigned HOST_WIDE_INT x;
2691 int n;
2693 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2695 /* The algorithm notes that the choice y = x satisfies
2696 x*y == 1 mod 2^3, since x is assumed odd.
2697 Each iteration doubles the number of bits of significance in y. */
2699 unsigned HOST_WIDE_INT mask;
2700 unsigned HOST_WIDE_INT y = x;
2701 int nbit = 3;
2703 mask = (n == HOST_BITS_PER_WIDE_INT
2704 ? ~(unsigned HOST_WIDE_INT) 0
2705 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2707 while (nbit < n)
2709 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2710 nbit *= 2;
2712 return y;
2715 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2716 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2717 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2718 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2719 become signed.
2721 The result is put in TARGET if that is convenient.
2723 MODE is the mode of operation. */
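/* A sketch of the identity used (mod 2**N, where N is the bitsize of
MODE): reading an N-bit value as signed subtracts 2**N exactly when
its msb is set, so the signed high part of OP0 x OP1 equals the
unsigned high part minus OP1 when OP0 is negative and minus OP0 when
OP1 is negative. The code below forms each correction term as an
arithmetic right shift by N - 1 (an all-ones mask for a negative
operand) ANDed with the other operand, then adds or subtracts it
according to the direction of the adjustment. */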
2725 rtx
2726 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2727 enum machine_mode mode;
2728 rtx adj_operand, op0, op1, target;
2729 int unsignedp;
2731 rtx tem;
2732 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2734 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2735 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2736 NULL_RTX, 0);
2737 tem = expand_and (mode, tem, op1, NULL_RTX);
2738 adj_operand
2739 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2740 adj_operand);
2742 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2743 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2744 NULL_RTX, 0);
2745 tem = expand_and (mode, tem, op0, NULL_RTX);
2746 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2747 target);
2749 return target;
2752 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2753 in TARGET if that is convenient, and return where the result is. If the
2754 operation cannot be performed, 0 is returned.
2756 MODE is the mode of operation and result.
2758 UNSIGNEDP nonzero means unsigned multiply.
2760 MAX_COST is the total allowed cost for the expanded RTL. */
2762 rtx
2763 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2764 enum machine_mode mode;
2765 rtx op0, target;
2766 unsigned HOST_WIDE_INT cnst1;
2767 int unsignedp;
2768 int max_cost;
2770 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2771 optab mul_highpart_optab;
2772 optab moptab;
2773 rtx tem;
2774 int size = GET_MODE_BITSIZE (mode);
2775 rtx op1, wide_op1;
2777 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2778 if (size > HOST_BITS_PER_WIDE_INT)
2779 abort ();
2781 op1 = gen_int_mode (cnst1, mode);
2783 wide_op1
2784 = immed_double_const (cnst1,
2785 (unsignedp
2786 ? (HOST_WIDE_INT) 0
2787 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2788 wider_mode);
2790 /* expand_mult handles constant multiplication of word_mode
2791 or narrower. It does a poor job for large modes. */
2792 if (size < BITS_PER_WORD
2793 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2795 /* We have to do this, since expand_binop doesn't do conversion for
2796 multiply. Maybe change expand_binop to handle widening multiply? */
2797 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2799 /* We know that this can't have signed overflow, so pretend this is
2800 an unsigned multiply. */
2801 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2802 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2803 build_int_2 (size, 0), NULL_RTX, 1);
2804 return convert_modes (mode, wider_mode, tem, unsignedp);
2807 if (target == 0)
2808 target = gen_reg_rtx (mode);
2810 /* Firstly, try using a multiplication insn that only generates the needed
2811 high part of the product, and in the sign flavor of unsignedp. */
2812 if (mul_highpart_cost[(int) mode] < max_cost)
2814 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2815 target = expand_binop (mode, mul_highpart_optab,
2816 op0, op1, target, unsignedp, OPTAB_DIRECT);
2817 if (target)
2818 return target;
2821 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2822 Need to adjust the result after the multiplication. */
2823 if (size - 1 < BITS_PER_WORD
2824 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2825 < max_cost))
2827 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2828 target = expand_binop (mode, mul_highpart_optab,
2829 op0, op1, target, unsignedp, OPTAB_DIRECT);
2830 if (target)
2831 /* We used the wrong signedness. Adjust the result. */
2832 return expand_mult_highpart_adjust (mode, target, op0,
2833 op1, target, unsignedp);
2836 /* Try widening multiplication. */
2837 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2838 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2839 && mul_widen_cost[(int) wider_mode] < max_cost)
2841 op1 = force_reg (mode, op1);
2842 goto try;
2845 /* Try widening the mode and perform a non-widening multiplication. */
2846 moptab = smul_optab;
2847 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2848 && size - 1 < BITS_PER_WORD
2849 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2851 op1 = wide_op1;
2852 goto try;
2855 /* Try widening multiplication of opposite signedness, and adjust. */
2856 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2857 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2858 && size - 1 < BITS_PER_WORD
2859 && (mul_widen_cost[(int) wider_mode]
2860 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2862 rtx regop1 = force_reg (mode, op1);
2863 tem = expand_binop (wider_mode, moptab, op0, regop1,
2864 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2865 if (tem != 0)
2867 /* Extract the high half of the just generated product. */
2868 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2869 build_int_2 (size, 0), NULL_RTX, 1);
2870 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2871 /* We used the wrong signedness. Adjust the result. */
2872 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2873 target, unsignedp);
2877 return 0;
2879 try:
2880 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2881 tem = expand_binop (wider_mode, moptab, op0, op1,
2882 NULL_RTX, unsignedp, OPTAB_WIDEN);
2883 if (tem == 0)
2884 return 0;
2886 /* Extract the high half of the just generated product. */
2887 if (mode == word_mode)
2889 return gen_highpart (mode, tem);
2891 else
2893 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2894 build_int_2 (size, 0), NULL_RTX, 1);
2895 return convert_modes (mode, wider_mode, tem, unsignedp);
2899 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2900 if that is convenient, and returning where the result is.
2901 You may request either the quotient or the remainder as the result;
2902 specify REM_FLAG nonzero to get the remainder.
2904 CODE is the expression code for which kind of division this is;
2905 it controls how rounding is done. MODE is the machine mode to use.
2906 UNSIGNEDP nonzero means do unsigned division. */
2908 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2909 and then correct it by or'ing in missing high bits
2910 if result of ANDI is nonzero.
2911 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2912 This could optimize to a bfexts instruction.
2913 But C doesn't use these operations, so their optimizations are
2914 left for later. */
2915 /* ??? For modulo, we don't actually need the highpart of the first product,
2916 the low part will do nicely. And for small divisors, the second multiply
2917 can also be a low-part only multiply or even be completely left out.
2918 E.g. to calculate the remainder of a division by 3 with a 32 bit
2919 multiply, multiply with 0x55555556 and extract the upper two bits;
2920 the result is exact for inputs up to 0x1fffffff.
2921 The input range can be reduced by using cross-sum rules.
2922 For odd divisors >= 3, the following table gives right shift counts
2923 so that if a number is shifted by an integer multiple of the given
2924 amount, the remainder stays the same:
2925 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2926 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2927 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2928 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2929 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2931 Cross-sum rules for even numbers can be derived by leaving as many bits
2932 to the right alone as the divisor has zeros to the right.
2933 E.g. if x is an unsigned 32 bit number:
2934 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2935 */
2937 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2939 rtx
2940 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2941 int rem_flag;
2942 enum tree_code code;
2943 enum machine_mode mode;
2944 rtx op0, op1, target;
2945 int unsignedp;
2947 enum machine_mode compute_mode;
2948 rtx tquotient;
2949 rtx quotient = 0, remainder = 0;
2950 rtx last;
2951 int size;
2952 rtx insn, set;
2953 optab optab1, optab2;
2954 int op1_is_constant, op1_is_pow2;
2955 int max_cost, extra_cost;
2956 static HOST_WIDE_INT last_div_const = 0;
2958 op1_is_constant = GET_CODE (op1) == CONST_INT;
2959 op1_is_pow2 = (op1_is_constant
2960 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2961 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2963 /*
2964 This is the structure of expand_divmod:
2966 First comes code to fix up the operands so we can perform the operations
2967 correctly and efficiently.
2969 Second comes a switch statement with code specific for each rounding mode.
2970 For some special operands this code emits all RTL for the desired
2971 operation, for other cases, it generates only a quotient and stores it in
2972 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2973 to indicate that it has not done anything.
2975 Last comes code that finishes the operation. If QUOTIENT is set and
2976 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2977 QUOTIENT is not set, it is computed using trunc rounding.
2979 We try to generate special code for division and remainder when OP1 is a
2980 constant. If |OP1| = 2**n we can use shifts and some other fast
2981 operations. For other values of OP1, we compute a carefully selected
2982 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2983 by m.
2985 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2986 half of the product. Different strategies for generating the product are
2987 implemented in expand_mult_highpart.
2989 If what we actually want is the remainder, we generate that by another
2990 by-constant multiplication and a subtraction. */
2992 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2993 code below will malfunction if we are, so check here and handle
2994 the special case if so. */
2995 if (op1 == const1_rtx)
2996 return rem_flag ? const0_rtx : op0;
2998 /* When dividing by -1, we could get an overflow.
2999 negv_optab can handle overflows. */
3000 if (! unsignedp && op1 == constm1_rtx)
3002 if (rem_flag)
3003 return const0_rtx;
3004 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3005 ? negv_optab : neg_optab, op0, target, 0);
3008 if (target
3009 /* Don't use the function value register as a target
3010 since we have to read it as well as write it,
3011 and function-inlining gets confused by this. */
3012 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3013 /* Don't clobber an operand while doing a multi-step calculation. */
3014 || ((rem_flag || op1_is_constant)
3015 && (reg_mentioned_p (target, op0)
3016 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3017 || reg_mentioned_p (target, op1)
3018 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3019 target = 0;
3021 /* Get the mode in which to perform this computation. Normally it will
3022 be MODE, but sometimes we can't do the desired operation in MODE.
3023 If so, pick a wider mode in which we can do the operation. Convert
3024 to that mode at the start to avoid repeated conversions.
3026 First see what operations we need. These depend on the expression
3027 we are evaluating. (We assume that divxx3 insns exist under the
3028 same conditions as modxx3 insns, and that these insns don't normally
3029 fail. If these assumptions are not correct, we may generate less
3030 efficient code in some cases.)
3032 Then see if we find a mode in which we can open-code that operation
3033 (either a division, modulus, or shift). Finally, check for the smallest
3034 mode for which we can do the operation with a library call. */
3036 /* We might want to refine this now that we have division-by-constant
3037 optimization. Since expand_mult_highpart tries so many variants, it is
3038 not straightforward to generalize this. Maybe we should make an array
3039 of possible modes in init_expmed? Save this for GCC 2.7. */
3041 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3042 ? (unsignedp ? lshr_optab : ashr_optab)
3043 : (unsignedp ? udiv_optab : sdiv_optab));
3044 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3045 ? optab1
3046 : (unsignedp ? udivmod_optab : sdivmod_optab));
3048 for (compute_mode = mode; compute_mode != VOIDmode;
3049 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3050 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3051 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3052 break;
3054 if (compute_mode == VOIDmode)
3055 for (compute_mode = mode; compute_mode != VOIDmode;
3056 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3057 if (optab1->handlers[(int) compute_mode].libfunc
3058 || optab2->handlers[(int) compute_mode].libfunc)
3059 break;
3061 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3062 in expand_binop. */
3063 if (compute_mode == VOIDmode)
3064 compute_mode = mode;
3066 if (target && GET_MODE (target) == compute_mode)
3067 tquotient = target;
3068 else
3069 tquotient = gen_reg_rtx (compute_mode);
3071 size = GET_MODE_BITSIZE (compute_mode);
3072 #if 0
3073 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3074 (mode), and thereby get better code when OP1 is a constant. Do that
3075 later. It will require going over all usages of SIZE below. */
3076 size = GET_MODE_BITSIZE (mode);
3077 #endif
3079 /* Only deduct something for a REM if the last divide done was
3080 for a different constant. Then set the constant of the last
3081 divide. */
3082 max_cost = div_cost[(int) compute_mode]
3083 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3084 && INTVAL (op1) == last_div_const)
3085 ? mul_cost[(int) compute_mode] + add_cost : 0);
3087 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3089 /* Now convert to the best mode to use. */
3090 if (compute_mode != mode)
3092 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3093 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3095 /* convert_modes may have placed op1 into a register, so we
3096 must recompute the following. */
3097 op1_is_constant = GET_CODE (op1) == CONST_INT;
3098 op1_is_pow2 = (op1_is_constant
3099 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3100 || (! unsignedp
3101 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3104 /* If one of the operands is a volatile MEM, copy it into a register. */
3106 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3107 op0 = force_reg (compute_mode, op0);
3108 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3109 op1 = force_reg (compute_mode, op1);
3111 /* If we need the remainder or if OP1 is constant, we need to
3112 put OP0 in a register in case it has any queued subexpressions. */
3113 if (rem_flag || op1_is_constant)
3114 op0 = force_reg (compute_mode, op0);
3116 last = get_last_insn ();
3118 /* Promote floor rounding to trunc rounding for unsigned operations. */
3119 if (unsignedp)
3121 if (code == FLOOR_DIV_EXPR)
3122 code = TRUNC_DIV_EXPR;
3123 if (code == FLOOR_MOD_EXPR)
3124 code = TRUNC_MOD_EXPR;
3125 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3126 code = TRUNC_DIV_EXPR;
3129 if (op1 != const0_rtx)
3130 switch (code)
3132 case TRUNC_MOD_EXPR:
3133 case TRUNC_DIV_EXPR:
3134 if (op1_is_constant)
3136 if (unsignedp)
3138 unsigned HOST_WIDE_INT mh, ml;
3139 int pre_shift, post_shift;
3140 int dummy;
3141 unsigned HOST_WIDE_INT d = INTVAL (op1);
3143 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3145 pre_shift = floor_log2 (d);
3146 if (rem_flag)
3148 remainder
3149 = expand_binop (compute_mode, and_optab, op0,
3150 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3151 remainder, 1,
3152 OPTAB_LIB_WIDEN);
3153 if (remainder)
3154 return gen_lowpart (mode, remainder);
3156 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3157 build_int_2 (pre_shift, 0),
3158 tquotient, 1);
3160 else if (size <= HOST_BITS_PER_WIDE_INT)
3162 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3164 /* Most significant bit of divisor is set; emit an scc
3165 insn. */
3166 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3167 compute_mode, 1, 1);
3168 if (quotient == 0)
3169 goto fail1;
3171 else
3173 /* Find a suitable multiplier and right shift count
3174 instead of multiplying with D. */
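/* A worked instance (constants derived from the algorithm, shown for
illustration, with SIZE == 32): for d == 7, choose_multiplier returns
ml == 0x24924925 with mh nonzero and post_shift == 3, and the
mh != 0 path below computes
t1 = high part of (op0 * 0x24924925)
quotient = (t1 + ((op0 - t1) >> 1)) >> 2
the usual fixup when the multiplier does not fit in SIZE bits. */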
3176 mh = choose_multiplier (d, size, size,
3177 &ml, &post_shift, &dummy);
3179 /* If the suggested multiplier is more than SIZE bits,
3180 we can do better for even divisors, using an
3181 initial right shift. */
3182 if (mh != 0 && (d & 1) == 0)
3184 pre_shift = floor_log2 (d & -d);
3185 mh = choose_multiplier (d >> pre_shift, size,
3186 size - pre_shift,
3187 &ml, &post_shift, &dummy);
3188 if (mh)
3189 abort ();
3191 else
3192 pre_shift = 0;
3194 if (mh != 0)
3196 rtx t1, t2, t3, t4;
3198 if (post_shift - 1 >= BITS_PER_WORD)
3199 goto fail1;
3201 extra_cost = (shift_cost[post_shift - 1]
3202 + shift_cost[1] + 2 * add_cost);
3203 t1 = expand_mult_highpart (compute_mode, op0, ml,
3204 NULL_RTX, 1,
3205 max_cost - extra_cost);
3206 if (t1 == 0)
3207 goto fail1;
3208 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3209 op0, t1),
3210 NULL_RTX);
3211 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3212 build_int_2 (1, 0), NULL_RTX, 1);
3213 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3214 t1, t3),
3215 NULL_RTX);
3216 quotient
3217 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3218 build_int_2 (post_shift - 1, 0),
3219 tquotient, 1);
3221 else
3223 rtx t1, t2;
3225 if (pre_shift >= BITS_PER_WORD
3226 || post_shift >= BITS_PER_WORD)
3227 goto fail1;
3229 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3230 build_int_2 (pre_shift, 0),
3231 NULL_RTX, 1);
3232 extra_cost = (shift_cost[pre_shift]
3233 + shift_cost[post_shift]);
3234 t2 = expand_mult_highpart (compute_mode, t1, ml,
3235 NULL_RTX, 1,
3236 max_cost - extra_cost);
3237 if (t2 == 0)
3238 goto fail1;
3239 quotient
3240 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3241 build_int_2 (post_shift, 0),
3242 tquotient, 1);
3246 else /* Too wide mode to use tricky code */
3247 break;
3249 insn = get_last_insn ();
3250 if (insn != last
3251 && (set = single_set (insn)) != 0
3252 && SET_DEST (set) == quotient)
3253 set_unique_reg_note (insn,
3254 REG_EQUAL,
3255 gen_rtx_UDIV (compute_mode, op0, op1));
3257 else /* TRUNC_DIV, signed */
3259 unsigned HOST_WIDE_INT ml;
3260 int lgup, post_shift;
3261 HOST_WIDE_INT d = INTVAL (op1);
3262 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3264 /* n rem d = n rem -d */
3265 if (rem_flag && d < 0)
3267 d = abs_d;
3268 op1 = gen_int_mode (abs_d, compute_mode);
3271 if (d == 1)
3272 quotient = op0;
3273 else if (d == -1)
3274 quotient = expand_unop (compute_mode, neg_optab, op0,
3275 tquotient, 0);
3276 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3278 /* This case is not handled correctly below. */
3279 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3280 compute_mode, 1, 1);
3281 if (quotient == 0)
3282 goto fail1;
3284 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3285 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3286 /* ??? The cheap metric is computed only for
3287 word_mode. If this operation is wider, this may
3288 not be so. Assume true if the optab has an
3289 expander for this mode. */
3290 && (((rem_flag ? smod_optab : sdiv_optab)
3291 ->handlers[(int) compute_mode].insn_code
3292 != CODE_FOR_nothing)
3293 || (sdivmod_optab->handlers[(int) compute_mode]
3294 .insn_code != CODE_FOR_nothing)))
3295 ;
3296 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3298 lgup = floor_log2 (abs_d);
3299 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3301 rtx label = gen_label_rtx ();
3302 rtx t1;
3304 t1 = copy_to_mode_reg (compute_mode, op0);
3305 do_cmp_and_jump (t1, const0_rtx, GE,
3306 compute_mode, label);
3307 expand_inc (t1, gen_int_mode (abs_d - 1,
3308 compute_mode));
3309 emit_label (label);
3310 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3311 build_int_2 (lgup, 0),
3312 tquotient, 0);
3314 else
3316 rtx t1, t2, t3;
3317 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3318 build_int_2 (size - 1, 0),
3319 NULL_RTX, 0);
3320 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3321 build_int_2 (size - lgup, 0),
3322 NULL_RTX, 1);
3323 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3324 op0, t2),
3325 NULL_RTX);
3326 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3327 build_int_2 (lgup, 0),
3328 tquotient, 0);
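/* For example (illustrative, 32-bit mode, abs_d == 4), the branchless
sequence above amounts to
t1 = op0 >> 31 (arithmetic; all ones if op0 < 0)
t2 = (unsigned) t1 >> 30 (3 if op0 < 0, else 0)
quotient = (op0 + t2) >> 2 (arithmetic)
i.e. a negative dividend is biased by abs_d - 1 before the shift so
the quotient rounds toward zero. */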
3331 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3332 the quotient. */
3333 if (d < 0)
3335 insn = get_last_insn ();
3336 if (insn != last
3337 && (set = single_set (insn)) != 0
3338 && SET_DEST (set) == quotient
3339 && abs_d < ((unsigned HOST_WIDE_INT) 1
3340 << (HOST_BITS_PER_WIDE_INT - 1)))
3341 set_unique_reg_note (insn,
3342 REG_EQUAL,
3343 gen_rtx_DIV (compute_mode,
3344 op0,
3345 GEN_INT
3346 (trunc_int_for_mode
3347 (abs_d,
3348 compute_mode))));
3350 quotient = expand_unop (compute_mode, neg_optab,
3351 quotient, quotient, 0);
3354 else if (size <= HOST_BITS_PER_WIDE_INT)
3356 choose_multiplier (abs_d, size, size - 1,
3357 &ml, &post_shift, &lgup);
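/* A worked instance (constants derived from the algorithm, shown for
illustration, with SIZE == 32): for d == 3 this yields
ml == 0x55555556 and post_shift == 0, so the first branch below
computes quotient = highpart (op0 * 0x55555556) - (op0 >> 31),
the final shift being arithmetic. */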
3358 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3360 rtx t1, t2, t3;
3362 if (post_shift >= BITS_PER_WORD
3363 || size - 1 >= BITS_PER_WORD)
3364 goto fail1;
3366 extra_cost = (shift_cost[post_shift]
3367 + shift_cost[size - 1] + add_cost);
3368 t1 = expand_mult_highpart (compute_mode, op0, ml,
3369 NULL_RTX, 0,
3370 max_cost - extra_cost);
3371 if (t1 == 0)
3372 goto fail1;
3373 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3374 build_int_2 (post_shift, 0), NULL_RTX, 0);
3375 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3376 build_int_2 (size - 1, 0), NULL_RTX, 0);
3377 if (d < 0)
3378 quotient
3379 = force_operand (gen_rtx_MINUS (compute_mode,
3380 t3, t2),
3381 tquotient);
3382 else
3383 quotient
3384 = force_operand (gen_rtx_MINUS (compute_mode,
3385 t2, t3),
3386 tquotient);
3388 else
3390 rtx t1, t2, t3, t4;
3392 if (post_shift >= BITS_PER_WORD
3393 || size - 1 >= BITS_PER_WORD)
3394 goto fail1;
3396 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3397 extra_cost = (shift_cost[post_shift]
3398 + shift_cost[size - 1] + 2 * add_cost);
3399 t1 = expand_mult_highpart (compute_mode, op0, ml,
3400 NULL_RTX, 0,
3401 max_cost - extra_cost);
3402 if (t1 == 0)
3403 goto fail1;
3404 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3405 t1, op0),
3406 NULL_RTX);
3407 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3408 build_int_2 (post_shift, 0),
3409 NULL_RTX, 0);
3410 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3411 build_int_2 (size - 1, 0),
3412 NULL_RTX, 0);
3413 if (d < 0)
3414 quotient
3415 = force_operand (gen_rtx_MINUS (compute_mode,
3416 t4, t3),
3417 tquotient);
3418 else
3419 quotient
3420 = force_operand (gen_rtx_MINUS (compute_mode,
3421 t3, t4),
3422 tquotient);
3425 else /* Too wide mode to use tricky code */
3426 break;
3428 insn = get_last_insn ();
3429 if (insn != last
3430 && (set = single_set (insn)) != 0
3431 && SET_DEST (set) == quotient)
3432 set_unique_reg_note (insn,
3433 REG_EQUAL,
3434 gen_rtx_DIV (compute_mode, op0, op1));
3436 break;
3438 fail1:
3439 delete_insns_since (last);
3440 break;
3442 case FLOOR_DIV_EXPR:
3443 case FLOOR_MOD_EXPR:
3444 /* We will come here only for signed operations. */
3445 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3447 unsigned HOST_WIDE_INT mh, ml;
3448 int pre_shift, lgup, post_shift;
3449 HOST_WIDE_INT d = INTVAL (op1);
3451 if (d > 0)
3453 /* We could just as easily deal with negative constants here,
3454 but it does not seem worth the trouble for GCC 2.6. */
3455 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3457 pre_shift = floor_log2 (d);
3458 if (rem_flag)
3460 remainder = expand_binop (compute_mode, and_optab, op0,
3461 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3462 remainder, 0, OPTAB_LIB_WIDEN);
3463 if (remainder)
3464 return gen_lowpart (mode, remainder);
3466 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3467 build_int_2 (pre_shift, 0),
3468 tquotient, 0);
3470 else
3472 rtx t1, t2, t3, t4;
3474 mh = choose_multiplier (d, size, size - 1,
3475 &ml, &post_shift, &lgup);
3476 if (mh)
3477 abort ();
3479 if (post_shift < BITS_PER_WORD
3480 && size - 1 < BITS_PER_WORD)
3482 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3483 build_int_2 (size - 1, 0),
3484 NULL_RTX, 0);
3485 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3486 NULL_RTX, 0, OPTAB_WIDEN);
3487 extra_cost = (shift_cost[post_shift]
3488 + shift_cost[size - 1] + 2 * add_cost);
3489 t3 = expand_mult_highpart (compute_mode, t2, ml,
3490 NULL_RTX, 1,
3491 max_cost - extra_cost);
3492 if (t3 != 0)
3494 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3495 build_int_2 (post_shift, 0),
3496 NULL_RTX, 1);
3497 quotient = expand_binop (compute_mode, xor_optab,
3498 t4, t1, tquotient, 0,
3499 OPTAB_WIDEN);
3504 else
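/* Here d < 0 (d == 0 was excluded above). A sketch of why the
branchless sequence below is correct: nsign is -1 when op0 <= 0 and
0 otherwise, so the code computes
quotient = trunc ((op0 - 1 - nsign) / d) + ~nsign
which for op0 > 0 is -((op0 - 1) / -d) - 1 == floor (op0 / d), and
for op0 <= 0 is trunc (op0 / d), already equal to floor (op0 / d)
because both operands are nonpositive. */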
3506 rtx nsign, t1, t2, t3, t4;
3507 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3508 op0, constm1_rtx), NULL_RTX);
3509 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3510 0, OPTAB_WIDEN);
3511 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3512 build_int_2 (size - 1, 0), NULL_RTX, 0);
3513 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3514 NULL_RTX);
3515 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3516 NULL_RTX, 0);
3517 if (t4)
3519 rtx t5;
3520 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3521 NULL_RTX, 0);
3522 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3523 t4, t5),
3524 tquotient);
3529 if (quotient != 0)
3530 break;
3531 delete_insns_since (last);
3533 /* Try using an instruction that produces both the quotient and
3534 remainder, using truncation. We can easily compensate the quotient
3535 or remainder to get floor rounding, once we have the remainder.
3536 Notice that we also compute the final remainder value here,
3537 and return the result right away. */
3538 if (target == 0 || GET_MODE (target) != compute_mode)
3539 target = gen_reg_rtx (compute_mode);
3541 if (rem_flag)
3543 remainder
3544 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3545 quotient = gen_reg_rtx (compute_mode);
3547 else
3549 quotient
3550 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3551 remainder = gen_reg_rtx (compute_mode);
3554 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3555 quotient, remainder, 0))
3557 /* This could be computed with a branchless sequence.
3558 Save that for later. */
3559 rtx tem;
3560 rtx label = gen_label_rtx ();
3561 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3562 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3563 NULL_RTX, 0, OPTAB_WIDEN);
3564 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3565 expand_dec (quotient, const1_rtx);
3566 expand_inc (remainder, op1);
3567 emit_label (label);
3568 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3571 /* No luck with division elimination or divmod. Have to do it
3572 by conditionally adjusting op0 *and* the result. */
3574 rtx label1, label2, label3, label4, label5;
3575 rtx adjusted_op0;
3576 rtx tem;
3578 quotient = gen_reg_rtx (compute_mode);
3579 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3580 label1 = gen_label_rtx ();
3581 label2 = gen_label_rtx ();
3582 label3 = gen_label_rtx ();
3583 label4 = gen_label_rtx ();
3584 label5 = gen_label_rtx ();
3585 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3586 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3587 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3588 quotient, 0, OPTAB_LIB_WIDEN);
3589 if (tem != quotient)
3590 emit_move_insn (quotient, tem);
3591 emit_jump_insn (gen_jump (label5));
3592 emit_barrier ();
3593 emit_label (label1);
3594 expand_inc (adjusted_op0, const1_rtx);
3595 emit_jump_insn (gen_jump (label4));
3596 emit_barrier ();
3597 emit_label (label2);
3598 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3599 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3600 quotient, 0, OPTAB_LIB_WIDEN);
3601 if (tem != quotient)
3602 emit_move_insn (quotient, tem);
3603 emit_jump_insn (gen_jump (label5));
3604 emit_barrier ();
3605 emit_label (label3);
3606 expand_dec (adjusted_op0, const1_rtx);
3607 emit_label (label4);
3608 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3609 quotient, 0, OPTAB_LIB_WIDEN);
3610 if (tem != quotient)
3611 emit_move_insn (quotient, tem);
3612 expand_dec (quotient, const1_rtx);
3613 emit_label (label5);
3615 break;
3617 case CEIL_DIV_EXPR:
3618 case CEIL_MOD_EXPR:
3619 if (unsignedp)
3621 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3623 rtx t1, t2, t3;
3624 unsigned HOST_WIDE_INT d = INTVAL (op1);
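/* For d == 2^k the ceiling quotient needs no division at all:
   q = (op0 >> k) + ((op0 & (d - 1)) != 0);
   e.g. ceil(13/4) == (13 >> 2) + ((13 & 3) != 0) == 3 + 1 == 4.  */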
3625 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3626 build_int_2 (floor_log2 (d), 0),
3627 tquotient, 1);
3628 t2 = expand_binop (compute_mode, and_optab, op0,
3629 GEN_INT (d - 1),
3630 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3631 t3 = gen_reg_rtx (compute_mode);
3632 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3633 compute_mode, 1, 1);
3634 if (t3 == 0)
3636 rtx lab;
3637 lab = gen_label_rtx ();
3638 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3639 expand_inc (t1, const1_rtx);
3640 emit_label (lab);
3641 quotient = t1;
3643 else
3644 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3645 t1, t3),
3646 tquotient);
3647 break;
3650 /* Try using an instruction that produces both the quotient and
3651 remainder, using truncation. We can easily compensate the
3652 quotient or remainder to get ceiling rounding, once we have the
3653 remainder. Notice that we also compute the final remainder
3654 value here, and return the result right away. */
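/* In C terms the fixup below is roughly
   if (r != 0)
   q++, r -= op1;
   e.g. for 7/2, truncation gives q == 3, r == 1; ceiling rounding
   yields q == 4, r == -1, and 4 * 2 - 1 == 7 still holds.  */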
3655 if (target == 0 || GET_MODE (target) != compute_mode)
3656 target = gen_reg_rtx (compute_mode);
3658 if (rem_flag)
3660 remainder = (GET_CODE (target) == REG
3661 ? target : gen_reg_rtx (compute_mode));
3662 quotient = gen_reg_rtx (compute_mode);
3664 else
3666 quotient = (GET_CODE (target) == REG
3667 ? target : gen_reg_rtx (compute_mode));
3668 remainder = gen_reg_rtx (compute_mode);
3671 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3672 remainder, 1))
3674 /* This could be computed with a branch-less sequence.
3675 Save that for later. */
3676 rtx label = gen_label_rtx ();
3677 do_cmp_and_jump (remainder, const0_rtx, EQ,
3678 compute_mode, label);
3679 expand_inc (quotient, const1_rtx);
3680 expand_dec (remainder, op1);
3681 emit_label (label);
3682 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3685 /* No luck with division elimination or divmod. Have to do it
3686 by conditionally adjusting op0 *and* the result. */
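/* For op0 != 0 this uses the unsigned identity
   ceil(op0/op1) == (op0 - 1) / op1 + 1,
   falling back to q == 0 when op0 == 0;
   e.g. ceil(7/2) == (7 - 1) / 2 + 1 == 3 + 1 == 4.  */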
3688 rtx label1, label2;
3689 rtx adjusted_op0, tem;
3691 quotient = gen_reg_rtx (compute_mode);
3692 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3693 label1 = gen_label_rtx ();
3694 label2 = gen_label_rtx ();
3695 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3696 compute_mode, label1);
3697 emit_move_insn (quotient, const0_rtx);
3698 emit_jump_insn (gen_jump (label2));
3699 emit_barrier ();
3700 emit_label (label1);
3701 expand_dec (adjusted_op0, const1_rtx);
3702 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3703 quotient, 1, OPTAB_LIB_WIDEN);
3704 if (tem != quotient)
3705 emit_move_insn (quotient, tem);
3706 expand_inc (quotient, const1_rtx);
3707 emit_label (label2);
3710 else /* signed */
3712 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3713 && INTVAL (op1) >= 0)
3715 /* This is extremely similar to the code for the unsigned case
3716 above. For 2.7 we should merge these variants, but for
3717 2.6.1 I don't want to touch the code for unsigned since that
3718 gets used in C. The signed case will only be used by other
3719 languages (Ada). */
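/* With an arithmetic right shift the same computation as in the unsigned
   case also covers negative dividends:
   q = (op0 >> k) + ((op0 & (d - 1)) != 0);
   e.g. ceil(-7/4) == (-7 >> 2) + ((-7 & 3) != 0) == -2 + 1 == -1.  */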
3721 rtx t1, t2, t3;
3722 unsigned HOST_WIDE_INT d = INTVAL (op1);
3723 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3724 build_int_2 (floor_log2 (d), 0),
3725 tquotient, 0);
3726 t2 = expand_binop (compute_mode, and_optab, op0,
3727 GEN_INT (d - 1),
3728 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3729 t3 = gen_reg_rtx (compute_mode);
3730 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3731 compute_mode, 1, 1);
3732 if (t3 == 0)
3734 rtx lab;
3735 lab = gen_label_rtx ();
3736 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3737 expand_inc (t1, const1_rtx);
3738 emit_label (lab);
3739 quotient = t1;
3741 else
3742 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3743 t1, t3),
3744 tquotient);
3745 break;
3748 /* Try using an instruction that produces both the quotient and
3749 remainder, using truncation. We can easily compensate the
3750 quotient or remainder to get ceiling rounding, once we have the
3751 remainder. Notice that we also compute the final remainder
3752 value here, and return the result right away. */
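/* Here the fixup applies only when the operand signs agree, in C terms
   roughly
   if (r != 0 && (op0 ^ op1) >= 0)
   q++, r -= op1;
   e.g. for -7 / -2, truncation gives q == 3, r == -1; the fixup yields
   q == 4, r == 1, and 4 * -2 + 1 == -7.  */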
3753 if (target == 0 || GET_MODE (target) != compute_mode)
3754 target = gen_reg_rtx (compute_mode);
3755 if (rem_flag)
3757 remainder = (GET_CODE (target) == REG
3758 ? target : gen_reg_rtx (compute_mode));
3759 quotient = gen_reg_rtx (compute_mode);
3761 else
3763 quotient = (GET_CODE (target) == REG
3764 ? target : gen_reg_rtx (compute_mode));
3765 remainder = gen_reg_rtx (compute_mode);
3768 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3769 remainder, 0))
3771 /* This could be computed with a branch-less sequence.
3772 Save that for later. */
3773 rtx tem;
3774 rtx label = gen_label_rtx ();
3775 do_cmp_and_jump (remainder, const0_rtx, EQ,
3776 compute_mode, label);
3777 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3778 NULL_RTX, 0, OPTAB_WIDEN);
3779 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3780 expand_inc (quotient, const1_rtx);
3781 expand_dec (remainder, op1);
3782 emit_label (label);
3783 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3786 /* No luck with division elimination or divmod. Have to do it
3787 by conditionally adjusting op0 *and* the result. */
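/* As in the floor case, a pre-adjusted truncating division gives the
   ceiling when the operand signs agree:
   q = (op0 - 1) / op1 + 1 when both are positive,
   q = (op0 + 1) / op1 + 1 when both are negative;
   e.g. ceil(7/2) == (7 - 1) / 2 + 1 == 4. With differing signs,
   truncation already rounds toward zero, i.e. upward.  */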
3789 rtx label1, label2, label3, label4, label5;
3790 rtx adjusted_op0;
3791 rtx tem;
3793 quotient = gen_reg_rtx (compute_mode);
3794 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3795 label1 = gen_label_rtx ();
3796 label2 = gen_label_rtx ();
3797 label3 = gen_label_rtx ();
3798 label4 = gen_label_rtx ();
3799 label5 = gen_label_rtx ();
3800 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3801 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3802 compute_mode, label1);
3803 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3804 quotient, 0, OPTAB_LIB_WIDEN);
3805 if (tem != quotient)
3806 emit_move_insn (quotient, tem);
3807 emit_jump_insn (gen_jump (label5));
3808 emit_barrier ();
3809 emit_label (label1);
3810 expand_dec (adjusted_op0, const1_rtx);
3811 emit_jump_insn (gen_jump (label4));
3812 emit_barrier ();
3813 emit_label (label2);
3814 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3815 compute_mode, label3);
3816 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3817 quotient, 0, OPTAB_LIB_WIDEN);
3818 if (tem != quotient)
3819 emit_move_insn (quotient, tem);
3820 emit_jump_insn (gen_jump (label5));
3821 emit_barrier ();
3822 emit_label (label3);
3823 expand_inc (adjusted_op0, const1_rtx);
3824 emit_label (label4);
3825 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3826 quotient, 0, OPTAB_LIB_WIDEN);
3827 if (tem != quotient)
3828 emit_move_insn (quotient, tem);
3829 expand_inc (quotient, const1_rtx);
3830 emit_label (label5);
3833 break;
3835 case EXACT_DIV_EXPR:
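/* When the division is known to be exact, d == odd * 2^pre_shift can be
   divided out without a divide insn: shift right by pre_shift, then
   multiply by the multiplicative inverse of the odd part mod 2^size
   (computed by invert_mod2n). E.g. in a 32-bit mode, exact division
   by 3 is a multiply by 0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1 (mod 2^32);
   thus 21 * 0xAAAAAAAB mod 2^32 == 7.  */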
3836 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3838 HOST_WIDE_INT d = INTVAL (op1);
3839 unsigned HOST_WIDE_INT ml;
3840 int pre_shift;
3841 rtx t1;
3843 pre_shift = floor_log2 (d & -d);
3844 ml = invert_mod2n (d >> pre_shift, size);
3845 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3846 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3847 quotient = expand_mult (compute_mode, t1,
3848 gen_int_mode (ml, compute_mode),
3849 NULL_RTX, 0);
3851 insn = get_last_insn ();
3852 set_unique_reg_note (insn,
3853 REG_EQUAL,
3854 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3855 compute_mode,
3856 op0, op1));
3858 break;
3860 case ROUND_DIV_EXPR:
3861 case ROUND_MOD_EXPR:
3862 if (unsignedp)
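/* Round-to-nearest from a truncating division: round up when the
   remainder exceeds (op1 - 1) / 2, in C terms roughly
   if (r > (op1 - 1) / 2)
   q++, r -= op1;
   e.g. for 7/4, truncation gives q == 1, r == 3; since 3 > 1, the fixup
   yields q == 2, r == -1.  */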
3864 rtx tem;
3865 rtx label;
3866 label = gen_label_rtx ();
3867 quotient = gen_reg_rtx (compute_mode);
3868 remainder = gen_reg_rtx (compute_mode);
3869 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3871 rtx tem;
3872 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3873 quotient, 1, OPTAB_LIB_WIDEN);
3874 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3875 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3876 remainder, 1, OPTAB_LIB_WIDEN);
3878 tem = plus_constant (op1, -1);
3879 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3880 build_int_2 (1, 0), NULL_RTX, 1);
3881 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3882 expand_inc (quotient, const1_rtx);
3883 expand_dec (remainder, op1);
3884 emit_label (label);
3886 else
3888 rtx abs_rem, abs_op1, tem, mask;
3889 rtx label;
3890 label = gen_label_rtx ();
3891 quotient = gen_reg_rtx (compute_mode);
3892 remainder = gen_reg_rtx (compute_mode);
3893 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3895 rtx tem;
3896 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3897 quotient, 0, OPTAB_LIB_WIDEN);
3898 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3899 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3900 remainder, 0, OPTAB_LIB_WIDEN);
3902 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3903 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3904 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3905 build_int_2 (1, 0), NULL_RTX, 1);
3906 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3907 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3908 NULL_RTX, 0, OPTAB_WIDEN);
3909 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3910 build_int_2 (size - 1, 0), NULL_RTX, 0);
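/* MASK is 0 when the operand signs agree and -1 when they differ;
   (x ^ mask) - mask yields x or -x accordingly. The two uses below
   adjust the quotient by +-1 and the remainder by -+op1, moving the
   result one step away from zero.  */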
3911 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3912 NULL_RTX, 0, OPTAB_WIDEN);
3913 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3914 NULL_RTX, 0, OPTAB_WIDEN);
3915 expand_inc (quotient, tem);
3916 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3917 NULL_RTX, 0, OPTAB_WIDEN);
3918 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3919 NULL_RTX, 0, OPTAB_WIDEN);
3920 expand_dec (remainder, tem);
3921 emit_label (label);
3923 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3925 default:
3926 abort ();
3929 if (quotient == 0)
3931 if (target && GET_MODE (target) != compute_mode)
3932 target = 0;
3934 if (rem_flag)
3936 /* Try to produce the remainder without producing the quotient.
3937 If we seem to have a divmod pattern that does not require widening,
3938 don't try widening here. We should really have a WIDEN argument
3939 to expand_twoval_binop, since what we'd really like to do here is
3940 1) try a mod insn in compute_mode
3941 2) try a divmod insn in compute_mode
3942 3) try a div insn in compute_mode and multiply-subtract to get
3943 remainder
3944 4) try the same things with widening allowed. */
3945 remainder
3946 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3947 op0, op1, target,
3948 unsignedp,
3949 ((optab2->handlers[(int) compute_mode].insn_code
3950 != CODE_FOR_nothing)
3951 ? OPTAB_DIRECT : OPTAB_WIDEN));
3952 if (remainder == 0)
3954 /* No luck there. Can we do remainder and divide at once
3955 without a library call? */
3956 remainder = gen_reg_rtx (compute_mode);
3957 if (! expand_twoval_binop ((unsignedp
3958 ? udivmod_optab
3959 : sdivmod_optab),
3960 op0, op1,
3961 NULL_RTX, remainder, unsignedp))
3962 remainder = 0;
3965 if (remainder)
3966 return gen_lowpart (mode, remainder);
3969 /* Produce the quotient. Try a quotient insn, but not a library call.
3970 If we have a divmod in this mode, use it in preference to widening
3971 the div (for this test we assume it will not fail). Note that optab2
3972 is set to whichever of the two optabs the call below will use. */
3973 quotient
3974 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3975 op0, op1, rem_flag ? NULL_RTX : target,
3976 unsignedp,
3977 ((optab2->handlers[(int) compute_mode].insn_code
3978 != CODE_FOR_nothing)
3979 ? OPTAB_DIRECT : OPTAB_WIDEN));
3981 if (quotient == 0)
3983 /* No luck there. Try a quotient-and-remainder insn,
3984 keeping the quotient alone. */
3985 quotient = gen_reg_rtx (compute_mode);
3986 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3987 op0, op1,
3988 quotient, NULL_RTX, unsignedp))
3990 quotient = 0;
3991 if (! rem_flag)
3992 /* Still no luck. If we are not computing the remainder,
3993 use a library call for the quotient. */
3994 quotient = sign_expand_binop (compute_mode,
3995 udiv_optab, sdiv_optab,
3996 op0, op1, target,
3997 unsignedp, OPTAB_LIB_WIDEN);
4002 if (rem_flag)
4004 if (target && GET_MODE (target) != compute_mode)
4005 target = 0;
4007 if (quotient == 0)
4008 /* No divide instruction either. Use library for remainder. */
4009 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4010 op0, op1, target,
4011 unsignedp, OPTAB_LIB_WIDEN);
4012 else
4014 /* We divided. Now finish doing X - Y * (X / Y). */
4015 remainder = expand_mult (compute_mode, quotient, op1,
4016 NULL_RTX, unsignedp);
4017 remainder = expand_binop (compute_mode, sub_optab, op0,
4018 remainder, target, unsignedp,
4019 OPTAB_LIB_WIDEN);
4023 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4026 /* Return a tree node with data type TYPE, describing the value of X.
4027 Usually this is an RTL_EXPR, if there is no obvious better choice.
4028 X may be an expression; however, we only support those expressions
4029 generated by loop.c. */
4031 tree
4032 make_tree (type, x)
4033 tree type;
4034 rtx x;
4036 tree t;
4038 switch (GET_CODE (x))
4040 case CONST_INT:
4041 t = build_int_2 (INTVAL (x),
4042 (TREE_UNSIGNED (type)
4043 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4044 || INTVAL (x) >= 0 ? 0 : -1);
4045 TREE_TYPE (t) = type;
4046 return t;
4048 case CONST_DOUBLE:
4049 if (GET_MODE (x) == VOIDmode)
4051 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4052 TREE_TYPE (t) = type;
4054 else
4056 REAL_VALUE_TYPE d;
4058 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4059 t = build_real (type, d);
4062 return t;
4064 case CONST_VECTOR:
4066 int i, units;
4067 rtx elt;
4068 tree t = NULL_TREE;
4070 units = CONST_VECTOR_NUNITS (x);
4072 /* Build a tree with vector elements. */
4073 for (i = units - 1; i >= 0; --i)
4075 elt = CONST_VECTOR_ELT (x, i);
4076 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4079 return build_vector (type, t);
4082 case PLUS:
4083 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4084 make_tree (type, XEXP (x, 1))));
4086 case MINUS:
4087 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4088 make_tree (type, XEXP (x, 1))));
4090 case NEG:
4091 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4093 case MULT:
4094 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4095 make_tree (type, XEXP (x, 1))));
4097 case ASHIFT:
4098 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4099 make_tree (type, XEXP (x, 1))));
4101 case LSHIFTRT:
4102 t = (*lang_hooks.types.unsigned_type) (type);
4103 return fold (convert (type,
4104 build (RSHIFT_EXPR, t,
4105 make_tree (t, XEXP (x, 0)),
4106 make_tree (type, XEXP (x, 1)))));
4108 case ASHIFTRT:
4109 t = (*lang_hooks.types.signed_type) (type);
4110 return fold (convert (type,
4111 build (RSHIFT_EXPR, t,
4112 make_tree (t, XEXP (x, 0)),
4113 make_tree (type, XEXP (x, 1)))));
4115 case DIV:
4116 if (TREE_CODE (type) != REAL_TYPE)
4117 t = (*lang_hooks.types.signed_type) (type);
4118 else
4119 t = type;
4121 return fold (convert (type,
4122 build (TRUNC_DIV_EXPR, t,
4123 make_tree (t, XEXP (x, 0)),
4124 make_tree (t, XEXP (x, 1)))));
4125 case UDIV:
4126 t = (*lang_hooks.types.unsigned_type) (type);
4127 return fold (convert (type,
4128 build (TRUNC_DIV_EXPR, t,
4129 make_tree (t, XEXP (x, 0)),
4130 make_tree (t, XEXP (x, 1)))));
4132 case SIGN_EXTEND:
4133 case ZERO_EXTEND:
4134 t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
4135 GET_CODE (x) == ZERO_EXTEND);
4136 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4138 default:
4139 t = make_node (RTL_EXPR);
4140 TREE_TYPE (t) = type;
4142 #ifdef POINTERS_EXTEND_UNSIGNED
4143 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4144 ptr_mode. So convert. */
4145 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4146 x = convert_memory_address (TYPE_MODE (type), x);
4147 #endif
4149 RTL_EXPR_RTL (t) = x;
4150 /* There are no insns to be output
4151 when this rtl_expr is used. */
4152 RTL_EXPR_SEQUENCE (t) = 0;
4153 return t;
4157 /* Check whether the multiplication X * MULT + ADD overflows.
4158 X, MULT and ADD must be CONST_*.
4159 MODE is the machine mode for the computation.
4160 X and MULT must have mode MODE. ADD may have a different mode
4161 (a VOIDmode ADD is treated as having mode MODE).
4162 UNSIGNEDP is nonzero to do unsigned multiplication. */
4164 bool
4165 const_mult_add_overflow_p (x, mult, add, mode, unsignedp)
4166 rtx x, mult, add;
4167 enum machine_mode mode;
4168 int unsignedp;
4170 tree type, mult_type, add_type, result;
4172 type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4174 /* In order to get a proper overflow indication from an unsigned
4175 type, we have to pretend that it's a sizetype. */
4176 mult_type = type;
4177 if (unsignedp)
4179 mult_type = copy_node (type);
4180 TYPE_IS_SIZETYPE (mult_type) = 1;
4183 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4184 : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4186 result = fold (build (PLUS_EXPR, mult_type,
4187 fold (build (MULT_EXPR, mult_type,
4188 make_tree (mult_type, x),
4189 make_tree (mult_type, mult))),
4190 make_tree (add_type, add)));
4192 return TREE_CONSTANT_OVERFLOW (result);
4195 /* Return an rtx representing the value of X * MULT + ADD.
4196 TARGET is a suggestion for where to store the result (an rtx).
4197 MODE is the machine mode for the computation.
4198 X and MULT must have mode MODE. ADD may have a different mode
4199 (a VOIDmode ADD is treated as having mode MODE).
4200 UNSIGNEDP is nonzero to do unsigned multiplication.
4201 This may emit insns. */
4203 rtx
4204 expand_mult_add (x, target, mult, add, mode, unsignedp)
4205 rtx x, target, mult, add;
4206 enum machine_mode mode;
4207 int unsignedp;
4209 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4210 tree add_type = (GET_MODE (add) == VOIDmode
4211 ? type : (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4212 unsignedp));
4213 tree result = fold (build (PLUS_EXPR, type,
4214 fold (build (MULT_EXPR, type,
4215 make_tree (type, x),
4216 make_tree (type, mult))),
4217 make_tree (add_type, add)));
4219 return expand_expr (result, target, VOIDmode, 0);
4222 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4223 and returning TARGET.
4225 If TARGET is 0, a pseudo-register or constant is returned. */
4227 rtx
4228 expand_and (mode, op0, op1, target)
4229 enum machine_mode mode;
4230 rtx op0, op1, target;
4232 rtx tem = 0;
4234 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4235 tem = simplify_binary_operation (AND, mode, op0, op1);
4236 if (tem == 0)
4237 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4239 if (target == 0)
4240 target = tem;
4241 else if (tem != target)
4242 emit_move_insn (target, tem);
4243 return target;
4246 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4247 and storing in TARGET. Normally return TARGET.
4248 Return 0 if that cannot be done.
4250 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4251 it is VOIDmode, they cannot both be CONST_INT.
4253 UNSIGNEDP is for the case where we have to widen the operands
4254 to perform the operation. It says to use zero-extension.
4256 NORMALIZEP is 1 if we should convert the result to be either zero
4257 or one. NORMALIZEP is -1 if we should convert the result to be
4258 either zero or -1. If NORMALIZEP is zero, the result will be left
4259 "raw" out of the scc insn. */
4261 rtx
4262 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4263 rtx target;
4264 enum rtx_code code;
4265 rtx op0, op1;
4266 enum machine_mode mode;
4267 int unsignedp;
4268 int normalizep;
4270 rtx subtarget;
4271 enum insn_code icode;
4272 enum machine_mode compare_mode;
4273 enum machine_mode target_mode = GET_MODE (target);
4274 rtx tem;
4275 rtx last = get_last_insn ();
4276 rtx pattern, comparison;
4278 /* ??? Ok to do this and then fail? */
4279 op0 = protect_from_queue (op0, 0);
4280 op1 = protect_from_queue (op1, 0);
4282 if (unsignedp)
4283 code = unsigned_condition (code);
4285 /* If one operand is constant, make it the second one. Only do this
4286 if the other operand is not constant as well. */
4288 if (swap_commutative_operands_p (op0, op1))
4290 tem = op0;
4291 op0 = op1;
4292 op1 = tem;
4293 code = swap_condition (code);
4296 if (mode == VOIDmode)
4297 mode = GET_MODE (op0);
4299 /* For some comparisons with 1 and -1, we can convert them to
4300 comparisons with zero. This will often produce more opportunities for
4301 store-flag insns. */
4303 switch (code)
4305 case LT:
4306 if (op1 == const1_rtx)
4307 op1 = const0_rtx, code = LE;
4308 break;
4309 case LE:
4310 if (op1 == constm1_rtx)
4311 op1 = const0_rtx, code = LT;
4312 break;
4313 case GE:
4314 if (op1 == const1_rtx)
4315 op1 = const0_rtx, code = GT;
4316 break;
4317 case GT:
4318 if (op1 == constm1_rtx)
4319 op1 = const0_rtx, code = GE;
4320 break;
4321 case GEU:
4322 if (op1 == const1_rtx)
4323 op1 = const0_rtx, code = NE;
4324 break;
4325 case LTU:
4326 if (op1 == const1_rtx)
4327 op1 = const0_rtx, code = EQ;
4328 break;
4329 default:
4330 break;
4333 /* If we are comparing a double-word integer with zero, we can convert
4334 the comparison into one involving a single word. */
4335 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4336 && GET_MODE_CLASS (mode) == MODE_INT
4337 && op1 == const0_rtx
4338 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4340 if (code == EQ || code == NE)
4342 /* Do a logical OR of the two words and compare the result. */
4343 rtx op0h = gen_highpart (word_mode, op0);
4344 rtx op0l = gen_lowpart (word_mode, op0);
4345 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4346 NULL_RTX, unsignedp, OPTAB_DIRECT);
4347 if (op0both != 0)
4348 return emit_store_flag (target, code, op0both, op1, word_mode,
4349 unsignedp, normalizep);
4351 else if (code == LT || code == GE)
4352 /* If testing the sign bit, can just test on high word. */
4353 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4354 op1, word_mode, unsignedp, normalizep);
4357 /* From now on, we won't change CODE, so set ICODE now. */
4358 icode = setcc_gen_code[(int) code];
4360 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4361 complement of A (for GE) and shifting the sign bit to the low bit. */
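/* E.g. in a 32-bit mode, (A < 0) is (unsigned) A >> 31 and (A >= 0) is
   (unsigned) ~A >> 31; an arithmetic shift is used instead when a -1/0
   value is wanted.  */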
4362 if (op1 == const0_rtx && (code == LT || code == GE)
4363 && GET_MODE_CLASS (mode) == MODE_INT
4364 && (normalizep || STORE_FLAG_VALUE == 1
4365 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4366 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4367 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4369 subtarget = target;
4371 /* If the result is to be wider than OP0, it is best to convert it
4372 first. If it is to be narrower, it is *incorrect* to convert it
4373 first. */
4374 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4376 op0 = protect_from_queue (op0, 0);
4377 op0 = convert_modes (target_mode, mode, op0, 0);
4378 mode = target_mode;
4381 if (target_mode != mode)
4382 subtarget = 0;
4384 if (code == GE)
4385 op0 = expand_unop (mode, one_cmpl_optab, op0,
4386 ((STORE_FLAG_VALUE == 1 || normalizep)
4387 ? 0 : subtarget), 0);
4389 if (STORE_FLAG_VALUE == 1 || normalizep)
4390 /* If we are supposed to produce a 0/1 value, we want to do
4391 a logical shift from the sign bit to the low-order bit; for
4392 a -1/0 value, we do an arithmetic shift. */
4393 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4394 size_int (GET_MODE_BITSIZE (mode) - 1),
4395 subtarget, normalizep != -1);
4397 if (mode != target_mode)
4398 op0 = convert_modes (target_mode, mode, op0, 0);
4400 return op0;
4403 if (icode != CODE_FOR_nothing)
4405 insn_operand_predicate_fn pred;
4407 /* We think we may be able to do this with a scc insn. Emit the
4408 comparison and then the scc insn.
4410 compare_from_rtx may call emit_queue, which would be deleted below
4411 if the scc insn fails. So call it ourselves before setting LAST.
4412 Likewise for do_pending_stack_adjust. */
4414 emit_queue ();
4415 do_pending_stack_adjust ();
4416 last = get_last_insn ();
4418 comparison
4419 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4420 if (GET_CODE (comparison) == CONST_INT)
4421 return (comparison == const0_rtx ? const0_rtx
4422 : normalizep == 1 ? const1_rtx
4423 : normalizep == -1 ? constm1_rtx
4424 : const_true_rtx);
4426 /* The code of COMPARISON may not match CODE if compare_from_rtx
4427 decided to swap its operands and reverse the original code.
4429 We know that compare_from_rtx returns either a CONST_INT or
4430 a new comparison code, so it is safe to just extract the
4431 code from COMPARISON. */
4432 code = GET_CODE (comparison);
4434 /* Get a reference to the target in the proper mode for this insn. */
4435 compare_mode = insn_data[(int) icode].operand[0].mode;
4436 subtarget = target;
4437 pred = insn_data[(int) icode].operand[0].predicate;
4438 if (preserve_subexpressions_p ()
4439 || ! (*pred) (subtarget, compare_mode))
4440 subtarget = gen_reg_rtx (compare_mode);
4442 pattern = GEN_FCN (icode) (subtarget);
4443 if (pattern)
4445 emit_insn (pattern);
4447 /* If we are converting to a wider mode, first convert to
4448 TARGET_MODE, then normalize. This produces better combining
4449 opportunities on machines that have a SIGN_EXTRACT when we are
4450 testing a single bit. This mostly benefits the 68k.
4452 If STORE_FLAG_VALUE does not have the sign bit set when
4453 interpreted in COMPARE_MODE, we can do this conversion as
4454 unsigned, which is usually more efficient. */
4455 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4457 convert_move (target, subtarget,
4458 (GET_MODE_BITSIZE (compare_mode)
4459 <= HOST_BITS_PER_WIDE_INT)
4460 && 0 == (STORE_FLAG_VALUE
4461 & ((HOST_WIDE_INT) 1
4462 << (GET_MODE_BITSIZE (compare_mode) -1))));
4463 op0 = target;
4464 compare_mode = target_mode;
4466 else
4467 op0 = subtarget;
4469 /* If we want to keep subexpressions around, don't reuse our
4470 last target. */
4472 if (preserve_subexpressions_p ())
4473 subtarget = 0;
4475 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4476 we don't have to do anything. */
4477 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4479 /* STORE_FLAG_VALUE might be the most negative number, so write
4480 the comparison this way to avoid a compile-time warning. */
4481 else if (- normalizep == STORE_FLAG_VALUE)
4482 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4484 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4485 makes it hard to use a value of just the sign bit due to
4486 ANSI integer constant typing rules. */
4487 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4488 && (STORE_FLAG_VALUE
4489 & ((HOST_WIDE_INT) 1
4490 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4491 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4492 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4493 subtarget, normalizep == 1);
4494 else if (STORE_FLAG_VALUE & 1)
4496 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4497 if (normalizep == -1)
4498 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4500 else
4501 abort ();
4503 /* If we were converting to a smaller mode, do the
4504 conversion now. */
4505 if (target_mode != compare_mode)
4507 convert_move (target, op0, 0);
4508 return target;
4510 else
4511 return op0;
4515 delete_insns_since (last);
4517 /* If expensive optimizations are enabled, use different pseudo registers
4518 for each insn, instead of reusing the same pseudo. This leads to better
4519 CSE, but slows down the compiler, since there are more pseudos. */
4520 subtarget = (!flag_expensive_optimizations
4521 && (target_mode == mode)) ? target : NULL_RTX;
4523 /* If we reached here, we can't do this with a scc insn. However, there
4524 are some comparisons that can be done directly. For example, if
4525 this is an equality comparison of integers, we can try to exclusive-or
4526 (or subtract) the two operands and use a recursive call to try the
4527 comparison with zero. Don't do any of these cases if branches are
4528 very cheap. */
4530 if (BRANCH_COST > 0
4531 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4532 && op1 != const0_rtx)
4534 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4535 OPTAB_WIDEN);
4537 if (tem == 0)
4538 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4539 OPTAB_WIDEN);
4540 if (tem != 0)
4541 tem = emit_store_flag (target, code, tem, const0_rtx,
4542 mode, unsignedp, normalizep);
4543 if (tem == 0)
4544 delete_insns_since (last);
4545 return tem;
4548 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4549 the constant zero. Reject all other comparisons at this point. Only
4550 do LE and GT if branches are expensive since they are expensive on
4551 2-operand machines. */
4553 if (BRANCH_COST == 0
4554 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4555 || (code != EQ && code != NE
4556 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4557 return 0;
4559 /* See what we need to return. We can only return a 1, -1, or the
4560 sign bit. */
4562 if (normalizep == 0)
4564 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4565 normalizep = STORE_FLAG_VALUE;
4567 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4568 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4569 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4571 else
4572 return 0;
4575 /* Try to put the result of the comparison in the sign bit. Assume we can't
4576 do the necessary operation below. */
4578 tem = 0;
4580 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4581 the sign bit set. */
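/* E.g. 0 | -1 == -1 and -3 | -4 == -3 both have the sign bit set, while
   5 | 4 == 5 does not; A == 0 is the only case where A itself does not
   already carry the right sign bit.  */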
4583 if (code == LE)
4585 /* This is destructive, so SUBTARGET can't be OP0. */
4586 if (rtx_equal_p (subtarget, op0))
4587 subtarget = 0;
4589 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4590 OPTAB_WIDEN);
4591 if (tem)
4592 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4593 OPTAB_WIDEN);
4596 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4597 number of bits in the mode of OP0, minus one. */
4599 if (code == GT)
4601 if (rtx_equal_p (subtarget, op0))
4602 subtarget = 0;
4604 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4605 size_int (GET_MODE_BITSIZE (mode) - 1),
4606 subtarget, 0);
4607 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4608 OPTAB_WIDEN);
4611 if (code == EQ || code == NE)
4613 /* For EQ or NE, one way to do the comparison is to apply an operation
4614 that converts the operand into a positive number if it is nonzero
4615 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4616 for NE we negate. This puts the result in the sign bit. Then we
4617 normalize with a shift, if needed.
4619 Two operations that can do the above actions are ABS and FFS, so try
4620 them. If that doesn't work, and MODE is smaller than a full word,
4621 we can use zero-extension to the wider mode (an unsigned conversion)
4622 as the operation. */
4624 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4625 that is compensated by the subsequent overflow when subtracting
4626 one / negating. */
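/* After the ABS or FFS step the value is zero iff X is zero, and
   positive otherwise; subtracting 1 (for EQ) or negating (for NE) then
   leaves the answer in the sign bit. Even X == INT_MIN, where ABS
   overflows to a negative value, comes out right: the subtract or
   negate overflows again into a correctly-signed result.  */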
4628 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4629 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4630 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4631 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4632 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4634 op0 = protect_from_queue (op0, 0);
4635 tem = convert_modes (word_mode, mode, op0, 1);
4636 mode = word_mode;
4639 if (tem != 0)
4641 if (code == EQ)
4642 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4643 0, OPTAB_WIDEN);
4644 else
4645 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4648 /* If we couldn't do it that way, for NE we can "or" the two's complement
4649 of the value with itself. For EQ, we take the one's complement of
4650 that "or", which is an extra insn, so we only handle EQ if branches
4651 are expensive. */
4653 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4655 if (rtx_equal_p (subtarget, op0))
4656 subtarget = 0;
4658 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4659 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4660 OPTAB_WIDEN);
4662 if (tem && code == EQ)
4663 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4667 if (tem && normalizep)
4668 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4669 size_int (GET_MODE_BITSIZE (mode) - 1),
4670 subtarget, normalizep == 1);
4672 if (tem)
4674 if (GET_MODE (tem) != target_mode)
4676 convert_move (target, tem, 0);
4677 tem = target;
4679 else if (!subtarget)
4681 emit_move_insn (target, tem);
4682 tem = target;
4685 else
4686 delete_insns_since (last);
4688 return tem;
4691 /* Like emit_store_flag, but always succeeds. */
4693 rtx
4694 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4695 rtx target;
4696 enum rtx_code code;
4697 rtx op0, op1;
4698 enum machine_mode mode;
4699 int unsignedp;
4700 int normalizep;
4702 rtx tem, label;
4704 /* First see if emit_store_flag can do the job. */
4705 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4706 if (tem != 0)
4707 return tem;
4709 if (normalizep == 0)
4710 normalizep = 1;
4712 /* If this failed, we have to do this with set/compare/jump/set code. */
4714 if (GET_CODE (target) != REG
4715 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4716 target = gen_reg_rtx (GET_MODE (target));
4718 emit_move_insn (target, const1_rtx);
4719 label = gen_label_rtx ();
4720 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4721 NULL_RTX, label);
4723 emit_move_insn (target, const0_rtx);
4724 emit_label (label);
4726 return target;
4729 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4730 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4732 The algorithm is based on the code in expr.c:do_jump.
4734 Note that this does not perform a general comparison. Only variants
4735 generated within expmed.c are correctly handled, others abort (but could
4736 be handled if needed). */
4738 static void
4739 do_cmp_and_jump (arg1, arg2, op, mode, label)
4740 rtx arg1, arg2, label;
4741 enum rtx_code op;
4742 enum machine_mode mode;
4744 /* If this mode is an integer too wide to compare properly,
4745 compare word by word. Rely on cse to optimize constant cases. */
4747 if (GET_MODE_CLASS (mode) == MODE_INT
4748 && ! can_compare_p (op, mode, ccp_jump))
4750 rtx label2 = gen_label_rtx ();
4752 switch (op)
4754 case LTU:
4755 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4756 break;
4758 case LEU:
4759 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4760 break;
4762 case LT:
4763 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4764 break;
4766 case GT:
4767 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4768 break;
4770 case GE:
4771 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4772 break;
4774 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4775 those are the only equality operations we do. */
4776 case EQ:
4777 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4778 abort ();
4779 do_jump_by_parts_equality_rtx (arg1, label2, label);
4780 break;
4782 case NE:
4783 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4784 abort ();
4785 do_jump_by_parts_equality_rtx (arg1, label, label2);
4786 break;
4788 default:
4789 abort ();
4792 emit_label (label2);
4794 else
4795 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);