/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
                                          int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
                           int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
                               int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
                                   enum machine_mode, rtx));

#define CEIL(x,y) (((x) + (y) - 1) / (y))
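
/* For example, CEIL (13, 4) is 4: thirteen bits occupy four 4-bit units.  */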
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; otherwise, we use the mode of operand 3.  */
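
/* An illustrative sketch (not from the original source): for a C
   structure `struct s { int a : 3; unsigned int b : 7; };' held in
   memory, a front end storing to B would arrive here as roughly

     store_bit_field (s_rtx, 7, 3, VOIDmode, value_rtx, 4, 4);

   i.e. a 7-bit field starting at bit 3 in a 4-byte, 4-byte-aligned
   structure.  The exact arguments depend on the front end and target.  */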
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  int insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();
  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);

          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }

          emit_insn (GEN_FCN (icode)
                     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
        }
      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
          offset = 0;
        }
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */
#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
        maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
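
/* Illustrative example (a sketch, assuming a 32-bit little-endian word):
   storing VALUE into a 4-bit field at BITPOS 8 of a word W reduces to

     W = (W & 0xfffff0ff) | ((VALUE & 0xf) << 8);

   where the AND mask comes from mask_rtx (mode, bitpos, bitsize, 1) and
   the shifted value from lshift_value or expand_shift below.  */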
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);
  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
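
/* For example (a sketch, not from the original source): on a 32-bit
   target, a 20-bit field starting at BITPOS 28 spans two words, so it
   is stored as two parts -- 4 bits in the first word and 16 bits in
   the second -- each part handled by store_fixed_bit_field below.  */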
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
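
/* An illustrative sketch (not from the original source): reading the
   7-bit field B of `struct s { int a : 3; unsigned int b : 7; };'
   might reach here as roughly

     extract_bit_field (s_rtx, 7, 3, 1, NULL_RTX, SImode, SImode, 4, 4);

   asking for an unsigned 7-bit field starting at bit 3; the exact
   modes and alignment depend on the front end and target.  */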
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  int extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  int extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif
  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }

      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */
  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (extzv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extzv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (extv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */
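
/* Illustrative example (a sketch, assuming a 32-bit little-endian word):
   an unsigned 4-bit field at BITPOS 8 of word W comes out as

     (W >> 8) & 0xf;

   a signed field instead uses two shifts, roughly
   ((int) (W << 20)) >> 28, so the sign bit of the field is extended.  */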
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */

  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
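
/* Worked example (assuming HOST_BITS_PER_WIDE_INT == 32):
   mask_rtx (SImode, 8, 4, 0) builds 0x00000f00 -- four ones starting
   at bit 8 -- while mask_rtx (SImode, 8, 4, 1) builds 0xfffff0ff.  */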
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
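
/* For example (assuming HOST_BITS_PER_WIDE_INT == 32):
   lshift_value (SImode, GEN_INT (0x2b), 8, 4) truncates 0x2b to the
   4-bit value 0xb and shifts it left 8, yielding 0xb00.  */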
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */
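
/* A sketch of the case symmetric to store_split_bit_field: a 20-bit
   field at BITPOS 28 of a 32-bit word pair is fetched as a 4-bit part
   and a 16-bit part, the parts are shifted into position and IORed
   together, and a signed result is then sign-extended with two
   arithmetic shifts.  */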
1749 static rtx
1750 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1751 rtx op0;
1752 int bitsize, bitpos, unsignedp, align;
1754 int unit;
1755 int bitsdone = 0;
1756 rtx result = NULL_RTX;
1757 int first = 1;
1759 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1760 much at a time. */
1761 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1762 unit = BITS_PER_WORD;
1763 else
1764 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1766 while (bitsdone < bitsize)
1768 int thissize;
1769 rtx part, word;
1770 int thispos;
1771 int offset;
1773 offset = (bitpos + bitsdone) / unit;
1774 thispos = (bitpos + bitsdone) % unit;
1776 /* THISSIZE must not overrun a word boundary. Otherwise,
1777 extract_fixed_bit_field will call us again, and we will mutually
1778 recurse forever. */
1779 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1780 thissize = MIN (thissize, unit - thispos);
1782 /* If OP0 is a register, then handle OFFSET here.
1784 When handling multiword bitfields, extract_bit_field may pass
1785 down a word_mode SUBREG of a larger REG for a bitfield that actually
1786 crosses a word boundary. Thus, for a SUBREG, we must find
1787 the current word starting from the base register. */
1788 if (GET_CODE (op0) == SUBREG)
1790 word = operand_subword_force (SUBREG_REG (op0),
1791 SUBREG_WORD (op0) + offset,
1792 GET_MODE (SUBREG_REG (op0)));
1793 offset = 0;
1795 else if (GET_CODE (op0) == REG)
1797 word = operand_subword_force (op0, offset, GET_MODE (op0));
1798 offset = 0;
1800 else
1801 word = op0;
1803 /* Extract the parts in bit-counting order,
1804 whose meaning is determined by BYTES_PER_UNIT.
1805 OFFSET is in UNITs, and UNIT is in bits.
1806 extract_fixed_bit_field wants offset in bytes. */
1807 part = extract_fixed_bit_field (word_mode, word,
1808 offset * unit / BITS_PER_UNIT,
1809 thissize, thispos, 0, 1, align);
1810 bitsdone += thissize;
1812 /* Shift this part into place for the result. */
1813 if (BYTES_BIG_ENDIAN)
1815 if (bitsize != bitsdone)
1816 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1817 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1819 else
1821 if (bitsdone != thissize)
1822 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1823 build_int_2 (bitsdone - thissize, 0), 0, 1);
1826 if (first)
1827 result = part;
1828 else
1829 /* Combine the parts with bitwise or. This works
1830 because we extracted each part as an unsigned bit field. */
1831 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1832 OPTAB_LIB_WIDEN);
1834 first = 0;
1837 /* Unsigned bit field: we are done. */
1838 if (unsignedp)
1839 return result;
1840 /* Signed bit field: sign-extend with two arithmetic shifts. */
1841 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1842 build_int_2 (BITS_PER_WORD - bitsize, 0),
1843 NULL_RTX, 0);
1844 return expand_shift (RSHIFT_EXPR, word_mode, result,
1845 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
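/* Editorial aside, not part of the original source: the loop above in
   miniature, for the little-endian case of a field spanning exactly two
   32-bit words.  Each piece is extracted as an unsigned field and the
   pieces are combined with bitwise or, just as the `first'/ior code does.
   Names and the fixed width are illustrative assumptions.  */
#include <stdint.h>

static uint32_t
extract_split32 (uint32_t word0, uint32_t word1, int bitpos, int bitsize)
{
  /* First pass: the piece in WORD0 is bits BITPOS..31.  */
  int lowbits = 32 - bitpos;
  uint32_t part0 = word0 >> bitpos;
  /* Second pass: the remaining BITSIZE - LOWBITS bits sit at the bottom
     of WORD1; shift them into place and or the pieces together.
     (Assumes 0 < bitsize - lowbits < 32, i.e. the field really spans.)  */
  uint32_t part1 = word1 & (((uint32_t) 1 << (bitsize - lowbits)) - 1);
  return part0 | (part1 << lowbits);
}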
1848 /* Add INC into TARGET. */
1850 void
1851 expand_inc (target, inc)
1852 rtx target, inc;
1854 rtx value = expand_binop (GET_MODE (target), add_optab,
1855 target, inc,
1856 target, 0, OPTAB_LIB_WIDEN);
1857 if (value != target)
1858 emit_move_insn (target, value);
1861 /* Subtract DEC from TARGET. */
1863 void
1864 expand_dec (target, dec)
1865 rtx target, dec;
1867 rtx value = expand_binop (GET_MODE (target), sub_optab,
1868 target, dec,
1869 target, 0, OPTAB_LIB_WIDEN);
1870 if (value != target)
1871 emit_move_insn (target, value);
1874 /* Output a shift instruction for expression code CODE,
1875 with SHIFTED being the rtx for the value to shift,
1876 and AMOUNT the tree for the amount to shift by.
1877 Store the result in the rtx TARGET, if that is convenient.
1878 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1879 Return the rtx for where the value is. */
1882 expand_shift (code, mode, shifted, amount, target, unsignedp)
1883 enum tree_code code;
1884 register enum machine_mode mode;
1885 rtx shifted;
1886 tree amount;
1887 register rtx target;
1888 int unsignedp;
1890 register rtx op1, temp = 0;
1891 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1892 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1893 int try;
1895 /* We used to detect shift counts computed by NEGATE_EXPR
1896 and shift in the other direction, but that does not work
1897 on all machines. */
1899 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1901 #ifdef SHIFT_COUNT_TRUNCATED
1902 if (SHIFT_COUNT_TRUNCATED)
1904 if (GET_CODE (op1) == CONST_INT
1905 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1906 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1907 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1908 % GET_MODE_BITSIZE (mode));
1909 else if (GET_CODE (op1) == SUBREG
1910 && SUBREG_WORD (op1) == 0)
1911 op1 = SUBREG_REG (op1);
1913 #endif
1915 if (op1 == const0_rtx)
1916 return shifted;
1918 for (try = 0; temp == 0 && try < 3; try++)
1920 enum optab_methods methods;
1922 if (try == 0)
1923 methods = OPTAB_DIRECT;
1924 else if (try == 1)
1925 methods = OPTAB_WIDEN;
1926 else
1927 methods = OPTAB_LIB_WIDEN;
1929 if (rotate)
1931 /* Widening does not work for rotation. */
1932 if (methods == OPTAB_WIDEN)
1933 continue;
1934 else if (methods == OPTAB_LIB_WIDEN)
1936 /* If we have been unable to open-code this by a rotation,
1937 do it as the IOR of two shifts. I.e., to rotate A
1938 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1939 where C is the bitsize of A.
1941 It is theoretically possible that the target machine might
1942 not be able to perform either shift and hence we would
1943 be making two libcalls rather than just the one for the
1944 shift (similarly if IOR could not be done). We will allow
1945 this extremely unlikely lossage to avoid complicating the
1946 code below. */
1948 rtx subtarget = target == shifted ? 0 : target;
1949 rtx temp1;
1950 tree type = TREE_TYPE (amount);
1951 tree new_amount = make_tree (type, op1);
1952 tree other_amount
1953 = fold (build (MINUS_EXPR, type,
1954 convert (type,
1955 build_int_2 (GET_MODE_BITSIZE (mode),
1956 0)),
1957 amount));
1959 shifted = force_reg (mode, shifted);
1961 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1962 mode, shifted, new_amount, subtarget, 1);
1963 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1964 mode, shifted, other_amount, 0, 1);
1965 return expand_binop (mode, ior_optab, temp, temp1, target,
1966 unsignedp, methods);
1969 temp = expand_binop (mode,
1970 left ? rotl_optab : rotr_optab,
1971 shifted, op1, target, unsignedp, methods);
1973 /* If we don't have the rotate, but we are rotating by a constant
1974 that is in range, try a rotate in the opposite direction. */
1976 if (temp == 0 && GET_CODE (op1) == CONST_INT
1977 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1978 temp = expand_binop (mode,
1979 left ? rotr_optab : rotl_optab,
1980 shifted,
1981 GEN_INT (GET_MODE_BITSIZE (mode)
1982 - INTVAL (op1)),
1983 target, unsignedp, methods);
1985 else if (unsignedp)
1986 temp = expand_binop (mode,
1987 left ? ashl_optab : lshr_optab,
1988 shifted, op1, target, unsignedp, methods);
1990 /* Do arithmetic shifts.
1991 Also, if we are going to widen the operand, we can just as well
1992 use an arithmetic right-shift instead of a logical one. */
1993 if (temp == 0 && ! rotate
1994 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1996 enum optab_methods methods1 = methods;
1998 /* If trying to widen a logical shift to an arithmetic shift,
1999 don't accept an arithmetic shift of the same size. */
2000 if (unsignedp)
2001 methods1 = OPTAB_MUST_WIDEN;
2003 /* Arithmetic shift */
2005 temp = expand_binop (mode,
2006 left ? ashl_optab : ashr_optab,
2007 shifted, op1, target, unsignedp, methods1);
2010 /* We used to try extzv here for logical right shifts, but that was
2011 only useful for one machine, the VAX, and caused poor code
2012 generation there for lshrdi3, so the code was deleted and a
2013 define_expand for lshrsi3 was added to vax.md. */
2016 if (temp == 0)
2017 abort ();
2018 return temp;
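/* Editorial aside, not part of the original source: the IOR-of-two-shifts
   fallback above on a fixed-width host integer -- rotating A left by N is
   (A << N) | (A >> (C - N)) for unsigned A, where C is the bit width.
   A minimal sketch assuming 0 < n < 32.  */
#include <stdint.h>

static uint32_t
rotl32 (uint32_t a, int n)
{
  return (a << n) | (a >> (32 - n));
}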
2021 enum alg_code { alg_zero, alg_m, alg_shift,
2022 alg_add_t_m2, alg_sub_t_m2,
2023 alg_add_factor, alg_sub_factor,
2024 alg_add_t2_m, alg_sub_t2_m,
2025 alg_add, alg_subtract, alg_factor, alg_shiftop };
2027 /* This structure records a sequence of operations.
2028 `ops' is the number of operations recorded.
2029 `cost' is their total cost.
2030 The operations are stored in `op' and the corresponding
2031 logarithms of the integer coefficients in `log'.
2033 These are the operations:
2034 alg_zero total := 0;
2035 alg_m total := multiplicand;
2036 alg_shift total := total * coeff
2037 alg_add_t_m2 total := total + multiplicand * coeff;
2038 alg_sub_t_m2 total := total - multiplicand * coeff;
2039 alg_add_factor total := total * coeff + total;
2040 alg_sub_factor total := total * coeff - total;
2041 alg_add_t2_m total := total * coeff + multiplicand;
2042 alg_sub_t2_m total := total * coeff - multiplicand;
2044 The first operand must be either alg_zero or alg_m. */
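/* Editorial aside, not part of the original source: one concrete sequence
   that fits this structure.  45 = (2**3 + 1) * (2**2 + 1), so a valid
   recording is { alg_m, alg_add_factor (log 3), alg_add_factor (log 2) };
   the helper name is assumed for illustration.  */
static unsigned int
times45 (unsigned int x)
{
  unsigned int total = x;         /* alg_m:          total := multiplicand    */
  total = (total << 3) + total;   /* alg_add_factor: total := total*8 + total */
  total = (total << 2) + total;   /* alg_add_factor: total := total*4 + total */
  return total;                   /* == 45 * x */
}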
2046 struct algorithm
2048 short cost;
2049 short ops;
2050 /* The sizes of the OP and LOG fields are not directly related to the
2051 word size, but the worst case arises when we have few
2052 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2053 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2054 wordsize operations in total. */
2055 enum alg_code op[MAX_BITS_PER_WORD];
2056 char log[MAX_BITS_PER_WORD];
2059 static void synth_mult PROTO((struct algorithm *,
2060 unsigned HOST_WIDE_INT,
2061 int));
2062 static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
2063 int, int,
2064 unsigned HOST_WIDE_INT *,
2065 int *, int *));
2066 static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
2067 int));
2068 /* Compute and return the best algorithm for multiplying by T.
2069 The algorithm must cost less than COST_LIMIT.
2070 If retval.cost >= COST_LIMIT, no algorithm was found and all
2071 other fields of the returned struct are undefined. */
2073 static void
2074 synth_mult (alg_out, t, cost_limit)
2075 struct algorithm *alg_out;
2076 unsigned HOST_WIDE_INT t;
2077 int cost_limit;
2079 int m;
2080 struct algorithm *alg_in, *best_alg;
2081 int cost;
2082 unsigned HOST_WIDE_INT q;
2084 /* Indicate that no algorithm is yet found. If no algorithm
2085 is found, this value will be returned and indicate failure. */
2086 alg_out->cost = cost_limit;
2088 if (cost_limit <= 0)
2089 return;
2091 /* t == 1 can be done at zero cost. */
2092 if (t == 1)
2094 alg_out->ops = 1;
2095 alg_out->cost = 0;
2096 alg_out->op[0] = alg_m;
2097 return;
2100 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2101 fail now. */
2102 if (t == 0)
2104 if (zero_cost >= cost_limit)
2105 return;
2106 else
2108 alg_out->ops = 1;
2109 alg_out->cost = zero_cost;
2110 alg_out->op[0] = alg_zero;
2111 return;
2115 /* We'll be needing a couple extra algorithm structures now. */
2117 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2118 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2120 /* If we have a group of zero bits at the low-order part of T, try
2121 multiplying by the remaining bits and then doing a shift. */
2123 if ((t & 1) == 0)
2125 m = floor_log2 (t & -t); /* m = number of low zero bits */
2126 q = t >> m;
2127 cost = shift_cost[m];
2128 synth_mult (alg_in, q, cost_limit - cost);
2130 cost += alg_in->cost;
2131 if (cost < cost_limit)
2133 struct algorithm *x;
2134 x = alg_in, alg_in = best_alg, best_alg = x;
2135 best_alg->log[best_alg->ops] = m;
2136 best_alg->op[best_alg->ops] = alg_shift;
2137 cost_limit = cost;
2141 /* If we have an odd number, add or subtract one. */
2142 if ((t & 1) != 0)
2144 unsigned HOST_WIDE_INT w;
2146 for (w = 1; (w & t) != 0; w <<= 1)
2148 /* If T was -1, then W will be zero after the loop. This is another
2149 case where T ends with ...111. Handling this by multiplying by (T + 1)
2150 and subtracting 1 produces slightly better code and selects an
2151 algorithm much faster than treating it like the ...0111 case
2152 below. */
2153 if (w == 0
2154 || (w > 2
2155 /* Reject the case where t is 3.
2156 Thus we prefer addition in that case. */
2157 && t != 3))
2159 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2161 cost = add_cost;
2162 synth_mult (alg_in, t + 1, cost_limit - cost);
2164 cost += alg_in->cost;
2165 if (cost < cost_limit)
2167 struct algorithm *x;
2168 x = alg_in, alg_in = best_alg, best_alg = x;
2169 best_alg->log[best_alg->ops] = 0;
2170 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2171 cost_limit = cost;
2174 else
2176 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2178 cost = add_cost;
2179 synth_mult (alg_in, t - 1, cost_limit - cost);
2181 cost += alg_in->cost;
2182 if (cost < cost_limit)
2184 struct algorithm *x;
2185 x = alg_in, alg_in = best_alg, best_alg = x;
2186 best_alg->log[best_alg->ops] = 0;
2187 best_alg->op[best_alg->ops] = alg_add_t_m2;
2188 cost_limit = cost;
2193 /* Look for factors of t of the form
2194 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2195 If we find such a factor, we can multiply by t using an algorithm that
2196 multiplies by q, shifts the result left by m, and adds or subtracts the unshifted value.
2198 We search for large factors first and loop down, even if large factors
2199 are less probable than small ones; if we find a large factor we will find a
2200 good sequence quickly, and therefore be able to prune (by decreasing
2201 COST_LIMIT) the search. */
2203 for (m = floor_log2 (t - 1); m >= 2; m--)
2205 unsigned HOST_WIDE_INT d;
2207 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2208 if (t % d == 0 && t > d)
2210 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2211 synth_mult (alg_in, t / d, cost_limit - cost);
2213 cost += alg_in->cost;
2214 if (cost < cost_limit)
2216 struct algorithm *x;
2217 x = alg_in, alg_in = best_alg, best_alg = x;
2218 best_alg->log[best_alg->ops] = m;
2219 best_alg->op[best_alg->ops] = alg_add_factor;
2220 cost_limit = cost;
2222 /* Other factors will have been taken care of in the recursion. */
2223 break;
2226 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2227 if (t % d == 0 && t > d)
2229 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2230 synth_mult (alg_in, t / d, cost_limit - cost);
2232 cost += alg_in->cost;
2233 if (cost < cost_limit)
2235 struct algorithm *x;
2236 x = alg_in, alg_in = best_alg, best_alg = x;
2237 best_alg->log[best_alg->ops] = m;
2238 best_alg->op[best_alg->ops] = alg_sub_factor;
2239 cost_limit = cost;
2241 break;
2245 /* Try shift-and-add (load effective address) instructions,
2246 i.e. do a*3, a*5, a*9. */
2247 if ((t & 1) != 0)
2249 q = t - 1;
2250 q = q & -q;
2251 m = exact_log2 (q);
2252 if (m >= 0)
2254 cost = shiftadd_cost[m];
2255 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2257 cost += alg_in->cost;
2258 if (cost < cost_limit)
2260 struct algorithm *x;
2261 x = alg_in, alg_in = best_alg, best_alg = x;
2262 best_alg->log[best_alg->ops] = m;
2263 best_alg->op[best_alg->ops] = alg_add_t2_m;
2264 cost_limit = cost;
2268 q = t + 1;
2269 q = q & -q;
2270 m = exact_log2 (q);
2271 if (m >= 0)
2273 cost = shiftsub_cost[m];
2274 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2276 cost += alg_in->cost;
2277 if (cost < cost_limit)
2279 struct algorithm *x;
2280 x = alg_in, alg_in = best_alg, best_alg = x;
2281 best_alg->log[best_alg->ops] = m;
2282 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2283 cost_limit = cost;
2288 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2289 we have not found any algorithm. */
2290 if (cost_limit == alg_out->cost)
2291 return;
2293 /* If the sequence is getting too long for `struct algorithm'
2294 to record, make this search fail. */
2295 if (best_alg->ops == MAX_BITS_PER_WORD)
2296 return;
2298 /* Copy the algorithm from temporary space to the space at alg_out.
2299 We avoid using structure assignment because the majority of
2300 best_alg is normally undefined, and this is a critical function. */
2301 alg_out->ops = best_alg->ops + 1;
2302 alg_out->cost = cost_limit;
2303 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2304 alg_out->ops * sizeof *alg_out->op);
2305 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2306 alg_out->ops * sizeof *alg_out->log);
2309 /* Perform a multiplication and return an rtx for the result.
2310 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2311 TARGET is a suggestion for where to store the result (an rtx).
2313 We check specially for a constant integer as OP1.
2314 If you want this check for OP0 as well, then before calling
2315 you should swap the two operands if OP0 would be constant. */
2318 expand_mult (mode, op0, op1, target, unsignedp)
2319 enum machine_mode mode;
2320 register rtx op0, op1, target;
2321 int unsignedp;
2323 rtx const_op1 = op1;
2325 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2326 less than or equal in size to `unsigned int' this doesn't matter.
2327 If the mode is larger than `unsigned int', then synth_mult works only
2328 if the constant value exactly fits in an `unsigned int' without any
2329 truncation. This means that multiplying by negative values does
2330 not work; results are off by 2^32 on a 32 bit machine. */
2332 /* If we are multiplying in DImode, it may still be a win
2333 to try to work with shifts and adds. */
2334 if (GET_CODE (op1) == CONST_DOUBLE
2335 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2336 && HOST_BITS_PER_INT >= BITS_PER_WORD
2337 && CONST_DOUBLE_HIGH (op1) == 0)
2338 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2339 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2340 && GET_CODE (op1) == CONST_INT
2341 && INTVAL (op1) < 0)
2342 const_op1 = 0;
2344 /* We used to test optimize here, on the grounds that it's better to
2345 produce a smaller program when -O is not used.
2346 But this causes such a terrible slowdown sometimes
2347 that it seems better to use synth_mult always. */
2349 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2351 struct algorithm alg;
2352 struct algorithm alg2;
2353 HOST_WIDE_INT val = INTVAL (op1);
2354 HOST_WIDE_INT val_so_far;
2355 rtx insn;
2356 int mult_cost;
2357 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2359 /* Try to do the computation three ways: multiply by the negative of OP1
2360 and then negate, do the multiplication directly, or do multiplication
2361 by OP1 - 1. */
2363 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2364 mult_cost = MIN (12 * add_cost, mult_cost);
2366 synth_mult (&alg, val, mult_cost);
2368 /* This works only if the inverted value actually fits in an
2369 `unsigned int'. */
2370 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2372 synth_mult (&alg2, - val,
2373 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2374 if (alg2.cost + negate_cost < alg.cost)
2375 alg = alg2, variant = negate_variant;
2378 /* This proves very useful for division-by-constant. */
2379 synth_mult (&alg2, val - 1,
2380 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2381 if (alg2.cost + add_cost < alg.cost)
2382 alg = alg2, variant = add_variant;
2384 if (alg.cost < mult_cost)
2386 /* We found something cheaper than a multiply insn. */
2387 int opno;
2388 rtx accum, tem;
2390 op0 = protect_from_queue (op0, 0);
2392 /* Avoid referencing memory over and over.
2393 For speed, but also for correctness when mem is volatile. */
2394 if (GET_CODE (op0) == MEM)
2395 op0 = force_reg (mode, op0);
2397 /* ACCUM starts out either as OP0 or as a zero, depending on
2398 the first operation. */
2400 if (alg.op[0] == alg_zero)
2402 accum = copy_to_mode_reg (mode, const0_rtx);
2403 val_so_far = 0;
2405 else if (alg.op[0] == alg_m)
2407 accum = copy_to_mode_reg (mode, op0);
2408 val_so_far = 1;
2410 else
2411 abort ();
2413 for (opno = 1; opno < alg.ops; opno++)
2415 int log = alg.log[opno];
2416 int preserve = preserve_subexpressions_p ();
2417 rtx shift_subtarget = preserve ? 0 : accum;
2418 rtx add_target
2419 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2420 && ! preserve)
2421 ? target : 0;
2422 rtx accum_target = preserve ? 0 : accum;
2424 switch (alg.op[opno])
2426 case alg_shift:
2427 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2428 build_int_2 (log, 0), NULL_RTX, 0);
2429 val_so_far <<= log;
2430 break;
2432 case alg_add_t_m2:
2433 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2434 build_int_2 (log, 0), NULL_RTX, 0);
2435 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2436 add_target
2437 ? add_target : accum_target);
2438 val_so_far += (HOST_WIDE_INT) 1 << log;
2439 break;
2441 case alg_sub_t_m2:
2442 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2443 build_int_2 (log, 0), NULL_RTX, 0);
2444 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2445 add_target
2446 ? add_target : accum_target);
2447 val_so_far -= (HOST_WIDE_INT) 1 << log;
2448 break;
2450 case alg_add_t2_m:
2451 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2452 build_int_2 (log, 0), shift_subtarget,
2454 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2455 add_target
2456 ? add_target : accum_target);
2457 val_so_far = (val_so_far << log) + 1;
2458 break;
2460 case alg_sub_t2_m:
2461 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2462 build_int_2 (log, 0), shift_subtarget,
2464 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2465 add_target
2466 ? add_target : accum_target);
2467 val_so_far = (val_so_far << log) - 1;
2468 break;
2470 case alg_add_factor:
2471 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2472 build_int_2 (log, 0), NULL_RTX, 0);
2473 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2474 add_target
2475 ? add_target : accum_target);
2476 val_so_far += val_so_far << log;
2477 break;
2479 case alg_sub_factor:
2480 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2481 build_int_2 (log, 0), NULL_RTX, 0);
2482 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2483 (add_target ? add_target
2484 : preserve ? 0 : tem));
2485 val_so_far = (val_so_far << log) - val_so_far;
2486 break;
2488 default:
2489 abort ();
2492 /* Write a REG_EQUAL note on the last insn so that we can cse
2493 multiplication sequences. */
2495 insn = get_last_insn ();
2496 set_unique_reg_note (insn,
2497 REG_EQUAL,
2498 gen_rtx_MULT (mode, op0,
2499 GEN_INT (val_so_far)));
2502 if (variant == negate_variant)
2504 val_so_far = - val_so_far;
2505 accum = expand_unop (mode, neg_optab, accum, target, 0);
2507 else if (variant == add_variant)
2509 val_so_far = val_so_far + 1;
2510 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2513 if (val != val_so_far)
2514 abort ();
2516 return accum;
2520 /* This used to use umul_optab if unsigned, but for non-widening multiply
2521 there is no difference between signed and unsigned. */
2522 op0 = expand_binop (mode, smul_optab,
2523 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2524 if (op0 == 0)
2525 abort ();
2526 return op0;
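/* Editorial aside, not part of the original source: the negate_variant
   above on a concrete constant.  Multiplying by -3 can be done by letting
   synth_mult handle 3 and appending a negation, as the
   variant == negate_variant branch does.  The helper name is assumed.  */
static int
times_minus3 (int x)
{
  int t = (x << 1) + x;   /* 3*x by shift-and-add (alg_add_t_m2, log 1) */
  return -t;              /* negate_variant: negate the accumulator     */
}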
2529 /* Return the smallest n such that 2**n >= X. */
2532 ceil_log2 (x)
2533 unsigned HOST_WIDE_INT x;
2535 return floor_log2 (x - 1) + 1;
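/* Editorial aside, not part of the original source: a host-side check of
   the identity above.  floor_log2 (0) is -1 in GCC, so ceil_log2 (1) == 0;
   e.g. ceil_log2 (5) == 3 because 2**3 = 8 >= 5 > 4.  The helpers below
   are assumed stand-ins, not the real GCC routines.  */
static int
floor_log2_host (unsigned long x)
{
  int l = -1;
  while (x)
    x >>= 1, l++;
  return l;
}

static int
ceil_log2_host (unsigned long x)
{
  return floor_log2_host (x - 1) + 1;   /* 1 -> 0, 5 -> 3, 8 -> 3, 9 -> 4 */
}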
2538 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2539 replace division by D, and put the least significant N bits of the result
2540 in *MULTIPLIER_PTR and return the most significant bit.
2542 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2543 needed precision is in PRECISION (should be <= N).
2545 PRECISION should be as small as possible so this function can choose
2546 the multiplier more freely.
2548 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2549 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2551 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2552 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2554 static
2555 unsigned HOST_WIDE_INT
2556 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2557 unsigned HOST_WIDE_INT d;
2558 int n;
2559 int precision;
2560 unsigned HOST_WIDE_INT *multiplier_ptr;
2561 int *post_shift_ptr;
2562 int *lgup_ptr;
2564 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2565 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2566 int lgup, post_shift;
2567 int pow, pow2;
2568 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2570 /* lgup = ceil(log2(divisor)); */
2571 lgup = ceil_log2 (d);
2573 if (lgup > n)
2574 abort ();
2576 pow = n + lgup;
2577 pow2 = n + lgup - precision;
2579 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2581 /* We could handle this with some effort, but this case is much better
2582 handled directly with a scc insn, so rely on caller using that. */
2583 abort ();
2586 /* mlow = 2^(N + lgup)/d */
2587 if (pow >= HOST_BITS_PER_WIDE_INT)
2589 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2590 nl = 0;
2592 else
2594 nh = 0;
2595 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2597 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2598 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2600 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2601 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2602 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2603 else
2604 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2605 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2606 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2608 if (mhigh_hi && nh - d >= d)
2609 abort ();
2610 if (mhigh_hi > 1 || mlow_hi > 1)
2611 abort ();
2612 /* assert that mlow < mhigh. */
2613 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2614 abort ();
2616 /* If precision == N, then mlow, mhigh exceed 2^N
2617 (but they do not exceed 2^(N+1)). */
2619 /* Reduce to lowest terms */
2620 for (post_shift = lgup; post_shift > 0; post_shift--)
2622 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2623 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2624 if (ml_lo >= mh_lo)
2625 break;
2627 mlow_hi = 0;
2628 mlow_lo = ml_lo;
2629 mhigh_hi = 0;
2630 mhigh_lo = mh_lo;
2633 *post_shift_ptr = post_shift;
2634 *lgup_ptr = lgup;
2635 if (n < HOST_BITS_PER_WIDE_INT)
2637 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2638 *multiplier_ptr = mhigh_lo & mask;
2639 return mhigh_lo >= mask;
2641 else
2643 *multiplier_ptr = mhigh_lo;
2644 return mhigh_hi;
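/* Editorial aside, not part of the original source: what a returned
   multiplier feeds.  For d = 7 with n = precision = 32 the multiplier is
   the 33-bit value 2**32 + 0x24924925 with post_shift 3; since it does
   not fit in 32 bits, the caller (see the mh != 0 case in expand_divmod
   below) uses a subtract/shift/add fixup.  The constants are worked out
   by hand and should be treated as an assumed example.  */
#include <stdint.h>

static uint32_t
udiv7 (uint32_t x)
{
  uint32_t t1 = (uint32_t) (((uint64_t) x * 0x24924925u) >> 32);
  return (((x - t1) >> 1) + t1) >> 2;   /* >> (post_shift - 1) */
}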
2648 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2649 congruent to 1 (mod 2**N). */
2651 static unsigned HOST_WIDE_INT
2652 invert_mod2n (x, n)
2653 unsigned HOST_WIDE_INT x;
2654 int n;
2656 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2658 /* The algorithm notes that the choice y = x satisfies
2659 x*y == 1 mod 2^3, since x is assumed odd.
2660 Each iteration doubles the number of bits of significance in y. */
2662 unsigned HOST_WIDE_INT mask;
2663 unsigned HOST_WIDE_INT y = x;
2664 int nbit = 3;
2666 mask = (n == HOST_BITS_PER_WIDE_INT
2667 ? ~(unsigned HOST_WIDE_INT) 0
2668 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2670 while (nbit < n)
2672 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2673 nbit *= 2;
2675 return y;
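/* Editorial aside, not part of the original source: the same Newton
   iteration on a fixed 32-bit word.  y = x is already an inverse mod 2**3
   for odd x, and each step doubles the number of correct low bits.  */
#include <stdint.h>

static uint32_t
invert_mod2_32 (uint32_t x)             /* X must be odd.  */
{
  uint32_t y = x;                       /* correct mod 2**3 */
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);                /* correct mod 2**(2*nbit) */
  return y;
}
/* E.g. invert_mod2_32 (7) == 0xb6db6db7 and 7 * 0xb6db6db7 == 1 (mod 2**32). */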
2678 /* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
2679 signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high half of
2680 the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2681 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2682 become signed.
2684 The result is put in TARGET if that is convenient.
2686 MODE is the mode of operation. */
2689 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2690 enum machine_mode mode;
2691 register rtx adj_operand, op0, op1, target;
2692 int unsignedp;
2694 rtx tem;
2695 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2697 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2698 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2699 NULL_RTX, 0);
2700 tem = expand_and (tem, op1, NULL_RTX);
2701 adj_operand
2702 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2703 adj_operand);
2705 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2706 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2707 NULL_RTX, 0);
2708 tem = expand_and (tem, op0, NULL_RTX);
2709 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2710 target);
2712 return target;
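/* Editorial aside, not part of the original source: the identity the two
   shift/and/add steps above implement, checked with 32x32->64 host
   arithmetic.  With a and b the sign bits of X and Y,
     uhigh (x, y) == shigh (x, y) + (a ? y : 0) + (b ? x : 0)  (mod 2**32),
   and symmetrically for the other direction.  The helper name is assumed.  */
#include <stdint.h>

static uint32_t
uhigh_from_shigh (int32_t x, int32_t y)
{
  uint32_t shigh = (uint32_t) (((int64_t) x * y) >> 32);
  uint32_t adj = shigh + (x < 0 ? (uint32_t) y : 0);   /* first adjustment  */
  return adj + (y < 0 ? (uint32_t) x : 0);             /* second adjustment */
}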
2715 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2716 in TARGET if that is convenient, and return where the result is. If the
2717 operation cannot be performed, 0 is returned.
2719 MODE is the mode of operation and result.
2721 UNSIGNEDP nonzero means unsigned multiply.
2723 MAX_COST is the total allowed cost for the expanded RTL. */
2726 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2727 enum machine_mode mode;
2728 register rtx op0, target;
2729 unsigned HOST_WIDE_INT cnst1;
2730 int unsignedp;
2731 int max_cost;
2733 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2734 optab mul_highpart_optab;
2735 optab moptab;
2736 rtx tem;
2737 int size = GET_MODE_BITSIZE (mode);
2738 rtx op1, wide_op1;
2740 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2741 if (size > HOST_BITS_PER_WIDE_INT)
2742 abort ();
2744 op1 = GEN_INT (cnst1);
2746 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2747 wide_op1 = op1;
2748 else
2749 wide_op1
2750 = immed_double_const (cnst1,
2751 (unsignedp
2752 ? (HOST_WIDE_INT) 0
2753 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2754 wider_mode);
2756 /* expand_mult handles constant multiplication of word_mode
2757 or narrower. It does a poor job for large modes. */
2758 if (size < BITS_PER_WORD
2759 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2761 /* We have to do this, since expand_binop doesn't do conversion for
2762 multiply. Maybe change expand_binop to handle widening multiply? */
2763 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2765 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2766 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2767 build_int_2 (size, 0), NULL_RTX, 1);
2768 return convert_modes (mode, wider_mode, tem, unsignedp);
2771 if (target == 0)
2772 target = gen_reg_rtx (mode);
2774 /* Firstly, try using a multiplication insn that only generates the needed
2775 high part of the product, and in the sign flavor of unsignedp. */
2776 if (mul_highpart_cost[(int) mode] < max_cost)
2778 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2779 target = expand_binop (mode, mul_highpart_optab,
2780 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2781 if (target)
2782 return target;
2785 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2786 Need to adjust the result after the multiplication. */
2787 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2789 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2790 target = expand_binop (mode, mul_highpart_optab,
2791 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2792 if (target)
2793 /* We used the wrong signedness. Adjust the result. */
2794 return expand_mult_highpart_adjust (mode, target, op0,
2795 op1, target, unsignedp);
2798 /* Try widening multiplication. */
2799 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2800 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2801 && mul_widen_cost[(int) wider_mode] < max_cost)
2803 op1 = force_reg (mode, op1);
2804 goto try;
2807 /* Try widening the mode and performing a non-widening multiplication. */
2808 moptab = smul_optab;
2809 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2810 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2812 op1 = wide_op1;
2813 goto try;
2816 /* Try widening multiplication of opposite signedness, and adjust. */
2817 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2818 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2819 && (mul_widen_cost[(int) wider_mode]
2820 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2822 rtx regop1 = force_reg (mode, op1);
2823 tem = expand_binop (wider_mode, moptab, op0, regop1,
2824 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2825 if (tem != 0)
2827 /* Extract the high half of the just generated product. */
2828 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2829 build_int_2 (size, 0), NULL_RTX, 1);
2830 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2831 /* We used the wrong signedness. Adjust the result. */
2832 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2833 target, unsignedp);
2837 return 0;
2839 try:
2840 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2841 tem = expand_binop (wider_mode, moptab, op0, op1,
2842 NULL_RTX, unsignedp, OPTAB_WIDEN);
2843 if (tem == 0)
2844 return 0;
2846 /* Extract the high half of the just generated product. */
2847 if (mode == word_mode)
2849 return gen_highpart (mode, tem);
2851 else
2853 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2854 build_int_2 (size, 0), NULL_RTX, 1);
2855 return convert_modes (mode, wider_mode, tem, unsignedp);
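/* Editorial aside, not part of the original source: the widening-multiply
   fallback above in miniature -- multiply in a mode twice as wide, then
   take the high half with a logical right shift by the narrow width.  */
#include <stdint.h>

static uint32_t
umulh32 (uint32_t a, uint32_t b)
{
  return (uint32_t) (((uint64_t) a * b) >> 32);
}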
2859 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2860 if that is convenient, and returning where the result is.
2861 You may request either the quotient or the remainder as the result;
2862 specify REM_FLAG nonzero to get the remainder.
2864 CODE is the expression code for which kind of division this is;
2865 it controls how rounding is done. MODE is the machine mode to use.
2866 UNSIGNEDP nonzero means do unsigned division. */
2868 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2869 and then correct it by or'ing in missing high bits
2870 if result of ANDI is nonzero.
2871 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2872 This could optimize to a bfexts instruction.
2873 But C doesn't use these operations, so their optimizations are
2874 left for later. */
2875 /* ??? For modulo, we don't actually need the highpart of the first product;
2876 the low part will do nicely. And for small divisors, the second multiply
2877 can also be a low-part only multiply or even be completely left out.
2878 E.g. to calculate the remainder of a division by 3 with a 32 bit
2879 multiply, multiply with 0x55555556 and extract the upper two bits;
2880 the result is exact for inputs up to 0x1fffffff.
2881 The input range can be reduced by using cross-sum rules.
2882 For odd divisors >= 3, the following table gives right shift counts
2883 so that if a number is shifted by an integer multiple of the given
2884 amount, the remainder stays the same:
2885 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2886 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2887 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2888 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2889 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2891 Cross-sum rules for even numbers can be derived by leaving as many bits
2892 to the right alone as the divisor has zeros to the right.
2893 E.g. if x is an unsigned 32 bit number:
2894 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2897 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
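/* Editorial aside, not part of the original source: the mod-3 trick from
   the comment above, spelled out.  0x55555556 == (2**32 + 2)/3, and the
   top two bits of the low 32 bits of the product are the remainder; as
   the comment states, this is exact for inputs up to 0x1fffffff.  */
#include <stdint.h>

static unsigned int
rem3 (uint32_t x)                       /* valid for x <= 0x1fffffff */
{
  return (x * 0x55555556u) >> 30;       /* rem3 (7) == 1, rem3 (9) == 0 */
}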
2900 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2901 int rem_flag;
2902 enum tree_code code;
2903 enum machine_mode mode;
2904 register rtx op0, op1, target;
2905 int unsignedp;
2907 enum machine_mode compute_mode;
2908 register rtx tquotient;
2909 rtx quotient = 0, remainder = 0;
2910 rtx last;
2911 int size;
2912 rtx insn, set;
2913 optab optab1, optab2;
2914 int op1_is_constant, op1_is_pow2;
2915 int max_cost, extra_cost;
2916 static HOST_WIDE_INT last_div_const = 0;
2918 op1_is_constant = GET_CODE (op1) == CONST_INT;
2919 op1_is_pow2 = (op1_is_constant
2920 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2921 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2924 This is the structure of expand_divmod:
2926 First comes code to fix up the operands so we can perform the operations
2927 correctly and efficiently.
2929 Second comes a switch statement with code specific for each rounding mode.
2930 For some special operands this code emits all RTL for the desired
2931 operation; for other cases, it generates only a quotient and stores it in
2932 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2933 to indicate that it has not done anything.
2935 Last comes code that finishes the operation. If QUOTIENT is set and
2936 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2937 QUOTIENT is not set, it is computed using trunc rounding.
2939 We try to generate special code for division and remainder when OP1 is a
2940 constant. If |OP1| = 2**n we can use shifts and some other fast
2941 operations. For other values of OP1, we compute a carefully selected
2942 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2943 by m.
2945 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2946 half of the product. Different strategies for generating the product are
2947 implemented in expand_mult_highpart.
2949 If what we actually want is the remainder, we generate that by another
2950 by-constant multiplication and a subtraction. */
2952 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2953 code below will malfunction if we are, so check here and handle
2954 the special case if so. */
2955 if (op1 == const1_rtx)
2956 return rem_flag ? const0_rtx : op0;
2958 if (target
2959 /* Don't use the function value register as a target
2960 since we have to read it as well as write it,
2961 and function-inlining gets confused by this. */
2962 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2963 /* Don't clobber an operand while doing a multi-step calculation. */
2964 || ((rem_flag || op1_is_constant)
2965 && (reg_mentioned_p (target, op0)
2966 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2967 || reg_mentioned_p (target, op1)
2968 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2969 target = 0;
2971 /* Get the mode in which to perform this computation. Normally it will
2972 be MODE, but sometimes we can't do the desired operation in MODE.
2973 If so, pick a wider mode in which we can do the operation. Convert
2974 to that mode at the start to avoid repeated conversions.
2976 First see what operations we need. These depend on the expression
2977 we are evaluating. (We assume that divxx3 insns exist under the
2978 same conditions as modxx3 insns, and that these insns don't normally
2979 fail. If these assumptions are not correct, we may generate less
2980 efficient code in some cases.)
2982 Then see if we find a mode in which we can open-code that operation
2983 (either a division, modulus, or shift). Finally, check for the smallest
2984 mode for which we can do the operation with a library call. */
2986 /* We might want to refine this now that we have division-by-constant
2987 optimization. Since expand_mult_highpart tries so many variants, it is
2988 not straightforward to generalize this. Maybe we should make an array
2989 of possible modes in init_expmed? Save this for GCC 2.7. */
2991 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2992 : (unsignedp ? udiv_optab : sdiv_optab));
2993 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2995 for (compute_mode = mode; compute_mode != VOIDmode;
2996 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2997 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2998 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2999 break;
3001 if (compute_mode == VOIDmode)
3002 for (compute_mode = mode; compute_mode != VOIDmode;
3003 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3004 if (optab1->handlers[(int) compute_mode].libfunc
3005 || optab2->handlers[(int) compute_mode].libfunc)
3006 break;
3008 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3009 in expand_binop. */
3010 if (compute_mode == VOIDmode)
3011 compute_mode = mode;
3013 if (target && GET_MODE (target) == compute_mode)
3014 tquotient = target;
3015 else
3016 tquotient = gen_reg_rtx (compute_mode);
3018 size = GET_MODE_BITSIZE (compute_mode);
3019 #if 0
3020 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3021 (mode), and thereby get better code when OP1 is a constant. Do that
3022 later. It will require going over all usages of SIZE below. */
3023 size = GET_MODE_BITSIZE (mode);
3024 #endif
3026 /* Only deduct something for a REM if the last divide done was
3027 for a different constant. Then set the constant of the last
3028 divide. */
3029 max_cost = div_cost[(int) compute_mode]
3030 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3031 && INTVAL (op1) == last_div_const)
3032 ? mul_cost[(int) compute_mode] + add_cost : 0);
3034 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3036 /* Now convert to the best mode to use. */
3037 if (compute_mode != mode)
3039 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3040 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3042 /* convert_modes may have placed op1 into a register, so we
3043 must recompute the following. */
3044 op1_is_constant = GET_CODE (op1) == CONST_INT;
3045 op1_is_pow2 = (op1_is_constant
3046 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3047 || (! unsignedp
3048 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3051 /* If one of the operands is a volatile MEM, copy it into a register. */
3053 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3054 op0 = force_reg (compute_mode, op0);
3055 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3056 op1 = force_reg (compute_mode, op1);
3058 /* If we need the remainder or if OP1 is constant, we need to
3059 put OP0 in a register in case it has any queued subexpressions. */
3060 if (rem_flag || op1_is_constant)
3061 op0 = force_reg (compute_mode, op0);
3063 last = get_last_insn ();
3065 /* Promote floor rounding to trunc rounding for unsigned operations. */
3066 if (unsignedp)
3068 if (code == FLOOR_DIV_EXPR)
3069 code = TRUNC_DIV_EXPR;
3070 if (code == FLOOR_MOD_EXPR)
3071 code = TRUNC_MOD_EXPR;
3072 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3073 code = TRUNC_DIV_EXPR;
3076 if (op1 != const0_rtx)
3077 switch (code)
3079 case TRUNC_MOD_EXPR:
3080 case TRUNC_DIV_EXPR:
3081 if (op1_is_constant)
3083 if (unsignedp)
3085 unsigned HOST_WIDE_INT mh, ml;
3086 int pre_shift, post_shift;
3087 int dummy;
3088 unsigned HOST_WIDE_INT d = INTVAL (op1);
3090 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3092 pre_shift = floor_log2 (d);
3093 if (rem_flag)
3095 remainder
3096 = expand_binop (compute_mode, and_optab, op0,
3097 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3098 remainder, 1,
3099 OPTAB_LIB_WIDEN);
3100 if (remainder)
3101 return gen_lowpart (mode, remainder);
3103 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3104 build_int_2 (pre_shift, 0),
3105 tquotient, 1);
3107 else if (size <= HOST_BITS_PER_WIDE_INT)
3109 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3111 /* Most significant bit of divisor is set; emit an scc
3112 insn. */
3113 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3114 compute_mode, 1, 1);
3115 if (quotient == 0)
3116 goto fail1;
3118 else
3120 /* Find a suitable multiplier and right shift count
3121 instead of multiplying with D. */
3123 mh = choose_multiplier (d, size, size,
3124 &ml, &post_shift, &dummy);
3126 /* If the suggested multiplier is more than SIZE bits,
3127 we can do better for even divisors, using an
3128 initial right shift. */
3129 if (mh != 0 && (d & 1) == 0)
3131 pre_shift = floor_log2 (d & -d);
3132 mh = choose_multiplier (d >> pre_shift, size,
3133 size - pre_shift,
3134 &ml, &post_shift, &dummy);
3135 if (mh)
3136 abort ();
3138 else
3139 pre_shift = 0;
3141 if (mh != 0)
3143 rtx t1, t2, t3, t4;
3145 extra_cost = (shift_cost[post_shift - 1]
3146 + shift_cost[1] + 2 * add_cost);
3147 t1 = expand_mult_highpart (compute_mode, op0, ml,
3148 NULL_RTX, 1,
3149 max_cost - extra_cost);
3150 if (t1 == 0)
3151 goto fail1;
3152 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3153 op0, t1),
3154 NULL_RTX);
3155 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3156 build_int_2 (1, 0), NULL_RTX,1);
3157 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3158 t1, t3),
3159 NULL_RTX);
3160 quotient
3161 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3162 build_int_2 (post_shift - 1, 0),
3163 tquotient, 1);
3165 else
3167 rtx t1, t2;
3169 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3170 build_int_2 (pre_shift, 0),
3171 NULL_RTX, 1);
3172 extra_cost = (shift_cost[pre_shift]
3173 + shift_cost[post_shift]);
3174 t2 = expand_mult_highpart (compute_mode, t1, ml,
3175 NULL_RTX, 1,
3176 max_cost - extra_cost);
3177 if (t2 == 0)
3178 goto fail1;
3179 quotient
3180 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3181 build_int_2 (post_shift, 0),
3182 tquotient, 1);
3186 else /* Too wide mode to use tricky code */
3187 break;
3189 insn = get_last_insn ();
3190 if (insn != last
3191 && (set = single_set (insn)) != 0
3192 && SET_DEST (set) == quotient)
3193 set_unique_reg_note (insn,
3194 REG_EQUAL,
3195 gen_rtx_UDIV (compute_mode, op0, op1));
3197 else /* TRUNC_DIV, signed */
3199 unsigned HOST_WIDE_INT ml;
3200 int lgup, post_shift;
3201 HOST_WIDE_INT d = INTVAL (op1);
3202 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3204 /* n rem d = n rem -d */
3205 if (rem_flag && d < 0)
3207 d = abs_d;
3208 op1 = GEN_INT (abs_d);
3211 if (d == 1)
3212 quotient = op0;
3213 else if (d == -1)
3214 quotient = expand_unop (compute_mode, neg_optab, op0,
3215 tquotient, 0);
3216 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3218 /* This case is not handled correctly below. */
3219 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3220 compute_mode, 1, 1);
3221 if (quotient == 0)
3222 goto fail1;
3224 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3225 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3227 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3229 lgup = floor_log2 (abs_d);
3230 if (abs_d != 2 && BRANCH_COST < 3)
3232 rtx label = gen_label_rtx ();
3233 rtx t1;
3235 t1 = copy_to_mode_reg (compute_mode, op0);
3236 do_cmp_and_jump (t1, const0_rtx, GE,
3237 compute_mode, label);
3238 expand_inc (t1, GEN_INT (abs_d - 1));
3239 emit_label (label);
3240 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3241 build_int_2 (lgup, 0),
3242 tquotient, 0);
3244 else
3246 rtx t1, t2, t3;
3247 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3248 build_int_2 (size - 1, 0),
3249 NULL_RTX, 0);
3250 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3251 build_int_2 (size - lgup, 0),
3252 NULL_RTX, 1);
3253 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3254 op0, t2),
3255 NULL_RTX);
3256 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3257 build_int_2 (lgup, 0),
3258 tquotient, 0);
3261 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3262 the quotient. */
3263 if (d < 0)
3265 insn = get_last_insn ();
3266 if (insn != last
3267 && (set = single_set (insn)) != 0
3268 && SET_DEST (set) == quotient
3269 && abs_d < ((unsigned HOST_WIDE_INT) 1
3270 << (HOST_BITS_PER_WIDE_INT - 1)))
3271 set_unique_reg_note (insn,
3272 REG_EQUAL,
3273 gen_rtx_DIV (compute_mode,
3274 op0,
3275 GEN_INT (abs_d)));
3277 quotient = expand_unop (compute_mode, neg_optab,
3278 quotient, quotient, 0);
3281 else if (size <= HOST_BITS_PER_WIDE_INT)
3283 choose_multiplier (abs_d, size, size - 1,
3284 &ml, &post_shift, &lgup);
3285 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3287 rtx t1, t2, t3;
3289 extra_cost = (shift_cost[post_shift]
3290 + shift_cost[size - 1] + add_cost);
3291 t1 = expand_mult_highpart (compute_mode, op0, ml,
3292 NULL_RTX, 0,
3293 max_cost - extra_cost);
3294 if (t1 == 0)
3295 goto fail1;
3296 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3297 build_int_2 (post_shift, 0), NULL_RTX, 0);
3298 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3299 build_int_2 (size - 1, 0), NULL_RTX, 0);
3300 if (d < 0)
3301 quotient
3302 = force_operand (gen_rtx_MINUS (compute_mode,
3303 t3, t2),
3304 tquotient);
3305 else
3306 quotient
3307 = force_operand (gen_rtx_MINUS (compute_mode,
3308 t2, t3),
3309 tquotient);
3311 else
3313 rtx t1, t2, t3, t4;
3315 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3316 extra_cost = (shift_cost[post_shift]
3317 + shift_cost[size - 1] + 2 * add_cost);
3318 t1 = expand_mult_highpart (compute_mode, op0, ml,
3319 NULL_RTX, 0,
3320 max_cost - extra_cost);
3321 if (t1 == 0)
3322 goto fail1;
3323 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3324 t1, op0),
3325 NULL_RTX);
3326 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3327 build_int_2 (post_shift, 0),
3328 NULL_RTX, 0);
3329 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3330 build_int_2 (size - 1, 0),
3331 NULL_RTX, 0);
3332 if (d < 0)
3333 quotient
3334 = force_operand (gen_rtx_MINUS (compute_mode,
3335 t4, t3),
3336 tquotient);
3337 else
3338 quotient
3339 = force_operand (gen_rtx_MINUS (compute_mode,
3340 t3, t4),
3341 tquotient);
3344 else /* Too wide mode to use tricky code */
3345 break;
3347 insn = get_last_insn ();
3348 if (insn != last
3349 && (set = single_set (insn)) != 0
3350 && SET_DEST (set) == quotient)
3351 set_unique_reg_note (insn,
3352 REG_EQUAL,
3353 gen_rtx_DIV (compute_mode, op0, op1));
3355 break;
3357 fail1:
3358 delete_insns_since (last);
3359 break;
3361 case FLOOR_DIV_EXPR:
3362 case FLOOR_MOD_EXPR:
3363 /* We will come here only for signed operations. */
3364 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3366 unsigned HOST_WIDE_INT mh, ml;
3367 int pre_shift, lgup, post_shift;
3368 HOST_WIDE_INT d = INTVAL (op1);
3370 if (d > 0)
3372 /* We could just as easily deal with negative constants here,
3373 but it does not seem worth the trouble for GCC 2.6. */
3374 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3376 pre_shift = floor_log2 (d);
3377 if (rem_flag)
3379 remainder = expand_binop (compute_mode, and_optab, op0,
3380 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3381 remainder, 0, OPTAB_LIB_WIDEN);
3382 if (remainder)
3383 return gen_lowpart (mode, remainder);
3385 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3386 build_int_2 (pre_shift, 0),
3387 tquotient, 0);
3389 else
3391 rtx t1, t2, t3, t4;
3393 mh = choose_multiplier (d, size, size - 1,
3394 &ml, &post_shift, &lgup);
3395 if (mh)
3396 abort ();
3398 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3399 build_int_2 (size - 1, 0), NULL_RTX, 0);
3400 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3401 NULL_RTX, 0, OPTAB_WIDEN);
3402 extra_cost = (shift_cost[post_shift]
3403 + shift_cost[size - 1] + 2 * add_cost);
3404 t3 = expand_mult_highpart (compute_mode, t2, ml,
3405 NULL_RTX, 1,
3406 max_cost - extra_cost);
3407 if (t3 != 0)
3409 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3410 build_int_2 (post_shift, 0),
3411 NULL_RTX, 1);
3412 quotient = expand_binop (compute_mode, xor_optab,
3413 t4, t1, tquotient, 0,
3414 OPTAB_WIDEN);
3418 else
3420 rtx nsign, t1, t2, t3, t4;
3421 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3422 op0, constm1_rtx), NULL_RTX);
3423 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3424 0, OPTAB_WIDEN);
3425 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3426 build_int_2 (size - 1, 0), NULL_RTX, 0);
3427 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3428 NULL_RTX);
3429 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3430 NULL_RTX, 0);
3431 if (t4)
3433 rtx t5;
3434 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3435 NULL_RTX, 0);
3436 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3437 t4, t5),
3438 tquotient);
3443 if (quotient != 0)
3444 break;
3445 delete_insns_since (last);
3447 /* Try using an instruction that produces both the quotient and
3448 remainder, using truncation. We can easily compensate the quotient
3449 or remainder to get floor rounding, once we have the remainder.
3450 Notice that we also compute the final remainder value here,
3451 and return the result right away. */
3452 if (target == 0 || GET_MODE (target) != compute_mode)
3453 target = gen_reg_rtx (compute_mode);
3455 if (rem_flag)
3457 remainder
3458 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3459 quotient = gen_reg_rtx (compute_mode);
3461 else
3463 quotient
3464 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3465 remainder = gen_reg_rtx (compute_mode);
3468 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3469 quotient, remainder, 0))
3471 /* This could be computed with a branch-less sequence.
3472 Save that for later. */
3473 rtx tem;
3474 rtx label = gen_label_rtx ();
3475 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3476 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3477 NULL_RTX, 0, OPTAB_WIDEN);
3478 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3479 expand_dec (quotient, const1_rtx);
3480 expand_inc (remainder, op1);
3481 emit_label (label);
3482 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3485 /* No luck with division elimination or divmod. Have to do it
3486 by conditionally adjusting op0 *and* the result. */
3488 rtx label1, label2, label3, label4, label5;
3489 rtx adjusted_op0;
3490 rtx tem;
3492 quotient = gen_reg_rtx (compute_mode);
3493 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3494 label1 = gen_label_rtx ();
3495 label2 = gen_label_rtx ();
3496 label3 = gen_label_rtx ();
3497 label4 = gen_label_rtx ();
3498 label5 = gen_label_rtx ();
3499 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3500 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3501 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3502 quotient, 0, OPTAB_LIB_WIDEN);
3503 if (tem != quotient)
3504 emit_move_insn (quotient, tem);
3505 emit_jump_insn (gen_jump (label5));
3506 emit_barrier ();
3507 emit_label (label1);
3508 expand_inc (adjusted_op0, const1_rtx);
3509 emit_jump_insn (gen_jump (label4));
3510 emit_barrier ();
3511 emit_label (label2);
3512 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3513 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3514 quotient, 0, OPTAB_LIB_WIDEN);
3515 if (tem != quotient)
3516 emit_move_insn (quotient, tem);
3517 emit_jump_insn (gen_jump (label5));
3518 emit_barrier ();
3519 emit_label (label3);
3520 expand_dec (adjusted_op0, const1_rtx);
3521 emit_label (label4);
3522 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3523 quotient, 0, OPTAB_LIB_WIDEN);
3524 if (tem != quotient)
3525 emit_move_insn (quotient, tem);
3526 expand_dec (quotient, const1_rtx);
3527 emit_label (label5);
3529 break;
3531 case CEIL_DIV_EXPR:
3532 case CEIL_MOD_EXPR:
3533 if (unsignedp)
3535 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3537 rtx t1, t2, t3;
3538 unsigned HOST_WIDE_INT d = INTVAL (op1);
3539 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3540 build_int_2 (floor_log2 (d), 0),
3541 tquotient, 1);
3542 t2 = expand_binop (compute_mode, and_optab, op0,
3543 GEN_INT (d - 1),
3544 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3545 t3 = gen_reg_rtx (compute_mode);
3546 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3547 compute_mode, 1, 1);
3548 if (t3 == 0)
3550 rtx lab;
3551 lab = gen_label_rtx ();
3552 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3553 expand_inc (t1, const1_rtx);
3554 emit_label (lab);
3555 quotient = t1;
3557 else
3558 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3559 t1, t3),
3560 tquotient);
3561 break;
3564 /* Try using an instruction that produces both the quotient and
3565 remainder, using truncation. We can easily compensate the
3566 quotient or remainder to get ceiling rounding, once we have the
3567 remainder. Notice that we also compute the final remainder
3568 value here, and return the result right away. */
3569 if (target == 0 || GET_MODE (target) != compute_mode)
3570 target = gen_reg_rtx (compute_mode);
3572 if (rem_flag)
3574 remainder = (GET_CODE (target) == REG
3575 ? target : gen_reg_rtx (compute_mode));
3576 quotient = gen_reg_rtx (compute_mode);
3578 else
3580 quotient = (GET_CODE (target) == REG
3581 ? target : gen_reg_rtx (compute_mode));
3582 remainder = gen_reg_rtx (compute_mode);
3585 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3586 remainder, 1))
3588 /* This could be computed with a branch-less sequence.
3589 Save that for later. */
3590 rtx label = gen_label_rtx ();
3591 do_cmp_and_jump (remainder, const0_rtx, EQ,
3592 compute_mode, label);
3593 expand_inc (quotient, const1_rtx);
3594 expand_dec (remainder, op1);
3595 emit_label (label);
3596 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3599 /* No luck with division elimination or divmod. Have to do it
3600 by conditionally adjusting op0 *and* the result. */
3602 rtx label1, label2;
3603 rtx adjusted_op0, tem;
3605 quotient = gen_reg_rtx (compute_mode);
3606 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3607 label1 = gen_label_rtx ();
3608 label2 = gen_label_rtx ();
3609 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3610 compute_mode, label1);
3611 emit_move_insn (quotient, const0_rtx);
3612 emit_jump_insn (gen_jump (label2));
3613 emit_barrier ();
3614 emit_label (label1);
3615 expand_dec (adjusted_op0, const1_rtx);
3616 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3617 quotient, 1, OPTAB_LIB_WIDEN);
3618 if (tem != quotient)
3619 emit_move_insn (quotient, tem);
3620 expand_inc (quotient, const1_rtx);
3621 emit_label (label2);
3624 else /* signed */
3626 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3627 && INTVAL (op1) >= 0)
3629 /* This is extremely similar to the code for the unsigned case
3630 above. For 2.7 we should merge these variants, but for
3631 2.6.1 I don't want to touch the code for unsigned since that
3632 gets used in C. The signed case will only be used by other
3633 languages (Ada). */
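/* Sketch of why this works (editorial note): the arithmetic shift T1
   is floor (op0 / d), and T3 is 1 exactly when some low bit of op0 is
   set, i.e. when op0 is not a multiple of d; floor plus (inexact ? 1
   : 0) is the ceiling. E.g. d = 4, op0 = -7: t1 = -7 >> 2 = -2,
   t2 = -7 & 3 = 1, so quotient = -2 + 1 = -1 = ceil (-7/4). */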
3635 rtx t1, t2, t3;
3636 unsigned HOST_WIDE_INT d = INTVAL (op1);
3637 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3638 build_int_2 (floor_log2 (d), 0),
3639 tquotient, 0);
3640 t2 = expand_binop (compute_mode, and_optab, op0,
3641 GEN_INT (d - 1),
3642 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3643 t3 = gen_reg_rtx (compute_mode);
3644 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3645 compute_mode, 1, 1);
3646 if (t3 == 0)
3648 rtx lab;
3649 lab = gen_label_rtx ();
3650 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3651 expand_inc (t1, const1_rtx);
3652 emit_label (lab);
3653 quotient = t1;
3655 else
3656 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3657 t1, t3),
3658 tquotient);
3659 break;
3662 /* Try using an instruction that produces both the quotient and
3663 remainder, using truncation. We can easily compensate the
3664 quotient or remainder to get ceiling rounding, once we have the
3665 remainder. Notice that we also compute the final remainder
3666 value here, and return the result right away. */
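/* Illustrative note (editorial): after the truncating divmod, an
   adjustment is needed only when the remainder is non-zero and op0
   and op1 have the same sign (op0 ^ op1 non-negative), which is what
   the compares below test. E.g. op0 = 7, op1 = 3: q = 2, r = 1,
   signs agree, so q becomes 3 and r becomes -2; op0 = -7, op1 = 3:
   q = -2, r = -1, signs differ, and -2 is already ceil (-7/3). */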
3667 if (target == 0 || GET_MODE (target) != compute_mode)
3668 target = gen_reg_rtx (compute_mode);
3669 if (rem_flag)
3671 remainder = (GET_CODE (target) == REG
3672 ? target : gen_reg_rtx (compute_mode));
3673 quotient = gen_reg_rtx (compute_mode);
3675 else
3677 quotient = (GET_CODE (target) == REG
3678 ? target : gen_reg_rtx (compute_mode));
3679 remainder = gen_reg_rtx (compute_mode);
3682 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3683 remainder, 0))
3685 /* This could be computed with a branch-less sequence.
3686 Save that for later. */
3687 rtx tem;
3688 rtx label = gen_label_rtx ();
3689 do_cmp_and_jump (remainder, const0_rtx, EQ,
3690 compute_mode, label);
3691 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3692 NULL_RTX, 0, OPTAB_WIDEN);
3693 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3694 expand_inc (quotient, const1_rtx);
3695 expand_dec (remainder, op1);
3696 emit_label (label);
3697 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3700 /* No luck with division elimination or divmod. Have to do it
3701 by conditionally adjusting op0 *and* the result. */
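/* Editorial sketch of the control flow emitted below: when op1 > 0
   and op0 > 0 we compute (op0 - 1) / op1 + 1; when op1 < 0 and
   op0 < 0 we compute (op0 + 1) / op1 + 1; in the mixed-sign cases the
   true quotient is <= 0, so truncation toward zero already rounds
   toward +infinity and a plain division suffices. */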
3703 rtx label1, label2, label3, label4, label5;
3704 rtx adjusted_op0;
3705 rtx tem;
3707 quotient = gen_reg_rtx (compute_mode);
3708 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3709 label1 = gen_label_rtx ();
3710 label2 = gen_label_rtx ();
3711 label3 = gen_label_rtx ();
3712 label4 = gen_label_rtx ();
3713 label5 = gen_label_rtx ();
3714 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3715 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3716 compute_mode, label1);
3717 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3718 quotient, 0, OPTAB_LIB_WIDEN);
3719 if (tem != quotient)
3720 emit_move_insn (quotient, tem);
3721 emit_jump_insn (gen_jump (label5));
3722 emit_barrier ();
3723 emit_label (label1);
3724 expand_dec (adjusted_op0, const1_rtx);
3725 emit_jump_insn (gen_jump (label4));
3726 emit_barrier ();
3727 emit_label (label2);
3728 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3729 compute_mode, label3);
3730 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3731 quotient, 0, OPTAB_LIB_WIDEN);
3732 if (tem != quotient)
3733 emit_move_insn (quotient, tem);
3734 emit_jump_insn (gen_jump (label5));
3735 emit_barrier ();
3736 emit_label (label3);
3737 expand_inc (adjusted_op0, const1_rtx);
3738 emit_label (label4);
3739 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3740 quotient, 0, OPTAB_LIB_WIDEN);
3741 if (tem != quotient)
3742 emit_move_insn (quotient, tem);
3743 expand_inc (quotient, const1_rtx);
3744 emit_label (label5);
3747 break;
3749 case EXACT_DIV_EXPR:
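/* When the division is known to be exact, a constant divisor d needs
   no divide insn: strip the power-of-two factor of d with a shift,
   then multiply by the inverse of the odd part modulo 2^size.
   Hedged 32-bit example (editorial): d = 6 gives post_shift = 1 and
   ml = 0xAAAAAAAB, the inverse of 3 mod 2^32; for op0 = 18,
   18 * ml mod 2^32 = 6, and 6 >> 1 = 3 = 18 / 6. */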
3750 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3752 HOST_WIDE_INT d = INTVAL (op1);
3753 unsigned HOST_WIDE_INT ml;
3754 int post_shift;
3755 rtx t1;
3757 post_shift = floor_log2 (d & -d);
3758 ml = invert_mod2n (d >> post_shift, size);
3759 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3760 unsignedp);
3761 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3762 build_int_2 (post_shift, 0),
3763 NULL_RTX, unsignedp);
3765 insn = get_last_insn ();
3766 set_unique_reg_note (insn,
3767 REG_EQUAL,
3768 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3769 compute_mode,
3770 op0, op1));
3772 break;
3774 case ROUND_DIV_EXPR:
3775 case ROUND_MOD_EXPR:
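/* Round-to-nearest: compute a truncating quotient and remainder, then
   bump the quotient when the remainder exceeds half the divisor.
   Unsigned sketch (editorial): op0 = 8, op1 = 3 gives q = 2, r = 2;
   the threshold (op1 - 1) >> 1 is 1, and 2 > 1, so q becomes 3 and r
   becomes -1 (8 == 3*3 - 1). For op0 = 7, r = 1 <= 1 and nothing
   changes. */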
3776 if (unsignedp)
3778 rtx tem;
3779 rtx label;
3780 label = gen_label_rtx ();
3781 quotient = gen_reg_rtx (compute_mode);
3782 remainder = gen_reg_rtx (compute_mode);
3783 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3785 rtx tem;
3786 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3787 quotient, 1, OPTAB_LIB_WIDEN);
3788 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3789 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3790 remainder, 1, OPTAB_LIB_WIDEN);
3792 tem = plus_constant (op1, -1);
3793 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3794 build_int_2 (1, 0), NULL_RTX, 1);
3795 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3796 expand_inc (quotient, const1_rtx);
3797 expand_dec (remainder, op1);
3798 emit_label (label);
3800 else
3802 rtx abs_rem, abs_op1, tem, mask;
3803 rtx label;
3804 label = gen_label_rtx ();
3805 quotient = gen_reg_rtx (compute_mode);
3806 remainder = gen_reg_rtx (compute_mode);
3807 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3809 rtx tem;
3810 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3811 quotient, 0, OPTAB_LIB_WIDEN);
3812 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3813 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3814 remainder, 0, OPTAB_LIB_WIDEN);
3816 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
3817 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
3818 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3819 build_int_2 (1, 0), NULL_RTX, 1);
3820 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3821 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3822 NULL_RTX, 0, OPTAB_WIDEN);
3823 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3824 build_int_2 (size - 1, 0), NULL_RTX, 0);
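/* Editorial note: MASK is the sign of op0 ^ op1 smeared across the
   word, i.e. 0 when the operands have the same sign and -1 otherwise,
   so (mask ^ 1) - mask is +1 or -1 respectively (0 ^ 1 - 0 == 1;
   (-1 ^ 1) - (-1) == -2 + 1 == -1). Likewise (mask ^ op1) - mask is
   op1 or -op1, giving the signed adjustments applied below. */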
3825 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3826 NULL_RTX, 0, OPTAB_WIDEN);
3827 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3828 NULL_RTX, 0, OPTAB_WIDEN);
3829 expand_inc (quotient, tem);
3830 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3831 NULL_RTX, 0, OPTAB_WIDEN);
3832 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3833 NULL_RTX, 0, OPTAB_WIDEN);
3834 expand_dec (remainder, tem);
3835 emit_label (label);
3837 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3839 default:
3840 abort ();
3843 if (quotient == 0)
3845 if (target && GET_MODE (target) != compute_mode)
3846 target = 0;
3848 if (rem_flag)
3850 /* Try to produce the remainder without producing the quotient.
3851 If we seem to have a divmod pattern that does not require widening,
3852 don't try widening here. We should really have a WIDEN argument
3853 to expand_twoval_binop, since what we'd really like to do here is
3854 1) try a mod insn in compute_mode
3855 2) try a divmod insn in compute_mode
3856 3) try a div insn in compute_mode and multiply-subtract to get
3857 remainder
3858 4) try the same things with widening allowed. */
3859 remainder
3860 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3861 op0, op1, target,
3862 unsignedp,
3863 ((optab2->handlers[(int) compute_mode].insn_code
3864 != CODE_FOR_nothing)
3865 ? OPTAB_DIRECT : OPTAB_WIDEN));
3866 if (remainder == 0)
3868 /* No luck there. Can we do remainder and divide at once
3869 without a library call? */
3870 remainder = gen_reg_rtx (compute_mode);
3871 if (! expand_twoval_binop ((unsignedp
3872 ? udivmod_optab
3873 : sdivmod_optab),
3874 op0, op1,
3875 NULL_RTX, remainder, unsignedp))
3876 remainder = 0;
3879 if (remainder)
3880 return gen_lowpart (mode, remainder);
3883 /* Produce the quotient. Try a quotient insn, but not a library call.
3884 If we have a divmod in this mode, use it in preference to widening
3885 the div (for this test we assume it will not fail). Note that optab2
3886 is set to whichever of the two optabs the call below will use. */
3887 quotient
3888 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3889 op0, op1, rem_flag ? NULL_RTX : target,
3890 unsignedp,
3891 ((optab2->handlers[(int) compute_mode].insn_code
3892 != CODE_FOR_nothing)
3893 ? OPTAB_DIRECT : OPTAB_WIDEN));
3895 if (quotient == 0)
3897 /* No luck there. Try a quotient-and-remainder insn,
3898 keeping the quotient alone. */
3899 quotient = gen_reg_rtx (compute_mode);
3900 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3901 op0, op1,
3902 quotient, NULL_RTX, unsignedp))
3904 quotient = 0;
3905 if (! rem_flag)
3906 /* Still no luck. If we are not computing the remainder,
3907 use a library call for the quotient. */
3908 quotient = sign_expand_binop (compute_mode,
3909 udiv_optab, sdiv_optab,
3910 op0, op1, target,
3911 unsignedp, OPTAB_LIB_WIDEN);
3916 if (rem_flag)
3918 if (target && GET_MODE (target) != compute_mode)
3919 target = 0;
3921 if (quotient == 0)
3922 /* No divide instruction either. Use library for remainder. */
3923 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3924 op0, op1, target,
3925 unsignedp, OPTAB_LIB_WIDEN);
3926 else
3928 /* We divided. Now finish doing X - Y * (X / Y). */
3929 remainder = expand_mult (compute_mode, quotient, op1,
3930 NULL_RTX, unsignedp);
3931 remainder = expand_binop (compute_mode, sub_optab, op0,
3932 remainder, target, unsignedp,
3933 OPTAB_LIB_WIDEN);
3937 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3940 /* Return a tree node with data type TYPE, describing the value of X.
3941 Usually this is an RTL_EXPR, if there is no obvious better choice.
3942 X may be an expression; however, we only support those expressions
3943 generated by loop.c. */
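/* An illustrative example (editorial, not in the original): a
   CONST_INT such as (const_int 5) becomes an INTEGER_CST of TYPE,
   while something like (mult:SI (reg:SI 60) (const_int 3)) is rebuilt
   as fold (build (MULT_EXPR, ...)) over the recursively converted
   operands; anything unrecognized falls through to the RTL_EXPR
   wrapper in the default case. */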
3945 tree
3946 make_tree (type, x)
3947 tree type;
3948 rtx x;
3950 tree t;
3952 switch (GET_CODE (x))
3954 case CONST_INT:
3955 t = build_int_2 (INTVAL (x),
3956 (TREE_UNSIGNED (type)
3957 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3958 || INTVAL (x) >= 0 ? 0 : -1);
3959 TREE_TYPE (t) = type;
3960 return t;
3962 case CONST_DOUBLE:
3963 if (GET_MODE (x) == VOIDmode)
3965 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3966 TREE_TYPE (t) = type;
3968 else
3970 REAL_VALUE_TYPE d;
3972 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3973 t = build_real (type, d);
3976 return t;
3978 case PLUS:
3979 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3980 make_tree (type, XEXP (x, 1))));
3982 case MINUS:
3983 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3984 make_tree (type, XEXP (x, 1))));
3986 case NEG:
3987 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3989 case MULT:
3990 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3991 make_tree (type, XEXP (x, 1))));
3993 case ASHIFT:
3994 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3995 make_tree (type, XEXP (x, 1))));
3997 case LSHIFTRT:
3998 return fold (convert (type,
3999 build (RSHIFT_EXPR, unsigned_type (type),
4000 make_tree (unsigned_type (type),
4001 XEXP (x, 0)),
4002 make_tree (type, XEXP (x, 1)))));
4004 case ASHIFTRT:
4005 return fold (convert (type,
4006 build (RSHIFT_EXPR, signed_type (type),
4007 make_tree (signed_type (type), XEXP (x, 0)),
4008 make_tree (type, XEXP (x, 1)))));
4010 case DIV:
4011 if (TREE_CODE (type) != REAL_TYPE)
4012 t = signed_type (type);
4013 else
4014 t = type;
4016 return fold (convert (type,
4017 build (TRUNC_DIV_EXPR, t,
4018 make_tree (t, XEXP (x, 0)),
4019 make_tree (t, XEXP (x, 1)))));
4020 case UDIV:
4021 t = unsigned_type (type);
4022 return fold (convert (type,
4023 build (TRUNC_DIV_EXPR, t,
4024 make_tree (t, XEXP (x, 0)),
4025 make_tree (t, XEXP (x, 1)))));
4026 default:
4027 t = make_node (RTL_EXPR);
4028 TREE_TYPE (t) = type;
4029 RTL_EXPR_RTL (t) = x;
4030 /* There are no insns to be output
4031 when this rtl_expr is used. */
4032 RTL_EXPR_SEQUENCE (t) = 0;
4033 return t;
4037 /* Return an rtx representing the value of X * MULT + ADD.
4038 TARGET is a suggestion for where to store the result (an rtx).
4039 MODE is the machine mode for the computation.
4040 X and MULT must have mode MODE. ADD may have a different mode
4041 (defaulting to MODE).
4042 UNSIGNEDP is non-zero to do unsigned multiplication.
4043 This may emit insns. */
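/* Usage sketch (illustrative only): expand_mult_add (x, NULL_RTX,
   GEN_INT (4), y, SImode, 0) emits insns computing x * 4 + y in
   SImode and returns an rtx for the value; going through trees lets
   fold simplify constant operands before expansion. */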
4046 expand_mult_add (x, target, mult, add, mode, unsignedp)
4047 rtx x, target, mult, add;
4048 enum machine_mode mode;
4049 int unsignedp;
4051 tree type = type_for_mode (mode, unsignedp);
4052 tree add_type = (GET_MODE (add) == VOIDmode
4053 ? type : type_for_mode (GET_MODE (add), unsignedp));
4054 tree result = fold (build (PLUS_EXPR, type,
4055 fold (build (MULT_EXPR, type,
4056 make_tree (type, x),
4057 make_tree (type, mult))),
4058 make_tree (add_type, add)));
4060 return expand_expr (result, target, VOIDmode, 0);
4063 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4064 and returning TARGET.
4066 If TARGET is 0, a pseudo-register or constant is returned. */
4069 expand_and (op0, op1, target)
4070 rtx op0, op1, target;
4072 enum machine_mode mode = VOIDmode;
4073 rtx tem;
4075 if (GET_MODE (op0) != VOIDmode)
4076 mode = GET_MODE (op0);
4077 else if (GET_MODE (op1) != VOIDmode)
4078 mode = GET_MODE (op1);
4080 if (mode != VOIDmode)
4081 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4082 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4083 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4084 else
4085 abort ();
4087 if (target == 0)
4088 target = tem;
4089 else if (tem != target)
4090 emit_move_insn (target, tem);
4091 return target;
4094 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4095 and storing in TARGET. Normally return TARGET.
4096 Return 0 if that cannot be done.
4098 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4099 it is VOIDmode, they cannot both be CONST_INT.
4101 UNSIGNEDP is for the case where we have to widen the operands
4102 to perform the operation. It says to use zero-extension.
4104 NORMALIZEP is 1 if we should convert the result to be either zero
4105 or one. NORMALIZEP is -1 if we should convert the result to be
4106 either zero or -1. If NORMALIZEP is zero, the result will be left
4107 "raw" out of the scc insn. */
4110 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4111 rtx target;
4112 enum rtx_code code;
4113 rtx op0, op1;
4114 enum machine_mode mode;
4115 int unsignedp;
4116 int normalizep;
4118 rtx subtarget;
4119 enum insn_code icode;
4120 enum machine_mode compare_mode;
4121 enum machine_mode target_mode = GET_MODE (target);
4122 rtx tem;
4123 rtx last = get_last_insn ();
4124 rtx pattern, comparison;
4126 if (unsignedp)
4127 code = unsigned_condition (code);
4129 /* If one operand is constant, make it the second one. Only do this
4130 if the other operand is not constant as well. */
4132 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4133 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4135 tem = op0;
4136 op0 = op1;
4137 op1 = tem;
4138 code = swap_condition (code);
4141 if (mode == VOIDmode)
4142 mode = GET_MODE (op0);
4144 /* For some comparisons with 1 and -1, we can convert this to
4145 comparisons with zero. This will often produce more opportunities for
4146 store-flag insns. */
4148 switch (code)
4150 case LT:
4151 if (op1 == const1_rtx)
4152 op1 = const0_rtx, code = LE;
4153 break;
4154 case LE:
4155 if (op1 == constm1_rtx)
4156 op1 = const0_rtx, code = LT;
4157 break;
4158 case GE:
4159 if (op1 == const1_rtx)
4160 op1 = const0_rtx, code = GT;
4161 break;
4162 case GT:
4163 if (op1 == constm1_rtx)
4164 op1 = const0_rtx, code = GE;
4165 break;
4166 case GEU:
4167 if (op1 == const1_rtx)
4168 op1 = const0_rtx, code = NE;
4169 break;
4170 case LTU:
4171 if (op1 == const1_rtx)
4172 op1 = const0_rtx, code = EQ;
4173 break;
4174 default:
4175 break;
4178 /* From now on, we won't change CODE, so set ICODE now. */
4179 icode = setcc_gen_code[(int) code];
4181 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4182 complement of A (for GE) and shifting the sign bit to the low bit. */
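/* Concrete sketch for a 32-bit mode (editorial): A < 0 is the logical
   shift (unsigned) A >> 31, and A >= 0 is (unsigned) ~A >> 31; for a
   -1/0 result (normalizep == -1) an arithmetic shift is used instead,
   as the code below selects. */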
4183 if (op1 == const0_rtx && (code == LT || code == GE)
4184 && GET_MODE_CLASS (mode) == MODE_INT
4185 && (normalizep || STORE_FLAG_VALUE == 1
4186 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4187 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4188 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4190 subtarget = target;
4192 /* If the result is to be wider than OP0, it is best to convert it
4193 first. If it is to be narrower, it is *incorrect* to convert it
4194 first. */
4195 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4197 op0 = protect_from_queue (op0, 0);
4198 op0 = convert_modes (target_mode, mode, op0, 0);
4199 mode = target_mode;
4202 if (target_mode != mode)
4203 subtarget = 0;
4205 if (code == GE)
4206 op0 = expand_unop (mode, one_cmpl_optab, op0,
4207 ((STORE_FLAG_VALUE == 1 || normalizep)
4208 ? 0 : subtarget), 0);
4210 if (STORE_FLAG_VALUE == 1 || normalizep)
4211 /* If we are supposed to produce a 0/1 value, we want to do
4212 a logical shift from the sign bit to the low-order bit; for
4213 a -1/0 value, we do an arithmetic shift. */
4214 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4215 size_int (GET_MODE_BITSIZE (mode) - 1),
4216 subtarget, normalizep != -1);
4218 if (mode != target_mode)
4219 op0 = convert_modes (target_mode, mode, op0, 0);
4221 return op0;
4224 if (icode != CODE_FOR_nothing)
4226 insn_operand_predicate_fn pred;
4228 /* We think we may be able to do this with a scc insn. Emit the
4229 comparison and then the scc insn.
4231 compare_from_rtx may call emit_queue, which would be deleted below
4232 if the scc insn fails. So call it ourselves before setting LAST. */
4234 emit_queue ();
4235 last = get_last_insn ();
4237 comparison
4238 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4239 if (GET_CODE (comparison) == CONST_INT)
4240 return (comparison == const0_rtx ? const0_rtx
4241 : normalizep == 1 ? const1_rtx
4242 : normalizep == -1 ? constm1_rtx
4243 : const_true_rtx);
4245 /* If the code of COMPARISON doesn't match CODE, something is
4246 wrong; we can no longer be sure that we have the operation.
4247 We could handle this case, but it should not happen. */
4249 if (GET_CODE (comparison) != code)
4250 abort ();
4252 /* Get a reference to the target in the proper mode for this insn. */
4253 compare_mode = insn_data[(int) icode].operand[0].mode;
4254 subtarget = target;
4255 pred = insn_data[(int) icode].operand[0].predicate;
4256 if (preserve_subexpressions_p ()
4257 || ! (*pred) (subtarget, compare_mode))
4258 subtarget = gen_reg_rtx (compare_mode);
4260 pattern = GEN_FCN (icode) (subtarget);
4261 if (pattern)
4263 emit_insn (pattern);
4265 /* If we are converting to a wider mode, first convert to
4266 TARGET_MODE, then normalize. This produces better combining
4267 opportunities on machines that have a SIGN_EXTRACT when we are
4268 testing a single bit. This mostly benefits the 68k.
4270 If STORE_FLAG_VALUE does not have the sign bit set when
4271 interpreted in COMPARE_MODE, we can do this conversion as
4272 unsigned, which is usually more efficient. */
4273 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4275 convert_move (target, subtarget,
4276 (GET_MODE_BITSIZE (compare_mode)
4277 <= HOST_BITS_PER_WIDE_INT)
4278 && 0 == (STORE_FLAG_VALUE
4279 & ((HOST_WIDE_INT) 1
4280 << (GET_MODE_BITSIZE (compare_mode) -1))));
4281 op0 = target;
4282 compare_mode = target_mode;
4284 else
4285 op0 = subtarget;
4287 /* If we want to keep subexpressions around, don't reuse our
4288 last target. */
4290 if (preserve_subexpressions_p ())
4291 subtarget = 0;
4293 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4294 we don't have to do anything. */
4295 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4297 /* STORE_FLAG_VALUE might be the most negative number, so write
4298 the comparison this way to avoid a compile-time warning. */
4299 else if (- normalizep == STORE_FLAG_VALUE)
4300 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4302 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4303 makes it hard to use a value of just the sign bit due to
4304 ANSI integer constant typing rules. */
4305 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4306 && (STORE_FLAG_VALUE
4307 & ((HOST_WIDE_INT) 1
4308 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4309 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4310 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4311 subtarget, normalizep == 1);
4312 else if (STORE_FLAG_VALUE & 1)
4314 op0 = expand_and (op0, const1_rtx, subtarget);
4315 if (normalizep == -1)
4316 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4318 else
4319 abort ();
4321 /* If we were converting to a smaller mode, do the
4322 conversion now. */
4323 if (target_mode != compare_mode)
4325 convert_move (target, op0, 0);
4326 return target;
4328 else
4329 return op0;
4333 delete_insns_since (last);
4335 /* If expensive optimizations, use different pseudo registers for each
4336 insn, instead of reusing the same pseudo. This leads to better CSE,
4337 but slows down the compiler, since there are more pseudos. */
4338 subtarget = (!flag_expensive_optimizations
4339 && (target_mode == mode)) ? target : NULL_RTX;
4341 /* If we reached here, we can't do this with a scc insn. However, there
4342 are some comparisons that can be done directly. For example, if
4343 this is an equality comparison of integers, we can try to exclusive-or
4344 (or subtract) the two operands and use a recursive call to try the
4345 comparison with zero. Don't do any of these cases if branches are
4346 very cheap. */
4348 if (BRANCH_COST > 0
4349 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4350 && op1 != const0_rtx)
4352 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4353 OPTAB_WIDEN);
4355 if (tem == 0)
4356 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4357 OPTAB_WIDEN);
4358 if (tem != 0)
4359 tem = emit_store_flag (target, code, tem, const0_rtx,
4360 mode, unsignedp, normalizep);
4361 if (tem == 0)
4362 delete_insns_since (last);
4363 return tem;
4366 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4367 the constant zero. Reject all other comparisons at this point. Only
4368 do LE and GT if branches are expensive since they are expensive on
4369 2-operand machines. */
4371 if (BRANCH_COST == 0
4372 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4373 || (code != EQ && code != NE
4374 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4375 return 0;
4377 /* See what we need to return. We can only return a 1, -1, or the
4378 sign bit. */
4380 if (normalizep == 0)
4382 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4383 normalizep = STORE_FLAG_VALUE;
4385 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4386 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4387 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4389 else
4390 return 0;
4393 /* Try to put the result of the comparison in the sign bit. Assume we can't
4394 do the necessary operation below. */
4396 tem = 0;
4398 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4399 the sign bit set. */
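/* Quick check of the identity (editorial): A = 0 gives 0 | -1 = -1
   (sign bit set); A = 5 gives 5 | 4 = 5 (clear); A = -3 gives
   -3 | -4 = -3 (set). So the sign bit of A | (A - 1) is set exactly
   when A <= 0. */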
4401 if (code == LE)
4403 /* This is destructive, so SUBTARGET can't be OP0. */
4404 if (rtx_equal_p (subtarget, op0))
4405 subtarget = 0;
4407 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4408 OPTAB_WIDEN);
4409 if (tem)
4410 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4411 OPTAB_WIDEN);
4414 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4415 number of bits in the mode of OP0, minus one. */
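/* Checking the identity (editorial): for 32-bit A, let S = A >> 31
   (arithmetic). A = 5: S - A = 0 - 5 = -5, sign set; A = 0: 0, sign
   clear; A = -5: S - A = -1 + 5 = 4, sign clear. So the sign bit of
   S - A is set exactly when A > 0. */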
4417 if (code == GT)
4419 if (rtx_equal_p (subtarget, op0))
4420 subtarget = 0;
4422 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4423 size_int (GET_MODE_BITSIZE (mode) - 1),
4424 subtarget, 0);
4425 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4426 OPTAB_WIDEN);
4429 if (code == EQ || code == NE)
4431 /* For EQ or NE, one way to do the comparison is to apply an operation
4432 that converts the operand into a positive number if it is non-zero
4433 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4434 for NE we negate. This puts the result in the sign bit. Then we
4435 normalize with a shift, if needed.
4437 Two operations that can do the above actions are ABS and FFS, so try
4438 them. If that doesn't work, and MODE is smaller than a full word,
4439 we can use zero-extension to the wider mode (an unsigned conversion)
4440 as the operation. */
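/* Worked sketch (editorial): with ABS as the positivizing operation,
   x == 0 gives abs = 0 and x != 0 gives a non-zero abs. For EQ,
   abs (x) - 1 is negative exactly when x == 0 (even INT_MIN wraps to
   a non-negative value), and for NE, -abs (x) is negative exactly
   when x != 0, so the sign-bit shift at the end normalizes the
   result. FFS has the same zero/non-zero property. */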
4442 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4443 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4444 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4445 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4446 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4448 op0 = protect_from_queue (op0, 0);
4449 tem = convert_modes (word_mode, mode, op0, 1);
4450 mode = word_mode;
4453 if (tem != 0)
4455 if (code == EQ)
4456 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4457 0, OPTAB_WIDEN);
4458 else
4459 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4462 /* If we couldn't do it that way, for NE we can "or" the two's complement
4463 of the value with itself. For EQ, we take the one's complement of
4464 that "or", which is an extra insn, so we only handle EQ if branches
4465 are expensive. */
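/* Editorial check: x | -x has the sign bit set exactly when x != 0
   (x = 4: 4 | -4 = -4; x = 0: 0 | 0 = 0), so its one's complement has
   the sign bit set exactly when x == 0, at the cost of the extra insn
   mentioned above. */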
4467 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4469 if (rtx_equal_p (subtarget, op0))
4470 subtarget = 0;
4472 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4473 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4474 OPTAB_WIDEN);
4476 if (tem && code == EQ)
4477 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4481 if (tem && normalizep)
4482 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4483 size_int (GET_MODE_BITSIZE (mode) - 1),
4484 subtarget, normalizep == 1);
4486 if (tem)
4488 if (GET_MODE (tem) != target_mode)
4490 convert_move (target, tem, 0);
4491 tem = target;
4493 else if (!subtarget)
4495 emit_move_insn (target, tem);
4496 tem = target;
4499 else
4500 delete_insns_since (last);
4502 return tem;
4505 /* Like emit_store_flag, but always succeeds. */
4508 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4509 rtx target;
4510 enum rtx_code code;
4511 rtx op0, op1;
4512 enum machine_mode mode;
4513 int unsignedp;
4514 int normalizep;
4516 rtx tem, label;
4518 /* First see if emit_store_flag can do the job. */
4519 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4520 if (tem != 0)
4521 return tem;
4523 if (normalizep == 0)
4524 normalizep = 1;
4526 /* If this failed, we have to do this with set/compare/jump/set code. */
4528 if (GET_CODE (target) != REG
4529 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4530 target = gen_reg_rtx (GET_MODE (target));
4532 emit_move_insn (target, const1_rtx);
4533 label = gen_label_rtx ();
4534 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4535 NULL_RTX, label);
4537 emit_move_insn (target, const0_rtx);
4538 emit_label (label);
4540 return target;
4543 /* Perform possibly multi-word comparison and conditional jump to LABEL
4544 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4546 The algorithm is based on the code in expr.c:do_jump.
4548 Note that this does not perform a general comparison. Only variants
4549 generated within expmed.c are correctly handled; others abort (but could
4550 be handled if needed). */
4552 static void
4553 do_cmp_and_jump (arg1, arg2, op, mode, label)
4554 rtx arg1, arg2, label;
4555 enum rtx_code op;
4556 enum machine_mode mode;
4558 /* If this mode is an integer too wide to compare properly,
4559 compare word by word. Rely on cse to optimize constant cases. */
4561 if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (mode, ccp_jump))
4563 rtx label2 = gen_label_rtx ();
4565 switch (op)
4567 case LTU:
4568 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4569 break;
4571 case LEU:
4572 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4573 break;
4575 case LT:
4576 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4577 break;
4579 case GT:
4580 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4581 break;
4583 case GE:
4584 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4585 break;
4587 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4588 that's the only equality operation we do. */
4589 case EQ:
4590 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4591 abort();
4592 do_jump_by_parts_equality_rtx (arg1, label2, label);
4593 break;
4595 case NE:
4596 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4597 abort();
4598 do_jump_by_parts_equality_rtx (arg1, label, label2);
4599 break;
4601 default:
4602 abort();
4605 emit_label (label2);
4607 else
4609 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);