/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
                                          int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
                           int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
                               int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
                                   enum machine_mode, rtx));

#define CEIL(x,y) (((x) + (y) - 1) / (y))
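
/* For instance, CEIL (17, 8) == 3: seventeen bits occupy three whole
   8-bit units.  (Worked example added here for illustration.)  */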
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT
                         (wider_mode,
                          gen_rtx_MULT (wider_mode,
                                        gen_rtx_ZERO_EXTEND
                                        (wider_mode, reg),
                                        gen_rtx_ZERO_EXTEND
                                        (wider_mode, reg)),
                          GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
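
/* Illustration (not part of the original flow): the tables built above
   let later expanders weigh synthesized sequences against a real multiply
   or divide.  A hypothetical consultation might read

     if (shiftadd_cost[3] < mul_cost[(int) SImode])
       ... prefer (x << 3) + x over x * 9 ...

   The actual consumers are the multiply/divide expanders later in this
   file (beyond this excerpt).  */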
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
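
/* Example (hypothetical operands): negate_rtx (SImode, GEN_INT (5))
   folds immediately to (const_int -5) via simplify_unary_operation,
   while a REG operand falls through to expand_unop and emits a neg
   instruction.  */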
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; otherwise, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  int insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);

          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }

          emit_insn (GEN_FCN (icode)
                     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
        }
      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
        maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);

  return value;
}
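
/* Illustrative call (hypothetical operands): storing VAL into a 6-bit
   field at bit 3 of a 4-byte struct S in memory might be spelled

     store_bit_field (s_mem, 6, 3, VOIDmode, val, 4, 4);

   taking the insv path when the target defines that pattern, and the
   store_fixed_bit_field fallback otherwise.  */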
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
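
/* The sequence emitted above corresponds to the familiar C idiom
   (illustration only, for a field of BITSIZE < word-size bits at
   little-endian position BITPOS):

     mask = (1u << bitsize) - 1;
     word = (word & ~(mask << bitpos)) | ((value & mask) << bitpos);

   with the AND skipped when VALUE is the all-ones constant and the
   IOR skipped when it is zero.  */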
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
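
/* Worked example (little-endian, 32-bit words, for illustration):
   storing a 10-bit field at BITPOS 28 takes two passes of the loop
   above -- 4 bits at position 28 of the first word, then the
   remaining 6 bits at position 0 of the next word, each delegated to
   store_fixed_bit_field.  */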
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
#ifdef HAVE_extv
  int extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  int extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1,
                                                     VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (extzv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extzv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS
                          && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (extv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS
                          && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 0, align);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
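
/* Illustrative call (hypothetical operands): to fetch the 6-bit unsigned
   field from the example after store_bit_field,

     rtx v = extract_bit_field (s_mem, 6, 3, 1, NULL_RTX,
                                SImode, SImode, 4, 4);

   which uses extzv when the target provides it and otherwise the
   shift-and-mask fallback in extract_fixed_bit_field.  */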
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
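
/* The two shifts above amount to the classic C sign-extension idiom
   (illustration only, for a 32-bit word and a field of SIZE bits at
   little-endian position POS):

     ((int) (x << (32 - size - pos))) >> (32 - size)

   first parking the field's msb in the word's msb, then
   arithmetic-shifting it back down.  */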
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
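
/* Worked example (for illustration): mask_rtx (SImode, 4, 8, 0) yields
   0x00000ff0 -- eight ones shifted past four zeros -- and
   mask_rtx (SImode, 4, 8, 1) its complement, 0xfffff00f.  */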
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
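
/* Worked example (for illustration): lshift_value (SImode,
   GEN_INT (0x1f3), 4, 8) truncates 0x1f3 to its low 8 bits, 0xf3,
   then shifts left by 4, producing (const_int 0xf30).  */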
1729 /* Extract a bit field that is split across two words
1730 and return an RTX for the result.
1732 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1733 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1734 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1736 ALIGN is the known alignment of OP0, measured in bytes.
1737 This is also the size of the memory objects to be used. */
1739 static rtx
1740 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1741 rtx op0;
1742 int bitsize, bitpos, unsignedp, align;
1744 int unit;
1745 int bitsdone = 0;
1746 rtx result = NULL_RTX;
1747 int first = 1;
1749 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1750 much at a time. */
1751 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1752 unit = BITS_PER_WORD;
1753 else
1754 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1756 while (bitsdone < bitsize)
1758 int thissize;
1759 rtx part, word;
1760 int thispos;
1761 int offset;
1763 offset = (bitpos + bitsdone) / unit;
1764 thispos = (bitpos + bitsdone) % unit;
1766 /* THISSIZE must not overrun a word boundary. Otherwise,
1767 extract_fixed_bit_field will call us again, and we will mutually
1768 recurse forever. */
1769 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1770 thissize = MIN (thissize, unit - thispos);
1772 /* If OP0 is a register, then handle OFFSET here.
1774 When handling multiword bitfields, extract_bit_field may pass
1775 down a word_mode SUBREG of a larger REG for a bitfield that actually
1776 crosses a word boundary. Thus, for a SUBREG, we must find
1777 the current word starting from the base register. */
1778 if (GET_CODE (op0) == SUBREG)
1780 word = operand_subword_force (SUBREG_REG (op0),
1781 SUBREG_WORD (op0) + offset,
1782 GET_MODE (SUBREG_REG (op0)));
1783 offset = 0;
1785 else if (GET_CODE (op0) == REG)
1787 word = operand_subword_force (op0, offset, GET_MODE (op0));
1788 offset = 0;
1790 else
1791 word = op0;
1793 /* Extract the parts in bit-counting order,
1794 whose meaning is determined by BYTES_PER_UNIT.
1795 OFFSET is in UNITs, and UNIT is in bits.
1796 extract_fixed_bit_field wants offset in bytes. */
1797 part = extract_fixed_bit_field (word_mode, word,
1798 offset * unit / BITS_PER_UNIT,
1799 thissize, thispos, 0, 1, align);
1800 bitsdone += thissize;
1802 /* Shift this part into place for the result. */
1803 if (BYTES_BIG_ENDIAN)
1805 if (bitsize != bitsdone)
1806 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1807 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1809 else
1811 if (bitsdone != thissize)
1812 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1813 build_int_2 (bitsdone - thissize, 0), 0, 1);
1816 if (first)
1817 result = part;
1818 else
1819 /* Combine the parts with bitwise or. This works
1820 because we extracted each part as an unsigned bit field. */
1821 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1822 OPTAB_LIB_WIDEN);
1824 first = 0;
1827 /* Unsigned bit field: we are done. */
1828 if (unsignedp)
1829 return result;
1830 /* Signed bit field: sign-extend with two arithmetic shifts. */
1831 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1832 build_int_2 (BITS_PER_WORD - bitsize, 0),
1833 NULL_RTX, 0);
1834 return expand_shift (RSHIFT_EXPR, word_mode, result,
1835 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
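#if 0
/* Minimal sketch (an assumption for illustration, not part of GNU CC) of
   the two-shift sign extension used just above: shifting the field to the
   top of the word and arithmetic-shifting it back replicates the sign bit.
   Note that >> on a negative signed value is implementation-defined in C;
   the code above emits the shifts at the RTL level, where the arithmetic
   shift is explicit.  */
static long
sign_extend_example (word, bitsize)
     long word;
     int bitsize;
{
  int shift = sizeof (long) * 8 - bitsize;	/* BITS_PER_WORD - bitsize */
  return (word << shift) >> shift;		/* arithmetic right shift */
}
#endif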
1838 /* Add INC into TARGET. */
1840 void
1841 expand_inc (target, inc)
1842 rtx target, inc;
1844 rtx value = expand_binop (GET_MODE (target), add_optab,
1845 target, inc,
1846 target, 0, OPTAB_LIB_WIDEN);
1847 if (value != target)
1848 emit_move_insn (target, value);
1851 /* Subtract DEC from TARGET. */
1853 void
1854 expand_dec (target, dec)
1855 rtx target, dec;
1857 rtx value = expand_binop (GET_MODE (target), sub_optab,
1858 target, dec,
1859 target, 0, OPTAB_LIB_WIDEN);
1860 if (value != target)
1861 emit_move_insn (target, value);
1864 /* Output a shift instruction for expression code CODE,
1865 with SHIFTED being the rtx for the value to shift,
1866 and AMOUNT the tree for the amount to shift by.
1867 Store the result in the rtx TARGET, if that is convenient.
1868 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1869 Return the rtx for where the value is. */
1872 expand_shift (code, mode, shifted, amount, target, unsignedp)
1873 enum tree_code code;
1874 register enum machine_mode mode;
1875 rtx shifted;
1876 tree amount;
1877 register rtx target;
1878 int unsignedp;
1880 register rtx op1, temp = 0;
1881 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1882 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1883 int try;
1885 /* We previously detected shift counts computed by NEGATE_EXPR
1886 and shifted in the other direction, but that does not work
1887 on all machines. */
1889 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1891 #ifdef SHIFT_COUNT_TRUNCATED
1892 if (SHIFT_COUNT_TRUNCATED)
1894 if (GET_CODE (op1) == CONST_INT
1895 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1896 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1897 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1898 % GET_MODE_BITSIZE (mode));
1899 else if (GET_CODE (op1) == SUBREG
1900 && SUBREG_WORD (op1) == 0)
1901 op1 = SUBREG_REG (op1);
1903 #endif
1905 if (op1 == const0_rtx)
1906 return shifted;
1908 for (try = 0; temp == 0 && try < 3; try++)
1910 enum optab_methods methods;
1912 if (try == 0)
1913 methods = OPTAB_DIRECT;
1914 else if (try == 1)
1915 methods = OPTAB_WIDEN;
1916 else
1917 methods = OPTAB_LIB_WIDEN;
1919 if (rotate)
1921 /* Widening does not work for rotation. */
1922 if (methods == OPTAB_WIDEN)
1923 continue;
1924 else if (methods == OPTAB_LIB_WIDEN)
1926 /* If we have been unable to open-code this by a rotation,
1927 do it as the IOR of two shifts. I.e., to rotate A
1928 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1929 where C is the bitsize of A.
1931 It is theoretically possible that the target machine might
1932 not be able to perform either shift and hence we would
1933 be making two libcalls rather than just the one for the
1934 shift (similarly if IOR could not be done). We will allow
1935 this extremely unlikely lossage to avoid complicating the
1936 code below. */
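/* Worked example (illustrative, not from the original source): rotating a
   32-bit A left by 8 becomes (A << 8) | ((unsigned) A >> 24), i.e. the
   two shifts plus the IOR emitted below.  */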
1938 rtx subtarget = target == shifted ? 0 : target;
1939 rtx temp1;
1940 tree type = TREE_TYPE (amount);
1941 tree new_amount = make_tree (type, op1);
1942 tree other_amount
1943 = fold (build (MINUS_EXPR, type,
1944 convert (type,
1945 build_int_2 (GET_MODE_BITSIZE (mode),
1946 0)),
1947 amount));
1949 shifted = force_reg (mode, shifted);
1951 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1952 mode, shifted, new_amount, subtarget, 1);
1953 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1954 mode, shifted, other_amount, 0, 1);
1955 return expand_binop (mode, ior_optab, temp, temp1, target,
1956 unsignedp, methods);
1959 temp = expand_binop (mode,
1960 left ? rotl_optab : rotr_optab,
1961 shifted, op1, target, unsignedp, methods);
1963 /* If we don't have the rotate, but we are rotating by a constant
1964 that is in range, try a rotate in the opposite direction. */
1966 if (temp == 0 && GET_CODE (op1) == CONST_INT
1967 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1968 temp = expand_binop (mode,
1969 left ? rotr_optab : rotl_optab,
1970 shifted,
1971 GEN_INT (GET_MODE_BITSIZE (mode)
1972 - INTVAL (op1)),
1973 target, unsignedp, methods);
1975 else if (unsignedp)
1976 temp = expand_binop (mode,
1977 left ? ashl_optab : lshr_optab,
1978 shifted, op1, target, unsignedp, methods);
1980 /* Do arithmetic shifts.
1981 Also, if we are going to widen the operand, we can just as well
1982 use an arithmetic right-shift instead of a logical one. */
1983 if (temp == 0 && ! rotate
1984 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1986 enum optab_methods methods1 = methods;
1988 /* If trying to widen a logical shift to an arithmetic shift,
1989 don't accept an arithmetic shift of the same size. */
1990 if (unsignedp)
1991 methods1 = OPTAB_MUST_WIDEN;
1993 /* Arithmetic shift */
1995 temp = expand_binop (mode,
1996 left ? ashl_optab : ashr_optab,
1997 shifted, op1, target, unsignedp, methods1);
2000 /* We used to try extzv here for logical right shifts, but that was
2001 only useful for one machine, the VAX, and caused poor code
2002 generation there for lshrdi3, so the code was deleted and a
2003 define_expand for lshrsi3 was added to vax.md. */
2006 if (temp == 0)
2007 abort ();
2008 return temp;
2011 enum alg_code { alg_zero, alg_m, alg_shift,
2012 alg_add_t_m2, alg_sub_t_m2,
2013 alg_add_factor, alg_sub_factor,
2014 alg_add_t2_m, alg_sub_t2_m,
2015 alg_add, alg_subtract, alg_factor, alg_shiftop };
2017 /* This structure records a sequence of operations.
2018 `ops' is the number of operations recorded.
2019 `cost' is their total cost.
2020 The operations are stored in `op' and the corresponding
2021 logarithms of the integer coefficients in `log'.
2023 These are the operations:
2024 alg_zero total := 0;
2025 alg_m total := multiplicand;
2026 alg_shift total := total * coeff;
2027 alg_add_t_m2 total := total + multiplicand * coeff;
2028 alg_sub_t_m2 total := total - multiplicand * coeff;
2029 alg_add_factor total := total * coeff + total;
2030 alg_sub_factor total := total * coeff - total;
2031 alg_add_t2_m total := total * coeff + multiplicand;
2032 alg_sub_t2_m total := total * coeff - multiplicand;
2034 The first operation must be either alg_zero or alg_m. */
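/* Worked example (illustrative, not in the original source): t = 45
   factors as 45 = 5 * 9 = (2**2 + 1)(2**3 + 1), so one valid recording is
     op[0] = alg_m                       total := multiplicand
     op[1] = alg_add_factor  (log = 2)   total := total * 4 + total  =  5*m
     op[2] = alg_add_factor  (log = 3)   total := total * 8 + total  = 45*m
   i.e. two shift-and-add steps instead of a multiply insn.  */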
2036 struct algorithm
2038 short cost;
2039 short ops;
2040 /* The size of the OP and LOG fields are not directly related to the
2041 word size, but the worst-case algorithms will be if we have few
2042 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2043 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2044 in total wordsize operations. */
2045 enum alg_code op[MAX_BITS_PER_WORD];
2046 char log[MAX_BITS_PER_WORD];
2049 static void synth_mult PROTO((struct algorithm *,
2050 unsigned HOST_WIDE_INT,
2051 int));
2052 static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
2053 int, int,
2054 unsigned HOST_WIDE_INT *,
2055 int *, int *));
2056 static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
2057 int));
2058 /* Compute and return the best algorithm for multiplying by T.
2059 The algorithm must cost less than COST_LIMIT.
2060 If retval.cost >= COST_LIMIT, no algorithm was found and all
2061 other fields of the returned struct are undefined. */
2063 static void
2064 synth_mult (alg_out, t, cost_limit)
2065 struct algorithm *alg_out;
2066 unsigned HOST_WIDE_INT t;
2067 int cost_limit;
2069 int m;
2070 struct algorithm *alg_in, *best_alg;
2071 int cost;
2072 unsigned HOST_WIDE_INT q;
2074 /* Indicate that no algorithm is yet found. If no algorithm
2075 is found, this value will be returned and indicate failure. */
2076 alg_out->cost = cost_limit;
2078 if (cost_limit <= 0)
2079 return;
2081 /* t == 1 can be done in zero cost. */
2082 if (t == 1)
2084 alg_out->ops = 1;
2085 alg_out->cost = 0;
2086 alg_out->op[0] = alg_m;
2087 return;
2090 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2091 fail now. */
2092 if (t == 0)
2094 if (zero_cost >= cost_limit)
2095 return;
2096 else
2098 alg_out->ops = 1;
2099 alg_out->cost = zero_cost;
2100 alg_out->op[0] = alg_zero;
2101 return;
2105 /* We'll be needing a couple extra algorithm structures now. */
2107 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2108 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2110 /* If we have a group of zero bits at the low-order part of T, try
2111 multiplying by the remaining bits and then doing a shift. */
2113 if ((t & 1) == 0)
2115 m = floor_log2 (t & -t); /* m = number of low zero bits */
2116 q = t >> m;
2117 cost = shift_cost[m];
2118 synth_mult (alg_in, q, cost_limit - cost);
2120 cost += alg_in->cost;
2121 if (cost < cost_limit)
2123 struct algorithm *x;
2124 x = alg_in, alg_in = best_alg, best_alg = x;
2125 best_alg->log[best_alg->ops] = m;
2126 best_alg->op[best_alg->ops] = alg_shift;
2127 cost_limit = cost;
2131 /* If we have an odd number, add or subtract one. */
2132 if ((t & 1) != 0)
2134 unsigned HOST_WIDE_INT w;
2136 for (w = 1; (w & t) != 0; w <<= 1)
2138 /* If T was -1, then W will be zero after the loop. This is another
2139 case where T ends with ...111. Handling this with (T + 1) and
2140 subtracting 1 produces slightly better code and results in much
2141 faster algorithm selection than treating it like the ...0111 case
2142 below. */
2143 if (w == 0
2144 || (w > 2
2145 /* Reject the case where t is 3.
2146 Thus we prefer addition in that case. */
2147 && t != 3))
2149 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2151 cost = add_cost;
2152 synth_mult (alg_in, t + 1, cost_limit - cost);
2154 cost += alg_in->cost;
2155 if (cost < cost_limit)
2157 struct algorithm *x;
2158 x = alg_in, alg_in = best_alg, best_alg = x;
2159 best_alg->log[best_alg->ops] = 0;
2160 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2161 cost_limit = cost;
2164 else
2166 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2168 cost = add_cost;
2169 synth_mult (alg_in, t - 1, cost_limit - cost);
2171 cost += alg_in->cost;
2172 if (cost < cost_limit)
2174 struct algorithm *x;
2175 x = alg_in, alg_in = best_alg, best_alg = x;
2176 best_alg->log[best_alg->ops] = 0;
2177 best_alg->op[best_alg->ops] = alg_add_t_m2;
2178 cost_limit = cost;
2183 /* Look for factors of t of the form
2184 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2185 If we find such a factor, we can multiply by t using an algorithm that
2186 multiplies by q, shifts the result by m, and adds it to or subtracts it from itself.
2188 We search for large factors first and loop down, even if large factors
2189 are less probable than small; if we find a large factor we will find a
2190 good sequence quickly, and therefore be able to prune (by decreasing
2191 COST_LIMIT) the search. */
2193 for (m = floor_log2 (t - 1); m >= 2; m--)
2195 unsigned HOST_WIDE_INT d;
2197 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2198 if (t % d == 0 && t > d)
2200 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2201 synth_mult (alg_in, t / d, cost_limit - cost);
2203 cost += alg_in->cost;
2204 if (cost < cost_limit)
2206 struct algorithm *x;
2207 x = alg_in, alg_in = best_alg, best_alg = x;
2208 best_alg->log[best_alg->ops] = m;
2209 best_alg->op[best_alg->ops] = alg_add_factor;
2210 cost_limit = cost;
2212 /* Other factors will have been taken care of in the recursion. */
2213 break;
2216 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2217 if (t % d == 0 && t > d)
2219 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2220 synth_mult (alg_in, t / d, cost_limit - cost);
2222 cost += alg_in->cost;
2223 if (cost < cost_limit)
2225 struct algorithm *x;
2226 x = alg_in, alg_in = best_alg, best_alg = x;
2227 best_alg->log[best_alg->ops] = m;
2228 best_alg->op[best_alg->ops] = alg_sub_factor;
2229 cost_limit = cost;
2231 break;
2235 /* Try shift-and-add (load effective address) instructions,
2236 i.e. do a*3, a*5, a*9. */
2237 if ((t & 1) != 0)
2239 q = t - 1;
2240 q = q & -q;
2241 m = exact_log2 (q);
2242 if (m >= 0)
2244 cost = shiftadd_cost[m];
2245 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2247 cost += alg_in->cost;
2248 if (cost < cost_limit)
2250 struct algorithm *x;
2251 x = alg_in, alg_in = best_alg, best_alg = x;
2252 best_alg->log[best_alg->ops] = m;
2253 best_alg->op[best_alg->ops] = alg_add_t2_m;
2254 cost_limit = cost;
2258 q = t + 1;
2259 q = q & -q;
2260 m = exact_log2 (q);
2261 if (m >= 0)
2263 cost = shiftsub_cost[m];
2264 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2266 cost += alg_in->cost;
2267 if (cost < cost_limit)
2269 struct algorithm *x;
2270 x = alg_in, alg_in = best_alg, best_alg = x;
2271 best_alg->log[best_alg->ops] = m;
2272 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2273 cost_limit = cost;
2278 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2279 we have not found any algorithm. */
2280 if (cost_limit == alg_out->cost)
2281 return;
2283 /* If we are getting too long a sequence for `struct algorithm'
2284 to record, make this search fail. */
2285 if (best_alg->ops == MAX_BITS_PER_WORD)
2286 return;
2288 /* Copy the algorithm from temporary space to the space at alg_out.
2289 We avoid using structure assignment because the majority of
2290 best_alg is normally undefined, and this is a critical function. */
2291 alg_out->ops = best_alg->ops + 1;
2292 alg_out->cost = cost_limit;
2293 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2294 alg_out->ops * sizeof *alg_out->op);
2295 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2296 alg_out->ops * sizeof *alg_out->log);
2299 /* Perform a multiplication and return an rtx for the result.
2300 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2301 TARGET is a suggestion for where to store the result (an rtx).
2303 We check specially for a constant integer as OP1.
2304 If you want this check for OP0 as well, then before calling
2305 you should swap the two operands if OP0 would be constant. */
2308 expand_mult (mode, op0, op1, target, unsignedp)
2309 enum machine_mode mode;
2310 register rtx op0, op1, target;
2311 int unsignedp;
2313 rtx const_op1 = op1;
2315 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2316 less than or equal in size to `unsigned int' this doesn't matter.
2317 If the mode is larger than `unsigned int', then synth_mult works only
2318 if the constant value exactly fits in an `unsigned int' without any
2319 truncation. This means that multiplying by negative values does
2320 not work; results are off by 2^32 on a 32 bit machine. */
2322 /* If we are multiplying in DImode, it may still be a win
2323 to try to work with shifts and adds. */
2324 if (GET_CODE (op1) == CONST_DOUBLE
2325 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2326 && HOST_BITS_PER_INT >= BITS_PER_WORD
2327 && CONST_DOUBLE_HIGH (op1) == 0)
2328 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2329 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2330 && GET_CODE (op1) == CONST_INT
2331 && INTVAL (op1) < 0)
2332 const_op1 = 0;
2334 /* We used to test optimize here, on the grounds that it's better to
2335 produce a smaller program when -O is not used.
2336 But this causes such a terrible slowdown sometimes
2337 that it seems better to use synth_mult always. */
2339 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2341 struct algorithm alg;
2342 struct algorithm alg2;
2343 HOST_WIDE_INT val = INTVAL (op1);
2344 HOST_WIDE_INT val_so_far;
2345 rtx insn;
2346 int mult_cost;
2347 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2349 /* Try to do the computation three ways: multiply by the negative of OP1
2350 and then negate, do the multiplication directly, or do multiplication
2351 by OP1 - 1. */
2353 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2354 mult_cost = MIN (12 * add_cost, mult_cost);
2356 synth_mult (&alg, val, mult_cost);
2358 /* This works only if the inverted value actually fits in an
2359 `unsigned int' */
2360 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2362 synth_mult (&alg2, - val,
2363 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2364 if (alg2.cost + negate_cost < alg.cost)
2365 alg = alg2, variant = negate_variant;
2368 /* This proves very useful for division-by-constant. */
2369 synth_mult (&alg2, val - 1,
2370 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2371 if (alg2.cost + add_cost < alg.cost)
2372 alg = alg2, variant = add_variant;
2374 if (alg.cost < mult_cost)
2376 /* We found something cheaper than a multiply insn. */
2377 int opno;
2378 rtx accum, tem;
2380 op0 = protect_from_queue (op0, 0);
2382 /* Avoid referencing memory over and over.
2383 For speed, but also for correctness when mem is volatile. */
2384 if (GET_CODE (op0) == MEM)
2385 op0 = force_reg (mode, op0);
2387 /* ACCUM starts out either as OP0 or as a zero, depending on
2388 the first operation. */
2390 if (alg.op[0] == alg_zero)
2392 accum = copy_to_mode_reg (mode, const0_rtx);
2393 val_so_far = 0;
2395 else if (alg.op[0] == alg_m)
2397 accum = copy_to_mode_reg (mode, op0);
2398 val_so_far = 1;
2400 else
2401 abort ();
2403 for (opno = 1; opno < alg.ops; opno++)
2405 int log = alg.log[opno];
2406 int preserve = preserve_subexpressions_p ();
2407 rtx shift_subtarget = preserve ? 0 : accum;
2408 rtx add_target
2409 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2410 && ! preserve)
2411 ? target : 0;
2412 rtx accum_target = preserve ? 0 : accum;
2414 switch (alg.op[opno])
2416 case alg_shift:
2417 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2418 build_int_2 (log, 0), NULL_RTX, 0);
2419 val_so_far <<= log;
2420 break;
2422 case alg_add_t_m2:
2423 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2424 build_int_2 (log, 0), NULL_RTX, 0);
2425 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2426 add_target
2427 ? add_target : accum_target);
2428 val_so_far += (HOST_WIDE_INT) 1 << log;
2429 break;
2431 case alg_sub_t_m2:
2432 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2433 build_int_2 (log, 0), NULL_RTX, 0);
2434 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2435 add_target
2436 ? add_target : accum_target);
2437 val_so_far -= (HOST_WIDE_INT) 1 << log;
2438 break;
2440 case alg_add_t2_m:
2441 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2442 build_int_2 (log, 0), shift_subtarget,
2444 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2445 add_target
2446 ? add_target : accum_target);
2447 val_so_far = (val_so_far << log) + 1;
2448 break;
2450 case alg_sub_t2_m:
2451 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2452 build_int_2 (log, 0), shift_subtarget,
2454 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2455 add_target
2456 ? add_target : accum_target);
2457 val_so_far = (val_so_far << log) - 1;
2458 break;
2460 case alg_add_factor:
2461 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2462 build_int_2 (log, 0), NULL_RTX, 0);
2463 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2464 add_target
2465 ? add_target : accum_target);
2466 val_so_far += val_so_far << log;
2467 break;
2469 case alg_sub_factor:
2470 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2471 build_int_2 (log, 0), NULL_RTX, 0);
2472 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2473 (add_target ? add_target
2474 : preserve ? 0 : tem));
2475 val_so_far = (val_so_far << log) - val_so_far;
2476 break;
2478 default:
2479 abort ();
2482 /* Write a REG_EQUAL note on the last insn so that we can cse
2483 multiplication sequences. */
2485 insn = get_last_insn ();
2486 set_unique_reg_note (insn,
2487 REG_EQUAL,
2488 gen_rtx_MULT (mode, op0,
2489 GEN_INT (val_so_far)));
2492 if (variant == negate_variant)
2494 val_so_far = - val_so_far;
2495 accum = expand_unop (mode, neg_optab, accum, target, 0);
2497 else if (variant == add_variant)
2499 val_so_far = val_so_far + 1;
2500 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2503 if (val != val_so_far)
2504 abort ();
2506 return accum;
2510 /* This used to use umul_optab if unsigned, but for non-widening multiply
2511 there is no difference between signed and unsigned. */
2512 op0 = expand_binop (mode, smul_optab,
2513 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2514 if (op0 == 0)
2515 abort ();
2516 return op0;
2519 /* Return the smallest n such that 2**n >= X. */
2522 ceil_log2 (x)
2523 unsigned HOST_WIDE_INT x;
2525 return floor_log2 (x - 1) + 1;
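/* Examples (illustrative): ceil_log2 (5) == 3 and ceil_log2 (8) == 3.
   The X == 1 case also works because floor_log2 (0) returns -1.  */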
2528 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2529 replace division by D, and put the least significant N bits of the result
2530 in *MULTIPLIER_PTR and return the most significant bit.
2532 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2533 needed precision is in PRECISION (should be <= N).
2535 PRECISION should be as small as possible so this function can choose
2536 the multiplier more freely.
2538 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2539 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2541 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2542 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2544 static
2545 unsigned HOST_WIDE_INT
2546 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2547 unsigned HOST_WIDE_INT d;
2548 int n;
2549 int precision;
2550 unsigned HOST_WIDE_INT *multiplier_ptr;
2551 int *post_shift_ptr;
2552 int *lgup_ptr;
2554 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2555 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2556 int lgup, post_shift;
2557 int pow, pow2;
2558 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2560 /* lgup = ceil(log2(divisor)); */
2561 lgup = ceil_log2 (d);
2563 if (lgup > n)
2564 abort ();
2566 pow = n + lgup;
2567 pow2 = n + lgup - precision;
2569 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2571 /* We could handle this with some effort, but this case is much better
2572 handled directly with a scc insn, so rely on caller using that. */
2573 abort ();
2576 /* mlow = 2^(N + lgup)/d */
2577 if (pow >= HOST_BITS_PER_WIDE_INT)
2579 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2580 nl = 0;
2582 else
2584 nh = 0;
2585 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2587 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2588 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2590 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2591 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2592 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2593 else
2594 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2595 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2596 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2598 if (mhigh_hi && nh - d >= d)
2599 abort ();
2600 if (mhigh_hi > 1 || mlow_hi > 1)
2601 abort ();
2602 /* assert that mlow < mhigh. */
2603 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2604 abort ();
2606 /* If precision == N, then mlow, mhigh exceed 2^N
2607 (but they do not exceed 2^(N+1)). */
2609 /* Reduce to lowest terms */
2610 for (post_shift = lgup; post_shift > 0; post_shift--)
2612 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2613 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2614 if (ml_lo >= mh_lo)
2615 break;
2617 mlow_hi = 0;
2618 mlow_lo = ml_lo;
2619 mhigh_hi = 0;
2620 mhigh_lo = mh_lo;
2623 *post_shift_ptr = post_shift;
2624 *lgup_ptr = lgup;
2625 if (n < HOST_BITS_PER_WIDE_INT)
2627 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2628 *multiplier_ptr = mhigh_lo & mask;
2629 return mhigh_lo >= mask;
2631 else
2633 *multiplier_ptr = mhigh_lo;
2634 return mhigh_hi;
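/* Worked example (illustrative, not in the original source): for d = 7
   with n = precision = 32, lgup = 3, so mhigh = (2**35 + 2**3)/7
   = 0x124924925.  That exceeds 2**32, so the function stores the low
   32 bits 0x24924925 in *MULTIPLIER_PTR, sets *POST_SHIFT_PTR to 3, and
   returns 1 (the 33rd multiplier bit); the caller then uses the
   subtract/shift/add fixup sequence seen in expand_divmod.  */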
2638 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2639 congruent to 1 (mod 2**N). */
2641 static unsigned HOST_WIDE_INT
2642 invert_mod2n (x, n)
2643 unsigned HOST_WIDE_INT x;
2644 int n;
2646 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2648 /* The algorithm notes that the choice y = x satisfies
2649 x*y == 1 mod 2^3, since x is assumed odd.
2650 Each iteration doubles the number of bits of significance in y. */
2652 unsigned HOST_WIDE_INT mask;
2653 unsigned HOST_WIDE_INT y = x;
2654 int nbit = 3;
2656 mask = (n == HOST_BITS_PER_WIDE_INT
2657 ? ~(unsigned HOST_WIDE_INT) 0
2658 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2660 while (nbit < n)
2662 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2663 nbit *= 2;
2665 return y;
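/* Worked example (illustrative, not in the original source):
   invert_mod2n (3, 8) starts with y = 3 (3*3 == 9 == 1 mod 2**3) and
   iterates y = y*(2 - 3*y) mod 256:  3 -> 235 (exact mod 2**6) -> 171,
   and indeed 3 * 171 == 513 == 1 (mod 256).  */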
2668 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2669 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2670 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2671 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2672 become signed.
2674 The result is put in TARGET if that is convenient.
2676 MODE is the mode of operation. */
2679 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2680 enum machine_mode mode;
2681 register rtx adj_operand, op0, op1, target;
2682 int unsignedp;
2684 rtx tem;
2685 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2687 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2688 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2689 NULL_RTX, 0);
2690 tem = expand_and (tem, op1, NULL_RTX);
2691 adj_operand
2692 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2693 adj_operand);
2695 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2696 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2697 NULL_RTX, 0);
2698 tem = expand_and (tem, op0, NULL_RTX);
2699 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2700 target);
2702 return target;
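/* The identity behind the two fixups above (stated here for illustration;
   it is implicit in the code): for N-bit operands,
     high_u (x, y) == high_s (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)
   modulo 2**N, because x_unsigned = x_signed + 2**N when x is negative.
   Each shift/AND pair computes one of the conditional addends.  */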
2705 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2706 in TARGET if that is convenient, and return where the result is. If the
2707 operation cannot be performed, 0 is returned.
2709 MODE is the mode of operation and result.
2711 UNSIGNEDP nonzero means unsigned multiply.
2713 MAX_COST is the total allowed cost for the expanded RTL. */
2716 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2717 enum machine_mode mode;
2718 register rtx op0, target;
2719 unsigned HOST_WIDE_INT cnst1;
2720 int unsignedp;
2721 int max_cost;
2723 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2724 optab mul_highpart_optab;
2725 optab moptab;
2726 rtx tem;
2727 int size = GET_MODE_BITSIZE (mode);
2728 rtx op1, wide_op1;
2730 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2731 if (size > HOST_BITS_PER_WIDE_INT)
2732 abort ();
2734 op1 = GEN_INT (cnst1);
2736 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2737 wide_op1 = op1;
2738 else
2739 wide_op1
2740 = immed_double_const (cnst1,
2741 (unsignedp
2742 ? (HOST_WIDE_INT) 0
2743 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2744 wider_mode);
2746 /* expand_mult handles constant multiplication of word_mode
2747 or narrower. It does a poor job for large modes. */
2748 if (size < BITS_PER_WORD
2749 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2751 /* We have to do this, since expand_binop doesn't do conversion for
2752 multiply. Maybe change expand_binop to handle widening multiply? */
2753 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2755 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2756 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2757 build_int_2 (size, 0), NULL_RTX, 1);
2758 return convert_modes (mode, wider_mode, tem, unsignedp);
2761 if (target == 0)
2762 target = gen_reg_rtx (mode);
2764 /* Firstly, try using a multiplication insn that only generates the needed
2765 high part of the product, and in the sign flavor of unsignedp. */
2766 if (mul_highpart_cost[(int) mode] < max_cost)
2768 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2769 target = expand_binop (mode, mul_highpart_optab,
2770 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2771 if (target)
2772 return target;
2775 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2776 Need to adjust the result after the multiplication. */
2777 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2779 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2780 target = expand_binop (mode, mul_highpart_optab,
2781 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2782 if (target)
2783 /* We used the wrong signedness. Adjust the result. */
2784 return expand_mult_highpart_adjust (mode, target, op0,
2785 op1, target, unsignedp);
2788 /* Try widening multiplication. */
2789 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2790 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2791 && mul_widen_cost[(int) wider_mode] < max_cost)
2793 op1 = force_reg (mode, op1);
2794 goto try;
2797 /* Try widening the mode and perform a non-widening multiplication. */
2798 moptab = smul_optab;
2799 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2800 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2802 op1 = wide_op1;
2803 goto try;
2806 /* Try widening multiplication of opposite signedness, and adjust. */
2807 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2808 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2809 && (mul_widen_cost[(int) wider_mode]
2810 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2812 rtx regop1 = force_reg (mode, op1);
2813 tem = expand_binop (wider_mode, moptab, op0, regop1,
2814 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2815 if (tem != 0)
2817 /* Extract the high half of the just generated product. */
2818 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2819 build_int_2 (size, 0), NULL_RTX, 1);
2820 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2821 /* We used the wrong signedness. Adjust the result. */
2822 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2823 target, unsignedp);
2827 return 0;
2829 try:
2830 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2831 tem = expand_binop (wider_mode, moptab, op0, op1,
2832 NULL_RTX, unsignedp, OPTAB_WIDEN);
2833 if (tem == 0)
2834 return 0;
2836 /* Extract the high half of the just generated product. */
2837 if (mode == word_mode)
2839 return gen_highpart (mode, tem);
2841 else
2843 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2844 build_int_2 (size, 0), NULL_RTX, 1);
2845 return convert_modes (mode, wider_mode, tem, unsignedp);
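#if 0
/* Minimal sketch (an assumption for illustration, not part of GNU CC) of
   the widen-multiply-and-shift fallback above, for a 32-bit mode on a
   host where long long is 64 bits: the high half of the product is the
   wider product shifted right by the mode's bitsize.  */
static unsigned int
mulhi_u32_example (a, b)
     unsigned int a, b;
{
  return (unsigned int) (((unsigned long long) a * b) >> 32);
}
#endif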
2849 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2850 if that is convenient, and returning where the result is.
2851 You may request either the quotient or the remainder as the result;
2852 specify REM_FLAG nonzero to get the remainder.
2854 CODE is the expression code for which kind of division this is;
2855 it controls how rounding is done. MODE is the machine mode to use.
2856 UNSIGNEDP nonzero means do unsigned division. */
2858 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2859 and then correct it by or'ing in missing high bits
2860 if result of ANDI is nonzero.
2861 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2862 This could optimize to a bfexts instruction.
2863 But C doesn't use these operations, so their optimizations are
2864 left for later. */
2865 /* ??? For modulo, we don't actually need the highpart of the first product,
2866 the low part will do nicely. And for small divisors, the second multiply
2867 can also be a low-part only multiply or even be completely left out.
2868 E.g. to calculate the remainder of a division by 3 with a 32 bit
2869 multiply, multiply with 0x55555556 and extract the upper two bits;
2870 the result is exact for inputs up to 0x1fffffff.
2871 The input range can be reduced by using cross-sum rules.
2872 For odd divisors >= 3, the following table gives right shift counts
2873 so that if a number is shifted by an integer multiple of the given
2874 amount, the remainder stays the same:
2875 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2876 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2877 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2878 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2879 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2881 Cross-sum rules for even numbers can be derived by leaving as many bits
2882 to the right alone as the divisor has zeros to the right.
2883 E.g. if x is an unsigned 32 bit number:
2884 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2887 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
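/* Examples (illustrative): 8 & 7 == 0 and 0 & -1 == 0, so 8 and 0 pass;
   12 & 11 == 8, so 12 does not.  */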
2890 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2891 int rem_flag;
2892 enum tree_code code;
2893 enum machine_mode mode;
2894 register rtx op0, op1, target;
2895 int unsignedp;
2897 enum machine_mode compute_mode;
2898 register rtx tquotient;
2899 rtx quotient = 0, remainder = 0;
2900 rtx last;
2901 int size;
2902 rtx insn, set;
2903 optab optab1, optab2;
2904 int op1_is_constant, op1_is_pow2;
2905 int max_cost, extra_cost;
2906 static HOST_WIDE_INT last_div_const = 0;
2908 op1_is_constant = GET_CODE (op1) == CONST_INT;
2909 op1_is_pow2 = (op1_is_constant
2910 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2911 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2914 /* This is the structure of expand_divmod:
2916 First comes code to fix up the operands so we can perform the operations
2917 correctly and efficiently.
2919 Second comes a switch statement with code specific for each rounding mode.
2920 For some special operands this code emits all RTL for the desired
2921 operation, for other cases, it generates only a quotient and stores it in
2922 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2923 to indicate that it has not done anything.
2925 Last comes code that finishes the operation. If QUOTIENT is set and
2926 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2927 QUOTIENT is not set, it is computed using trunc rounding.
2929 We try to generate special code for division and remainder when OP1 is a
2930 constant. If |OP1| = 2**n we can use shifts and some other fast
2931 operations. For other values of OP1, we compute a carefully selected
2932 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2933 by m.
2935 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2936 half of the product. Different strategies for generating the product are
2937 implemented in expand_mult_highpart.
2939 If what we actually want is the remainder, we generate that by another
2940 by-constant multiplication and a subtraction. */
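/* Worked example (illustrative, not in the original source): unsigned
   32-bit division by 10 uses the multiplier 0xCCCCCCCD (a fixed-point
   approximation of 1/10) and a post-shift of 3:
     q = ((unsigned long long) x * 0xCCCCCCCD >> 32) >> 3;
     r = x - q * 10;	(when the remainder is wanted)
   exactly the multiply-highpart-and-shift shape described above.  */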
2942 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2943 code below will malfunction if we are, so check here and handle
2944 the special case if so. */
2945 if (op1 == const1_rtx)
2946 return rem_flag ? const0_rtx : op0;
2948 if (target
2949 /* Don't use the function value register as a target
2950 since we have to read it as well as write it,
2951 and function-inlining gets confused by this. */
2952 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2953 /* Don't clobber an operand while doing a multi-step calculation. */
2954 || ((rem_flag || op1_is_constant)
2955 && (reg_mentioned_p (target, op0)
2956 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2957 || reg_mentioned_p (target, op1)
2958 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2959 target = 0;
2961 /* Get the mode in which to perform this computation. Normally it will
2962 be MODE, but sometimes we can't do the desired operation in MODE.
2963 If so, pick a wider mode in which we can do the operation. Convert
2964 to that mode at the start to avoid repeated conversions.
2966 First see what operations we need. These depend on the expression
2967 we are evaluating. (We assume that divxx3 insns exist under the
2968 same conditions that modxx3 insns and that these insns don't normally
2969 fail. If these assumptions are not correct, we may generate less
2970 efficient code in some cases.)
2972 Then see if we find a mode in which we can open-code that operation
2973 (either a division, modulus, or shift). Finally, check for the smallest
2974 mode for which we can do the operation with a library call. */
2976 /* We might want to refine this now that we have division-by-constant
2977 optimization. Since expand_mult_highpart tries so many variants, it is
2978 not straightforward to generalize this. Maybe we should make an array
2979 of possible modes in init_expmed? Save this for GCC 2.7. */
2981 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2982 : (unsignedp ? udiv_optab : sdiv_optab));
2983 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2985 for (compute_mode = mode; compute_mode != VOIDmode;
2986 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2987 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2988 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2989 break;
2991 if (compute_mode == VOIDmode)
2992 for (compute_mode = mode; compute_mode != VOIDmode;
2993 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2994 if (optab1->handlers[(int) compute_mode].libfunc
2995 || optab2->handlers[(int) compute_mode].libfunc)
2996 break;
2998 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2999 in expand_binop. */
3000 if (compute_mode == VOIDmode)
3001 compute_mode = mode;
3003 if (target && GET_MODE (target) == compute_mode)
3004 tquotient = target;
3005 else
3006 tquotient = gen_reg_rtx (compute_mode);
3008 size = GET_MODE_BITSIZE (compute_mode);
3009 #if 0
3010 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3011 (mode), and thereby get better code when OP1 is a constant. Do that
3012 later. It will require going over all usages of SIZE below. */
3013 size = GET_MODE_BITSIZE (mode);
3014 #endif
3016 /* Only deduct something for a REM if the last divide done was
3017 for a different constant. Then set the constant of the last
3018 divide. */
3019 max_cost = div_cost[(int) compute_mode]
3020 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3021 && INTVAL (op1) == last_div_const)
3022 ? mul_cost[(int) compute_mode] + add_cost : 0);
3024 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3026 /* Now convert to the best mode to use. */
3027 if (compute_mode != mode)
3029 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3030 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3032 /* convert_modes may have placed op1 into a register, so we
3033 must recompute the following. */
3034 op1_is_constant = GET_CODE (op1) == CONST_INT;
3035 op1_is_pow2 = (op1_is_constant
3036 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3037 || (! unsignedp
3038 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3041 /* If one of the operands is a volatile MEM, copy it into a register. */
3043 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3044 op0 = force_reg (compute_mode, op0);
3045 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3046 op1 = force_reg (compute_mode, op1);
3048 /* If we need the remainder or if OP1 is constant, we need to
3049 put OP0 in a register in case it has any queued subexpressions. */
3050 if (rem_flag || op1_is_constant)
3051 op0 = force_reg (compute_mode, op0);
3053 last = get_last_insn ();
3055 /* Promote floor rounding to trunc rounding for unsigned operations. */
3056 if (unsignedp)
3058 if (code == FLOOR_DIV_EXPR)
3059 code = TRUNC_DIV_EXPR;
3060 if (code == FLOOR_MOD_EXPR)
3061 code = TRUNC_MOD_EXPR;
3062 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3063 code = TRUNC_DIV_EXPR;
3066 if (op1 != const0_rtx)
3067 switch (code)
3069 case TRUNC_MOD_EXPR:
3070 case TRUNC_DIV_EXPR:
3071 if (op1_is_constant)
3073 if (unsignedp)
3075 unsigned HOST_WIDE_INT mh, ml;
3076 int pre_shift, post_shift;
3077 int dummy;
3078 unsigned HOST_WIDE_INT d = INTVAL (op1);
3080 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3082 pre_shift = floor_log2 (d);
3083 if (rem_flag)
3085 remainder
3086 = expand_binop (compute_mode, and_optab, op0,
3087 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3088 remainder, 1,
3089 OPTAB_LIB_WIDEN);
3090 if (remainder)
3091 return gen_lowpart (mode, remainder);
3093 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3094 build_int_2 (pre_shift, 0),
3095 tquotient, 1);
3097 else if (size <= HOST_BITS_PER_WIDE_INT)
3099 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3101 /* Most significant bit of divisor is set; emit an scc
3102 insn. */
3103 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3104 compute_mode, 1, 1);
3105 if (quotient == 0)
3106 goto fail1;
3108 else
3110 /* Find a suitable multiplier and right shift count
3111 instead of multiplying with D. */
3113 mh = choose_multiplier (d, size, size,
3114 &ml, &post_shift, &dummy);
3116 /* If the suggested multiplier is more than SIZE bits,
3117 we can do better for even divisors, using an
3118 initial right shift. */
3119 if (mh != 0 && (d & 1) == 0)
3121 pre_shift = floor_log2 (d & -d);
3122 mh = choose_multiplier (d >> pre_shift, size,
3123 size - pre_shift,
3124 &ml, &post_shift, &dummy);
3125 if (mh)
3126 abort ();
3128 else
3129 pre_shift = 0;
3131 if (mh != 0)
3133 rtx t1, t2, t3, t4;
3135 extra_cost = (shift_cost[post_shift - 1]
3136 + shift_cost[1] + 2 * add_cost);
3137 t1 = expand_mult_highpart (compute_mode, op0, ml,
3138 NULL_RTX, 1,
3139 max_cost - extra_cost);
3140 if (t1 == 0)
3141 goto fail1;
3142 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3143 op0, t1),
3144 NULL_RTX);
3145 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3146 build_int_2 (1, 0), NULL_RTX, 1);
3147 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3148 t1, t3),
3149 NULL_RTX);
3150 quotient
3151 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3152 build_int_2 (post_shift - 1, 0),
3153 tquotient, 1);
3155 else
3157 rtx t1, t2;
3159 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3160 build_int_2 (pre_shift, 0),
3161 NULL_RTX, 1);
3162 extra_cost = (shift_cost[pre_shift]
3163 + shift_cost[post_shift]);
3164 t2 = expand_mult_highpart (compute_mode, t1, ml,
3165 NULL_RTX, 1,
3166 max_cost - extra_cost);
3167 if (t2 == 0)
3168 goto fail1;
3169 quotient
3170 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3171 build_int_2 (post_shift, 0),
3172 tquotient, 1);
3176 else /* Mode too wide to use tricky code */
3177 break;
3179 insn = get_last_insn ();
3180 if (insn != last
3181 && (set = single_set (insn)) != 0
3182 && SET_DEST (set) == quotient)
3183 set_unique_reg_note (insn,
3184 REG_EQUAL,
3185 gen_rtx_UDIV (compute_mode, op0, op1));
3187 else /* TRUNC_DIV, signed */
3189 unsigned HOST_WIDE_INT ml;
3190 int lgup, post_shift;
3191 HOST_WIDE_INT d = INTVAL (op1);
3192 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3194 /* n rem d = n rem -d */
3195 if (rem_flag && d < 0)
3197 d = abs_d;
3198 op1 = GEN_INT (abs_d);
3201 if (d == 1)
3202 quotient = op0;
3203 else if (d == -1)
3204 quotient = expand_unop (compute_mode, neg_optab, op0,
3205 tquotient, 0);
3206 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3208 /* This case is not handled correctly below. */
3209 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3210 compute_mode, 1, 1);
3211 if (quotient == 0)
3212 goto fail1;
3214 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3215 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3217 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3219 lgup = floor_log2 (abs_d);
3220 if (abs_d != 2 && BRANCH_COST < 3)
3222 rtx label = gen_label_rtx ();
3223 rtx t1;
3225 t1 = copy_to_mode_reg (compute_mode, op0);
3226 do_cmp_and_jump (t1, const0_rtx, GE,
3227 compute_mode, label);
3228 expand_inc (t1, GEN_INT (abs_d - 1));
3229 emit_label (label);
3230 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3231 build_int_2 (lgup, 0),
3232 tquotient, 0);
3234 else
3236 rtx t1, t2, t3;
3237 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3238 build_int_2 (size - 1, 0),
3239 NULL_RTX, 0);
3240 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3241 build_int_2 (size - lgup, 0),
3242 NULL_RTX, 1);
3243 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3244 op0, t2),
3245 NULL_RTX);
3246 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3247 build_int_2 (lgup, 0),
3248 tquotient, 0);
3251 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3252 the quotient. */
3253 if (d < 0)
3255 insn = get_last_insn ();
3256 if (insn != last
3257 && (set = single_set (insn)) != 0
3258 && SET_DEST (set) == quotient
3259 && abs_d < ((unsigned HOST_WIDE_INT) 1
3260 << (HOST_BITS_PER_WIDE_INT - 1)))
3261 set_unique_reg_note (insn,
3262 REG_EQUAL,
3263 gen_rtx_DIV (compute_mode,
3264 op0,
3265 GEN_INT (abs_d)));
3267 quotient = expand_unop (compute_mode, neg_optab,
3268 quotient, quotient, 0);
3271 else if (size <= HOST_BITS_PER_WIDE_INT)
3273 choose_multiplier (abs_d, size, size - 1,
3274 &ml, &post_shift, &lgup);
3275 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3277 rtx t1, t2, t3;
3279 extra_cost = (shift_cost[post_shift]
3280 + shift_cost[size - 1] + add_cost);
3281 t1 = expand_mult_highpart (compute_mode, op0, ml,
3282 NULL_RTX, 0,
3283 max_cost - extra_cost);
3284 if (t1 == 0)
3285 goto fail1;
3286 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3287 build_int_2 (post_shift, 0), NULL_RTX, 0);
3288 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3289 build_int_2 (size - 1, 0), NULL_RTX, 0);
3290 if (d < 0)
3291 quotient
3292 = force_operand (gen_rtx_MINUS (compute_mode,
3293 t3, t2),
3294 tquotient);
3295 else
3296 quotient
3297 = force_operand (gen_rtx_MINUS (compute_mode,
3298 t2, t3),
3299 tquotient);
3301 else
3303 rtx t1, t2, t3, t4;
3305 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3306 extra_cost = (shift_cost[post_shift]
3307 + shift_cost[size - 1] + 2 * add_cost);
3308 t1 = expand_mult_highpart (compute_mode, op0, ml,
3309 NULL_RTX, 0,
3310 max_cost - extra_cost);
3311 if (t1 == 0)
3312 goto fail1;
3313 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3314 t1, op0),
3315 NULL_RTX);
3316 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3317 build_int_2 (post_shift, 0),
3318 NULL_RTX, 0);
3319 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3320 build_int_2 (size - 1, 0),
3321 NULL_RTX, 0);
3322 if (d < 0)
3323 quotient
3324 = force_operand (gen_rtx_MINUS (compute_mode,
3325 t4, t3),
3326 tquotient);
3327 else
3328 quotient
3329 = force_operand (gen_rtx_MINUS (compute_mode,
3330 t3, t4),
3331 tquotient);
3334 else /* Mode too wide to use tricky code */
3335 break;
3337 insn = get_last_insn ();
3338 if (insn != last
3339 && (set = single_set (insn)) != 0
3340 && SET_DEST (set) == quotient)
3341 set_unique_reg_note (insn,
3342 REG_EQUAL,
3343 gen_rtx_DIV (compute_mode, op0, op1));
3345 break;
3347 fail1:
3348 delete_insns_since (last);
3349 break;
3351 case FLOOR_DIV_EXPR:
3352 case FLOOR_MOD_EXPR:
3353 /* We will come here only for signed operations. */
3354 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3356 unsigned HOST_WIDE_INT mh, ml;
3357 int pre_shift, lgup, post_shift;
3358 HOST_WIDE_INT d = INTVAL (op1);
3360 if (d > 0)
3362 /* We could just as easily deal with negative constants here,
3363 but it does not seem worth the trouble for GCC 2.6. */
3364 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3366 pre_shift = floor_log2 (d);
3367 if (rem_flag)
3369 remainder = expand_binop (compute_mode, and_optab, op0,
3370 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3371 remainder, 0, OPTAB_LIB_WIDEN);
3372 if (remainder)
3373 return gen_lowpart (mode, remainder);
3375 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3376 build_int_2 (pre_shift, 0),
3377 tquotient, 0);
3379 else
3381 rtx t1, t2, t3, t4;
3383 mh = choose_multiplier (d, size, size - 1,
3384 &ml, &post_shift, &lgup);
3385 if (mh)
3386 abort ();
3388 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3389 build_int_2 (size - 1, 0), NULL_RTX, 0);
3390 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3391 NULL_RTX, 0, OPTAB_WIDEN);
3392 extra_cost = (shift_cost[post_shift]
3393 + shift_cost[size - 1] + 2 * add_cost);
3394 t3 = expand_mult_highpart (compute_mode, t2, ml,
3395 NULL_RTX, 1,
3396 max_cost - extra_cost);
3397 if (t3 != 0)
3399 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3400 build_int_2 (post_shift, 0),
3401 NULL_RTX, 1);
3402 quotient = expand_binop (compute_mode, xor_optab,
3403 t4, t1, tquotient, 0,
3404 OPTAB_WIDEN);
3408 else
3410 rtx nsign, t1, t2, t3, t4;
3411 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3412 op0, constm1_rtx), NULL_RTX);
3413 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3414 0, OPTAB_WIDEN);
3415 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3416 build_int_2 (size - 1, 0), NULL_RTX, 0);
3417 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3418 NULL_RTX);
3419 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3420 NULL_RTX, 0);
3421 if (t4)
3423 rtx t5;
3424 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3425 NULL_RTX, 0);
3426 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3427 t4, t5),
3428 tquotient);
3433 if (quotient != 0)
3434 break;
3435 delete_insns_since (last);
3437 /* Try using an instruction that produces both the quotient and
3438 remainder, using truncation. We can easily compensate the quotient
3439 or remainder to get floor rounding, once we have the remainder.
3440 Notice that we compute also the final remainder value here,
3441 and return the result right away. */
3442 if (target == 0 || GET_MODE (target) != compute_mode)
3443 target = gen_reg_rtx (compute_mode);
3445 if (rem_flag)
3447 remainder
3448 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3449 quotient = gen_reg_rtx (compute_mode);
3451 else
3453 quotient
3454 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3455 remainder = gen_reg_rtx (compute_mode);
3458 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3459 quotient, remainder, 0))
3461 /* This could be computed with a branch-less sequence.
3462 Save that for later. */
3463 rtx tem;
3464 rtx label = gen_label_rtx ();
3465 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3466 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3467 NULL_RTX, 0, OPTAB_WIDEN);
3468 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3469 expand_dec (quotient, const1_rtx);
3470 expand_inc (remainder, op1);
3471 emit_label (label);
3472 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3475 /* No luck with division elimination or divmod. Have to do it
3476 by conditionally adjusting op0 *and* the result. */
3478 rtx label1, label2, label3, label4, label5;
3479 rtx adjusted_op0;
3480 rtx tem;
3482 quotient = gen_reg_rtx (compute_mode);
3483 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3484 label1 = gen_label_rtx ();
3485 label2 = gen_label_rtx ();
3486 label3 = gen_label_rtx ();
3487 label4 = gen_label_rtx ();
3488 label5 = gen_label_rtx ();
3489 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3490 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3491 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3492 quotient, 0, OPTAB_LIB_WIDEN);
3493 if (tem != quotient)
3494 emit_move_insn (quotient, tem);
3495 emit_jump_insn (gen_jump (label5));
3496 emit_barrier ();
3497 emit_label (label1);
3498 expand_inc (adjusted_op0, const1_rtx);
3499 emit_jump_insn (gen_jump (label4));
3500 emit_barrier ();
3501 emit_label (label2);
3502 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3503 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3504 quotient, 0, OPTAB_LIB_WIDEN);
3505 if (tem != quotient)
3506 emit_move_insn (quotient, tem);
3507 emit_jump_insn (gen_jump (label5));
3508 emit_barrier ();
3509 emit_label (label3);
3510 expand_dec (adjusted_op0, const1_rtx);
3511 emit_label (label4);
3512 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3513 quotient, 0, OPTAB_LIB_WIDEN);
3514 if (tem != quotient)
3515 emit_move_insn (quotient, tem);
3516 expand_dec (quotient, const1_rtx);
3517 emit_label (label5);
3519 break;
3521 case CEIL_DIV_EXPR:
3522 case CEIL_MOD_EXPR:
3523 if (unsignedp)
3525 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3527 rtx t1, t2, t3;
3528 unsigned HOST_WIDE_INT d = INTVAL (op1);
3529 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3530 build_int_2 (floor_log2 (d), 0),
3531 tquotient, 1);
3532 t2 = expand_binop (compute_mode, and_optab, op0,
3533 GEN_INT (d - 1),
3534 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3535 t3 = gen_reg_rtx (compute_mode);
3536 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3537 compute_mode, 1, 1);
3538 if (t3 == 0)
3540 rtx lab;
3541 lab = gen_label_rtx ();
3542 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3543 expand_inc (t1, const1_rtx);
3544 emit_label (lab);
3545 quotient = t1;
3547 else
3548 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3549 t1, t3),
3550 tquotient);
3551 break;
3554 /* Try using an instruction that produces both the quotient and
3555 remainder, using truncation. We can easily compensate the
3556 quotient or remainder to get ceiling rounding, once we have the
3557 remainder. Notice that we compute also the final remainder
3558 value here, and return the result right away. */
3559 if (target == 0 || GET_MODE (target) != compute_mode)
3560 target = gen_reg_rtx (compute_mode);
3562 if (rem_flag)
3564 remainder = (GET_CODE (target) == REG
3565 ? target : gen_reg_rtx (compute_mode));
3566 quotient = gen_reg_rtx (compute_mode);
3568 else
3570 quotient = (GET_CODE (target) == REG
3571 ? target : gen_reg_rtx (compute_mode));
3572 remainder = gen_reg_rtx (compute_mode);
3575 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3576 remainder, 1))
3578 /* This could be computed with a branch-less sequence.
3579 Save that for later. */
3580 rtx label = gen_label_rtx ();
3581 do_cmp_and_jump (remainder, const0_rtx, EQ,
3582 compute_mode, label);
3583 expand_inc (quotient, const1_rtx);
3584 expand_dec (remainder, op1);
3585 emit_label (label);
3586 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3589 /* No luck with division elimination or divmod. Have to do it
3590 by conditionally adjusting op0 *and* the result. */
3592 rtx label1, label2;
3593 rtx adjusted_op0, tem;
3595 quotient = gen_reg_rtx (compute_mode);
3596 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3597 label1 = gen_label_rtx ();
3598 label2 = gen_label_rtx ();
3599 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3600 compute_mode, label1);
3601 emit_move_insn (quotient, const0_rtx);
3602 emit_jump_insn (gen_jump (label2));
3603 emit_barrier ();
3604 emit_label (label1);
3605 expand_dec (adjusted_op0, const1_rtx);
3606 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3607 quotient, 1, OPTAB_LIB_WIDEN);
3608 if (tem != quotient)
3609 emit_move_insn (quotient, tem);
3610 expand_inc (quotient, const1_rtx);
3611 emit_label (label2);
3614 else /* signed */
3616 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3617 && INTVAL (op1) >= 0)
3619 /* This is extremely similar to the code for the unsigned case
3620 above. For 2.7 we should merge these variants, but for
3621 2.6.1 I don't want to touch the code for unsigned since that
3622 gets used in C. The signed case will only be used by other
3623 languages (Ada). */
3625 rtx t1, t2, t3;
3626 unsigned HOST_WIDE_INT d = INTVAL (op1);
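/* The arithmetic shift floors the quotient even for negative op0,
   and op0 & (d - 1) is non-zero exactly when op0 is not a multiple
   of d = 2**k, so t1 + (t2 != 0) is the ceiling for either sign:
   e.g. op0 = -13, d = 4 gives t1 = -4, t2 = 3, and the quotient
   -4 + 1 = -3 = ceil (-3.25).  */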
3627 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3628 build_int_2 (floor_log2 (d), 0),
3629 tquotient, 0);
3630 t2 = expand_binop (compute_mode, and_optab, op0,
3631 GEN_INT (d - 1),
3632 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3633 t3 = gen_reg_rtx (compute_mode);
3634 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3635 compute_mode, 1, 1);
3636 if (t3 == 0)
3638 rtx lab;
3639 lab = gen_label_rtx ();
3640 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3641 expand_inc (t1, const1_rtx);
3642 emit_label (lab);
3643 quotient = t1;
3645 else
3646 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3647 t1, t3),
3648 tquotient);
3649 break;
3652 /* Try using an instruction that produces both the quotient and
3653 remainder, using truncation. We can easily compensate the
3654 quotient or remainder to get ceiling rounding, once we have the
3655 remainder. Notice that we also compute the final remainder
3656 value here, and return the result right away. */
3657 if (target == 0 || GET_MODE (target) != compute_mode)
3658 target = gen_reg_rtx (compute_mode);
3659 if (rem_flag)
3661 remainder = (GET_CODE (target) == REG
3662 ? target : gen_reg_rtx (compute_mode));
3663 quotient = gen_reg_rtx (compute_mode);
3665 else
3667 quotient = (GET_CODE (target) == REG
3668 ? target : gen_reg_rtx (compute_mode));
3669 remainder = gen_reg_rtx (compute_mode);
3672 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3673 remainder, 0))
3675 /* This could be computed with a branch-less sequence.
3676 Save that for later. */
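/* Truncation rounds toward zero, so it already matches the ceiling
   whenever the exact quotient is negative or exact.  The adjustment
   is needed only when the remainder is non-zero and op0 and op1
   have the same sign, i.e. when op0 ^ op1 is non-negative: e.g.
   -13 / -4 truncates to 3 rem -1 and becomes 4 rem 3, with
   -13 == 4 * -4 + 3.  */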
3677 rtx tem;
3678 rtx label = gen_label_rtx ();
3679 do_cmp_and_jump (remainder, const0_rtx, EQ,
3680 compute_mode, label);
3681 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3682 NULL_RTX, 0, OPTAB_WIDEN);
3683 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3684 expand_inc (quotient, const1_rtx);
3685 expand_dec (remainder, op1);
3686 emit_label (label);
3687 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3690 /* No luck with division elimination or divmod. Have to do it
3691 by conditionally adjusting op0 *and* the result. */
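/* When op0 and op1 have opposite signs the exact quotient is
   negative, so truncation toward zero already rounds up and a
   plain sdiv suffices.  When the signs agree, step op0 one unit
   toward zero, divide, and add 1: e.g. -13 / -4 becomes
   -12 / -4 + 1 == 4 == ceil (3.25).  */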
3693 rtx label1, label2, label3, label4, label5;
3694 rtx adjusted_op0;
3695 rtx tem;
3697 quotient = gen_reg_rtx (compute_mode);
3698 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3699 label1 = gen_label_rtx ();
3700 label2 = gen_label_rtx ();
3701 label3 = gen_label_rtx ();
3702 label4 = gen_label_rtx ();
3703 label5 = gen_label_rtx ();
3704 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3705 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3706 compute_mode, label1);
3707 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3708 quotient, 0, OPTAB_LIB_WIDEN);
3709 if (tem != quotient)
3710 emit_move_insn (quotient, tem);
3711 emit_jump_insn (gen_jump (label5));
3712 emit_barrier ();
3713 emit_label (label1);
3714 expand_dec (adjusted_op0, const1_rtx);
3715 emit_jump_insn (gen_jump (label4));
3716 emit_barrier ();
3717 emit_label (label2);
3718 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3719 compute_mode, label3);
3720 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3721 quotient, 0, OPTAB_LIB_WIDEN);
3722 if (tem != quotient)
3723 emit_move_insn (quotient, tem);
3724 emit_jump_insn (gen_jump (label5));
3725 emit_barrier ();
3726 emit_label (label3);
3727 expand_inc (adjusted_op0, const1_rtx);
3728 emit_label (label4);
3729 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3730 quotient, 0, OPTAB_LIB_WIDEN);
3731 if (tem != quotient)
3732 emit_move_insn (quotient, tem);
3733 expand_inc (quotient, const1_rtx);
3734 emit_label (label5);
3737 break;
3739 case EXACT_DIV_EXPR:
3740 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3742 HOST_WIDE_INT d = INTVAL (op1);
3743 unsigned HOST_WIDE_INT ml;
3744 int post_shift;
3745 rtx t1;
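/* An exact division can be done as a multiply by the inverse of
   the odd part of d modulo 2**size, followed by a shift by the
   number of trailing zero bits of d.  E.g. with size == 8 and
   d == 12 == 3 * 2**2: the inverse of 3 mod 256 is 171 (since
   3 * 171 == 513 == 2 * 256 + 1), and for op0 == 36 we get
   (36 * 171) mod 256 == 12, and 12 >> 2 == 3 == 36 / 12.  */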
3747 post_shift = floor_log2 (d & -d);
3748 ml = invert_mod2n (d >> post_shift, size);
3749 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3750 unsignedp);
3751 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3752 build_int_2 (post_shift, 0),
3753 NULL_RTX, unsignedp);
3755 insn = get_last_insn ();
3756 set_unique_reg_note (insn,
3757 REG_EQUAL,
3758 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3759 compute_mode,
3760 op0, op1));
3762 break;
3764 case ROUND_DIV_EXPR:
3765 case ROUND_MOD_EXPR:
3766 if (unsignedp)
3768 rtx tem;
3769 rtx label;
3770 label = gen_label_rtx ();
3771 quotient = gen_reg_rtx (compute_mode);
3772 remainder = gen_reg_rtx (compute_mode);
3773 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3775 rtx tem;
3776 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3777 quotient, 1, OPTAB_LIB_WIDEN);
3778 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3779 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3780 remainder, 1, OPTAB_LIB_WIDEN);
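/* Round to nearest: bump the quotient when the remainder exceeds
   (op1 - 1) / 2, so that halves round up.  E.g. 14 / 4 truncates
   to 3 rem 2, and 2 > (4 - 1) >> 1 == 1 makes the result 4 (3.5
   rounded up), while 13 / 4 == 3 rem 1 stays 3.  */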
3782 tem = plus_constant (op1, -1);
3783 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3784 build_int_2 (1, 0), NULL_RTX, 1);
3785 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3786 expand_inc (quotient, const1_rtx);
3787 expand_dec (remainder, op1);
3788 emit_label (label);
3790 else
3792 rtx abs_rem, abs_op1, tem, mask;
3793 rtx label;
3794 label = gen_label_rtx ();
3795 quotient = gen_reg_rtx (compute_mode);
3796 remainder = gen_reg_rtx (compute_mode);
3797 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3799 rtx tem;
3800 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3801 quotient, 0, OPTAB_LIB_WIDEN);
3802 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3803 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3804 remainder, 0, OPTAB_LIB_WIDEN);
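/* Round to nearest, halves away from zero: adjust when
   2 * abs (remainder) >= abs (op1).  MASK below is the sign of
   op0 ^ op1 (0 or -1), so (mask ^ 1) - mask is +1 or -1 and
   (mask ^ op1) - mask is op1 or -op1; the quotient moves one step
   away from zero and the remainder is corrected to match.  E.g.
   -14 / 4 truncates to -3 rem -2; since 2 * 2 >= 4 it becomes
   -4 rem 2, and -14 == -4 * 4 + 2.  */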
3806 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
3807 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
3808 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3809 build_int_2 (1, 0), NULL_RTX, 1);
3810 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3811 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3812 NULL_RTX, 0, OPTAB_WIDEN);
3813 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3814 build_int_2 (size - 1, 0), NULL_RTX, 0);
3815 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3816 NULL_RTX, 0, OPTAB_WIDEN);
3817 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3818 NULL_RTX, 0, OPTAB_WIDEN);
3819 expand_inc (quotient, tem);
3820 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3821 NULL_RTX, 0, OPTAB_WIDEN);
3822 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3823 NULL_RTX, 0, OPTAB_WIDEN);
3824 expand_dec (remainder, tem);
3825 emit_label (label);
3827 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3829 default:
3830 abort ();
3833 if (quotient == 0)
3835 if (target && GET_MODE (target) != compute_mode)
3836 target = 0;
3838 if (rem_flag)
3840 /* Try to produce the remainder without producing the quotient.
3841 If we seem to have a divmod pattern that does not require widening,
3842 don't try widening here. We should really have a WIDEN argument
3843 to expand_twoval_binop, since what we'd really like to do here is
3844 1) try a mod insn in compute_mode
3845 2) try a divmod insn in compute_mode
3846 3) try a div insn in compute_mode and multiply-subtract to get
3847 remainder
3848 4) try the same things with widening allowed. */
3849 remainder
3850 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3851 op0, op1, target,
3852 unsignedp,
3853 ((optab2->handlers[(int) compute_mode].insn_code
3854 != CODE_FOR_nothing)
3855 ? OPTAB_DIRECT : OPTAB_WIDEN));
3856 if (remainder == 0)
3858 /* No luck there. Can we do remainder and divide at once
3859 without a library call? */
3860 remainder = gen_reg_rtx (compute_mode);
3861 if (! expand_twoval_binop ((unsignedp
3862 ? udivmod_optab
3863 : sdivmod_optab),
3864 op0, op1,
3865 NULL_RTX, remainder, unsignedp))
3866 remainder = 0;
3869 if (remainder)
3870 return gen_lowpart (mode, remainder);
3873 /* Produce the quotient. Try a quotient insn, but not a library call.
3874 If we have a divmod in this mode, use it in preference to widening
3875 the div (for this test we assume it will not fail). Note that optab2
3876 is set to whichever of the two optabs the call below will use. */
3877 quotient
3878 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3879 op0, op1, rem_flag ? NULL_RTX : target,
3880 unsignedp,
3881 ((optab2->handlers[(int) compute_mode].insn_code
3882 != CODE_FOR_nothing)
3883 ? OPTAB_DIRECT : OPTAB_WIDEN));
3885 if (quotient == 0)
3887 /* No luck there. Try a quotient-and-remainder insn,
3888 keeping the quotient alone. */
3889 quotient = gen_reg_rtx (compute_mode);
3890 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3891 op0, op1,
3892 quotient, NULL_RTX, unsignedp))
3894 quotient = 0;
3895 if (! rem_flag)
3896 /* Still no luck. If we are not computing the remainder,
3897 use a library call for the quotient. */
3898 quotient = sign_expand_binop (compute_mode,
3899 udiv_optab, sdiv_optab,
3900 op0, op1, target,
3901 unsignedp, OPTAB_LIB_WIDEN);
3906 if (rem_flag)
3908 if (target && GET_MODE (target) != compute_mode)
3909 target = 0;
3911 if (quotient == 0)
3912 /* No divide instruction either. Use library for remainder. */
3913 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3914 op0, op1, target,
3915 unsignedp, OPTAB_LIB_WIDEN);
3916 else
3918 /* We divided. Now finish doing X - Y * (X / Y). */
3919 remainder = expand_mult (compute_mode, quotient, op1,
3920 NULL_RTX, unsignedp);
3921 remainder = expand_binop (compute_mode, sub_optab, op0,
3922 remainder, target, unsignedp,
3923 OPTAB_LIB_WIDEN);
3927 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3930 /* Return a tree node with data type TYPE, describing the value of X.
3931 Usually this is an RTL_EXPR, if there is no obvious better choice.
3932 X may be an expression; however, we only support those expressions
3933 generated by loop.c. */
3935 tree
3936 make_tree (type, x)
3937 tree type;
3938 rtx x;
3940 tree t;
3942 switch (GET_CODE (x))
3944 case CONST_INT:
3945 t = build_int_2 (INTVAL (x),
3946 (TREE_UNSIGNED (type)
3947 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3948 || INTVAL (x) >= 0 ? 0 : -1);
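/* The second argument to build_int_2 above is the high half of the
   constant: 0 when the value cannot be negative (a non-negative
   INTVAL, or an unsigned type narrower than HOST_WIDE_INT), and
   -1 to sign-extend a negative INTVAL.  */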
3949 TREE_TYPE (t) = type;
3950 return t;
3952 case CONST_DOUBLE:
3953 if (GET_MODE (x) == VOIDmode)
3955 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3956 TREE_TYPE (t) = type;
3958 else
3960 REAL_VALUE_TYPE d;
3962 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3963 t = build_real (type, d);
3966 return t;
3968 case PLUS:
3969 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3970 make_tree (type, XEXP (x, 1))));
3972 case MINUS:
3973 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3974 make_tree (type, XEXP (x, 1))));
3976 case NEG:
3977 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3979 case MULT:
3980 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3981 make_tree (type, XEXP (x, 1))));
3983 case ASHIFT:
3984 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3985 make_tree (type, XEXP (x, 1))));
3987 case LSHIFTRT:
3988 return fold (convert (type,
3989 build (RSHIFT_EXPR, unsigned_type (type),
3990 make_tree (unsigned_type (type),
3991 XEXP (x, 0)),
3992 make_tree (type, XEXP (x, 1)))));
3994 case ASHIFTRT:
3995 return fold (convert (type,
3996 build (RSHIFT_EXPR, signed_type (type),
3997 make_tree (signed_type (type), XEXP (x, 0)),
3998 make_tree (type, XEXP (x, 1)))));
4000 case DIV:
4001 if (TREE_CODE (type) != REAL_TYPE)
4002 t = signed_type (type);
4003 else
4004 t = type;
4006 return fold (convert (type,
4007 build (TRUNC_DIV_EXPR, t,
4008 make_tree (t, XEXP (x, 0)),
4009 make_tree (t, XEXP (x, 1)))));
4010 case UDIV:
4011 t = unsigned_type (type);
4012 return fold (convert (type,
4013 build (TRUNC_DIV_EXPR, t,
4014 make_tree (t, XEXP (x, 0)),
4015 make_tree (t, XEXP (x, 1)))));
4016 default:
4017 t = make_node (RTL_EXPR);
4018 TREE_TYPE (t) = type;
4019 RTL_EXPR_RTL (t) = x;
4020 /* There are no insns to be output
4021 when this rtl_expr is used. */
4022 RTL_EXPR_SEQUENCE (t) = 0;
4023 return t;
4027 /* Return an rtx representing the value of X * MULT + ADD.
4028 TARGET is a suggestion for where to store the result (an rtx).
4029 MODE is the machine mode for the computation.
4030 X and MULT must have mode MODE. ADD may have a different mode.
4031 So may X (it defaults to MODE).
4032 UNSIGNEDP is non-zero to do unsigned multiplication.
4033 This may emit insns. */
4035 rtx
4036 expand_mult_add (x, target, mult, add, mode, unsignedp)
4037 rtx x, target, mult, add;
4038 enum machine_mode mode;
4039 int unsignedp;
4041 tree type = type_for_mode (mode, unsignedp);
4042 tree add_type = (GET_MODE (add) == VOIDmode
4043 ? type : type_for_mode (GET_MODE (add), unsignedp));
4044 tree result = fold (build (PLUS_EXPR, type,
4045 fold (build (MULT_EXPR, type,
4046 make_tree (type, x),
4047 make_tree (type, mult))),
4048 make_tree (add_type, add)));
4050 return expand_expr (result, target, VOIDmode, 0);
4053 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4054 and returning TARGET.
4056 If TARGET is 0, a pseudo-register or constant is returned. */
4058 rtx
4059 expand_and (op0, op1, target)
4060 rtx op0, op1, target;
4062 enum machine_mode mode = VOIDmode;
4063 rtx tem;
4065 if (GET_MODE (op0) != VOIDmode)
4066 mode = GET_MODE (op0);
4067 else if (GET_MODE (op1) != VOIDmode)
4068 mode = GET_MODE (op1);
4070 if (mode != VOIDmode)
4071 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4072 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4073 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4074 else
4075 abort ();
4077 if (target == 0)
4078 target = tem;
4079 else if (tem != target)
4080 emit_move_insn (target, tem);
4081 return target;
4084 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4085 and storing in TARGET. Normally return TARGET.
4086 Return 0 if that cannot be done.
4088 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4089 it is VOIDmode, they cannot both be CONST_INT.
4091 UNSIGNEDP is for the case where we have to widen the operands
4092 to perform the operation. It says to use zero-extension.
4094 NORMALIZEP is 1 if we should convert the result to be either zero
4095 or one. NORMALIZEP is -1 if we should convert the result to be
4096 either zero or -1. If NORMALIZEP is zero, the result will be left
4097 "raw" out of the scc insn. */
4099 rtx
4100 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4101 rtx target;
4102 enum rtx_code code;
4103 rtx op0, op1;
4104 enum machine_mode mode;
4105 int unsignedp;
4106 int normalizep;
4108 rtx subtarget;
4109 enum insn_code icode;
4110 enum machine_mode compare_mode;
4111 enum machine_mode target_mode = GET_MODE (target);
4112 rtx tem;
4113 rtx last = get_last_insn ();
4114 rtx pattern, comparison;
4116 if (unsignedp)
4117 code = unsigned_condition (code);
4119 /* If one operand is constant, make it the second one. Only do this
4120 if the other operand is not constant as well. */
4122 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4123 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4125 tem = op0;
4126 op0 = op1;
4127 op1 = tem;
4128 code = swap_condition (code);
4131 if (mode == VOIDmode)
4132 mode = GET_MODE (op0);
4134 /* For some comparisons with 1 and -1, we can convert this to
4135 comparisons with zero. This will often produce more opportunities for
4136 store-flag insns. */
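/* Each rewrite below preserves the predicate, e.g. for signed
   operands A < 1 is A <= 0 and A > -1 is A >= 0, while for
   unsigned operands A >= 1 is A != 0 and A < 1 is A == 0.  */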
4138 switch (code)
4140 case LT:
4141 if (op1 == const1_rtx)
4142 op1 = const0_rtx, code = LE;
4143 break;
4144 case LE:
4145 if (op1 == constm1_rtx)
4146 op1 = const0_rtx, code = LT;
4147 break;
4148 case GE:
4149 if (op1 == const1_rtx)
4150 op1 = const0_rtx, code = GT;
4151 break;
4152 case GT:
4153 if (op1 == constm1_rtx)
4154 op1 = const0_rtx, code = GE;
4155 break;
4156 case GEU:
4157 if (op1 == const1_rtx)
4158 op1 = const0_rtx, code = NE;
4159 break;
4160 case LTU:
4161 if (op1 == const1_rtx)
4162 op1 = const0_rtx, code = EQ;
4163 break;
4164 default:
4165 break;
4168 /* From now on, we won't change CODE, so set ICODE now. */
4169 icode = setcc_gen_code[(int) code];
4171 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4172 complement of A (for GE) and shifting the sign bit to the low bit. */
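/* E.g. in a 32-bit mode, A < 0 is the logical shift
   (unsigned) A >> 31, and A >= 0 is (unsigned) ~A >> 31; an
   arithmetic shift instead yields the 0 / -1 form used when
   NORMALIZEP is -1.  */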
4173 if (op1 == const0_rtx && (code == LT || code == GE)
4174 && GET_MODE_CLASS (mode) == MODE_INT
4175 && (normalizep || STORE_FLAG_VALUE == 1
4176 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4177 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4178 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4180 subtarget = target;
4182 /* If the result is to be wider than OP0, it is best to convert it
4183 first. If it is to be narrower, it is *incorrect* to convert it
4184 first. */
4185 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4187 op0 = protect_from_queue (op0, 0);
4188 op0 = convert_modes (target_mode, mode, op0, 0);
4189 mode = target_mode;
4192 if (target_mode != mode)
4193 subtarget = 0;
4195 if (code == GE)
4196 op0 = expand_unop (mode, one_cmpl_optab, op0,
4197 ((STORE_FLAG_VALUE == 1 || normalizep)
4198 ? 0 : subtarget), 0);
4200 if (STORE_FLAG_VALUE == 1 || normalizep)
4201 /* If we are supposed to produce a 0/1 value, we want to do
4202 a logical shift from the sign bit to the low-order bit; for
4203 a -1/0 value, we do an arithmetic shift. */
4204 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4205 size_int (GET_MODE_BITSIZE (mode) - 1),
4206 subtarget, normalizep != -1);
4208 if (mode != target_mode)
4209 op0 = convert_modes (target_mode, mode, op0, 0);
4211 return op0;
4214 if (icode != CODE_FOR_nothing)
4216 insn_operand_predicate_fn pred;
4218 /* We think we may be able to do this with a scc insn. Emit the
4219 comparison and then the scc insn.
4221 compare_from_rtx may call emit_queue; the insns it emits would then be
4222 deleted below if the scc insn fails. So call it ourselves before setting LAST. */
4224 emit_queue ();
4225 last = get_last_insn ();
4227 comparison
4228 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4229 if (GET_CODE (comparison) == CONST_INT)
4230 return (comparison == const0_rtx ? const0_rtx
4231 : normalizep == 1 ? const1_rtx
4232 : normalizep == -1 ? constm1_rtx
4233 : const_true_rtx);
4235 /* If the code of COMPARISON doesn't match CODE, something is
4236 wrong; we can no longer be sure that we have the operation.
4237 We could handle this case, but it should not happen. */
4239 if (GET_CODE (comparison) != code)
4240 abort ();
4242 /* Get a reference to the target in the proper mode for this insn. */
4243 compare_mode = insn_data[(int) icode].operand[0].mode;
4244 subtarget = target;
4245 pred = insn_data[(int) icode].operand[0].predicate;
4246 if (preserve_subexpressions_p ()
4247 || ! (*pred) (subtarget, compare_mode))
4248 subtarget = gen_reg_rtx (compare_mode);
4250 pattern = GEN_FCN (icode) (subtarget);
4251 if (pattern)
4253 emit_insn (pattern);
4255 /* If we are converting to a wider mode, first convert to
4256 TARGET_MODE, then normalize. This produces better combining
4257 opportunities on machines that have a SIGN_EXTRACT when we are
4258 testing a single bit. This mostly benefits the 68k.
4260 If STORE_FLAG_VALUE does not have the sign bit set when
4261 interpreted in COMPARE_MODE, we can do this conversion as
4262 unsigned, which is usually more efficient. */
4263 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4265 convert_move (target, subtarget,
4266 (GET_MODE_BITSIZE (compare_mode)
4267 <= HOST_BITS_PER_WIDE_INT)
4268 && 0 == (STORE_FLAG_VALUE
4269 & ((HOST_WIDE_INT) 1
4270 << (GET_MODE_BITSIZE (compare_mode) - 1))));
4271 op0 = target;
4272 compare_mode = target_mode;
4274 else
4275 op0 = subtarget;
4277 /* If we want to keep subexpressions around, don't reuse our
4278 last target. */
4280 if (preserve_subexpressions_p ())
4281 subtarget = 0;
4283 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4284 we don't have to do anything. */
4285 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4287 /* STORE_FLAG_VALUE might be the most negative number, so write
4288 the comparison this way to avoid a compile-time warning. */
4289 else if (- normalizep == STORE_FLAG_VALUE)
4290 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4292 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4293 makes it hard to use a value of just the sign bit due to
4294 ANSI integer constant typing rules. */
4295 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4296 && (STORE_FLAG_VALUE
4297 & ((HOST_WIDE_INT) 1
4298 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4299 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4300 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4301 subtarget, normalizep == 1);
4302 else if (STORE_FLAG_VALUE & 1)
4304 op0 = expand_and (op0, const1_rtx, subtarget);
4305 if (normalizep == -1)
4306 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4308 else
4309 abort ();
4311 /* If we were converting to a smaller mode, do the
4312 conversion now. */
4313 if (target_mode != compare_mode)
4315 convert_move (target, op0, 0);
4316 return target;
4318 else
4319 return op0;
4323 delete_insns_since (last);
4325 /* If expensive optimizations are enabled, use different pseudo registers
4326 for each insn, instead of reusing the same pseudo. This leads to better
4327 CSE, but slows down the compiler, since there are more pseudos. */
4328 subtarget = (!flag_expensive_optimizations
4329 && (target_mode == mode)) ? target : NULL_RTX;
4331 /* If we reached here, we can't do this with a scc insn. However, there
4332 are some comparisons that can be done directly. For example, if
4333 this is an equality comparison of integers, we can try to exclusive-or
4334 (or subtract) the two operands and use a recursive call to try the
4335 comparison with zero. Don't do any of these cases if branches are
4336 very cheap. */
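/* A == B exactly when A ^ B == 0 (or A - B == 0), so the
   comparison can be retried as a store-flag against zero.  */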
4338 if (BRANCH_COST > 0
4339 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4340 && op1 != const0_rtx)
4342 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4343 OPTAB_WIDEN);
4345 if (tem == 0)
4346 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4347 OPTAB_WIDEN);
4348 if (tem != 0)
4349 tem = emit_store_flag (target, code, tem, const0_rtx,
4350 mode, unsignedp, normalizep);
4351 if (tem == 0)
4352 delete_insns_since (last);
4353 return tem;
4356 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4357 the constant zero. Reject all other comparisons at this point. Only
4358 do LE and GT if branches are expensive since they are expensive on
4359 2-operand machines. */
4361 if (BRANCH_COST == 0
4362 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4363 || (code != EQ && code != NE
4364 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4365 return 0;
4367 /* See what we need to return. We can only return a 1, -1, or the
4368 sign bit. */
4370 if (normalizep == 0)
4372 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4373 normalizep = STORE_FLAG_VALUE;
4375 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4376 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4377 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4379 else
4380 return 0;
4383 /* Try to put the result of the comparison in the sign bit. Assume we can't
4384 do the necessary operation below. */
4386 tem = 0;
4388 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4389 the sign bit set. */
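/* E.g. A == 0 gives 0 | -1 == -1, with the sign bit set; any
   A < 0 has the sign bit set already; and for A > 0 neither A nor
   A - 1 is negative, so the sign bit stays clear.  */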
4391 if (code == LE)
4393 /* This is destructive, so SUBTARGET can't be OP0. */
4394 if (rtx_equal_p (subtarget, op0))
4395 subtarget = 0;
4397 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4398 OPTAB_WIDEN);
4399 if (tem)
4400 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4401 OPTAB_WIDEN);
4404 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4405 number of bits in the mode of OP0, minus one. */
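/* The arithmetic shift yields 0 for A >= 0 and -1 for A < 0, so
   the difference is -A (negative) when A > 0, 0 when A == 0, and
   -1 - A >= 0 when A < 0: the sign bit of the result is set
   exactly when A > 0.  */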
4407 if (code == GT)
4409 if (rtx_equal_p (subtarget, op0))
4410 subtarget = 0;
4412 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4413 size_int (GET_MODE_BITSIZE (mode) - 1),
4414 subtarget, 0);
4415 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4416 OPTAB_WIDEN);
4419 if (code == EQ || code == NE)
4421 /* For EQ or NE, one way to do the comparison is to apply an operation
4422 that converts the operand into a positive number if it is non-zero
4423 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4424 for NE we negate. This puts the result in the sign bit. Then we
4425 normalize with a shift, if needed.
4427 Two operations that can do the above actions are ABS and FFS, so try
4428 them. If that doesn't work, and MODE is smaller than a full word,
4429 we can use zero-extension to the wider mode (an unsigned conversion)
4430 as the operation. */
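/* E.g. with ABS: for EQ, abs (A) - 1 has the sign bit set only
   when A == 0, and for NE, -abs (A) has it set only when A != 0;
   both hold even for the most negative A, thanks to wraparound.  */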
4432 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4433 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4434 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4435 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4436 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4438 op0 = protect_from_queue (op0, 0);
4439 tem = convert_modes (word_mode, mode, op0, 1);
4440 mode = word_mode;
4443 if (tem != 0)
4445 if (code == EQ)
4446 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4447 0, OPTAB_WIDEN);
4448 else
4449 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4452 /* If we couldn't do it that way, for NE we can "or" the two's complement
4453 of the value with itself. For EQ, we take the one's complement of
4454 that "or", which is an extra insn, so we only handle EQ if branches
4455 are expensive. */
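/* (-A) | A has the sign bit set exactly when A is non-zero: for
   A == 0 the result is 0, and otherwise at least one of A and -A
   is negative (for the most negative A, both are).  */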
4457 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4459 if (rtx_equal_p (subtarget, op0))
4460 subtarget = 0;
4462 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4463 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4464 OPTAB_WIDEN);
4466 if (tem && code == EQ)
4467 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4471 if (tem && normalizep)
4472 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4473 size_int (GET_MODE_BITSIZE (mode) - 1),
4474 subtarget, normalizep == 1);
4476 if (tem)
4478 if (GET_MODE (tem) != target_mode)
4480 convert_move (target, tem, 0);
4481 tem = target;
4483 else if (!subtarget)
4485 emit_move_insn (target, tem);
4486 tem = target;
4489 else
4490 delete_insns_since (last);
4492 return tem;
4495 /* Like emit_store_flag, but always succeeds. */
4497 rtx
4498 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4499 rtx target;
4500 enum rtx_code code;
4501 rtx op0, op1;
4502 enum machine_mode mode;
4503 int unsignedp;
4504 int normalizep;
4506 rtx tem, label;
4508 /* First see if emit_store_flag can do the job. */
4509 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4510 if (tem != 0)
4511 return tem;
4513 if (normalizep == 0)
4514 normalizep = 1;
4516 /* If this failed, we have to do this with set/compare/jump/set code. */
4518 if (GET_CODE (target) != REG
4519 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4520 target = gen_reg_rtx (GET_MODE (target));
4522 emit_move_insn (target, const1_rtx);
4523 label = gen_label_rtx ();
4524 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4525 NULL_RTX, label);
4527 emit_move_insn (target, const0_rtx);
4528 emit_label (label);
4530 return target;
4533 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4534 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4536 The algorithm is based on the code in expr.c:do_jump.
4538 Note that this does not perform a general comparison. Only variants
4539 generated within expmed.c are correctly handled; others abort (but could
4540 be handled if needed). */
4542 static void
4543 do_cmp_and_jump (arg1, arg2, op, mode, label)
4544 rtx arg1, arg2, label;
4545 enum rtx_code op;
4546 enum machine_mode mode;
4548 /* If this mode is an integer too wide to compare properly,
4549 compare word by word. Rely on cse to optimize constant cases. */
4551 if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (mode, ccp_jump))
4553 rtx label2 = gen_label_rtx ();
4555 switch (op)
4557 case LTU:
4558 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4559 break;
4561 case LEU:
4562 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4563 break;
4565 case LT:
4566 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4567 break;
4569 case GT:
4570 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4571 break;
4573 case GE:
4574 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4575 break;
4577 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4578 that's the only equality operation we do. */
4579 case EQ:
4580 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4581 abort ();
4582 do_jump_by_parts_equality_rtx (arg1, label2, label);
4583 break;
4585 case NE:
4586 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4587 abort ();
4588 do_jump_by_parts_equality_rtx (arg1, label, label2);
4589 break;
4591 default:
4592 abort ();
4595 emit_label (label2);
4597 else
4599 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);