/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
24 #include "config.h"
25 #include "system.h"
26 #include "toplev.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "flags.h"
31 #include "insn-flags.h"
32 #include "insn-codes.h"
33 #include "insn-config.h"
34 #include "expr.h"
35 #include "real.h"
36 #include "recog.h"
static void store_fixed_bit_field	PARAMS ((rtx, int, int, int, rtx,
						 unsigned int));
static void store_split_bit_field	PARAMS ((rtx, int, int, rtx,
						 unsigned int));
static rtx extract_fixed_bit_field	PARAMS ((enum machine_mode, rtx, int,
						 int, int, rtx, int,
						 unsigned int));
static rtx mask_rtx			PARAMS ((enum machine_mode, int,
						 int, int));
static rtx lshift_value			PARAMS ((enum machine_mode, rtx,
						 int, int));
static rtx extract_split_bit_field	PARAMS ((rtx, int, int, int,
						 unsigned int));
static void do_cmp_and_jump		PARAMS ((rtx, rtx, enum rtx_code,
						 enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
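/* For example (explanatory note, not from the original sources):
   shift_cost[3] holds the measured cost of a word_mode shift by 3, and
   mul_cost[(int) SImode] the cost of an SImode multiply.  The values are
   filled in by init_expmed below, which synthesizes sample insns and asks
   recog and rtx_cost about them; that code is authoritative.  */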
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */
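/* Minimal usage sketch (illustrative, not from the original sources):
   negate_rtx (SImode, GEN_INT (5)) folds directly to (const_int -5)
   via the simplifier; for a non-constant operand, expand_unop emits a
   negation sequence instead.  */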
rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */
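/* A minimal call sketch (illustrative, not taken from this file): to
   store the constant 3 into an 8-bit field starting at bit 16 of a
   word-aligned structure in memory, a caller might write

     store_bit_field (mem, 8, 16, VOIDmode, GEN_INT (3),
		      UNITS_PER_WORD, -1);

   where MEM is a MEM rtx with MEM_IN_STRUCT_P set.  The argument values
   here are hypothetical and assume the conventions documented above.  */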
/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  int insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  if ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
       || (GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)
	   && GET_MODE_SIZE (fieldmode) != 0))
      && (GET_CODE (op0) != MEM
	  || ! SLOW_UNALIGNED_ACCESS (fieldmode, align)
	  || (offset * BITS_PER_UNIT % bitsize == 0
	      && align % GET_MODE_SIZE (fieldmode) == 0))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  Also, storing in the entire object
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0, offset), value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  int wordnum = (backwards ? nwords - i - 1 : i);
	  int bit_offset = (backwards
			    ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
			    : i * BITS_PER_WORD);
	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	  offset = 0;
	}
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
	maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
		  && GET_MODE_SIZE (bestmode) > (int) align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);

  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
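/* In outline (explanatory note, not part of the original comment), the
   read-modify-write performed below is

     op0 = (op0 & ~MASK) | ((value << bitpos) & MASK)

   where MASK has BITSIZE one-bits starting at BITPOS (see mask_rtx);
   the AND is skipped when VALUE is all ones and the IOR when it is zero.  */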
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
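/* Illustrative example (not from the original sources): with 32-bit
   words, storing a 10-bit field that starts at bit 28 of an aligned MEM
   is split into a 4-bit piece in the first word and a 6-bit piece in the
   next, each piece handled by store_fixed_bit_field.  */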
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
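/* A minimal call sketch (illustrative, not taken from this file): to read
   an 8-bit unsigned field at bit 16 of a word-aligned structure in memory,
   zero-extended to SImode, a caller might write

     rtx v = extract_bit_field (mem, 8, 16, 1, NULL_RTX,
				SImode, SImode, UNITS_PER_WORD, -1);

   The argument values are hypothetical and assume the conventions
   documented above.  */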
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  int extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  int extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif
  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, align)
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  int wordnum = (WORDS_BIG_ENDIAN
			 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
			 : i);
	  /* Offset from start of field in OP0.  */
	  int bit_offset = (WORDS_BIG_ENDIAN
			    ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
			    : i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset,
				 1, target_part, mode, word_mode,
				 align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */
  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (extzv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extzv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_SIZE (bestmode) > (int) align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (extv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_SIZE (bestmode) > (int) align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */
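/* Explanatory note (not part of the original comment): for the unsigned
   case the code below computes essentially

     (op0 >> bitpos) & ((1 << bitsize) - 1)

   while the signed case shifts the field's msb up to the word's msb and
   then arithmetic-shifts right to sign-extend.  */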
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
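/* Worked example (explanatory note, assuming a 32-bit HOST_WIDE_INT):
   mask_rtx (SImode, 4, 8, 0) yields the constant 0x00000ff0 -- eight
   ones starting at bit 4 -- and mask_rtx (SImode, 4, 8, 1) yields its
   complement 0xfffff00f.  */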
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
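/* Worked example (explanatory note, with hypothetical operands):
   lshift_value (SImode, GEN_INT (0x1ff), 4, 8) truncates the value to
   8 bits (0xff) and shifts it left 4, producing (const_int 0xff0).  */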
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */
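/* Explanatory note: this is the mirror image of store_split_bit_field.
   Each piece is fetched with extract_fixed_bit_field as an unsigned
   field, shifted into position, and IORed into the result; a signed
   field is then sign-extended with a final pair of shifts.  */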
1753 static rtx
1754 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1755 rtx op0;
1756 int bitsize, bitpos, unsignedp;
1757 unsigned int align;
1759 int unit;
1760 int bitsdone = 0;
1761 rtx result = NULL_RTX;
1762 int first = 1;
1764 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1765 much at a time. */
1766 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1767 unit = BITS_PER_WORD;
1768 else
1769 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1771 while (bitsdone < bitsize)
1773 int thissize;
1774 rtx part, word;
1775 int thispos;
1776 int offset;
1778 offset = (bitpos + bitsdone) / unit;
1779 thispos = (bitpos + bitsdone) % unit;
1781 /* THISSIZE must not overrun a word boundary. Otherwise,
1782 extract_fixed_bit_field will call us again, and we will mutually
1783 recurse forever. */
1784 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1785 thissize = MIN (thissize, unit - thispos);
1787 /* If OP0 is a register, then handle OFFSET here.
1789 When handling multiword bitfields, extract_bit_field may pass
1790 down a word_mode SUBREG of a larger REG for a bitfield that actually
1791 crosses a word boundary. Thus, for a SUBREG, we must find
1792 the current word starting from the base register. */
1793 if (GET_CODE (op0) == SUBREG)
1795 word = operand_subword_force (SUBREG_REG (op0),
1796 SUBREG_WORD (op0) + offset,
1797 GET_MODE (SUBREG_REG (op0)));
1798 offset = 0;
1800 else if (GET_CODE (op0) == REG)
1802 word = operand_subword_force (op0, offset, GET_MODE (op0));
1803 offset = 0;
1805 else
1806 word = op0;
1808 /* Extract the parts in bit-counting order,
1809 whose meaning is determined by BITS_PER_UNIT.
1810 OFFSET is in UNITs, and UNIT is in bits.
1811 extract_fixed_bit_field wants offset in bytes. */
1812 part = extract_fixed_bit_field (word_mode, word,
1813 offset * unit / BITS_PER_UNIT,
1814 thissize, thispos, 0, 1, align);
1815 bitsdone += thissize;
1817 /* Shift this part into place for the result. */
1818 if (BYTES_BIG_ENDIAN)
1820 if (bitsize != bitsdone)
1821 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1822 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1824 else
1826 if (bitsdone != thissize)
1827 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1828 build_int_2 (bitsdone - thissize, 0), 0, 1);
1831 if (first)
1832 result = part;
1833 else
1834 /* Combine the parts with bitwise or. This works
1835 because we extracted each part as an unsigned bit field. */
1836 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1837 OPTAB_LIB_WIDEN);
1839 first = 0;
1842 /* Unsigned bit field: we are done. */
1843 if (unsignedp)
1844 return result;
1845 /* Signed bit field: sign-extend with two arithmetic shifts. */
1846 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1847 build_int_2 (BITS_PER_WORD - bitsize, 0),
1848 NULL_RTX, 0);
1849 return expand_shift (RSHIFT_EXPR, word_mode, result,
1850 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
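/* E.g. for a 5-bit signed field with BITS_PER_WORD == 32, the two shifts
   above compute (result << 27) >> 27, the second shift arithmetic, which
   replicates bit 4 of the field (its sign bit) into bits 5..31. */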
1853 /* Add INC into TARGET. */
1855 void
1856 expand_inc (target, inc)
1857 rtx target, inc;
1859 rtx value = expand_binop (GET_MODE (target), add_optab,
1860 target, inc,
1861 target, 0, OPTAB_LIB_WIDEN);
1862 if (value != target)
1863 emit_move_insn (target, value);
1866 /* Subtract DEC from TARGET. */
1868 void
1869 expand_dec (target, dec)
1870 rtx target, dec;
1872 rtx value = expand_binop (GET_MODE (target), sub_optab,
1873 target, dec,
1874 target, 0, OPTAB_LIB_WIDEN);
1875 if (value != target)
1876 emit_move_insn (target, value);
1879 /* Output a shift instruction for expression code CODE,
1880 with SHIFTED being the rtx for the value to shift,
1881 and AMOUNT the tree for the amount to shift by.
1882 Store the result in the rtx TARGET, if that is convenient.
1883 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1884 Return the rtx for where the value is. */
rtx
1887 expand_shift (code, mode, shifted, amount, target, unsignedp)
1888 enum tree_code code;
1889 register enum machine_mode mode;
1890 rtx shifted;
1891 tree amount;
1892 register rtx target;
1893 int unsignedp;
1895 register rtx op1, temp = 0;
1896 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1897 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1898 int try;
1900 /* We used to detect shift counts computed by NEGATE_EXPR and shift
1901 in the other direction, but that does not work on all machines. */
1904 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1906 #ifdef SHIFT_COUNT_TRUNCATED
1907 if (SHIFT_COUNT_TRUNCATED)
1909 if (GET_CODE (op1) == CONST_INT
1910 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1911 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1912 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1913 % GET_MODE_BITSIZE (mode));
1914 else if (GET_CODE (op1) == SUBREG
1915 && SUBREG_WORD (op1) == 0)
1916 op1 = SUBREG_REG (op1);
1918 #endif
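/* E.g. when SHIFT_COUNT_TRUNCATED is defined, a constant shift of an
   SImode value by 37 is reduced here to a shift by 37 % 32 == 5, and a
   low-word SUBREG shift count is replaced by the whole register, since
   the hardware ignores the excess high bits anyway. */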
1920 if (op1 == const0_rtx)
1921 return shifted;
1923 for (try = 0; temp == 0 && try < 3; try++)
1925 enum optab_methods methods;
1927 if (try == 0)
1928 methods = OPTAB_DIRECT;
1929 else if (try == 1)
1930 methods = OPTAB_WIDEN;
1931 else
1932 methods = OPTAB_LIB_WIDEN;
1934 if (rotate)
1936 /* Widening does not work for rotation. */
1937 if (methods == OPTAB_WIDEN)
1938 continue;
1939 else if (methods == OPTAB_LIB_WIDEN)
1941 /* If we have been unable to open-code this by a rotation,
1942 do it as the IOR of two shifts. I.e., to rotate A
1943 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1944 where C is the bitsize of A.
1946 It is theoretically possible that the target machine might
1947 not be able to perform either shift and hence we would
1948 be making two libcalls rather than just the one for the
1949 shift (similarly if IOR could not be done). We will allow
1950 this extremely unlikely lossage to avoid complicating the
1951 code below. */
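/* E.g. a left rotate of a 32-bit A by N becomes
   (A << N) | ((unsigned) A >> (32 - N)); for a right rotate the two
   shift directions are simply exchanged. */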
1953 rtx subtarget = target == shifted ? 0 : target;
1954 rtx temp1;
1955 tree type = TREE_TYPE (amount);
1956 tree new_amount = make_tree (type, op1);
1957 tree other_amount
1958 = fold (build (MINUS_EXPR, type,
1959 convert (type,
1960 build_int_2 (GET_MODE_BITSIZE (mode),
1961 0)),
1962 amount));
1964 shifted = force_reg (mode, shifted);
1966 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1967 mode, shifted, new_amount, subtarget, 1);
1968 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1969 mode, shifted, other_amount, 0, 1);
1970 return expand_binop (mode, ior_optab, temp, temp1, target,
1971 unsignedp, methods);
1974 temp = expand_binop (mode,
1975 left ? rotl_optab : rotr_optab,
1976 shifted, op1, target, unsignedp, methods);
1978 /* If we don't have the rotate, but we are rotating by a constant
1979 that is in range, try a rotate in the opposite direction. */
1981 if (temp == 0 && GET_CODE (op1) == CONST_INT
1982 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1983 temp = expand_binop (mode,
1984 left ? rotr_optab : rotl_optab,
1985 shifted,
1986 GEN_INT (GET_MODE_BITSIZE (mode)
1987 - INTVAL (op1)),
1988 target, unsignedp, methods);
1990 else if (unsignedp)
1991 temp = expand_binop (mode,
1992 left ? ashl_optab : lshr_optab,
1993 shifted, op1, target, unsignedp, methods);
1995 /* Do arithmetic shifts.
1996 Also, if we are going to widen the operand, we can just as well
1997 use an arithmetic right-shift instead of a logical one. */
1998 if (temp == 0 && ! rotate
1999 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2001 enum optab_methods methods1 = methods;
2003 /* If trying to widen a log shift to an arithmetic shift,
2004 don't accept an arithmetic shift of the same size. */
2005 if (unsignedp)
2006 methods1 = OPTAB_MUST_WIDEN;
2008 /* Arithmetic shift */
2010 temp = expand_binop (mode,
2011 left ? ashl_optab : ashr_optab,
2012 shifted, op1, target, unsignedp, methods1);
2015 /* We used to try extzv here for logical right shifts, but that was
2016 only useful for one machine, the VAX, and caused poor code
2017 generation there for lshrdi3, so the code was deleted and a
2018 define_expand for lshrsi3 was added to vax.md. */
2021 if (temp == 0)
2022 abort ();
2023 return temp;
2026 enum alg_code { alg_zero, alg_m, alg_shift,
2027 alg_add_t_m2, alg_sub_t_m2,
2028 alg_add_factor, alg_sub_factor,
2029 alg_add_t2_m, alg_sub_t2_m,
2030 alg_add, alg_subtract, alg_factor, alg_shiftop };
2032 /* This structure records a sequence of operations.
2033 `ops' is the number of operations recorded.
2034 `cost' is their total cost.
2035 The operations are stored in `op' and the corresponding
2036 logarithms of the integer coefficients in `log'.
2038 These are the operations:
2039 alg_zero total := 0;
2040 alg_m total := multiplicand;
2041 alg_shift total := total * coeff;
2042 alg_add_t_m2 total := total + multiplicand * coeff;
2043 alg_sub_t_m2 total := total - multiplicand * coeff;
2044 alg_add_factor total := total * coeff + total;
2045 alg_sub_factor total := total * coeff - total;
2046 alg_add_t2_m total := total * coeff + multiplicand;
2047 alg_sub_t2_m total := total * coeff - multiplicand;
2049 The first operand must be either alg_zero or alg_m. */
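/* One possible recipe for multiplying by 45 (binary 101101), say:
   alg_m (total := x), then alg_add_factor with log 2
   (total := total*4 + total == 5*x), then alg_add_factor with log 3
   (total := total*8 + total == 45*x): two shift-add pairs in place of
   a multiply insn. */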
2051 struct algorithm
2053 short cost;
2054 short ops;
2055 /* The sizes of the OP and LOG fields are not directly related to the
2056 word size, but the worst-case algorithm arises when we have few
2057 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2058 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
2059 in total wordsize operations. */
2060 enum alg_code op[MAX_BITS_PER_WORD];
2061 char log[MAX_BITS_PER_WORD];
2064 static void synth_mult PARAMS ((struct algorithm *,
2065 unsigned HOST_WIDE_INT,
2066 int));
2067 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2068 int, int,
2069 unsigned HOST_WIDE_INT *,
2070 int *, int *));
2071 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2072 int));
2073 /* Compute and return the best algorithm for multiplying by T.
2074 The algorithm must cost less than COST_LIMIT.
2075 If retval.cost >= COST_LIMIT, no algorithm was found and all
2076 other fields of the returned struct are undefined. */
2078 static void
2079 synth_mult (alg_out, t, cost_limit)
2080 struct algorithm *alg_out;
2081 unsigned HOST_WIDE_INT t;
2082 int cost_limit;
2084 int m;
2085 struct algorithm *alg_in, *best_alg;
2086 int cost;
2087 unsigned HOST_WIDE_INT q;
2089 /* Indicate that no algorithm is yet found. If no algorithm
2090 is found, this value will be returned and indicate failure. */
2091 alg_out->cost = cost_limit;
2093 if (cost_limit <= 0)
2094 return;
2096 /* t == 1 can be done in zero cost. */
2097 if (t == 1)
2099 alg_out->ops = 1;
2100 alg_out->cost = 0;
2101 alg_out->op[0] = alg_m;
2102 return;
2105 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2106 fail now. */
2107 if (t == 0)
2109 if (zero_cost >= cost_limit)
2110 return;
2111 else
2113 alg_out->ops = 1;
2114 alg_out->cost = zero_cost;
2115 alg_out->op[0] = alg_zero;
2116 return;
2120 /* We'll be needing a couple extra algorithm structures now. */
2122 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2123 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2125 /* If we have a group of zero bits at the low-order part of T, try
2126 multiplying by the remaining bits and then doing a shift. */
2128 if ((t & 1) == 0)
2130 m = floor_log2 (t & -t); /* m = number of low zero bits */
2131 q = t >> m;
2132 cost = shift_cost[m];
2133 synth_mult (alg_in, q, cost_limit - cost);
2135 cost += alg_in->cost;
2136 if (cost < cost_limit)
2138 struct algorithm *x;
2139 x = alg_in, alg_in = best_alg, best_alg = x;
2140 best_alg->log[best_alg->ops] = m;
2141 best_alg->op[best_alg->ops] = alg_shift;
2142 cost_limit = cost;
2146 /* If we have an odd number, add or subtract one. */
2147 if ((t & 1) != 0)
2149 unsigned HOST_WIDE_INT w;
2151 for (w = 1; (w & t) != 0; w <<= 1)
2153 /* If T was -1, then W will be zero after the loop. This is another
2154 case where T ends with ...111. Handling this by multiplying by
2155 (T + 1) and subtracting 1 produces slightly better code and makes
2156 algorithm selection much faster than treating it like the ...0111 case
2157 below. */
2158 if (w == 0
2159 || (w > 2
2160 /* Reject the case where t is 3.
2161 Thus we prefer addition in that case. */
2162 && t != 3))
2164 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2166 cost = add_cost;
2167 synth_mult (alg_in, t + 1, cost_limit - cost);
2169 cost += alg_in->cost;
2170 if (cost < cost_limit)
2172 struct algorithm *x;
2173 x = alg_in, alg_in = best_alg, best_alg = x;
2174 best_alg->log[best_alg->ops] = 0;
2175 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2176 cost_limit = cost;
2179 else
2181 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2183 cost = add_cost;
2184 synth_mult (alg_in, t - 1, cost_limit - cost);
2186 cost += alg_in->cost;
2187 if (cost < cost_limit)
2189 struct algorithm *x;
2190 x = alg_in, alg_in = best_alg, best_alg = x;
2191 best_alg->log[best_alg->ops] = 0;
2192 best_alg->op[best_alg->ops] = alg_add_t_m2;
2193 cost_limit = cost;
2198 /* Look for factors of t of the form
2199 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2200 If we find such a factor, we can multiply by t using an algorithm that
2201 multiplies by q, shift the result by m and add/subtract it to itself.
2203 We search for large factors first and loop down, even if large factors
2204 are less probable than small; if we find a large factor we will find a
2205 good sequence quickly, and therefore be able to prune (by decreasing
2206 COST_LIMIT) the search. */
2208 for (m = floor_log2 (t - 1); m >= 2; m--)
2210 unsigned HOST_WIDE_INT d;
2212 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2213 if (t % d == 0 && t > d)
2215 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2216 synth_mult (alg_in, t / d, cost_limit - cost);
2218 cost += alg_in->cost;
2219 if (cost < cost_limit)
2221 struct algorithm *x;
2222 x = alg_in, alg_in = best_alg, best_alg = x;
2223 best_alg->log[best_alg->ops] = m;
2224 best_alg->op[best_alg->ops] = alg_add_factor;
2225 cost_limit = cost;
2227 /* Other factors will have been taken care of in the recursion. */
2228 break;
2231 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2232 if (t % d == 0 && t > d)
2234 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2235 synth_mult (alg_in, t / d, cost_limit - cost);
2237 cost += alg_in->cost;
2238 if (cost < cost_limit)
2240 struct algorithm *x;
2241 x = alg_in, alg_in = best_alg, best_alg = x;
2242 best_alg->log[best_alg->ops] = m;
2243 best_alg->op[best_alg->ops] = alg_sub_factor;
2244 cost_limit = cost;
2246 break;
2250 /* Try shift-and-add (load effective address) instructions,
2251 e.g. do a*3, a*5, a*9. */
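/* E.g. for t == 9, q == t - 1 == 8 gives m == 3; the recursion then
   multiplies by 1 (alg_m), and one alg_add_t2_m step computes
   total := total*8 + multiplicand, a single shift-add. */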
2252 if ((t & 1) != 0)
2254 q = t - 1;
2255 q = q & -q;
2256 m = exact_log2 (q);
2257 if (m >= 0)
2259 cost = shiftadd_cost[m];
2260 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2262 cost += alg_in->cost;
2263 if (cost < cost_limit)
2265 struct algorithm *x;
2266 x = alg_in, alg_in = best_alg, best_alg = x;
2267 best_alg->log[best_alg->ops] = m;
2268 best_alg->op[best_alg->ops] = alg_add_t2_m;
2269 cost_limit = cost;
2273 q = t + 1;
2274 q = q & -q;
2275 m = exact_log2 (q);
2276 if (m >= 0)
2278 cost = shiftsub_cost[m];
2279 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2281 cost += alg_in->cost;
2282 if (cost < cost_limit)
2284 struct algorithm *x;
2285 x = alg_in, alg_in = best_alg, best_alg = x;
2286 best_alg->log[best_alg->ops] = m;
2287 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2288 cost_limit = cost;
2293 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2294 we have not found any algorithm. */
2295 if (cost_limit == alg_out->cost)
2296 return;
2298 /* If we are getting a too long sequence for `struct algorithm'
2299 to record, make this search fail. */
2300 if (best_alg->ops == MAX_BITS_PER_WORD)
2301 return;
2303 /* Copy the algorithm from temporary space to the space at alg_out.
2304 We avoid using structure assignment because the majority of
2305 best_alg is normally undefined, and this is a critical function. */
2306 alg_out->ops = best_alg->ops + 1;
2307 alg_out->cost = cost_limit;
2308 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2309 alg_out->ops * sizeof *alg_out->op);
2310 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2311 alg_out->ops * sizeof *alg_out->log);
2314 /* Perform a multiplication and return an rtx for the result.
2315 MODE is the mode of the value; OP0 and OP1 are what to multiply (rtx's);
2316 TARGET is a suggestion for where to store the result (an rtx).
2318 We check specially for a constant integer as OP1.
2319 If you want this check for OP0 as well, then before calling
2320 you should swap the two operands if OP0 would be constant. */
rtx
2323 expand_mult (mode, op0, op1, target, unsignedp)
2324 enum machine_mode mode;
2325 register rtx op0, op1, target;
2326 int unsignedp;
2328 rtx const_op1 = op1;
2330 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2331 less than or equal in size to `unsigned int' this doesn't matter.
2332 If the mode is larger than `unsigned int', then synth_mult works only
2333 if the constant value exactly fits in an `unsigned int' without any
2334 truncation. This means that multiplying by negative values does
2335 not work; results are off by 2^32 on a 32-bit machine. */
2337 /* If we are multiplying in DImode, it may still be a win
2338 to try to work with shifts and adds. */
2339 if (GET_CODE (op1) == CONST_DOUBLE
2340 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2341 && HOST_BITS_PER_INT >= BITS_PER_WORD
2342 && CONST_DOUBLE_HIGH (op1) == 0)
2343 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2344 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2345 && GET_CODE (op1) == CONST_INT
2346 && INTVAL (op1) < 0)
2347 const_op1 = 0;
2349 /* We used to test optimize here, on the grounds that it's better to
2350 produce a smaller program when -O is not used.
2351 But this causes such a terrible slowdown sometimes
2352 that it seems better to use synth_mult always. */
2354 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2356 struct algorithm alg;
2357 struct algorithm alg2;
2358 HOST_WIDE_INT val = INTVAL (op1);
2359 HOST_WIDE_INT val_so_far;
2360 rtx insn;
2361 int mult_cost;
2362 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2364 /* Try to do the computation three ways: multiply by the negative of OP1
2365 and then negate, do the multiplication directly, or do multiplication
2366 by OP1 - 1. */
2368 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2369 mult_cost = MIN (12 * add_cost, mult_cost);
2371 synth_mult (&alg, val, mult_cost);
2373 /* This works only if the inverted value actually fits in an
2374 `unsigned int' */
2375 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2377 synth_mult (&alg2, - val,
2378 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2379 if (alg2.cost + negate_cost < alg.cost)
2380 alg = alg2, variant = negate_variant;
2383 /* This proves very useful for division-by-constant. */
2384 synth_mult (&alg2, val - 1,
2385 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2386 if (alg2.cost + add_cost < alg.cost)
2387 alg = alg2, variant = add_variant;
2389 if (alg.cost < mult_cost)
2391 /* We found something cheaper than a multiply insn. */
2392 int opno;
2393 rtx accum, tem;
2395 op0 = protect_from_queue (op0, 0);
2397 /* Avoid referencing memory over and over.
2398 For speed, but also for correctness when mem is volatile. */
2399 if (GET_CODE (op0) == MEM)
2400 op0 = force_reg (mode, op0);
2402 /* ACCUM starts out either as OP0 or as a zero, depending on
2403 the first operation. */
2405 if (alg.op[0] == alg_zero)
2407 accum = copy_to_mode_reg (mode, const0_rtx);
2408 val_so_far = 0;
2410 else if (alg.op[0] == alg_m)
2412 accum = copy_to_mode_reg (mode, op0);
2413 val_so_far = 1;
2415 else
2416 abort ();
2418 for (opno = 1; opno < alg.ops; opno++)
2420 int log = alg.log[opno];
2421 int preserve = preserve_subexpressions_p ();
2422 rtx shift_subtarget = preserve ? 0 : accum;
2423 rtx add_target
2424 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2425 && ! preserve)
2426 ? target : 0;
2427 rtx accum_target = preserve ? 0 : accum;
2429 switch (alg.op[opno])
2431 case alg_shift:
2432 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2433 build_int_2 (log, 0), NULL_RTX, 0);
2434 val_so_far <<= log;
2435 break;
2437 case alg_add_t_m2:
2438 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2439 build_int_2 (log, 0), NULL_RTX, 0);
2440 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2441 add_target
2442 ? add_target : accum_target);
2443 val_so_far += (HOST_WIDE_INT) 1 << log;
2444 break;
2446 case alg_sub_t_m2:
2447 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2448 build_int_2 (log, 0), NULL_RTX, 0);
2449 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2450 add_target
2451 ? add_target : accum_target);
2452 val_so_far -= (HOST_WIDE_INT) 1 << log;
2453 break;
2455 case alg_add_t2_m:
2456 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2457 build_int_2 (log, 0), shift_subtarget,
2459 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2460 add_target
2461 ? add_target : accum_target);
2462 val_so_far = (val_so_far << log) + 1;
2463 break;
2465 case alg_sub_t2_m:
2466 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2467 build_int_2 (log, 0), shift_subtarget,
2469 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2470 add_target
2471 ? add_target : accum_target);
2472 val_so_far = (val_so_far << log) - 1;
2473 break;
2475 case alg_add_factor:
2476 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2477 build_int_2 (log, 0), NULL_RTX, 0);
2478 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2479 add_target
2480 ? add_target : accum_target);
2481 val_so_far += val_so_far << log;
2482 break;
2484 case alg_sub_factor:
2485 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2486 build_int_2 (log, 0), NULL_RTX, 0);
2487 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2488 (add_target ? add_target
2489 : preserve ? 0 : tem));
2490 val_so_far = (val_so_far << log) - val_so_far;
2491 break;
2493 default:
2494 abort ();
2497 /* Write a REG_EQUAL note on the last insn so that we can cse
2498 multiplication sequences. */
2500 insn = get_last_insn ();
2501 set_unique_reg_note (insn,
2502 REG_EQUAL,
2503 gen_rtx_MULT (mode, op0,
2504 GEN_INT (val_so_far)));
2507 if (variant == negate_variant)
2509 val_so_far = - val_so_far;
2510 accum = expand_unop (mode, neg_optab, accum, target, 0);
2512 else if (variant == add_variant)
2514 val_so_far = val_so_far + 1;
2515 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2518 if (val != val_so_far)
2519 abort ();
2521 return accum;
2525 /* This used to use umul_optab if unsigned, but for non-widening multiply
2526 there is no difference between signed and unsigned. */
2527 op0 = expand_binop (mode, smul_optab,
2528 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2529 if (op0 == 0)
2530 abort ();
2531 return op0;
2534 /* Return the smallest n such that 2**n >= X. */
int
2537 ceil_log2 (x)
2538 unsigned HOST_WIDE_INT x;
2540 return floor_log2 (x - 1) + 1;
2543 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2544 replace division by D, and put the least significant N bits of the result
2545 in *MULTIPLIER_PTR and return the most significant bit.
2547 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT); the
2548 needed precision is in PRECISION (should be <= N).
2550 PRECISION should be as small as possible so this function can choose the
2551 multiplier more freely.
2553 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2554 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2556 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2557 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2559 static
2560 unsigned HOST_WIDE_INT
2561 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2562 unsigned HOST_WIDE_INT d;
2563 int n;
2564 int precision;
2565 unsigned HOST_WIDE_INT *multiplier_ptr;
2566 int *post_shift_ptr;
2567 int *lgup_ptr;
2569 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2570 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2571 int lgup, post_shift;
2572 int pow, pow2;
2573 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2575 /* lgup = ceil(log2(divisor)); */
2576 lgup = ceil_log2 (d);
2578 if (lgup > n)
2579 abort ();
2581 pow = n + lgup;
2582 pow2 = n + lgup - precision;
2584 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2586 /* We could handle this with some effort, but this case is much better
2587 handled directly with a scc insn, so rely on caller using that. */
2588 abort ();
2591 /* mlow = 2^(N + lgup)/d */
2592 if (pow >= HOST_BITS_PER_WIDE_INT)
2594 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2595 nl = 0;
2597 else
2599 nh = 0;
2600 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2602 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2603 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2605 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2606 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2607 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2608 else
2609 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2610 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2611 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2613 if (mhigh_hi && nh - d >= d)
2614 abort ();
2615 if (mhigh_hi > 1 || mlow_hi > 1)
2616 abort ();
2617 /* Assert that mlow < mhigh. */
2618 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2619 abort ();
2621 /* If precision == N, then mlow, mhigh exceed 2^N
2622 (but they do not exceed 2^(N+1)). */
2624 /* Reduce to lowest terms */
2625 for (post_shift = lgup; post_shift > 0; post_shift--)
2627 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2628 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2629 if (ml_lo >= mh_lo)
2630 break;
2632 mlow_hi = 0;
2633 mlow_lo = ml_lo;
2634 mhigh_hi = 0;
2635 mhigh_lo = mh_lo;
2638 *post_shift_ptr = post_shift;
2639 *lgup_ptr = lgup;
2640 if (n < HOST_BITS_PER_WIDE_INT)
2642 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2643 *multiplier_ptr = mhigh_lo & mask;
2644 return mhigh_lo >= mask;
2646 else
2648 *multiplier_ptr = mhigh_lo;
2649 return mhigh_hi;
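/* For instance, for D == 7 with N == PRECISION == 32: lgup == 3, and
   mhigh == (2^35 + 2^3)/7 == 0x124924925, a 33-bit value. We store its
   low 32 bits, 0x24924925, in *MULTIPLIER_PTR, set *POST_SHIFT_PTR to 3,
   and return 1 to report the 33rd bit. */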
2653 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2654 congruent to 1 (mod 2**N). */
2656 static unsigned HOST_WIDE_INT
2657 invert_mod2n (x, n)
2658 unsigned HOST_WIDE_INT x;
2659 int n;
2661 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2663 /* The algorithm notes that the choice y = x satisfies
2664 x*y == 1 mod 2^3, since x is assumed odd.
2665 Each iteration doubles the number of bits of significance in y. */
2667 unsigned HOST_WIDE_INT mask;
2668 unsigned HOST_WIDE_INT y = x;
2669 int nbit = 3;
2671 mask = (n == HOST_BITS_PER_WIDE_INT
2672 ? ~(unsigned HOST_WIDE_INT) 0
2673 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2675 while (nbit < n)
2677 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2678 nbit *= 2;
2680 return y;
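/* For instance, invert_mod2n (7, 8): y starts at 7, which satisfies
   7*7 == 49 == 1 (mod 2^3). One step gives y == 7*(2 - 49) & 0xff == 183,
   already correct modulo 2^8 (7*183 == 1281 == 5*256 + 1); the second
   step leaves it unchanged. */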
2683 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2684 that was done with the wrong signedness. ADJ_OPERAND is already the
2685 high half of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the
2686 signed product to become unsigned; if UNSIGNEDP is zero, adjust the
2687 unsigned product to become signed.
2689 The result is put in TARGET if that is convenient.
2691 MODE is the mode of operation. */
rtx
2694 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2695 enum machine_mode mode;
2696 register rtx adj_operand, op0, op1, target;
2697 int unsignedp;
2699 rtx tem;
2700 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2702 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2703 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2704 NULL_RTX, 0);
2705 tem = expand_and (tem, op1, NULL_RTX);
2706 adj_operand
2707 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2708 adj_operand);
2710 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2711 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2712 NULL_RTX, 0);
2713 tem = expand_and (tem, op0, NULL_RTX);
2714 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2715 target);
2717 return target;
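/* This implements the identity
   uhigh (op0, op1) == shigh (op0, op1) + (op0 < 0 ? op1 : 0)
   + (op1 < 0 ? op0 : 0)
   and its inverse: each arithmetic shift above turns a sign bit into
   -1 or 0, the AND turns that into the other operand or 0, and
   ADJ_CODE applies the correction in the proper direction. */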
2720 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2721 in TARGET if that is convenient, and return where the result is. If the
2722 operation cannot be performed, 0 is returned.
2724 MODE is the mode of operation and result.
2726 UNSIGNEDP nonzero means unsigned multiply.
2728 MAX_COST is the total allowed cost for the expanded RTL. */
rtx
2731 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2732 enum machine_mode mode;
2733 register rtx op0, target;
2734 unsigned HOST_WIDE_INT cnst1;
2735 int unsignedp;
2736 int max_cost;
2738 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2739 optab mul_highpart_optab;
2740 optab moptab;
2741 rtx tem;
2742 int size = GET_MODE_BITSIZE (mode);
2743 rtx op1, wide_op1;
2745 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2746 if (size > HOST_BITS_PER_WIDE_INT)
2747 abort ();
2749 op1 = GEN_INT (cnst1);
2751 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2752 wide_op1 = op1;
2753 else
2754 wide_op1
2755 = immed_double_const (cnst1,
2756 (unsignedp
2757 ? (HOST_WIDE_INT) 0
2758 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2759 wider_mode);
2761 /* expand_mult handles constant multiplication of word_mode
2762 or narrower. It does a poor job for large modes. */
2763 if (size < BITS_PER_WORD
2764 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2766 /* We have to do this, since expand_binop doesn't do conversion for
2767 multiply. Maybe change expand_binop to handle widening multiply? */
2768 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2770 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2771 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2772 build_int_2 (size, 0), NULL_RTX, 1);
2773 return convert_modes (mode, wider_mode, tem, unsignedp);
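/* E.g. for SImode on a machine with 64-bit words: extend OP0 to DImode,
   multiply by the DImode image of CNST1, shift the product right by 32
   and truncate back to SImode. The cost test above keeps this path
   within MAX_COST. */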
2776 if (target == 0)
2777 target = gen_reg_rtx (mode);
2779 /* Firstly, try using a multiplication insn that only generates the needed
2780 high part of the product, and in the sign flavor of unsignedp. */
2781 if (mul_highpart_cost[(int) mode] < max_cost)
2783 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2784 target = expand_binop (mode, mul_highpart_optab,
2785 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2786 if (target)
2787 return target;
2790 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2791 Need to adjust the result after the multiplication. */
2792 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2794 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2795 target = expand_binop (mode, mul_highpart_optab,
2796 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2797 if (target)
2798 /* We used the wrong signedness. Adjust the result. */
2799 return expand_mult_highpart_adjust (mode, target, op0,
2800 op1, target, unsignedp);
2803 /* Try widening multiplication. */
2804 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2805 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2806 && mul_widen_cost[(int) wider_mode] < max_cost)
2808 op1 = force_reg (mode, op1);
2809 goto try;
2812 /* Try widening the mode and perform a non-widening multiplication. */
2813 moptab = smul_optab;
2814 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2815 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2817 op1 = wide_op1;
2818 goto try;
2821 /* Try widening multiplication of opposite signedness, and adjust. */
2822 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2823 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2824 && (mul_widen_cost[(int) wider_mode]
2825 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2827 rtx regop1 = force_reg (mode, op1);
2828 tem = expand_binop (wider_mode, moptab, op0, regop1,
2829 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2830 if (tem != 0)
2832 /* Extract the high half of the just generated product. */
2833 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2834 build_int_2 (size, 0), NULL_RTX, 1);
2835 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2836 /* We used the wrong signedness. Adjust the result. */
2837 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2838 target, unsignedp);
2842 return 0;
2844 try:
2845 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2846 tem = expand_binop (wider_mode, moptab, op0, op1,
2847 NULL_RTX, unsignedp, OPTAB_WIDEN);
2848 if (tem == 0)
2849 return 0;
2851 /* Extract the high half of the just generated product. */
2852 if (mode == word_mode)
2854 return gen_highpart (mode, tem);
2856 else
2858 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2859 build_int_2 (size, 0), NULL_RTX, 1);
2860 return convert_modes (mode, wider_mode, tem, unsignedp);
2864 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2865 if that is convenient, and returning where the result is.
2866 You may request either the quotient or the remainder as the result;
2867 specify REM_FLAG nonzero to get the remainder.
2869 CODE is the expression code for which kind of division this is;
2870 it controls how rounding is done. MODE is the machine mode to use.
2871 UNSIGNEDP nonzero means do unsigned division. */
2873 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2874 and then correct it by or'ing in missing high bits
2875 if result of ANDI is nonzero.
2876 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2877 This could optimize to a bfexts instruction.
2878 But C doesn't use these operations, so their optimizations are
2879 left for later. */
2880 /* ??? For modulo, we don't actually need the highpart of the first product,
2881 the low part will do nicely. And for small divisors, the second multiply
2882 can also be a low-part only multiply or even be completely left out.
2883 E.g. to calculate the remainder of a division by 3 with a 32 bit
2884 multiply, multiply with 0x55555556 and extract the upper two bits;
2885 the result is exact for inputs up to 0x1fffffff.
2886 The input range can be reduced by using cross-sum rules.
2887 For odd divisors >= 3, the following table gives right shift counts
2888 so that if a number is shifted by an integer multiple of the given
2889 amount, the remainder stays the same:
2890 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2891 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2892 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2893 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2894 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2896 Cross-sum rules for even numbers can be derived by leaving as many bits
2897 to the right alone as the divisor has zeros to the right.
2898 E.g. if x is an unsigned 32 bit number:
2899 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28 */
2902 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
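/* x & (x - 1) clears the lowest set bit of x, so the result is zero
   exactly for 0 and the powers of 2: e.g. 8 & 7 == 0, but 12 & 11 == 8. */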
rtx
2905 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2906 int rem_flag;
2907 enum tree_code code;
2908 enum machine_mode mode;
2909 register rtx op0, op1, target;
2910 int unsignedp;
2912 enum machine_mode compute_mode;
2913 register rtx tquotient;
2914 rtx quotient = 0, remainder = 0;
2915 rtx last;
2916 int size;
2917 rtx insn, set;
2918 optab optab1, optab2;
2919 int op1_is_constant, op1_is_pow2;
2920 int max_cost, extra_cost;
2921 static HOST_WIDE_INT last_div_const = 0;
2923 op1_is_constant = GET_CODE (op1) == CONST_INT;
2924 op1_is_pow2 = (op1_is_constant
2925 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2926 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2929 /* This is the structure of expand_divmod:
2931 First comes code to fix up the operands so we can perform the operations
2932 correctly and efficiently.
2934 Second comes a switch statement with code specific for each rounding mode.
2935 For some special operands this code emits all RTL for the desired
2936 operation, for other cases, it generates only a quotient and stores it in
2937 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2938 to indicate that it has not done anything.
2940 Last comes code that finishes the operation. If QUOTIENT is set and
2941 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2942 QUOTIENT is not set, it is computed using trunc rounding.
2944 We try to generate special code for division and remainder when OP1 is a
2945 constant. If |OP1| = 2**n we can use shifts and some other fast
2946 operations. For other values of OP1, we compute a carefully selected
2947 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2948 by m.
2950 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2951 half of the product. Different strategies for generating the product are
2952 implemented in expand_mult_highpart.
2954 If what we actually want is the remainder, we generate that by another
2955 by-constant multiplication and a subtraction. */
2957 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2958 code below will malfunction if we are, so check here and handle
2959 the special case if so. */
2960 if (op1 == const1_rtx)
2961 return rem_flag ? const0_rtx : op0;
2963 if (target
2964 /* Don't use the function value register as a target
2965 since we have to read it as well as write it,
2966 and function-inlining gets confused by this. */
2967 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2968 /* Don't clobber an operand while doing a multi-step calculation. */
2969 || ((rem_flag || op1_is_constant)
2970 && (reg_mentioned_p (target, op0)
2971 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2972 || reg_mentioned_p (target, op1)
2973 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2974 target = 0;
2976 /* Get the mode in which to perform this computation. Normally it will
2977 be MODE, but sometimes we can't do the desired operation in MODE.
2978 If so, pick a wider mode in which we can do the operation. Convert
2979 to that mode at the start to avoid repeated conversions.
2981 First see what operations we need. These depend on the expression
2982 we are evaluating. (We assume that divxx3 insns exist under the
2983 same conditions as modxx3 insns, and that these insns don't normally
2984 fail. If these assumptions are not correct, we may generate less
2985 efficient code in some cases.)
2987 Then see if we find a mode in which we can open-code that operation
2988 (either a division, modulus, or shift). Finally, check for the smallest
2989 mode for which we can do the operation with a library call. */
2991 /* We might want to refine this now that we have division-by-constant
2992 optimization. Since expand_mult_highpart tries so many variants, it is
2993 not straightforward to generalize this. Maybe we should make an array
2994 of possible modes in init_expmed? Save this for GCC 2.7. */
2996 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2997 : (unsignedp ? udiv_optab : sdiv_optab));
2998 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3000 for (compute_mode = mode; compute_mode != VOIDmode;
3001 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3002 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3003 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3004 break;
3006 if (compute_mode == VOIDmode)
3007 for (compute_mode = mode; compute_mode != VOIDmode;
3008 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3009 if (optab1->handlers[(int) compute_mode].libfunc
3010 || optab2->handlers[(int) compute_mode].libfunc)
3011 break;
3013 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3014 in expand_binop. */
3015 if (compute_mode == VOIDmode)
3016 compute_mode = mode;
3018 if (target && GET_MODE (target) == compute_mode)
3019 tquotient = target;
3020 else
3021 tquotient = gen_reg_rtx (compute_mode);
3023 size = GET_MODE_BITSIZE (compute_mode);
3024 #if 0
3025 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3026 (mode), and thereby get better code when OP1 is a constant. Do that
3027 later. It will require going over all usages of SIZE below. */
3028 size = GET_MODE_BITSIZE (mode);
3029 #endif
3031 /* Only deduct something for a REM if the last divide done was
3032 for a different constant. Then set the constant of the last
3033 divide. */
3034 max_cost = div_cost[(int) compute_mode]
3035 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3036 && INTVAL (op1) == last_div_const)
3037 ? mul_cost[(int) compute_mode] + add_cost : 0);
3039 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3041 /* Now convert to the best mode to use. */
3042 if (compute_mode != mode)
3044 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3045 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3047 /* convert_modes may have placed op1 into a register, so we
3048 must recompute the following. */
3049 op1_is_constant = GET_CODE (op1) == CONST_INT;
3050 op1_is_pow2 = (op1_is_constant
3051 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3052 || (! unsignedp
3053 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3056 /* If one of the operands is a volatile MEM, copy it into a register. */
3058 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3059 op0 = force_reg (compute_mode, op0);
3060 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3061 op1 = force_reg (compute_mode, op1);
3063 /* If we need the remainder or if OP1 is constant, we need to
3064 put OP0 in a register in case it has any queued subexpressions. */
3065 if (rem_flag || op1_is_constant)
3066 op0 = force_reg (compute_mode, op0);
3068 last = get_last_insn ();
3070 /* Promote floor rounding to trunc rounding for unsigned operations. */
3071 if (unsignedp)
3073 if (code == FLOOR_DIV_EXPR)
3074 code = TRUNC_DIV_EXPR;
3075 if (code == FLOOR_MOD_EXPR)
3076 code = TRUNC_MOD_EXPR;
3077 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3078 code = TRUNC_DIV_EXPR;
3081 if (op1 != const0_rtx)
3082 switch (code)
3084 case TRUNC_MOD_EXPR:
3085 case TRUNC_DIV_EXPR:
3086 if (op1_is_constant)
3088 if (unsignedp)
3090 unsigned HOST_WIDE_INT mh, ml;
3091 int pre_shift, post_shift;
3092 int dummy;
3093 unsigned HOST_WIDE_INT d = INTVAL (op1);
3095 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3097 pre_shift = floor_log2 (d);
3098 if (rem_flag)
3100 remainder
3101 = expand_binop (compute_mode, and_optab, op0,
3102 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3103 remainder, 1,
3104 OPTAB_LIB_WIDEN);
3105 if (remainder)
3106 return gen_lowpart (mode, remainder);
3108 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3109 build_int_2 (pre_shift, 0),
3110 tquotient, 1);
3112 else if (size <= HOST_BITS_PER_WIDE_INT)
3114 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3116 /* Most significant bit of divisor is set; emit an scc
3117 insn. */
3118 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3119 compute_mode, 1, 1);
3120 if (quotient == 0)
3121 goto fail1;
3123 else
3125 /* Find a suitable multiplier and right shift count
3126 instead of multiplying with D. */
3128 mh = choose_multiplier (d, size, size,
3129 &ml, &post_shift, &dummy);
3131 /* If the suggested multiplier is more than SIZE bits,
3132 we can do better for even divisors, using an
3133 initial right shift. */
3134 if (mh != 0 && (d & 1) == 0)
3136 pre_shift = floor_log2 (d & -d);
3137 mh = choose_multiplier (d >> pre_shift, size,
3138 size - pre_shift,
3139 &ml, &post_shift, &dummy);
3140 if (mh)
3141 abort ();
3143 else
3144 pre_shift = 0;
3146 if (mh != 0)
3148 rtx t1, t2, t3, t4;
3150 extra_cost = (shift_cost[post_shift - 1]
3151 + shift_cost[1] + 2 * add_cost);
3152 t1 = expand_mult_highpart (compute_mode, op0, ml,
3153 NULL_RTX, 1,
3154 max_cost - extra_cost);
3155 if (t1 == 0)
3156 goto fail1;
3157 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3158 op0, t1),
3159 NULL_RTX);
3160 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3161 build_int_2 (1, 0), NULL_RTX,1);
3162 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3163 t1, t3),
3164 NULL_RTX);
3165 quotient
3166 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3167 build_int_2 (post_shift - 1, 0),
3168 tquotient, 1);
3170 else
3172 rtx t1, t2;
3174 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3175 build_int_2 (pre_shift, 0),
3176 NULL_RTX, 1);
3177 extra_cost = (shift_cost[pre_shift]
3178 + shift_cost[post_shift]);
3179 t2 = expand_mult_highpart (compute_mode, t1, ml,
3180 NULL_RTX, 1,
3181 max_cost - extra_cost);
3182 if (t2 == 0)
3183 goto fail1;
3184 quotient
3185 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3186 build_int_2 (post_shift, 0),
3187 tquotient, 1);
3191 else /* Mode too wide for the tricky code */
3192 break;
3194 insn = get_last_insn ();
3195 if (insn != last
3196 && (set = single_set (insn)) != 0
3197 && SET_DEST (set) == quotient)
3198 set_unique_reg_note (insn,
3199 REG_EQUAL,
3200 gen_rtx_UDIV (compute_mode, op0, op1));
3202 else /* TRUNC_DIV, signed */
3204 unsigned HOST_WIDE_INT ml;
3205 int lgup, post_shift;
3206 HOST_WIDE_INT d = INTVAL (op1);
3207 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3209 /* n rem d = n rem -d */
3210 if (rem_flag && d < 0)
3212 d = abs_d;
3213 op1 = GEN_INT (abs_d);
3216 if (d == 1)
3217 quotient = op0;
3218 else if (d == -1)
3219 quotient = expand_unop (compute_mode, neg_optab, op0,
3220 tquotient, 0);
3221 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3223 /* This case is not handled correctly below. */
3224 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3225 compute_mode, 1, 1);
3226 if (quotient == 0)
3227 goto fail1;
3229 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3230 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3232 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3234 lgup = floor_log2 (abs_d);
3235 if (abs_d != 2 && BRANCH_COST < 3)
3237 rtx label = gen_label_rtx ();
3238 rtx t1;
3240 t1 = copy_to_mode_reg (compute_mode, op0);
3241 do_cmp_and_jump (t1, const0_rtx, GE,
3242 compute_mode, label);
3243 expand_inc (t1, GEN_INT (abs_d - 1));
3244 emit_label (label);
3245 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3246 build_int_2 (lgup, 0),
3247 tquotient, 0);
3249 else
3251 rtx t1, t2, t3;
3252 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3253 build_int_2 (size - 1, 0),
3254 NULL_RTX, 0);
3255 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3256 build_int_2 (size - lgup, 0),
3257 NULL_RTX, 1);
3258 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3259 op0, t2),
3260 NULL_RTX);
3261 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3262 build_int_2 (lgup, 0),
3263 tquotient, 0);
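/* E.g. for d == 4 and size == 32, the branchless sequence above is
   t1 = op0 >> 31 (arithmetic), t2 = (unsigned) t1 >> 30
   (i.e. op0 < 0 ? 3 : 0), t3 = op0 + t2, quotient = t3 >> 2
   (arithmetic), which rounds toward zero for negative dividends. */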
3266 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3267 the quotient. */
3268 if (d < 0)
3270 insn = get_last_insn ();
3271 if (insn != last
3272 && (set = single_set (insn)) != 0
3273 && SET_DEST (set) == quotient
3274 && abs_d < ((unsigned HOST_WIDE_INT) 1
3275 << (HOST_BITS_PER_WIDE_INT - 1)))
3276 set_unique_reg_note (insn,
3277 REG_EQUAL,
3278 gen_rtx_DIV (compute_mode,
3279 op0,
3280 GEN_INT (abs_d)));
3282 quotient = expand_unop (compute_mode, neg_optab,
3283 quotient, quotient, 0);
3286 else if (size <= HOST_BITS_PER_WIDE_INT)
3288 choose_multiplier (abs_d, size, size - 1,
3289 &ml, &post_shift, &lgup);
3290 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3292 rtx t1, t2, t3;
3294 extra_cost = (shift_cost[post_shift]
3295 + shift_cost[size - 1] + add_cost);
3296 t1 = expand_mult_highpart (compute_mode, op0, ml,
3297 NULL_RTX, 0,
3298 max_cost - extra_cost);
3299 if (t1 == 0)
3300 goto fail1;
3301 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3302 build_int_2 (post_shift, 0), NULL_RTX, 0);
3303 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3304 build_int_2 (size - 1, 0), NULL_RTX, 0);
3305 if (d < 0)
3306 quotient
3307 = force_operand (gen_rtx_MINUS (compute_mode,
3308 t3, t2),
3309 tquotient);
3310 else
3311 quotient
3312 = force_operand (gen_rtx_MINUS (compute_mode,
3313 t2, t3),
3314 tquotient);
3316 else
3318 rtx t1, t2, t3, t4;
3320 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3321 extra_cost = (shift_cost[post_shift]
3322 + shift_cost[size - 1] + 2 * add_cost);
3323 t1 = expand_mult_highpart (compute_mode, op0, ml,
3324 NULL_RTX, 0,
3325 max_cost - extra_cost);
3326 if (t1 == 0)
3327 goto fail1;
3328 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3329 t1, op0),
3330 NULL_RTX);
3331 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3332 build_int_2 (post_shift, 0),
3333 NULL_RTX, 0);
3334 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3335 build_int_2 (size - 1, 0),
3336 NULL_RTX, 0);
3337 if (d < 0)
3338 quotient
3339 = force_operand (gen_rtx_MINUS (compute_mode,
3340 t4, t3),
3341 tquotient);
3342 else
3343 quotient
3344 = force_operand (gen_rtx_MINUS (compute_mode,
3345 t3, t4),
3346 tquotient);
3349 else /* Mode too wide for the tricky code */
3350 break;
3352 insn = get_last_insn ();
3353 if (insn != last
3354 && (set = single_set (insn)) != 0
3355 && SET_DEST (set) == quotient)
3356 set_unique_reg_note (insn,
3357 REG_EQUAL,
3358 gen_rtx_DIV (compute_mode, op0, op1));
3360 break;
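/* E.g. for d == 7 and size == 32, choose_multiplier above yields
   ml == 0x92492493 and post_shift == 2; since ml >= 2^31 the second
   multiplier sequence runs: t1 = the signed highpart of op0 * ml,
   t2 = op0 + t1, t3 = t2 >> 2, t4 = op0 >> 31, quotient = t3 - t4. */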
3362 fail1:
3363 delete_insns_since (last);
3364 break;
3366 case FLOOR_DIV_EXPR:
3367 case FLOOR_MOD_EXPR:
3368 /* We will come here only for signed operations. */
3369 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3371 unsigned HOST_WIDE_INT mh, ml;
3372 int pre_shift, lgup, post_shift;
3373 HOST_WIDE_INT d = INTVAL (op1);
3375 if (d > 0)
3377 /* We could just as easily deal with negative constants here,
3378 but it does not seem worth the trouble for GCC 2.6. */
3379 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3381 pre_shift = floor_log2 (d);
3382 if (rem_flag)
3384 remainder = expand_binop (compute_mode, and_optab, op0,
3385 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3386 remainder, 0, OPTAB_LIB_WIDEN);
3387 if (remainder)
3388 return gen_lowpart (mode, remainder);
3390 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3391 build_int_2 (pre_shift, 0),
3392 tquotient, 0);
3394 else
3396 rtx t1, t2, t3, t4;
3398 mh = choose_multiplier (d, size, size - 1,
3399 &ml, &post_shift, &lgup);
3400 if (mh)
3401 abort ();
3403 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3404 build_int_2 (size - 1, 0), NULL_RTX, 0);
3405 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3406 NULL_RTX, 0, OPTAB_WIDEN);
3407 extra_cost = (shift_cost[post_shift]
3408 + shift_cost[size - 1] + 2 * add_cost);
3409 t3 = expand_mult_highpart (compute_mode, t2, ml,
3410 NULL_RTX, 1,
3411 max_cost - extra_cost);
3412 if (t3 != 0)
3414 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3415 build_int_2 (post_shift, 0),
3416 NULL_RTX, 1);
3417 quotient = expand_binop (compute_mode, xor_optab,
3418 t4, t1, tquotient, 0,
3419 OPTAB_WIDEN);
3423 else
3425 rtx nsign, t1, t2, t3, t4;
3426 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3427 op0, constm1_rtx), NULL_RTX);
3428 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3429 0, OPTAB_WIDEN);
3430 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3431 build_int_2 (size - 1, 0), NULL_RTX, 0);
3432 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3433 NULL_RTX);
3434 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3435 NULL_RTX, 0);
3436 if (t4)
3438 rtx t5;
3439 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3440 NULL_RTX, 0);
3441 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3442 t4, t5),
3443 tquotient);
3448 if (quotient != 0)
3449 break;
3450 delete_insns_since (last);
3452 /* Try using an instruction that produces both the quotient and
3453 remainder, using truncation. We can easily compensate the quotient
3454 or remainder to get floor rounding, once we have the remainder.
3455 Notice that we also compute the final remainder value here,
3456 and return the result right away. */
3457 if (target == 0 || GET_MODE (target) != compute_mode)
3458 target = gen_reg_rtx (compute_mode);
3460 if (rem_flag)
3462 remainder
3463 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3464 quotient = gen_reg_rtx (compute_mode);
3466 else
3468 quotient
3469 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3470 remainder = gen_reg_rtx (compute_mode);
3473 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3474 quotient, remainder, 0))
3476 /* This could be computed with a branch-less sequence.
3477 Save that for later. */
3478 rtx tem;
3479 rtx label = gen_label_rtx ();
3480 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3481 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3482 NULL_RTX, 0, OPTAB_WIDEN);
3483 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3484 expand_dec (quotient, const1_rtx);
3485 expand_inc (remainder, op1);
3486 emit_label (label);
3487 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3490 /* No luck with division elimination or divmod. Have to do it
3491 by conditionally adjusting op0 *and* the result. */
3493 rtx label1, label2, label3, label4, label5;
3494 rtx adjusted_op0;
3495 rtx tem;
3497 quotient = gen_reg_rtx (compute_mode);
3498 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3499 label1 = gen_label_rtx ();
3500 label2 = gen_label_rtx ();
3501 label3 = gen_label_rtx ();
3502 label4 = gen_label_rtx ();
3503 label5 = gen_label_rtx ();
3504 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3505 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3506 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3507 quotient, 0, OPTAB_LIB_WIDEN);
3508 if (tem != quotient)
3509 emit_move_insn (quotient, tem);
3510 emit_jump_insn (gen_jump (label5));
3511 emit_barrier ();
3512 emit_label (label1);
3513 expand_inc (adjusted_op0, const1_rtx);
3514 emit_jump_insn (gen_jump (label4));
3515 emit_barrier ();
3516 emit_label (label2);
3517 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3518 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3519 quotient, 0, OPTAB_LIB_WIDEN);
3520 if (tem != quotient)
3521 emit_move_insn (quotient, tem);
3522 emit_jump_insn (gen_jump (label5));
3523 emit_barrier ();
3524 emit_label (label3);
3525 expand_dec (adjusted_op0, const1_rtx);
3526 emit_label (label4);
3527 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3528 quotient, 0, OPTAB_LIB_WIDEN);
3529 if (tem != quotient)
3530 emit_move_insn (quotient, tem);
3531 expand_dec (quotient, const1_rtx);
3532 emit_label (label5);
3534 break;
3536 case CEIL_DIV_EXPR:
3537 case CEIL_MOD_EXPR:
3538 if (unsignedp)
3540 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3542 rtx t1, t2, t3;
3543 unsigned HOST_WIDE_INT d = INTVAL (op1);
3544 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3545 build_int_2 (floor_log2 (d), 0),
3546 tquotient, 1);
3547 t2 = expand_binop (compute_mode, and_optab, op0,
3548 GEN_INT (d - 1),
3549 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3550 t3 = gen_reg_rtx (compute_mode);
3551 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3552 compute_mode, 1, 1);
3553 if (t3 == 0)
3555 rtx lab;
3556 lab = gen_label_rtx ();
3557 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3558 expand_inc (t1, const1_rtx);
3559 emit_label (lab);
3560 quotient = t1;
3562 else
3563 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3564 t1, t3),
3565 tquotient);
3566 break;
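/* E.g. for op1 == 8: t1 = op0 >> 3, t3 = (op0 & 7) != 0, and the
   quotient is t1 + t3, rounding up exactly when the remainder is
   nonzero. */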
3569 /* Try using an instruction that produces both the quotient and
3570 remainder, using truncation. We can easily compensate the
3571 quotient or remainder to get ceiling rounding, once we have the
3572 remainder. Notice that we also compute the final remainder
3573 value here, and return the result right away. */
3574 if (target == 0 || GET_MODE (target) != compute_mode)
3575 target = gen_reg_rtx (compute_mode);
3577 if (rem_flag)
3579 remainder = (GET_CODE (target) == REG
3580 ? target : gen_reg_rtx (compute_mode));
3581 quotient = gen_reg_rtx (compute_mode);
3583 else
3585 quotient = (GET_CODE (target) == REG
3586 ? target : gen_reg_rtx (compute_mode));
3587 remainder = gen_reg_rtx (compute_mode);
3590 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3591 remainder, 1))
3593 /* This could be computed with a branch-less sequence.
3594 Save that for later. */
3595 rtx label = gen_label_rtx ();
3596 do_cmp_and_jump (remainder, const0_rtx, EQ,
3597 compute_mode, label);
3598 expand_inc (quotient, const1_rtx);
3599 expand_dec (remainder, op1);
3600 emit_label (label);
3601 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3604 /* No luck with division elimination or divmod. Have to do it
3605 by conditionally adjusting op0 *and* the result. */
3607 rtx label1, label2;
3608 rtx adjusted_op0, tem;
3610 quotient = gen_reg_rtx (compute_mode);
3611 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3612 label1 = gen_label_rtx ();
3613 label2 = gen_label_rtx ();
3614 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3615 compute_mode, label1);
3616 emit_move_insn (quotient, const0_rtx);
3617 emit_jump_insn (gen_jump (label2));
3618 emit_barrier ();
3619 emit_label (label1);
3620 expand_dec (adjusted_op0, const1_rtx);
3621 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3622 quotient, 1, OPTAB_LIB_WIDEN);
3623 if (tem != quotient)
3624 emit_move_insn (quotient, tem);
3625 expand_inc (quotient, const1_rtx);
3626 emit_label (label2);
3629 else /* signed */
3631 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3632 && INTVAL (op1) >= 0)
3634 /* This is extremely similar to the code for the unsigned case
3635 above. For 2.7 we should merge these variants, but for
3636 2.6.1 I don't want to touch the code for unsigned since that
3638 gets used in C. The signed case will only be used by other
3638 languages (Ada). */
3640 rtx t1, t2, t3;
3641 unsigned HOST_WIDE_INT d = INTVAL (op1);
3642 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3643 build_int_2 (floor_log2 (d), 0),
3644 tquotient, 0);
3645 t2 = expand_binop (compute_mode, and_optab, op0,
3646 GEN_INT (d - 1),
3647 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3648 t3 = gen_reg_rtx (compute_mode);
3649 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3650 compute_mode, 1, 1);
3651 if (t3 == 0)
3653 rtx lab;
3654 lab = gen_label_rtx ();
3655 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3656 expand_inc (t1, const1_rtx);
3657 emit_label (lab);
3658 quotient = t1;
3660 else
3661 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3662 t1, t3),
3663 tquotient);
3664 break;
3667 /* Try using an instruction that produces both the quotient and
3668 remainder, using truncation. We can easily compensate the
3669 quotient or remainder to get ceiling rounding, once we have the
3670 remainder. Notice that we also compute the final remainder
3671 value here and return the result right away. */
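/* Added example of the signed compensation: adjust only when R != 0
   and op0, op1 have the same sign (tested below via the sign of
   op0 ^ op1), since only then does truncation round the exact quotient
   downward.  E.g. 7 / 2 gives Q == 3, R == 1 with equal signs, hence
   quotient 4 and remainder -1; but -7 / 2 gives Q == -3, R == -1 with
   differing signs, and -3 is already the ceiling.  */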
3672 if (target == 0 || GET_MODE (target) != compute_mode)
3673 target = gen_reg_rtx (compute_mode);
3674 if (rem_flag)
3676 remainder = (GET_CODE (target) == REG
3677 ? target : gen_reg_rtx (compute_mode));
3678 quotient = gen_reg_rtx (compute_mode);
3680 else
3682 quotient = (GET_CODE (target) == REG
3683 ? target : gen_reg_rtx (compute_mode));
3684 remainder = gen_reg_rtx (compute_mode);
3687 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3688 remainder, 0))
3690 /* This could be computed with a branch-less sequence.
3691 Save that for later. */
3692 rtx tem;
3693 rtx label = gen_label_rtx ();
3694 do_cmp_and_jump (remainder, const0_rtx, EQ,
3695 compute_mode, label);
3696 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3697 NULL_RTX, 0, OPTAB_WIDEN);
3698 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3699 expand_inc (quotient, const1_rtx);
3700 expand_dec (remainder, op1);
3701 emit_label (label);
3702 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3705 /* No luck with division elimination or divmod. Have to do it
3706 by conditionally adjusting op0 *and* the result. */
3708 rtx label1, label2, label3, label4, label5;
3709 rtx adjusted_op0;
3710 rtx tem;
3712 quotient = gen_reg_rtx (compute_mode);
3713 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3714 label1 = gen_label_rtx ();
3715 label2 = gen_label_rtx ();
3716 label3 = gen_label_rtx ();
3717 label4 = gen_label_rtx ();
3718 label5 = gen_label_rtx ();
3719 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3720 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3721 compute_mode, label1);
3722 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3723 quotient, 0, OPTAB_LIB_WIDEN);
3724 if (tem != quotient)
3725 emit_move_insn (quotient, tem);
3726 emit_jump_insn (gen_jump (label5));
3727 emit_barrier ();
3728 emit_label (label1);
3729 expand_dec (adjusted_op0, const1_rtx);
3730 emit_jump_insn (gen_jump (label4));
3731 emit_barrier ();
3732 emit_label (label2);
3733 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3734 compute_mode, label3);
3735 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3736 quotient, 0, OPTAB_LIB_WIDEN);
3737 if (tem != quotient)
3738 emit_move_insn (quotient, tem);
3739 emit_jump_insn (gen_jump (label5));
3740 emit_barrier ();
3741 emit_label (label3);
3742 expand_inc (adjusted_op0, const1_rtx);
3743 emit_label (label4);
3744 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3745 quotient, 0, OPTAB_LIB_WIDEN);
3746 if (tem != quotient)
3747 emit_move_insn (quotient, tem);
3748 expand_inc (quotient, const1_rtx);
3749 emit_label (label5);
3752 break;
3754 case EXACT_DIV_EXPR:
3755 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3757 HOST_WIDE_INT d = INTVAL (op1);
3758 unsigned HOST_WIDE_INT ml;
3759 int post_shift;
3760 rtx t1;
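/* Added example: for an exact division, the odd part of the divisor
   has a multiplicative inverse modulo 2**SIZE, so the quotient is just
   a shift and a multiply.  In a 32-bit mode with d == 3, invert_mod2n
   yields 0xaaaaaaab, since 3 * 0xaaaaaaab == 1 (mod 2**32); then e.g.
   9 * 0xaaaaaaab == 3 (mod 2**32), the exact quotient.  */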
3762 post_shift = floor_log2 (d & -d);
3763 ml = invert_mod2n (d >> post_shift, size);
3764 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3765 unsignedp);
3766 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3767 build_int_2 (post_shift, 0),
3768 NULL_RTX, unsignedp);
3770 insn = get_last_insn ();
3771 set_unique_reg_note (insn,
3772 REG_EQUAL,
3773 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3774 compute_mode,
3775 op0, op1));
3777 break;
3779 case ROUND_DIV_EXPR:
3780 case ROUND_MOD_EXPR:
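/* Added sketch: round-to-nearest is a truncating divmod plus a
   conditional adjustment.  Unsigned: increment the quotient when
   R > (op1 - 1) / 2; e.g. 17 / 10 has R == 7 > 4 and rounds to 2.
   Signed: adjust away from zero when 2 * abs (R) >= abs (op1); e.g.
   7 / 2 gives Q == 3, R == 1 and 2 * 1 >= 2, so the rounded results
   are Q == 4, R == -1.  */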
3781 if (unsignedp)
3783 rtx tem;
3784 rtx label;
3785 label = gen_label_rtx ();
3786 quotient = gen_reg_rtx (compute_mode);
3787 remainder = gen_reg_rtx (compute_mode);
3788 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3790 rtx tem;
3791 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3792 quotient, 1, OPTAB_LIB_WIDEN);
3793 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3794 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3795 remainder, 1, OPTAB_LIB_WIDEN);
3797 tem = plus_constant (op1, -1);
3798 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3799 build_int_2 (1, 0), NULL_RTX, 1);
3800 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3801 expand_inc (quotient, const1_rtx);
3802 expand_dec (remainder, op1);
3803 emit_label (label);
3805 else
3807 rtx abs_rem, abs_op1, tem, mask;
3808 rtx label;
3809 label = gen_label_rtx ();
3810 quotient = gen_reg_rtx (compute_mode);
3811 remainder = gen_reg_rtx (compute_mode);
3812 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3814 rtx tem;
3815 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3816 quotient, 0, OPTAB_LIB_WIDEN);
3817 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3818 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3819 remainder, 0, OPTAB_LIB_WIDEN);
3821 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
3822 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
3823 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3824 build_int_2 (1, 0), NULL_RTX, 1);
3825 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3826 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3827 NULL_RTX, 0, OPTAB_WIDEN);
3828 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3829 build_int_2 (size - 1, 0), NULL_RTX, 0);
3830 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3831 NULL_RTX, 0, OPTAB_WIDEN);
3832 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3833 NULL_RTX, 0, OPTAB_WIDEN);
3834 expand_inc (quotient, tem);
3835 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3836 NULL_RTX, 0, OPTAB_WIDEN);
3837 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3838 NULL_RTX, 0, OPTAB_WIDEN);
3839 expand_dec (remainder, tem);
3840 emit_label (label);
3842 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3844 default:
3845 abort ();
3848 if (quotient == 0)
3850 if (target && GET_MODE (target) != compute_mode)
3851 target = 0;
3853 if (rem_flag)
3855 /* Try to produce the remainder without producing the quotient.
3856 If we seem to have a divmod pattern that does not require widening,
3857 don't try widening here. We should really have a WIDEN argument
3858 to expand_twoval_binop, since what we'd really like to do here is
3859 1) try a mod insn in compute_mode
3860 2) try a divmod insn in compute_mode
3861 3) try a div insn in compute_mode and multiply-subtract to get
3862 remainder
3863 4) try the same things with widening allowed. */
3864 remainder
3865 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3866 op0, op1, target,
3867 unsignedp,
3868 ((optab2->handlers[(int) compute_mode].insn_code
3869 != CODE_FOR_nothing)
3870 ? OPTAB_DIRECT : OPTAB_WIDEN));
3871 if (remainder == 0)
3873 /* No luck there. Can we do remainder and divide at once
3874 without a library call? */
3875 remainder = gen_reg_rtx (compute_mode);
3876 if (! expand_twoval_binop ((unsignedp
3877 ? udivmod_optab
3878 : sdivmod_optab),
3879 op0, op1,
3880 NULL_RTX, remainder, unsignedp))
3881 remainder = 0;
3884 if (remainder)
3885 return gen_lowpart (mode, remainder);
3888 /* Produce the quotient. Try a quotient insn, but not a library call.
3889 If we have a divmod in this mode, use it in preference to widening
3890 the div (for this test we assume it will not fail). Note that optab2
3891 is set to whichever of the two optabs the call below will use. */
3892 quotient
3893 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3894 op0, op1, rem_flag ? NULL_RTX : target,
3895 unsignedp,
3896 ((optab2->handlers[(int) compute_mode].insn_code
3897 != CODE_FOR_nothing)
3898 ? OPTAB_DIRECT : OPTAB_WIDEN));
3900 if (quotient == 0)
3902 /* No luck there. Try a quotient-and-remainder insn,
3903 keeping the quotient alone. */
3904 quotient = gen_reg_rtx (compute_mode);
3905 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3906 op0, op1,
3907 quotient, NULL_RTX, unsignedp))
3909 quotient = 0;
3910 if (! rem_flag)
3911 /* Still no luck. If we are not computing the remainder,
3912 use a library call for the quotient. */
3913 quotient = sign_expand_binop (compute_mode,
3914 udiv_optab, sdiv_optab,
3915 op0, op1, target,
3916 unsignedp, OPTAB_LIB_WIDEN);
3921 if (rem_flag)
3923 if (target && GET_MODE (target) != compute_mode)
3924 target = 0;
3926 if (quotient == 0)
3927 /* No divide instruction either. Use library for remainder. */
3928 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3929 op0, op1, target,
3930 unsignedp, OPTAB_LIB_WIDEN);
3931 else
3933 /* We divided. Now finish doing X - Y * (X / Y). */
3934 remainder = expand_mult (compute_mode, quotient, op1,
3935 NULL_RTX, unsignedp);
3936 remainder = expand_binop (compute_mode, sub_optab, op0,
3937 remainder, target, unsignedp,
3938 OPTAB_LIB_WIDEN);
3942 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3945 /* Return a tree node with data type TYPE, describing the value of X.
3946 Usually this is an RTL_EXPR, if there is no obvious better choice.
3947 X may be an expression; however, we only support the expressions
3948 generated by loop.c. */
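/* Added example (register number hypothetical): (plus:SI (reg:SI 100)
   (const_int 1)) becomes a PLUS_EXPR whose operands are an RTL_EXPR
   wrapping the register and an INTEGER_CST of 1.  */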
3950 tree
3951 make_tree (type, x)
3952 tree type;
3953 rtx x;
3955 tree t;
3957 switch (GET_CODE (x))
3959 case CONST_INT:
3960 t = build_int_2 (INTVAL (x),
3961 (TREE_UNSIGNED (type)
3962 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3963 || INTVAL (x) >= 0 ? 0 : -1);
3964 TREE_TYPE (t) = type;
3965 return t;
3967 case CONST_DOUBLE:
3968 if (GET_MODE (x) == VOIDmode)
3970 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3971 TREE_TYPE (t) = type;
3973 else
3975 REAL_VALUE_TYPE d;
3977 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3978 t = build_real (type, d);
3981 return t;
3983 case PLUS:
3984 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3985 make_tree (type, XEXP (x, 1))));
3987 case MINUS:
3988 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3989 make_tree (type, XEXP (x, 1))));
3991 case NEG:
3992 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3994 case MULT:
3995 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3996 make_tree (type, XEXP (x, 1))));
3998 case ASHIFT:
3999 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4000 make_tree (type, XEXP (x, 1))));
4002 case LSHIFTRT:
4003 return fold (convert (type,
4004 build (RSHIFT_EXPR, unsigned_type (type),
4005 make_tree (unsigned_type (type),
4006 XEXP (x, 0)),
4007 make_tree (type, XEXP (x, 1)))));
4009 case ASHIFTRT:
4010 return fold (convert (type,
4011 build (RSHIFT_EXPR, signed_type (type),
4012 make_tree (signed_type (type), XEXP (x, 0)),
4013 make_tree (type, XEXP (x, 1)))));
4015 case DIV:
4016 if (TREE_CODE (type) != REAL_TYPE)
4017 t = signed_type (type);
4018 else
4019 t = type;
4021 return fold (convert (type,
4022 build (TRUNC_DIV_EXPR, t,
4023 make_tree (t, XEXP (x, 0)),
4024 make_tree (t, XEXP (x, 1)))));
4025 case UDIV:
4026 t = unsigned_type (type);
4027 return fold (convert (type,
4028 build (TRUNC_DIV_EXPR, t,
4029 make_tree (t, XEXP (x, 0)),
4030 make_tree (t, XEXP (x, 1)))));
4031 default:
4032 t = make_node (RTL_EXPR);
4033 TREE_TYPE (t) = type;
4034 RTL_EXPR_RTL (t) = x;
4035 /* There are no insns to be output
4036 when this rtl_expr is used. */
4037 RTL_EXPR_SEQUENCE (t) = 0;
4038 return t;
4042 /* Return an rtx representing the value of X * MULT + ADD.
4043 TARGET is a suggestion for where to store the result (an rtx).
4044 MODE is the machine mode for the computation.
4045 X and MULT must have mode MODE. ADD may have a different mode.
4046 So can X (defaults to same as MODE).
4047 UNSIGNEDP is non-zero to do unsigned multiplication.
4048 This may emit insns. */
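/* Added, hypothetical example: a value I * 4 + BASE could be expanded
   with

     expand_mult_add (i_reg, NULL_RTX, GEN_INT (4), base_reg,
                      SImode, 0);

   where i_reg and base_reg are SImode pseudos.  */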
4051 expand_mult_add (x, target, mult, add, mode, unsignedp)
4052 rtx x, target, mult, add;
4053 enum machine_mode mode;
4054 int unsignedp;
4056 tree type = type_for_mode (mode, unsignedp);
4057 tree add_type = (GET_MODE (add) == VOIDmode
4058 ? type : type_for_mode (GET_MODE (add), unsignedp));
4059 tree result = fold (build (PLUS_EXPR, type,
4060 fold (build (MULT_EXPR, type,
4061 make_tree (type, x),
4062 make_tree (type, mult))),
4063 make_tree (add_type, add)));
4065 return expand_expr (result, target, VOIDmode, 0);
4068 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4069 and returning TARGET.
4071 If TARGET is 0, a pseudo-register or constant is returned. */
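/* Added, hypothetical example: masking a value down to its low byte
   with expand_and (val, GEN_INT (0xff), NULL_RTX), which returns the
   result in a fresh pseudo, or as a constant if both inputs are
   CONST_INTs.  */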
4074 expand_and (op0, op1, target)
4075 rtx op0, op1, target;
4077 enum machine_mode mode = VOIDmode;
4078 rtx tem;
4080 if (GET_MODE (op0) != VOIDmode)
4081 mode = GET_MODE (op0);
4082 else if (GET_MODE (op1) != VOIDmode)
4083 mode = GET_MODE (op1);
4085 if (mode != VOIDmode)
4086 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4087 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4088 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4089 else
4090 abort ();
4092 if (target == 0)
4093 target = tem;
4094 else if (tem != target)
4095 emit_move_insn (target, tem);
4096 return target;
4099 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4100 and storing in TARGET. Normally return TARGET.
4101 Return 0 if that cannot be done.
4103 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4104 it is VOIDmode, they cannot both be CONST_INT.
4106 UNSIGNEDP is for the case where we have to widen the operands
4107 to perform the operation. It says to use zero-extension.
4109 NORMALIZEP is 1 if we should convert the result to be either zero
4110 or one. NORMALIZEP is -1 if we should convert the result to be
4111 either zero or -1. If NORMALIZEP is zero, the result will be left
4112 "raw" out of the scc insn. */
4115 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4116 rtx target;
4117 enum rtx_code code;
4118 rtx op0, op1;
4119 enum machine_mode mode;
4120 int unsignedp;
4121 int normalizep;
4123 rtx subtarget;
4124 enum insn_code icode;
4125 enum machine_mode compare_mode;
4126 enum machine_mode target_mode = GET_MODE (target);
4127 rtx tem;
4128 rtx last = get_last_insn ();
4129 rtx pattern, comparison;
4131 if (unsignedp)
4132 code = unsigned_condition (code);
4134 /* If one operand is constant, make it the second one. Only do this
4135 if the other operand is not constant as well. */
4137 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4138 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4140 tem = op0;
4141 op0 = op1;
4142 op1 = tem;
4143 code = swap_condition (code);
4146 if (mode == VOIDmode)
4147 mode = GET_MODE (op0);
4149 /* For some comparisons with 1 and -1, we can convert this to
4150 comparisons with zero. This will often produce more opportunities for
4151 store-flag insns. */
4153 switch (code)
4155 case LT:
4156 if (op1 == const1_rtx)
4157 op1 = const0_rtx, code = LE;
4158 break;
4159 case LE:
4160 if (op1 == constm1_rtx)
4161 op1 = const0_rtx, code = LT;
4162 break;
4163 case GE:
4164 if (op1 == const1_rtx)
4165 op1 = const0_rtx, code = GT;
4166 break;
4167 case GT:
4168 if (op1 == constm1_rtx)
4169 op1 = const0_rtx, code = GE;
4170 break;
4171 case GEU:
4172 if (op1 == const1_rtx)
4173 op1 = const0_rtx, code = NE;
4174 break;
4175 case LTU:
4176 if (op1 == const1_rtx)
4177 op1 = const0_rtx, code = EQ;
4178 break;
4179 default:
4180 break;
4183 /* From now on, we won't change CODE, so set ICODE now. */
4184 icode = setcc_gen_code[(int) code];
4186 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4187 complement of A (for GE) and shifting the sign bit to the low bit. */
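/* Added example, for a 32-bit mode:

     (A < 0)   ==  (unsigned) A >> 31
     (A >= 0)  ==  (unsigned) ~A >> 31

   which is the shift emitted below, with the complement used only
   for GE.  */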
4188 if (op1 == const0_rtx && (code == LT || code == GE)
4189 && GET_MODE_CLASS (mode) == MODE_INT
4190 && (normalizep || STORE_FLAG_VALUE == 1
4191 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4192 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4193 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4195 subtarget = target;
4197 /* If the result is to be wider than OP0, it is best to convert it
4198 first. If it is to be narrower, it is *incorrect* to convert it
4199 first. */
4200 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4202 op0 = protect_from_queue (op0, 0);
4203 op0 = convert_modes (target_mode, mode, op0, 0);
4204 mode = target_mode;
4207 if (target_mode != mode)
4208 subtarget = 0;
4210 if (code == GE)
4211 op0 = expand_unop (mode, one_cmpl_optab, op0,
4212 ((STORE_FLAG_VALUE == 1 || normalizep)
4213 ? 0 : subtarget), 0);
4215 if (STORE_FLAG_VALUE == 1 || normalizep)
4216 /* If we are supposed to produce a 0/1 value, we want to do
4217 a logical shift from the sign bit to the low-order bit; for
4218 a -1/0 value, we do an arithmetic shift. */
4219 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4220 size_int (GET_MODE_BITSIZE (mode) - 1),
4221 subtarget, normalizep != -1);
4223 if (mode != target_mode)
4224 op0 = convert_modes (target_mode, mode, op0, 0);
4226 return op0;
4229 if (icode != CODE_FOR_nothing)
4231 insn_operand_predicate_fn pred;
4233 /* We think we may be able to do this with a scc insn. Emit the
4234 comparison and then the scc insn.
4236 compare_from_rtx may call emit_queue, which would be deleted below
4237 if the scc insn fails. So call it ourselves before setting LAST.
4238 Likewise for do_pending_stack_adjust. */
4240 emit_queue ();
4241 do_pending_stack_adjust ();
4242 last = get_last_insn ();
4244 comparison
4245 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4246 if (GET_CODE (comparison) == CONST_INT)
4247 return (comparison == const0_rtx ? const0_rtx
4248 : normalizep == 1 ? const1_rtx
4249 : normalizep == -1 ? constm1_rtx
4250 : const_true_rtx);
4252 /* If the code of COMPARISON doesn't match CODE, something is
4253 wrong; we can no longer be sure that we have the operation.
4254 We could handle this case, but it should not happen. */
4256 if (GET_CODE (comparison) != code)
4257 abort ();
4259 /* Get a reference to the target in the proper mode for this insn. */
4260 compare_mode = insn_data[(int) icode].operand[0].mode;
4261 subtarget = target;
4262 pred = insn_data[(int) icode].operand[0].predicate;
4263 if (preserve_subexpressions_p ()
4264 || ! (*pred) (subtarget, compare_mode))
4265 subtarget = gen_reg_rtx (compare_mode);
4267 pattern = GEN_FCN (icode) (subtarget);
4268 if (pattern)
4270 emit_insn (pattern);
4272 /* If we are converting to a wider mode, first convert to
4273 TARGET_MODE, then normalize. This produces better combining
4274 opportunities on machines that have a SIGN_EXTRACT when we are
4275 testing a single bit. This mostly benefits the 68k.
4277 If STORE_FLAG_VALUE does not have the sign bit set when
4278 interpreted in COMPARE_MODE, we can do this conversion as
4279 unsigned, which is usually more efficient. */
4280 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4282 convert_move (target, subtarget,
4283 (GET_MODE_BITSIZE (compare_mode)
4284 <= HOST_BITS_PER_WIDE_INT)
4285 && 0 == (STORE_FLAG_VALUE
4286 & ((HOST_WIDE_INT) 1
4287 << (GET_MODE_BITSIZE (compare_mode) -1))));
4288 op0 = target;
4289 compare_mode = target_mode;
4291 else
4292 op0 = subtarget;
4294 /* If we want to keep subexpressions around, don't reuse our
4295 last target. */
4297 if (preserve_subexpressions_p ())
4298 subtarget = 0;
4300 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4301 we don't have to do anything. */
4302 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4304 /* STORE_FLAG_VALUE might be the most negative number, so write
4305 the comparison this way to avoid a compile-time warning. */
4306 else if (- normalizep == STORE_FLAG_VALUE)
4307 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4309 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4310 makes it hard to use a value of just the sign bit due to
4311 ANSI integer constant typing rules. */
4312 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4313 && (STORE_FLAG_VALUE
4314 & ((HOST_WIDE_INT) 1
4315 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4316 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4317 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4318 subtarget, normalizep == 1);
4319 else if (STORE_FLAG_VALUE & 1)
4321 op0 = expand_and (op0, const1_rtx, subtarget);
4322 if (normalizep == -1)
4323 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4325 else
4326 abort ();
4328 /* If we were converting to a smaller mode, do the
4329 conversion now. */
4330 if (target_mode != compare_mode)
4332 convert_move (target, op0, 0);
4333 return target;
4335 else
4336 return op0;
4340 delete_insns_since (last);
4342 /* If expensive optimizations, use different pseudo registers for each
4343 insn, instead of reusing the same pseudo. This leads to better CSE,
4344 but slows down the compiler, since there are more pseudos. */
4345 subtarget = (!flag_expensive_optimizations
4346 && (target_mode == mode)) ? target : NULL_RTX;
4348 /* If we reached here, we can't do this with a scc insn. However, there
4349 are some comparisons that can be done directly. For example, if
4350 this is an equality comparison of integers, we can try to exclusive-or
4351 (or subtract) the two operands and use a recursive call to try the
4352 comparison with zero. Don't do any of these cases if branches are
4353 very cheap. */
4355 if (BRANCH_COST > 0
4356 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4357 && op1 != const0_rtx)
4359 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4360 OPTAB_WIDEN);
4362 if (tem == 0)
4363 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4364 OPTAB_WIDEN);
4365 if (tem != 0)
4366 tem = emit_store_flag (target, code, tem, const0_rtx,
4367 mode, unsignedp, normalizep);
4368 if (tem == 0)
4369 delete_insns_since (last);
4370 return tem;
4373 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4374 the constant zero. Reject all other comparisons at this point. Only
4375 do LE and GT if branches are expensive since they are expensive on
4376 2-operand machines. */
4378 if (BRANCH_COST == 0
4379 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4380 || (code != EQ && code != NE
4381 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4382 return 0;
4384 /* See what we need to return. We can only return a 1, -1, or the
4385 sign bit. */
4387 if (normalizep == 0)
4389 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4390 normalizep = STORE_FLAG_VALUE;
4392 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4393 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4394 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4396 else
4397 return 0;
4400 /* Try to put the result of the comparison in the sign bit. Assume we can't
4401 do the necessary operation below. */
4403 tem = 0;
4405 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4406 the sign bit set. */
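/* Added worked example: A == 0 gives 0 | -1 == -1 (sign bit set, so
   A <= 0 holds); A == 3 gives 3 | 2 == 3 (sign bit clear); A == -5
   gives -5 | -6 == -5 (set).  */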
4408 if (code == LE)
4410 /* This is destructive, so SUBTARGET can't be OP0. */
4411 if (rtx_equal_p (subtarget, op0))
4412 subtarget = 0;
4414 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4415 OPTAB_WIDEN);
4416 if (tem)
4417 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4418 OPTAB_WIDEN);
4421 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4422 number of bits in the mode of OP0, minus one. */
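/* Added worked example, for a 32-bit mode: A == 5 gives
   (5 >> 31) - 5 == -5 (sign bit set, so A > 0 holds); A == -3 gives
   -1 - -3 == 2 (clear); A == 0 gives 0 - 0 == 0 (clear).  */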
4424 if (code == GT)
4426 if (rtx_equal_p (subtarget, op0))
4427 subtarget = 0;
4429 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4430 size_int (GET_MODE_BITSIZE (mode) - 1),
4431 subtarget, 0);
4432 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4433 OPTAB_WIDEN);
4436 if (code == EQ || code == NE)
4438 /* For EQ or NE, one way to do the comparison is to apply an operation
4439 that converts the operand into a positive number if it is non-zero
4440 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4441 for NE we negate. This puts the result in the sign bit. Then we
4442 normalize with a shift, if needed.
4444 Two operations that can do the above actions are ABS and FFS, so try
4445 them. If that doesn't work, and MODE is smaller than a full word,
4446 we can use zero-extension to the wider mode (an unsigned conversion)
4447 as the operation. */
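/* Added worked example, using ABS: A == 5 gives abs (A) == 5, so NE
   negates it to -5 (sign bit set, A != 0 holds) while EQ subtracts
   one to get 4 (clear); A == 0 gives abs (A) == 0, so NE yields 0
   (clear) and EQ yields -1 (set).  */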
4449 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4450 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4451 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4452 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4453 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4455 op0 = protect_from_queue (op0, 0);
4456 tem = convert_modes (word_mode, mode, op0, 1);
4457 mode = word_mode;
4460 if (tem != 0)
4462 if (code == EQ)
4463 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4464 0, OPTAB_WIDEN);
4465 else
4466 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4469 /* If we couldn't do it that way, for NE we can "or" the two's complement
4470 of the value with itself. For EQ, we take the one's complement of
4471 that "or", which is an extra insn, so we only handle EQ if branches
4472 are expensive. */
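/* Added worked example: A | -A has the sign bit set exactly when
   A != 0, e.g. 4 | -4 == -4 (set) but 0 | 0 == 0 (clear); EQ then
   needs the extra one's complement emitted below.  */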
4474 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4476 if (rtx_equal_p (subtarget, op0))
4477 subtarget = 0;
4479 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4480 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4481 OPTAB_WIDEN);
4483 if (tem && code == EQ)
4484 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4488 if (tem && normalizep)
4489 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4490 size_int (GET_MODE_BITSIZE (mode) - 1),
4491 subtarget, normalizep == 1);
4493 if (tem)
4495 if (GET_MODE (tem) != target_mode)
4497 convert_move (target, tem, 0);
4498 tem = target;
4500 else if (!subtarget)
4502 emit_move_insn (target, tem);
4503 tem = target;
4506 else
4507 delete_insns_since (last);
4509 return tem;
4512 /* Like emit_store_flag, but always succeeds. */
4515 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4516 rtx target;
4517 enum rtx_code code;
4518 rtx op0, op1;
4519 enum machine_mode mode;
4520 int unsignedp;
4521 int normalizep;
4523 rtx tem, label;
4525 /* First see if emit_store_flag can do the job. */
4526 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4527 if (tem != 0)
4528 return tem;
4530 if (normalizep == 0)
4531 normalizep = 1;
4533 /* If this failed, we have to do this with set/compare/jump/set code. */
4535 if (GET_CODE (target) != REG
4536 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4537 target = gen_reg_rtx (GET_MODE (target));
4539 emit_move_insn (target, const1_rtx);
4540 label = gen_label_rtx ();
4541 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4542 NULL_RTX, label);
4544 emit_move_insn (target, const0_rtx);
4545 emit_label (label);
4547 return target;
4550 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4551 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4553 The algorithm is based on the code in expr.c:do_jump.
4555 Note that this does not perform a general comparison. Only variants
4556 generated within expmed.c are correctly handled, others abort (but could
4557 be handled if needed). */
4559 static void
4560 do_cmp_and_jump (arg1, arg2, op, mode, label)
4561 rtx arg1, arg2, label;
4562 enum rtx_code op;
4563 enum machine_mode mode;
4565 /* If this mode is an integer too wide to compare properly,
4566 compare word by word. Rely on cse to optimize constant cases. */
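/* Added note: e.g. a DImode comparison on a 32-bit target without a
   DImode compare pattern is split here into word-sized compares and
   conditional jumps.  */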
4568 if (GET_MODE_CLASS (mode) == MODE_INT
4569 && ! can_compare_p (op, mode, ccp_jump))
4571 rtx label2 = gen_label_rtx ();
4573 switch (op)
4575 case LTU:
4576 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4577 break;
4579 case LEU:
4580 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4581 break;
4583 case LT:
4584 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4585 break;
4587 case GT:
4588 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4589 break;
4591 case GE:
4592 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4593 break;
4595 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4596 those are the only equality operations we do. */
4597 case EQ:
4598 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4599 abort ();
4600 do_jump_by_parts_equality_rtx (arg1, label2, label);
4601 break;
4603 case NE:
4604 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4605 abort ();
4606 do_jump_by_parts_equality_rtx (arg1, label, label2);
4607 break;
4609 default:
4610 abort ();
4613 emit_label (label2);
4615 else
4617 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);