/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-98, 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PARAMS ((rtx, int, int, int, rtx,
					   unsigned int));
static void store_split_bit_field PARAMS ((rtx, int, int, rtx,
					   unsigned int));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx, int,
					    int, int, rtx, int,
					    unsigned int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
			     int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
				 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, int, int, int,
					    unsigned int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
				     enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
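
/* Unless the target provides its own definition, assume that accessing
   under-aligned memory in MODE is slow exactly when the target requires
   strict alignment.  */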
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
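
/* These costs are measured empirically in init_expmed below: we build
   sample RTL for each operation and ask recog and rtx_cost what the
   target charges for it.  */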

void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  int insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
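  /* (E.g., a BITPOS counted within a 32-bit word grows by 16 once OP0
     turns out to be a 16-bit register: big-endian bit numbering starts
     at the msb of the wider unit.)  */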

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  if ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
       || (GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)
	   && GET_MODE_SIZE (fieldmode) != 0))
      && (GET_CODE (op0) != MEM
	  || ! SLOW_UNALIGNED_ACCESS (fieldmode, align)
	  || (offset * BITS_PER_UNIT % bitsize == 0
	      && align % GET_MODE_SIZE (fieldmode) == 0))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  Also, storing in the entire object
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0, offset), value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  int wordnum = (backwards ? nwords - i - 1 : i);
	  int bit_offset = (backwards
			    ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
			    : i * BITS_PER_WORD);
	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	  offset = 0;
	}
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
	maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
		  && GET_MODE_SIZE (bestmode) > (int) align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;
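      /* (E.g., with UNIT 32, an 8-bit field at big-endian position 4
	 becomes XBITPOS 32 - 8 - 4 = 20 for an insn that counts from
	 the least significant bit.)  */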

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);

  return value;
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
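  /* (E.g., with BITPOS 4 and BITSIZE 8 in a 32-bit MODE, the code below
     computes  op0 = (op0 & ~(0xff << 4)) | ((value & 0xff) << 4),
     building both masks with mask_rtx.)  */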

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
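
/* (E.g., assuming 32-bit words, a 12-bit field at BITPOS 26 of a register
   pair is stored as two pieces: 6 bits into the first word and 6 bits
   into the second.)  */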

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  int extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  int extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, align)
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  int wordnum = (WORDS_BIG_ENDIAN
			 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
			 : i);
	  /* Offset from start of field in OP0.  */
	  int bit_offset = (WORDS_BIG_ENDIAN
			    ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
			    : i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset,
				 1, target_part, mode, word_mode,
				 align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (extzv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extzv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_SIZE (bestmode) > (int) align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (extv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_SIZE (bestmode) > (int) align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0, align);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}

/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
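  /* (E.g., an 8-bit signed field at BITPOS 20 fits in 32-bit SImode; the
     code below computes (op0 << 4) >> 24 with an arithmetic right shift,
     which both isolates the field and sign-extends it.)  */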
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
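
/* (For instance, with a 64-bit HOST_WIDE_INT, mask_rtx (SImode, 4, 8, 0)
   yields the CONST_INT 0xff0, and mask_rtx (SImode, 4, 8, 1) its SImode
   complement 0xfffff00f.)  */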

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
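
/* (E.g., lshift_value (SImode, GEN_INT (0x1234), 8, 8) keeps only the low
   8 bits of VALUE (0x34) and returns GEN_INT (0x3400).)  */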

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}

/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */
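
/* (E.g., with 32-bit words, a 12-bit field at BITPOS 26 is fetched as
   6 bits from the first word and 6 bits from the second, then recombined
   with shifts and an IOR.)  */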
1752 static rtx
1753 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1754 rtx op0;
1755 int bitsize, bitpos, unsignedp;
1756 unsigned int align;
1758 int unit;
1759 int bitsdone = 0;
1760 rtx result = NULL_RTX;
1761 int first = 1;
1763 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1764 much at a time. */
1765 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1766 unit = BITS_PER_WORD;
1767 else
1768 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1770 while (bitsdone < bitsize)
1772 int thissize;
1773 rtx part, word;
1774 int thispos;
1775 int offset;
1777 offset = (bitpos + bitsdone) / unit;
1778 thispos = (bitpos + bitsdone) % unit;
1780 /* THISSIZE must not overrun a word boundary. Otherwise,
1781 extract_fixed_bit_field will call us again, and we will mutually
1782 recurse forever. */
1783 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1784 thissize = MIN (thissize, unit - thispos);
1786 /* If OP0 is a register, then handle OFFSET here.
1788 When handling multiword bitfields, extract_bit_field may pass
1789 down a word_mode SUBREG of a larger REG for a bitfield that actually
1790 crosses a word boundary. Thus, for a SUBREG, we must find
1791 the current word starting from the base register. */
1792 if (GET_CODE (op0) == SUBREG)
1794 word = operand_subword_force (SUBREG_REG (op0),
1795 SUBREG_WORD (op0) + offset,
1796 GET_MODE (SUBREG_REG (op0)));
1797 offset = 0;
1799 else if (GET_CODE (op0) == REG)
1801 word = operand_subword_force (op0, offset, GET_MODE (op0));
1802 offset = 0;
1804 else
1805 word = op0;
1807 /* Extract the parts in bit-counting order,
1808 whose meaning is determined by BYTES_PER_UNIT.
1809 OFFSET is in UNITs, and UNIT is in bits.
1810 extract_fixed_bit_field wants offset in bytes. */
1811 part = extract_fixed_bit_field (word_mode, word,
1812 offset * unit / BITS_PER_UNIT,
1813 thissize, thispos, 0, 1, align);
1814 bitsdone += thissize;
1816 /* Shift this part into place for the result. */
1817 if (BYTES_BIG_ENDIAN)
1819 if (bitsize != bitsdone)
1820 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1821 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1823 else
1825 if (bitsdone != thissize)
1826 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1827 build_int_2 (bitsdone - thissize, 0), 0, 1);
1830 if (first)
1831 result = part;
1832 else
1833 /* Combine the parts with bitwise or. This works
1834 because we extracted each part as an unsigned bit field. */
1835 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1836 OPTAB_LIB_WIDEN);
1838 first = 0;
1841 /* Unsigned bit field: we are done. */
1842 if (unsignedp)
1843 return result;
1844 /* Signed bit field: sign-extend with two arithmetic shifts. */
1845 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1846 build_int_2 (BITS_PER_WORD - bitsize, 0),
1847 NULL_RTX, 0);
1848 return expand_shift (RSHIFT_EXPR, word_mode, result,
1849 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
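/* Illustrative sketch of what the loop above computes in the common
   case: an unsigned BITSIZE-bit field starting at bit BITPOS (counted
   from the least significant bit) of a pair of 32-bit words, where the
   field is assumed to cross the word boundary
   (0 < bitpos < 32 < bitpos + bitsize).  The two pieces are combined
   with IOR, as above.  */
#if 0
static unsigned int
example_extract_split (unsigned int w0, unsigned int w1,
                       int bitpos, int bitsize)
{
  int lo_bits = 32 - bitpos;            /* bits contributed by W0 */
  unsigned int lo = w0 >> bitpos;
  unsigned int hi = w1 & ((1U << (bitsize - lo_bits)) - 1);
  return lo | (hi << lo_bits);
}
#endif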
1852 /* Add INC into TARGET. */
1854 void
1855 expand_inc (target, inc)
1856 rtx target, inc;
1858 rtx value = expand_binop (GET_MODE (target), add_optab,
1859 target, inc,
1860 target, 0, OPTAB_LIB_WIDEN);
1861 if (value != target)
1862 emit_move_insn (target, value);
1865 /* Subtract DEC from TARGET. */
1867 void
1868 expand_dec (target, dec)
1869 rtx target, dec;
1871 rtx value = expand_binop (GET_MODE (target), sub_optab,
1872 target, dec,
1873 target, 0, OPTAB_LIB_WIDEN);
1874 if (value != target)
1875 emit_move_insn (target, value);
1878 /* Output a shift instruction for expression code CODE,
1879 with SHIFTED being the rtx for the value to shift,
1880 and AMOUNT the tree for the amount to shift by.
1881 Store the result in the rtx TARGET, if that is convenient.
1882 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1883 Return the rtx for where the value is. */
1886 expand_shift (code, mode, shifted, amount, target, unsignedp)
1887 enum tree_code code;
1888 register enum machine_mode mode;
1889 rtx shifted;
1890 tree amount;
1891 register rtx target;
1892 int unsignedp;
1894 register rtx op1, temp = 0;
1895 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1896 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1897 int try;
1899 /* We used to detect shift counts computed by NEGATE_EXPR
1900 and shift in the opposite direction, but that does not work
1901 on all machines. */
1903 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1905 #ifdef SHIFT_COUNT_TRUNCATED
1906 if (SHIFT_COUNT_TRUNCATED)
1908 if (GET_CODE (op1) == CONST_INT
1909 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1910 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1911 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1912 % GET_MODE_BITSIZE (mode));
1913 else if (GET_CODE (op1) == SUBREG
1914 && SUBREG_WORD (op1) == 0)
1915 op1 = SUBREG_REG (op1);
1917 #endif
1919 if (op1 == const0_rtx)
1920 return shifted;
1922 for (try = 0; temp == 0 && try < 3; try++)
1924 enum optab_methods methods;
1926 if (try == 0)
1927 methods = OPTAB_DIRECT;
1928 else if (try == 1)
1929 methods = OPTAB_WIDEN;
1930 else
1931 methods = OPTAB_LIB_WIDEN;
1933 if (rotate)
1935 /* Widening does not work for rotation. */
1936 if (methods == OPTAB_WIDEN)
1937 continue;
1938 else if (methods == OPTAB_LIB_WIDEN)
1940 /* If we have been unable to open-code this by a rotation,
1941 do it as the IOR of two shifts. I.e., to rotate A
1942 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1943 where C is the bitsize of A.
1945 It is theoretically possible that the target machine might
1946 not be able to perform either shift and hence we would
1947 be making two libcalls rather than just the one for the
1948 shift (similarly if IOR could not be done). We will allow
1949 this extremely unlikely lossage to avoid complicating the
1950 code below. */
1952 rtx subtarget = target == shifted ? 0 : target;
1953 rtx temp1;
1954 tree type = TREE_TYPE (amount);
1955 tree new_amount = make_tree (type, op1);
1956 tree other_amount
1957 = fold (build (MINUS_EXPR, type,
1958 convert (type,
1959 build_int_2 (GET_MODE_BITSIZE (mode),
1960 0)),
1961 amount));
1963 shifted = force_reg (mode, shifted);
1965 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1966 mode, shifted, new_amount, subtarget, 1);
1967 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1968 mode, shifted, other_amount, 0, 1);
1969 return expand_binop (mode, ior_optab, temp, temp1, target,
1970 unsignedp, methods);
1973 temp = expand_binop (mode,
1974 left ? rotl_optab : rotr_optab,
1975 shifted, op1, target, unsignedp, methods);
1977 /* If we don't have the rotate, but we are rotating by a constant
1978 that is in range, try a rotate in the opposite direction. */
1980 if (temp == 0 && GET_CODE (op1) == CONST_INT
1981 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1982 temp = expand_binop (mode,
1983 left ? rotr_optab : rotl_optab,
1984 shifted,
1985 GEN_INT (GET_MODE_BITSIZE (mode)
1986 - INTVAL (op1)),
1987 target, unsignedp, methods);
1989 else if (unsignedp)
1990 temp = expand_binop (mode,
1991 left ? ashl_optab : lshr_optab,
1992 shifted, op1, target, unsignedp, methods);
1994 /* Do arithmetic shifts.
1995 Also, if we are going to widen the operand, we can just as well
1996 use an arithmetic right-shift instead of a logical one. */
1997 if (temp == 0 && ! rotate
1998 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2000 enum optab_methods methods1 = methods;
2002 /* If trying to widen a log shift to an arithmetic shift,
2003 don't accept an arithmetic shift of the same size. */
2004 if (unsignedp)
2005 methods1 = OPTAB_MUST_WIDEN;
2007 /* Arithmetic shift */
2009 temp = expand_binop (mode,
2010 left ? ashl_optab : ashr_optab,
2011 shifted, op1, target, unsignedp, methods1);
2014 /* We used to try extzv here for logical right shifts, but that was
2015 only useful for one machine, the VAX, and caused poor code
2016 generation there for lshrdi3, so the code was deleted and a
2017 define_expand for lshrsi3 was added to vax.md. */
2020 if (temp == 0)
2021 abort ();
2022 return temp;
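/* A sketch of the IOR-of-two-shifts fallback used above when no rotate
   instruction can be open-coded: rotating a 32-bit A left by N is
   (A << N) | ((unsigned) A >> (32 - N)).  Assumes 0 < n < 32.  */
#if 0
static unsigned int
example_rotate_left (unsigned int a, int n)
{
  return (a << n) | (a >> (32 - n));
}
#endif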
2025 enum alg_code { alg_zero, alg_m, alg_shift,
2026 alg_add_t_m2, alg_sub_t_m2,
2027 alg_add_factor, alg_sub_factor,
2028 alg_add_t2_m, alg_sub_t2_m,
2029 alg_add, alg_subtract, alg_factor, alg_shiftop };
2031 /* This structure records a sequence of operations.
2032 `ops' is the number of operations recorded.
2033 `cost' is their total cost.
2034 The operations are stored in `op' and the corresponding
2035 logarithms of the integer coefficients in `log'.
2037 These are the operations:
2038 alg_zero total := 0;
2039 alg_m total := multiplicand;
2040 alg_shift total := total * coeff;
2041 alg_add_t_m2 total := total + multiplicand * coeff;
2042 alg_sub_t_m2 total := total - multiplicand * coeff;
2043 alg_add_factor total := total * coeff + total;
2044 alg_sub_factor total := total * coeff - total;
2045 alg_add_t2_m total := total * coeff + multiplicand;
2046 alg_sub_t2_m total := total * coeff - multiplicand;
2048 The first operand must be either alg_zero or alg_m. */
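/* A worked example (illustrative only; the costs decide what is
   actually chosen): multiplying by 10 could be recorded as
       op[0] = alg_m                 total := x
       op[1] = alg_add_t2_m, log 2   total := (total << 2) + x  (= 5x)
       op[2] = alg_shift,    log 1   total := total << 1        (= 10x)
   which expand_mult below replays as one shift-add and one shift.  */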
2050 struct algorithm
2052 short cost;
2053 short ops;
2054 /* The size of the OP and LOG fields are not directly related to the
2055 word size, but the worst-case algorithms will be if we have few
2056 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2057 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2058 in total wordsize operations. */
2059 enum alg_code op[MAX_BITS_PER_WORD];
2060 char log[MAX_BITS_PER_WORD];
2063 static void synth_mult PARAMS ((struct algorithm *,
2064 unsigned HOST_WIDE_INT,
2065 int));
2066 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2067 int, int,
2068 unsigned HOST_WIDE_INT *,
2069 int *, int *));
2070 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2071 int));
2072 /* Compute and return the best algorithm for multiplying by T.
2073 The algorithm must cost less than COST_LIMIT.
2074 If retval.cost >= COST_LIMIT, no algorithm was found and all
2075 other fields of the returned struct are undefined.
2077 static void
2078 synth_mult (alg_out, t, cost_limit)
2079 struct algorithm *alg_out;
2080 unsigned HOST_WIDE_INT t;
2081 int cost_limit;
2083 int m;
2084 struct algorithm *alg_in, *best_alg;
2085 int cost;
2086 unsigned HOST_WIDE_INT q;
2088 /* Indicate that no algorithm is yet found. If no algorithm
2089 is found, this value will be returned and indicate failure. */
2090 alg_out->cost = cost_limit;
2092 if (cost_limit <= 0)
2093 return;
2095 /* t == 1 can be done in zero cost. */
2096 if (t == 1)
2098 alg_out->ops = 1;
2099 alg_out->cost = 0;
2100 alg_out->op[0] = alg_m;
2101 return;
2104 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2105 fail now. */
2106 if (t == 0)
2108 if (zero_cost >= cost_limit)
2109 return;
2110 else
2112 alg_out->ops = 1;
2113 alg_out->cost = zero_cost;
2114 alg_out->op[0] = alg_zero;
2115 return;
2119 /* We'll be needing a couple extra algorithm structures now. */
2121 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2122 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2124 /* If we have a group of zero bits at the low-order part of T, try
2125 multiplying by the remaining bits and then doing a shift. */
2127 if ((t & 1) == 0)
2129 m = floor_log2 (t & -t); /* m = number of low zero bits */
2130 q = t >> m;
2131 cost = shift_cost[m];
2132 synth_mult (alg_in, q, cost_limit - cost);
2134 cost += alg_in->cost;
2135 if (cost < cost_limit)
2137 struct algorithm *x;
2138 x = alg_in, alg_in = best_alg, best_alg = x;
2139 best_alg->log[best_alg->ops] = m;
2140 best_alg->op[best_alg->ops] = alg_shift;
2141 cost_limit = cost;
2145 /* If we have an odd number, add or subtract one. */
2146 if ((t & 1) != 0)
2148 unsigned HOST_WIDE_INT w;
2150 for (w = 1; (w & t) != 0; w <<= 1)
2152 /* If T was -1, then W will be zero after the loop. This is another
2153 case where T ends with ...111. Handling this with (T + 1) and
2154 subtracting 1 produces slightly better code and selects an
2155 algorithm much faster than treating it like the ...0111 case
2156 below. */
2157 if (w == 0
2158 || (w > 2
2159 /* Reject the case where t is 3.
2160 Thus we prefer addition in that case. */
2161 && t != 3))
2163 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2165 cost = add_cost;
2166 synth_mult (alg_in, t + 1, cost_limit - cost);
2168 cost += alg_in->cost;
2169 if (cost < cost_limit)
2171 struct algorithm *x;
2172 x = alg_in, alg_in = best_alg, best_alg = x;
2173 best_alg->log[best_alg->ops] = 0;
2174 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2175 cost_limit = cost;
2178 else
2180 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2182 cost = add_cost;
2183 synth_mult (alg_in, t - 1, cost_limit - cost);
2185 cost += alg_in->cost;
2186 if (cost < cost_limit)
2188 struct algorithm *x;
2189 x = alg_in, alg_in = best_alg, best_alg = x;
2190 best_alg->log[best_alg->ops] = 0;
2191 best_alg->op[best_alg->ops] = alg_add_t_m2;
2192 cost_limit = cost;
2197 /* Look for factors of t of the form
2198 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2199 If we find such a factor, we can multiply by t using an algorithm that
2200 multiplies by q, shift the result by m and add/subtract it to itself.
2202 We search for large factors first and loop down, even if large factors
2203 are less probable than small; if we find a large factor we will find a
2204 good sequence quickly, and therefore be able to prune (by decreasing
2205 COST_LIMIT) the search. */
2207 for (m = floor_log2 (t - 1); m >= 2; m--)
2209 unsigned HOST_WIDE_INT d;
2211 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2212 if (t % d == 0 && t > d)
2214 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2215 synth_mult (alg_in, t / d, cost_limit - cost);
2217 cost += alg_in->cost;
2218 if (cost < cost_limit)
2220 struct algorithm *x;
2221 x = alg_in, alg_in = best_alg, best_alg = x;
2222 best_alg->log[best_alg->ops] = m;
2223 best_alg->op[best_alg->ops] = alg_add_factor;
2224 cost_limit = cost;
2226 /* Other factors will have been taken care of in the recursion. */
2227 break;
2230 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2231 if (t % d == 0 && t > d)
2233 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2234 synth_mult (alg_in, t / d, cost_limit - cost);
2236 cost += alg_in->cost;
2237 if (cost < cost_limit)
2239 struct algorithm *x;
2240 x = alg_in, alg_in = best_alg, best_alg = x;
2241 best_alg->log[best_alg->ops] = m;
2242 best_alg->op[best_alg->ops] = alg_sub_factor;
2243 cost_limit = cost;
2245 break;
2249 /* Try shift-and-add (load effective address) instructions,
2250 i.e. do a*3, a*5, a*9. */
2251 if ((t & 1) != 0)
2253 q = t - 1;
2254 q = q & -q;
2255 m = exact_log2 (q);
2256 if (m >= 0)
2258 cost = shiftadd_cost[m];
2259 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2261 cost += alg_in->cost;
2262 if (cost < cost_limit)
2264 struct algorithm *x;
2265 x = alg_in, alg_in = best_alg, best_alg = x;
2266 best_alg->log[best_alg->ops] = m;
2267 best_alg->op[best_alg->ops] = alg_add_t2_m;
2268 cost_limit = cost;
2272 q = t + 1;
2273 q = q & -q;
2274 m = exact_log2 (q);
2275 if (m >= 0)
2277 cost = shiftsub_cost[m];
2278 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2280 cost += alg_in->cost;
2281 if (cost < cost_limit)
2283 struct algorithm *x;
2284 x = alg_in, alg_in = best_alg, best_alg = x;
2285 best_alg->log[best_alg->ops] = m;
2286 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2287 cost_limit = cost;
2292 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2293 we have not found any algorithm. */
2294 if (cost_limit == alg_out->cost)
2295 return;
2297 /* If we are getting a too long sequence for `struct algorithm'
2298 to record, make this search fail. */
2299 if (best_alg->ops == MAX_BITS_PER_WORD)
2300 return;
2302 /* Copy the algorithm from temporary space to the space at alg_out.
2303 We avoid using structure assignment because the majority of
2304 best_alg is normally undefined, and this is a critical function. */
2305 alg_out->ops = best_alg->ops + 1;
2306 alg_out->cost = cost_limit;
2307 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2308 alg_out->ops * sizeof *alg_out->op);
2309 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2310 alg_out->ops * sizeof *alg_out->log);
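/* Sketch of the kind of sequence synth_mult finds.  For t = 7 the
   ...111 rule above multiplies by t + 1 = 8 and subtracts once,
   i.e. alg_m, alg_shift log 3, alg_sub_t_m2 log 0.  */
#if 0
static unsigned int
example_mult_by_7 (unsigned int x)
{
  return (x << 3) - x;
}
#endif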
2313 /* Perform a multiplication and return an rtx for the result.
2314 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2315 TARGET is a suggestion for where to store the result (an rtx).
2317 We check specially for a constant integer as OP1.
2318 If you want this check for OP0 as well, then before calling
2319 you should swap the two operands if OP0 would be constant. */
2322 expand_mult (mode, op0, op1, target, unsignedp)
2323 enum machine_mode mode;
2324 register rtx op0, op1, target;
2325 int unsignedp;
2327 rtx const_op1 = op1;
2329 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2330 less than or equal in size to `unsigned int' this doesn't matter.
2331 If the mode is larger than `unsigned int', then synth_mult works only
2332 if the constant value exactly fits in an `unsigned int' without any
2333 truncation. This means that multiplying by negative values does
2334 not work; results are off by 2^32 on a 32 bit machine. */
2336 /* If we are multiplying in DImode, it may still be a win
2337 to try to work with shifts and adds. */
2338 if (GET_CODE (op1) == CONST_DOUBLE
2339 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2340 && HOST_BITS_PER_INT >= BITS_PER_WORD
2341 && CONST_DOUBLE_HIGH (op1) == 0)
2342 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2343 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2344 && GET_CODE (op1) == CONST_INT
2345 && INTVAL (op1) < 0)
2346 const_op1 = 0;
2348 /* We used to test optimize here, on the grounds that it's better to
2349 produce a smaller program when -O is not used.
2350 But this causes such a terrible slowdown sometimes
2351 that it seems better to use synth_mult always. */
2353 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2355 struct algorithm alg;
2356 struct algorithm alg2;
2357 HOST_WIDE_INT val = INTVAL (op1);
2358 HOST_WIDE_INT val_so_far;
2359 rtx insn;
2360 int mult_cost;
2361 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2363 /* Try to do the computation three ways: multiply by the negative of OP1
2364 and then negate, do the multiplication directly, or do multiplication
2365 by OP1 - 1. */
2367 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2368 mult_cost = MIN (12 * add_cost, mult_cost);
2370 synth_mult (&alg, val, mult_cost);
2372 /* This works only if the negated value actually fits in an
2373 `unsigned int'. */
2374 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2376 synth_mult (&alg2, - val,
2377 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2378 if (alg2.cost + negate_cost < alg.cost)
2379 alg = alg2, variant = negate_variant;
2382 /* This proves very useful for division-by-constant. */
2383 synth_mult (&alg2, val - 1,
2384 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2385 if (alg2.cost + add_cost < alg.cost)
2386 alg = alg2, variant = add_variant;
2388 if (alg.cost < mult_cost)
2390 /* We found something cheaper than a multiply insn. */
2391 int opno;
2392 rtx accum, tem;
2394 op0 = protect_from_queue (op0, 0);
2396 /* Avoid referencing memory over and over.
2397 For speed, but also for correctness when mem is volatile. */
2398 if (GET_CODE (op0) == MEM)
2399 op0 = force_reg (mode, op0);
2401 /* ACCUM starts out either as OP0 or as a zero, depending on
2402 the first operation. */
2404 if (alg.op[0] == alg_zero)
2406 accum = copy_to_mode_reg (mode, const0_rtx);
2407 val_so_far = 0;
2409 else if (alg.op[0] == alg_m)
2411 accum = copy_to_mode_reg (mode, op0);
2412 val_so_far = 1;
2414 else
2415 abort ();
2417 for (opno = 1; opno < alg.ops; opno++)
2419 int log = alg.log[opno];
2420 int preserve = preserve_subexpressions_p ();
2421 rtx shift_subtarget = preserve ? 0 : accum;
2422 rtx add_target
2423 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2424 && ! preserve)
2425 ? target : 0;
2426 rtx accum_target = preserve ? 0 : accum;
2428 switch (alg.op[opno])
2430 case alg_shift:
2431 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2432 build_int_2 (log, 0), NULL_RTX, 0);
2433 val_so_far <<= log;
2434 break;
2436 case alg_add_t_m2:
2437 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2438 build_int_2 (log, 0), NULL_RTX, 0);
2439 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2440 add_target
2441 ? add_target : accum_target);
2442 val_so_far += (HOST_WIDE_INT) 1 << log;
2443 break;
2445 case alg_sub_t_m2:
2446 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2447 build_int_2 (log, 0), NULL_RTX, 0);
2448 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2449 add_target
2450 ? add_target : accum_target);
2451 val_so_far -= (HOST_WIDE_INT) 1 << log;
2452 break;
2454 case alg_add_t2_m:
2455 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2456 build_int_2 (log, 0), shift_subtarget,
2458 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2459 add_target
2460 ? add_target : accum_target);
2461 val_so_far = (val_so_far << log) + 1;
2462 break;
2464 case alg_sub_t2_m:
2465 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2466 build_int_2 (log, 0), shift_subtarget,
2468 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2469 add_target
2470 ? add_target : accum_target);
2471 val_so_far = (val_so_far << log) - 1;
2472 break;
2474 case alg_add_factor:
2475 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2476 build_int_2 (log, 0), NULL_RTX, 0);
2477 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2478 add_target
2479 ? add_target : accum_target);
2480 val_so_far += val_so_far << log;
2481 break;
2483 case alg_sub_factor:
2484 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2485 build_int_2 (log, 0), NULL_RTX, 0);
2486 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2487 (add_target ? add_target
2488 : preserve ? 0 : tem));
2489 val_so_far = (val_so_far << log) - val_so_far;
2490 break;
2492 default:
2493 abort ();
2496 /* Write a REG_EQUAL note on the last insn so that we can cse
2497 multiplication sequences. */
2499 insn = get_last_insn ();
2500 set_unique_reg_note (insn,
2501 REG_EQUAL,
2502 gen_rtx_MULT (mode, op0,
2503 GEN_INT (val_so_far)));
2506 if (variant == negate_variant)
2508 val_so_far = - val_so_far;
2509 accum = expand_unop (mode, neg_optab, accum, target, 0);
2511 else if (variant == add_variant)
2513 val_so_far = val_so_far + 1;
2514 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2517 if (val != val_so_far)
2518 abort ();
2520 return accum;
2524 /* This used to use umul_optab if unsigned, but for non-widening multiply
2525 there is no difference between signed and unsigned. */
2526 op0 = expand_binop (mode, smul_optab,
2527 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2528 if (op0 == 0)
2529 abort ();
2530 return op0;
2533 /* Return the smallest n such that 2**n >= X. */
2536 ceil_log2 (x)
2537 unsigned HOST_WIDE_INT x;
2539 return floor_log2 (x - 1) + 1;
2542 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2543 replace division by D, and put the least significant N bits of the result
2544 in *MULTIPLIER_PTR and return the most significant bit.
2546 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2547 needed precision is in PRECISION (should be <= N).
2549 PRECISION should be as small as possible so this function can choose
2550 a multiplier more freely.
2552 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2553 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2555 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2556 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2558 static
2559 unsigned HOST_WIDE_INT
2560 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2561 unsigned HOST_WIDE_INT d;
2562 int n;
2563 int precision;
2564 unsigned HOST_WIDE_INT *multiplier_ptr;
2565 int *post_shift_ptr;
2566 int *lgup_ptr;
2568 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2569 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2570 int lgup, post_shift;
2571 int pow, pow2;
2572 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2574 /* lgup = ceil(log2(divisor)); */
2575 lgup = ceil_log2 (d);
2577 if (lgup > n)
2578 abort ();
2580 pow = n + lgup;
2581 pow2 = n + lgup - precision;
2583 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2585 /* We could handle this with some effort, but this case is much better
2586 handled directly with a scc insn, so rely on caller using that. */
2587 abort ();
2590 /* mlow = 2^(N + lgup)/d */
2591 if (pow >= HOST_BITS_PER_WIDE_INT)
2593 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2594 nl = 0;
2596 else
2598 nh = 0;
2599 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2601 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2602 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2604 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2605 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2606 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2607 else
2608 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2609 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2610 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2612 if (mhigh_hi && nh - d >= d)
2613 abort ();
2614 if (mhigh_hi > 1 || mlow_hi > 1)
2615 abort ();
2616 /* Assert that mlow < mhigh. */
2617 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2618 abort ();
2620 /* If precision == N, then mlow, mhigh exceed 2^N
2621 (but they do not exceed 2^(N+1)). */
2623 /* Reduce to lowest terms */
2624 for (post_shift = lgup; post_shift > 0; post_shift--)
2626 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2627 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2628 if (ml_lo >= mh_lo)
2629 break;
2631 mlow_hi = 0;
2632 mlow_lo = ml_lo;
2633 mhigh_hi = 0;
2634 mhigh_lo = mh_lo;
2637 *post_shift_ptr = post_shift;
2638 *lgup_ptr = lgup;
2639 if (n < HOST_BITS_PER_WIDE_INT)
2641 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2642 *multiplier_ptr = mhigh_lo & mask;
2643 return mhigh_lo >= mask;
2645 else
2647 *multiplier_ptr = mhigh_lo;
2648 return mhigh_hi;
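/* Illustrative check of a multiplier chosen above (not GCC code): for
   d = 3 with n = precision = 32, the reduction loop ends with the
   multiplier 0xAAAAAAAB and *POST_SHIFT_PTR = 1, and the returned high
   bit is 0, so x / 3 == (x * 0xAAAAAAAB) >> 33 for every 32-bit x.
   Assumes a 64-bit `unsigned long long'.  */
#if 0
static unsigned int
example_udiv_by_3 (unsigned int x)
{
  return (unsigned int) (((unsigned long long) x * 0xAAAAAAAB) >> 33);
}
#endif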
2652 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2653 congruent to 1 (mod 2**N). */
2655 static unsigned HOST_WIDE_INT
2656 invert_mod2n (x, n)
2657 unsigned HOST_WIDE_INT x;
2658 int n;
2660 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2662 /* The algorithm notes that the choice y = x satisfies
2663 x*y == 1 mod 2^3, since x is assumed odd.
2664 Each iteration doubles the number of bits of significance in y. */
2666 unsigned HOST_WIDE_INT mask;
2667 unsigned HOST_WIDE_INT y = x;
2668 int nbit = 3;
2670 mask = (n == HOST_BITS_PER_WIDE_INT
2671 ? ~(unsigned HOST_WIDE_INT) 0
2672 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2674 while (nbit < n)
2676 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2677 nbit *= 2;
2679 return y;
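/* Illustrative check of the iteration above: invert 3 modulo 2**8.
   y starts at x = 3 (correct to 3 bits); two doublings give at least
   8 good bits, ending with y = 171, and 3 * 171 = 513 == 1 (mod 256).  */
#if 0
static unsigned int
example_invert_3_mod_256 (void)
{
  unsigned int x = 3, y = x, mask = 0xff;
  int nbit;
  for (nbit = 3; nbit < 8; nbit *= 2)
    y = (y * (2 - x * y)) & mask;
  return y;
}
#endif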
2682 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2683 done in the wrong signedness flavor. ADJ_OPERAND is already the high half of the
2684 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2685 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2686 become signed.
2688 The result is put in TARGET if that is convenient.
2690 MODE is the mode of operation. */
2693 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2694 enum machine_mode mode;
2695 register rtx adj_operand, op0, op1, target;
2696 int unsignedp;
2698 rtx tem;
2699 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2701 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2702 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2703 NULL_RTX, 0);
2704 tem = expand_and (tem, op1, NULL_RTX);
2705 adj_operand
2706 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2707 adj_operand);
2709 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2710 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2711 NULL_RTX, 0);
2712 tem = expand_and (tem, op0, NULL_RTX);
2713 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2714 target);
2716 return target;
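/* Sketch of the identity applied above, for 32-bit operands: the
   signed and unsigned high halves of a product differ by one addend
   per operand whose sign bit is set.  Here the unsigned high half is
   adjusted down to the signed one; with UNSIGNEDP the code above adds
   instead.  Assumes a 64-bit `unsigned long long'.  */
#if 0
static int
example_smul_highpart (int op0, int op1)
{
  unsigned int u0 = (unsigned int) op0, u1 = (unsigned int) op1;
  unsigned int high
    = (unsigned int) (((unsigned long long) u0 * u1) >> 32);
  high -= (op0 < 0 ? u1 : 0);   /* (op0 >> 31) & op1, as above */
  high -= (op1 < 0 ? u0 : 0);   /* (op1 >> 31) & op0 */
  return (int) high;
}
#endif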
2719 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2720 in TARGET if that is convenient, and return where the result is. If the
2721 operation cannot be performed, 0 is returned.
2723 MODE is the mode of operation and result.
2725 UNSIGNEDP nonzero means unsigned multiply.
2727 MAX_COST is the total allowed cost for the expanded RTL. */
2730 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2731 enum machine_mode mode;
2732 register rtx op0, target;
2733 unsigned HOST_WIDE_INT cnst1;
2734 int unsignedp;
2735 int max_cost;
2737 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2738 optab mul_highpart_optab;
2739 optab moptab;
2740 rtx tem;
2741 int size = GET_MODE_BITSIZE (mode);
2742 rtx op1, wide_op1;
2744 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2745 if (size > HOST_BITS_PER_WIDE_INT)
2746 abort ();
2748 op1 = GEN_INT (cnst1);
2750 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2751 wide_op1 = op1;
2752 else
2753 wide_op1
2754 = immed_double_const (cnst1,
2755 (unsignedp
2756 ? (HOST_WIDE_INT) 0
2757 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2758 wider_mode);
2760 /* expand_mult handles constant multiplication of word_mode
2761 or narrower. It does a poor job for large modes. */
2762 if (size < BITS_PER_WORD
2763 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2765 /* We have to do this, since expand_binop doesn't do conversion for
2766 multiply. Maybe change expand_binop to handle widening multiply? */
2767 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2769 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2770 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2771 build_int_2 (size, 0), NULL_RTX, 1);
2772 return convert_modes (mode, wider_mode, tem, unsignedp);
2775 if (target == 0)
2776 target = gen_reg_rtx (mode);
2778 /* Firstly, try using a multiplication insn that only generates the needed
2779 high part of the product, and in the sign flavor of unsignedp. */
2780 if (mul_highpart_cost[(int) mode] < max_cost)
2782 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2783 target = expand_binop (mode, mul_highpart_optab,
2784 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2785 if (target)
2786 return target;
2789 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2790 Need to adjust the result after the multiplication. */
2791 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2793 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2794 target = expand_binop (mode, mul_highpart_optab,
2795 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2796 if (target)
2797 /* We used the wrong signedness. Adjust the result. */
2798 return expand_mult_highpart_adjust (mode, target, op0,
2799 op1, target, unsignedp);
2802 /* Try widening multiplication. */
2803 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2804 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2805 && mul_widen_cost[(int) wider_mode] < max_cost)
2807 op1 = force_reg (mode, op1);
2808 goto try;
2811 /* Try widening the mode and performing a non-widening multiplication. */
2812 moptab = smul_optab;
2813 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2814 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2816 op1 = wide_op1;
2817 goto try;
2820 /* Try widening multiplication of opposite signedness, and adjust. */
2821 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2822 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2823 && (mul_widen_cost[(int) wider_mode]
2824 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2826 rtx regop1 = force_reg (mode, op1);
2827 tem = expand_binop (wider_mode, moptab, op0, regop1,
2828 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2829 if (tem != 0)
2831 /* Extract the high half of the just generated product. */
2832 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2833 build_int_2 (size, 0), NULL_RTX, 1);
2834 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2835 /* We used the wrong signedness. Adjust the result. */
2836 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2837 target, unsignedp);
2841 return 0;
2843 try:
2844 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2845 tem = expand_binop (wider_mode, moptab, op0, op1,
2846 NULL_RTX, unsignedp, OPTAB_WIDEN);
2847 if (tem == 0)
2848 return 0;
2850 /* Extract the high half of the just generated product. */
2851 if (mode == word_mode)
2853 return gen_highpart (mode, tem);
2855 else
2857 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2858 build_int_2 (size, 0), NULL_RTX, 1);
2859 return convert_modes (mode, wider_mode, tem, unsignedp);
2863 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2864 if that is convenient, and returning where the result is.
2865 You may request either the quotient or the remainder as the result;
2866 specify REM_FLAG nonzero to get the remainder.
2868 CODE is the expression code for which kind of division this is;
2869 it controls how rounding is done. MODE is the machine mode to use.
2870 UNSIGNEDP nonzero means do unsigned division. */
2872 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2873 and then correct it by or'ing in missing high bits
2874 if result of ANDI is nonzero.
2875 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2876 This could optimize to a bfexts instruction.
2877 But C doesn't use these operations, so their optimizations are
2878 left for later. */
2879 /* ??? For modulo, we don't actually need the highpart of the first product,
2880 the low part will do nicely. And for small divisors, the second multiply
2881 can also be a low-part only multiply or even be completely left out.
2882 E.g. to calculate the remainder of a division by 3 with a 32 bit
2883 multiply, multiply with 0x55555556 and extract the upper two bits;
2884 the result is exact for inputs up to 0x1fffffff.
2885 The input range can be reduced by using cross-sum rules.
2886 For odd divisors >= 3, the following table gives right shift counts
2887 so that if a number is shifted by an integer multiple of the given
2888 amount, the remainder stays the same:
2889 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2890 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2891 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2892 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2893 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2895 Cross-sum rules for even numbers can be derived by leaving as many bits
2896 to the right alone as the divisor has zeros to the right.
2897 E.g. if x is an unsigned 32 bit number:
2898 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2901 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
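/* Illustrative check of the mod-3 remark in the comment above: one
   32-bit multiply by 0x55555556 and the top two bits of the low half
   give x mod 3, exact (per that remark) for x up to 0x1fffffff.  */
#if 0
static unsigned int
example_mod_3 (unsigned int x)
{
  return (x * 0x55555556) >> 30;
}
#endif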
2904 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2905 int rem_flag;
2906 enum tree_code code;
2907 enum machine_mode mode;
2908 register rtx op0, op1, target;
2909 int unsignedp;
2911 enum machine_mode compute_mode;
2912 register rtx tquotient;
2913 rtx quotient = 0, remainder = 0;
2914 rtx last;
2915 int size;
2916 rtx insn, set;
2917 optab optab1, optab2;
2918 int op1_is_constant, op1_is_pow2;
2919 int max_cost, extra_cost;
2920 static HOST_WIDE_INT last_div_const = 0;
2922 op1_is_constant = GET_CODE (op1) == CONST_INT;
2923 op1_is_pow2 = (op1_is_constant
2924 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2925 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2928 /* This is the structure of expand_divmod:
2930 First comes code to fix up the operands so we can perform the operations
2931 correctly and efficiently.
2933 Second comes a switch statement with code specific for each rounding mode.
2934 For some special operands this code emits all RTL for the desired
2935 operation, for other cases, it generates only a quotient and stores it in
2936 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2937 to indicate that it has not done anything.
2939 Last comes code that finishes the operation. If QUOTIENT is set and
2940 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2941 QUOTIENT is not set, it is computed using trunc rounding.
2943 We try to generate special code for division and remainder when OP1 is a
2944 constant. If |OP1| = 2**n we can use shifts and some other fast
2945 operations. For other values of OP1, we compute a carefully selected
2946 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2947 by m.
2949 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2950 half of the product. Different strategies for generating the product are
2951 implemented in expand_mult_highpart.
2953 If what we actually want is the remainder, we generate that by another
2954 by-constant multiplication and a subtraction. */
2956 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2957 code below will malfunction if we are, so check here and handle
2958 the special case if so. */
2959 if (op1 == const1_rtx)
2960 return rem_flag ? const0_rtx : op0;
2962 if (target
2963 /* Don't use the function value register as a target
2964 since we have to read it as well as write it,
2965 and function-inlining gets confused by this. */
2966 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2967 /* Don't clobber an operand while doing a multi-step calculation. */
2968 || ((rem_flag || op1_is_constant)
2969 && (reg_mentioned_p (target, op0)
2970 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2971 || reg_mentioned_p (target, op1)
2972 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2973 target = 0;
2975 /* Get the mode in which to perform this computation. Normally it will
2976 be MODE, but sometimes we can't do the desired operation in MODE.
2977 If so, pick a wider mode in which we can do the operation. Convert
2978 to that mode at the start to avoid repeated conversions.
2980 First see what operations we need. These depend on the expression
2981 we are evaluating. (We assume that divxx3 insns exist under the
2982 same conditions as modxx3 insns, and that these insns don't normally
2983 fail. If these assumptions are not correct, we may generate less
2984 efficient code in some cases.)
2986 Then see if we find a mode in which we can open-code that operation
2987 (either a division, modulus, or shift). Finally, check for the smallest
2988 mode for which we can do the operation with a library call. */
2990 /* We might want to refine this now that we have division-by-constant
2991 optimization. Since expand_mult_highpart tries so many variants, it is
2992 not straightforward to generalize this. Maybe we should make an array
2993 of possible modes in init_expmed? Save this for GCC 2.7. */
2995 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2996 : (unsignedp ? udiv_optab : sdiv_optab));
2997 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2999 for (compute_mode = mode; compute_mode != VOIDmode;
3000 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3001 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3002 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3003 break;
3005 if (compute_mode == VOIDmode)
3006 for (compute_mode = mode; compute_mode != VOIDmode;
3007 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3008 if (optab1->handlers[(int) compute_mode].libfunc
3009 || optab2->handlers[(int) compute_mode].libfunc)
3010 break;
3012 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3013 in expand_binop. */
3014 if (compute_mode == VOIDmode)
3015 compute_mode = mode;
3017 if (target && GET_MODE (target) == compute_mode)
3018 tquotient = target;
3019 else
3020 tquotient = gen_reg_rtx (compute_mode);
3022 size = GET_MODE_BITSIZE (compute_mode);
3023 #if 0
3024 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3025 (mode), and thereby get better code when OP1 is a constant. Do that
3026 later. It will require going over all usages of SIZE below. */
3027 size = GET_MODE_BITSIZE (mode);
3028 #endif
3030 /* Only deduct something for a REM if the last divide done was
3031 for a different constant. Then set the constant of the last
3032 divide. */
3033 max_cost = div_cost[(int) compute_mode]
3034 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3035 && INTVAL (op1) == last_div_const)
3036 ? mul_cost[(int) compute_mode] + add_cost : 0);
3038 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3040 /* Now convert to the best mode to use. */
3041 if (compute_mode != mode)
3043 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3044 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3046 /* convert_modes may have placed op1 into a register, so we
3047 must recompute the following. */
3048 op1_is_constant = GET_CODE (op1) == CONST_INT;
3049 op1_is_pow2 = (op1_is_constant
3050 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3051 || (! unsignedp
3052 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3055 /* If one of the operands is a volatile MEM, copy it into a register. */
3057 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3058 op0 = force_reg (compute_mode, op0);
3059 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3060 op1 = force_reg (compute_mode, op1);
3062 /* If we need the remainder or if OP1 is constant, we need to
3063 put OP0 in a register in case it has any queued subexpressions. */
3064 if (rem_flag || op1_is_constant)
3065 op0 = force_reg (compute_mode, op0);
3067 last = get_last_insn ();
3069 /* Promote floor rounding to trunc rounding for unsigned operations. */
3070 if (unsignedp)
3072 if (code == FLOOR_DIV_EXPR)
3073 code = TRUNC_DIV_EXPR;
3074 if (code == FLOOR_MOD_EXPR)
3075 code = TRUNC_MOD_EXPR;
3076 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3077 code = TRUNC_DIV_EXPR;
3080 if (op1 != const0_rtx)
3081 switch (code)
3083 case TRUNC_MOD_EXPR:
3084 case TRUNC_DIV_EXPR:
3085 if (op1_is_constant)
3087 if (unsignedp)
3089 unsigned HOST_WIDE_INT mh, ml;
3090 int pre_shift, post_shift;
3091 int dummy;
3092 unsigned HOST_WIDE_INT d = INTVAL (op1);
3094 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3096 pre_shift = floor_log2 (d);
3097 if (rem_flag)
3099 remainder
3100 = expand_binop (compute_mode, and_optab, op0,
3101 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3102 remainder, 1,
3103 OPTAB_LIB_WIDEN);
3104 if (remainder)
3105 return gen_lowpart (mode, remainder);
3107 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3108 build_int_2 (pre_shift, 0),
3109 tquotient, 1);
3111 else if (size <= HOST_BITS_PER_WIDE_INT)
3113 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3115 /* Most significant bit of divisor is set; emit an scc
3116 insn. */
3117 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3118 compute_mode, 1, 1);
3119 if (quotient == 0)
3120 goto fail1;
3122 else
3124 /* Find a suitable multiplier and right shift count
3125 instead of multiplying with D. */
3127 mh = choose_multiplier (d, size, size,
3128 &ml, &post_shift, &dummy);
3130 /* If the suggested multiplier is more than SIZE bits,
3131 we can do better for even divisors, using an
3132 initial right shift. */
3133 if (mh != 0 && (d & 1) == 0)
3135 pre_shift = floor_log2 (d & -d);
3136 mh = choose_multiplier (d >> pre_shift, size,
3137 size - pre_shift,
3138 &ml, &post_shift, &dummy);
3139 if (mh)
3140 abort ();
3142 else
3143 pre_shift = 0;
3145 if (mh != 0)
3147 rtx t1, t2, t3, t4;
3149 extra_cost = (shift_cost[post_shift - 1]
3150 + shift_cost[1] + 2 * add_cost);
3151 t1 = expand_mult_highpart (compute_mode, op0, ml,
3152 NULL_RTX, 1,
3153 max_cost - extra_cost);
3154 if (t1 == 0)
3155 goto fail1;
3156 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3157 op0, t1),
3158 NULL_RTX);
3159 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3160 build_int_2 (1, 0), NULL_RTX,1);
3161 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3162 t1, t3),
3163 NULL_RTX);
3164 quotient
3165 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3166 build_int_2 (post_shift - 1, 0),
3167 tquotient, 1);
3169 else
3171 rtx t1, t2;
3173 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3174 build_int_2 (pre_shift, 0),
3175 NULL_RTX, 1);
3176 extra_cost = (shift_cost[pre_shift]
3177 + shift_cost[post_shift]);
3178 t2 = expand_mult_highpart (compute_mode, t1, ml,
3179 NULL_RTX, 1,
3180 max_cost - extra_cost);
3181 if (t2 == 0)
3182 goto fail1;
3183 quotient
3184 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3185 build_int_2 (post_shift, 0),
3186 tquotient, 1);
3190 else /* Mode too wide to use the tricky code */
3191 break;
3193 insn = get_last_insn ();
3194 if (insn != last
3195 && (set = single_set (insn)) != 0
3196 && SET_DEST (set) == quotient)
3197 set_unique_reg_note (insn,
3198 REG_EQUAL,
3199 gen_rtx_UDIV (compute_mode, op0, op1));
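/* An illustrative instance of the mh != 0 sequence above (a comment
   only, not something the compiler emits): for 32-bit unsigned
   division by 7, ml = 0x24924925 and post_shift = 3, so with
   t1 = high half of x * ml,
       q = (t1 + ((x - t1) >> 1)) >> 2
   which is exact for every 32-bit x.  */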
3201 else /* TRUNC_DIV, signed */
3203 unsigned HOST_WIDE_INT ml;
3204 int lgup, post_shift;
3205 HOST_WIDE_INT d = INTVAL (op1);
3206 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3208 /* n rem d = n rem -d */
3209 if (rem_flag && d < 0)
3211 d = abs_d;
3212 op1 = GEN_INT (abs_d);
3215 if (d == 1)
3216 quotient = op0;
3217 else if (d == -1)
3218 quotient = expand_unop (compute_mode, neg_optab, op0,
3219 tquotient, 0);
3220 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3222 /* This case is not handled correctly below. */
3223 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3224 compute_mode, 1, 1);
3225 if (quotient == 0)
3226 goto fail1;
3228 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3229 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3231 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3233 lgup = floor_log2 (abs_d);
3234 if (abs_d != 2 && BRANCH_COST < 3)
3236 rtx label = gen_label_rtx ();
3237 rtx t1;
3239 t1 = copy_to_mode_reg (compute_mode, op0);
3240 do_cmp_and_jump (t1, const0_rtx, GE,
3241 compute_mode, label);
3242 expand_inc (t1, GEN_INT (abs_d - 1));
3243 emit_label (label);
3244 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3245 build_int_2 (lgup, 0),
3246 tquotient, 0);
3248 else
3250 rtx t1, t2, t3;
3251 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3252 build_int_2 (size - 1, 0),
3253 NULL_RTX, 0);
3254 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3255 build_int_2 (size - lgup, 0),
3256 NULL_RTX, 1);
3257 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3258 op0, t2),
3259 NULL_RTX);
3260 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3261 build_int_2 (lgup, 0),
3262 tquotient, 0);
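/* Worked instance of the branch-free power-of-two sequence above, for
   a 32-bit signed X and abs_d = 4 (lgup = 2):
       t1 = x >> 31         (arithmetic: -1 if x < 0, else 0)
       t2 = t1 >>u 30       (logical:     3 if x < 0, else 0)
       q  = (x + t2) >> 2   (arithmetic)
   i.e. add abs_d - 1 to negative dividends so the final shift
   truncates toward zero.  */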
3265 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3266 the quotient. */
3267 if (d < 0)
3269 insn = get_last_insn ();
3270 if (insn != last
3271 && (set = single_set (insn)) != 0
3272 && SET_DEST (set) == quotient
3273 && abs_d < ((unsigned HOST_WIDE_INT) 1
3274 << (HOST_BITS_PER_WIDE_INT - 1)))
3275 set_unique_reg_note (insn,
3276 REG_EQUAL,
3277 gen_rtx_DIV (compute_mode,
3278 op0,
3279 GEN_INT (abs_d)));
3281 quotient = expand_unop (compute_mode, neg_optab,
3282 quotient, quotient, 0);
3285 else if (size <= HOST_BITS_PER_WIDE_INT)
3287 choose_multiplier (abs_d, size, size - 1,
3288 &ml, &post_shift, &lgup);
3289 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3291 rtx t1, t2, t3;
3293 extra_cost = (shift_cost[post_shift]
3294 + shift_cost[size - 1] + add_cost);
3295 t1 = expand_mult_highpart (compute_mode, op0, ml,
3296 NULL_RTX, 0,
3297 max_cost - extra_cost);
3298 if (t1 == 0)
3299 goto fail1;
3300 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3301 build_int_2 (post_shift, 0), NULL_RTX, 0);
3302 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3303 build_int_2 (size - 1, 0), NULL_RTX, 0);
3304 if (d < 0)
3305 quotient
3306 = force_operand (gen_rtx_MINUS (compute_mode,
3307 t3, t2),
3308 tquotient);
3309 else
3310 quotient
3311 = force_operand (gen_rtx_MINUS (compute_mode,
3312 t2, t3),
3313 tquotient);
3315 else
3317 rtx t1, t2, t3, t4;
3319 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3320 extra_cost = (shift_cost[post_shift]
3321 + shift_cost[size - 1] + 2 * add_cost);
3322 t1 = expand_mult_highpart (compute_mode, op0, ml,
3323 NULL_RTX, 0,
3324 max_cost - extra_cost);
3325 if (t1 == 0)
3326 goto fail1;
3327 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3328 t1, op0),
3329 NULL_RTX);
3330 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3331 build_int_2 (post_shift, 0),
3332 NULL_RTX, 0);
3333 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3334 build_int_2 (size - 1, 0),
3335 NULL_RTX, 0);
3336 if (d < 0)
3337 quotient
3338 = force_operand (gen_rtx_MINUS (compute_mode,
3339 t4, t3),
3340 tquotient);
3341 else
3342 quotient
3343 = force_operand (gen_rtx_MINUS (compute_mode,
3344 t3, t4),
3345 tquotient);
3348 else /* Mode too wide to use the tricky code */
3349 break;
3351 insn = get_last_insn ();
3352 if (insn != last
3353 && (set = single_set (insn)) != 0
3354 && SET_DEST (set) == quotient)
3355 set_unique_reg_note (insn,
3356 REG_EQUAL,
3357 gen_rtx_DIV (compute_mode, op0, op1));
3359 break;
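/* An illustrative instance of the ml < 2^(size-1) sequence above: for
   32-bit signed division by 3, choose_multiplier yields ml = 0x55555556
   with post_shift = 0, so
       q = (high half of x * 0x55555556) - (x >> 31)
   for every 32-bit x; for d = -3 the same quotient is then negated.  */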
3361 fail1:
3362 delete_insns_since (last);
3363 break;
3365 case FLOOR_DIV_EXPR:
3366 case FLOOR_MOD_EXPR:
3367 /* We will come here only for signed operations. */
3368 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3370 unsigned HOST_WIDE_INT mh, ml;
3371 int pre_shift, lgup, post_shift;
3372 HOST_WIDE_INT d = INTVAL (op1);
3374 if (d > 0)
3376 /* We could just as easily deal with negative constants here,
3377 but it does not seem worth the trouble for GCC 2.6. */
3378 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3380 pre_shift = floor_log2 (d);
3381 if (rem_flag)
3383 remainder = expand_binop (compute_mode, and_optab, op0,
3384 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3385 remainder, 0, OPTAB_LIB_WIDEN);
3386 if (remainder)
3387 return gen_lowpart (mode, remainder);
3389 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3390 build_int_2 (pre_shift, 0),
3391 tquotient, 0);
3393 else
3395 rtx t1, t2, t3, t4;
3397 mh = choose_multiplier (d, size, size - 1,
3398 &ml, &post_shift, &lgup);
3399 if (mh)
3400 abort ();
3402 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3403 build_int_2 (size - 1, 0), NULL_RTX, 0);
3404 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3405 NULL_RTX, 0, OPTAB_WIDEN);
3406 extra_cost = (shift_cost[post_shift]
3407 + shift_cost[size - 1] + 2 * add_cost);
3408 t3 = expand_mult_highpart (compute_mode, t2, ml,
3409 NULL_RTX, 1,
3410 max_cost - extra_cost);
3411 if (t3 != 0)
3413 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3414 build_int_2 (post_shift, 0),
3415 NULL_RTX, 1);
3416 quotient = expand_binop (compute_mode, xor_optab,
3417 t4, t1, tquotient, 0,
3418 OPTAB_WIDEN);
3422 else
3424 rtx nsign, t1, t2, t3, t4;
3425 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3426 op0, constm1_rtx), NULL_RTX);
3427 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3428 0, OPTAB_WIDEN);
3429 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3430 build_int_2 (size - 1, 0), NULL_RTX, 0);
3431 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3432 NULL_RTX);
3433 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3434 NULL_RTX, 0);
3435 if (t4)
3437 rtx t5;
3438 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3439 NULL_RTX, 0);
3440 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3441 t4, t5),
3442 tquotient);
3447 if (quotient != 0)
3448 break;
3449 delete_insns_since (last);
3451 /* Try using an instruction that produces both the quotient and
3452 remainder, using truncation. We can easily compensate the quotient
3453 or remainder to get floor rounding, once we have the remainder.
3454 Notice that we compute also the final remainder value here,
3455 and return the result right away. */
3456 if (target == 0 || GET_MODE (target) != compute_mode)
3457 target = gen_reg_rtx (compute_mode);
3459 if (rem_flag)
3461 remainder
3462 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3463 quotient = gen_reg_rtx (compute_mode);
3465 else
3467 quotient
3468 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3469 remainder = gen_reg_rtx (compute_mode);
3472 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3473 quotient, remainder, 0))
3475 /* This could be computed with a branch-less sequence.
3476 Save that for later. */
3477 rtx tem;
3478 rtx label = gen_label_rtx ();
3479 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3480 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3481 NULL_RTX, 0, OPTAB_WIDEN);
3482 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3483 expand_dec (quotient, const1_rtx);
3484 expand_inc (remainder, op1);
3485 emit_label (label);
3486 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3489 /* No luck with division elimination or divmod. Have to do it
3490 by conditionally adjusting op0 *and* the result. */
3492 rtx label1, label2, label3, label4, label5;
3493 rtx adjusted_op0;
3494 rtx tem;
3496 quotient = gen_reg_rtx (compute_mode);
3497 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3498 label1 = gen_label_rtx ();
3499 label2 = gen_label_rtx ();
3500 label3 = gen_label_rtx ();
3501 label4 = gen_label_rtx ();
3502 label5 = gen_label_rtx ();
3503 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3504 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3505 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3506 quotient, 0, OPTAB_LIB_WIDEN);
3507 if (tem != quotient)
3508 emit_move_insn (quotient, tem);
3509 emit_jump_insn (gen_jump (label5));
3510 emit_barrier ();
3511 emit_label (label1);
3512 expand_inc (adjusted_op0, const1_rtx);
3513 emit_jump_insn (gen_jump (label4));
3514 emit_barrier ();
3515 emit_label (label2);
3516 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3517 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3518 quotient, 0, OPTAB_LIB_WIDEN);
3519 if (tem != quotient)
3520 emit_move_insn (quotient, tem);
3521 emit_jump_insn (gen_jump (label5));
3522 emit_barrier ();
3523 emit_label (label3);
3524 expand_dec (adjusted_op0, const1_rtx);
3525 emit_label (label4);
3526 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3527 quotient, 0, OPTAB_LIB_WIDEN);
3528 if (tem != quotient)
3529 emit_move_insn (quotient, tem);
3530 expand_dec (quotient, const1_rtx);
3531 emit_label (label5);
3533 break;
3535 case CEIL_DIV_EXPR:
3536 case CEIL_MOD_EXPR:
3537 if (unsignedp)
3539 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3541 rtx t1, t2, t3;
3542 unsigned HOST_WIDE_INT d = INTVAL (op1);
3543 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3544 build_int_2 (floor_log2 (d), 0),
3545 tquotient, 1);
3546 t2 = expand_binop (compute_mode, and_optab, op0,
3547 GEN_INT (d - 1),
3548 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3549 t3 = gen_reg_rtx (compute_mode);
3550 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3551 compute_mode, 1, 1);
3552 if (t3 == 0)
3554 rtx lab;
3555 lab = gen_label_rtx ();
3556 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3557 expand_inc (t1, const1_rtx);
3558 emit_label (lab);
3559 quotient = t1;
3561 else
3562 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3563 t1, t3),
3564 tquotient);
3565 break;
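/* Worked instance of the unsigned ceiling sequence above, for d = 4:
       t1 = x >> 2          (truncating quotient)
       t3 = (x & 3) != 0    (1 if a remainder was discarded)
       q  = t1 + t3         (round up)  */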
3568 /* Try using an instruction that produces both the quotient and
3569 remainder, using truncation. We can easily compensate the
3570 quotient or remainder to get ceiling rounding, once we have the
3571 remainder. Notice that we compute also the final remainder
3572 value here, and return the result right away. */
3573 if (target == 0 || GET_MODE (target) != compute_mode)
3574 target = gen_reg_rtx (compute_mode);
3576 if (rem_flag)
3578 remainder = (GET_CODE (target) == REG
3579 ? target : gen_reg_rtx (compute_mode));
3580 quotient = gen_reg_rtx (compute_mode);
3582 else
3584 quotient = (GET_CODE (target) == REG
3585 ? target : gen_reg_rtx (compute_mode));
3586 remainder = gen_reg_rtx (compute_mode);
3589 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3590 remainder, 1))
3592 /* This could be computed with a branch-less sequence.
3593 Save that for later. */
3594 rtx label = gen_label_rtx ();
3595 do_cmp_and_jump (remainder, const0_rtx, EQ,
3596 compute_mode, label);
3597 expand_inc (quotient, const1_rtx);
3598 expand_dec (remainder, op1);
3599 emit_label (label);
3600 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3603 /* No luck with division elimination or divmod. Have to do it
3604 by conditionally adjusting op0 *and* the result. */
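/* Editorial sketch (not in the original source): the adjustment
   below uses, for unsigned x,
       ceil (x / y) = (x == 0) ? 0 : (x - 1) / y + 1,
   e.g. x = 20, y = 7: 19 / 7 + 1 = 2 + 1 = 3 = ceil (2.857...).  */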
3606 rtx label1, label2;
3607 rtx adjusted_op0, tem;
3609 quotient = gen_reg_rtx (compute_mode);
3610 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3611 label1 = gen_label_rtx ();
3612 label2 = gen_label_rtx ();
3613 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3614 compute_mode, label1);
3615 emit_move_insn (quotient, const0_rtx);
3616 emit_jump_insn (gen_jump (label2));
3617 emit_barrier ();
3618 emit_label (label1);
3619 expand_dec (adjusted_op0, const1_rtx);
3620 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3621 quotient, 1, OPTAB_LIB_WIDEN);
3622 if (tem != quotient)
3623 emit_move_insn (quotient, tem);
3624 expand_inc (quotient, const1_rtx);
3625 emit_label (label2);
3628 else /* signed */
3630 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3631 && INTVAL (op1) >= 0)
3633 /* This is extremely similar to the code for the unsigned case
3634 above. For 2.7 we should merge these variants, but for
3635 2.6.1 I don't want to touch the code for unsigned since that
3636 gets used in C. The signed case will only be used by other
3637 languages (Ada). */
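/* Editorial sketch (illustration only): for signed x and d = 2^k > 0
   the arithmetic shift gives floor (x / d), and adding the
   "remainder is non-zero" flag yields the ceiling even for x < 0.
   E.g. x = -5, d = 4: (-5 >> 2) + ((-5 & 3) != 0) = -2 + 1 = -1
   = ceil (-1.25).  */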
3639 rtx t1, t2, t3;
3640 unsigned HOST_WIDE_INT d = INTVAL (op1);
3641 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3642 build_int_2 (floor_log2 (d), 0),
3643 tquotient, 0);
3644 t2 = expand_binop (compute_mode, and_optab, op0,
3645 GEN_INT (d - 1),
3646 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3647 t3 = gen_reg_rtx (compute_mode);
3648 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3649 compute_mode, 1, 1);
3650 if (t3 == 0)
3652 rtx lab;
3653 lab = gen_label_rtx ();
3654 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3655 expand_inc (t1, const1_rtx);
3656 emit_label (lab);
3657 quotient = t1;
3659 else
3660 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3661 t1, t3),
3662 tquotient);
3663 break;
3666 /* Try using an instruction that produces both the quotient and
3667 remainder, using truncation. We can easily compensate the
3668 quotient or remainder to get ceiling rounding, once we have the
3669 remainder. Notice that we also compute the final remainder
3670 value here, and return the result right away. */
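/* Editorial sketch of the signed compensation below (illustration
   only): increment the quotient only when the remainder is non-zero
   and the operands have the same sign, i.e. (x ^ y) >= 0:
       if (r != 0 && (x ^ y) >= 0) { q += 1; r -= y; }
   E.g. x = 20, y = 7 becomes q = 3, r = -1; but x = -20, y = 7
   stays at q = -2, r = -6, since ceil (-2.857...) = -2.  */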
3671 if (target == 0 || GET_MODE (target) != compute_mode)
3672 target = gen_reg_rtx (compute_mode);
3673 if (rem_flag)
3675 remainder = (GET_CODE (target) == REG
3676 ? target : gen_reg_rtx (compute_mode));
3677 quotient = gen_reg_rtx (compute_mode);
3679 else
3681 quotient = (GET_CODE (target) == REG
3682 ? target : gen_reg_rtx (compute_mode));
3683 remainder = gen_reg_rtx (compute_mode);
3686 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3687 remainder, 0))
3689 /* This could be computed with a branch-less sequence.
3690 Save that for later. */
3691 rtx tem;
3692 rtx label = gen_label_rtx ();
3693 do_cmp_and_jump (remainder, const0_rtx, EQ,
3694 compute_mode, label);
3695 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3696 NULL_RTX, 0, OPTAB_WIDEN);
3697 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3698 expand_inc (quotient, const1_rtx);
3699 expand_dec (remainder, op1);
3700 emit_label (label);
3701 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3704 /* No luck with division elimination or divmod. Have to do it
3705 by conditionally adjusting op0 *and* the result. */
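/* Editorial sketch (not in the original source): when the operands
   have the same sign the code below uses
       ceil (x / y) = trunc ((x + (x > 0 ? -1 : 1)) / y) + 1,
   e.g. x = -20, y = -7: trunc (-19 / -7) + 1 = 2 + 1 = 3.
   When the signs differ the exact quotient is nonpositive, so
   truncation already rounds toward +infinity and a plain sdiv
   suffices.  */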
3707 rtx label1, label2, label3, label4, label5;
3708 rtx adjusted_op0;
3709 rtx tem;
3711 quotient = gen_reg_rtx (compute_mode);
3712 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3713 label1 = gen_label_rtx ();
3714 label2 = gen_label_rtx ();
3715 label3 = gen_label_rtx ();
3716 label4 = gen_label_rtx ();
3717 label5 = gen_label_rtx ();
3718 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3719 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3720 compute_mode, label1);
3721 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3722 quotient, 0, OPTAB_LIB_WIDEN);
3723 if (tem != quotient)
3724 emit_move_insn (quotient, tem);
3725 emit_jump_insn (gen_jump (label5));
3726 emit_barrier ();
3727 emit_label (label1);
3728 expand_dec (adjusted_op0, const1_rtx);
3729 emit_jump_insn (gen_jump (label4));
3730 emit_barrier ();
3731 emit_label (label2);
3732 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3733 compute_mode, label3);
3734 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3735 quotient, 0, OPTAB_LIB_WIDEN);
3736 if (tem != quotient)
3737 emit_move_insn (quotient, tem);
3738 emit_jump_insn (gen_jump (label5));
3739 emit_barrier ();
3740 emit_label (label3);
3741 expand_inc (adjusted_op0, const1_rtx);
3742 emit_label (label4);
3743 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3744 quotient, 0, OPTAB_LIB_WIDEN);
3745 if (tem != quotient)
3746 emit_move_insn (quotient, tem);
3747 expand_inc (quotient, const1_rtx);
3748 emit_label (label5);
3751 break;
3753 case EXACT_DIV_EXPR:
3754 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3756 HOST_WIDE_INT d = INTVAL (op1);
3757 unsigned HOST_WIDE_INT ml;
3758 int post_shift;
3759 rtx t1;
3761 post_shift = floor_log2 (d & -d);
3762 ml = invert_mod2n (d >> post_shift, size);
3763 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3764 unsignedp);
3765 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3766 build_int_2 (post_shift, 0),
3767 NULL_RTX, unsignedp);
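/* Editorial sketch (illustration only): writing d = odd * 2^s, an
   exact quotient satisfies x / d = (x * ml mod 2^N) >> s, where ml
   is the multiplicative inverse of the odd part modulo 2^N.
   E.g. N = 32, d = 5: ml = 0xCCCCCCCD, and 35 * 0xCCCCCCCD mod 2^32
   = 7 = 35 / 5.  */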
3769 insn = get_last_insn ();
3770 set_unique_reg_note (insn,
3771 REG_EQUAL,
3772 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3773 compute_mode,
3774 op0, op1));
3776 break;
3778 case ROUND_DIV_EXPR:
3779 case ROUND_MOD_EXPR:
3780 if (unsignedp)
3782 rtx tem;
3783 rtx label;
3784 label = gen_label_rtx ();
3785 quotient = gen_reg_rtx (compute_mode);
3786 remainder = gen_reg_rtx (compute_mode);
3787 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3789 rtx tem;
3790 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3791 quotient, 1, OPTAB_LIB_WIDEN);
3792 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3793 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3794 remainder, 1, OPTAB_LIB_WIDEN);
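/* Editorial sketch (illustration only): round-to-nearest from a
   truncating divmod, with halves rounding up:
       if (r > (y - 1) / 2) { q += 1; r -= y; }
   E.g. x = 18, y = 5: q = 3, r = 3 > 2, so q becomes 4;
   18/5 = 3.6 rounds to 4.  */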
3796 tem = plus_constant (op1, -1);
3797 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3798 build_int_2 (1, 0), NULL_RTX, 1);
3799 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3800 expand_inc (quotient, const1_rtx);
3801 expand_dec (remainder, op1);
3802 emit_label (label);
3804 else
3806 rtx abs_rem, abs_op1, tem, mask;
3807 rtx label;
3808 label = gen_label_rtx ();
3809 quotient = gen_reg_rtx (compute_mode);
3810 remainder = gen_reg_rtx (compute_mode);
3811 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3813 rtx tem;
3814 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3815 quotient, 0, OPTAB_LIB_WIDEN);
3816 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3817 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3818 remainder, 0, OPTAB_LIB_WIDEN);
3820 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0);
3821 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0);
3822 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3823 build_int_2 (1, 0), NULL_RTX, 1);
3824 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
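/* Editorial note (illustration only): MASK below is (x ^ y) >> (N-1),
   i.e. 0 when the signs agree and -1 when they differ, and
   (mask ^ v) - mask equals v or -v respectively. So when
   2*|r| >= |y| the quotient moves one step away from zero and the
   remainder is corrected by y with the matching sign.  */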
3825 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3826 NULL_RTX, 0, OPTAB_WIDEN);
3827 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3828 build_int_2 (size - 1, 0), NULL_RTX, 0);
3829 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3830 NULL_RTX, 0, OPTAB_WIDEN);
3831 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3832 NULL_RTX, 0, OPTAB_WIDEN);
3833 expand_inc (quotient, tem);
3834 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3835 NULL_RTX, 0, OPTAB_WIDEN);
3836 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3837 NULL_RTX, 0, OPTAB_WIDEN);
3838 expand_dec (remainder, tem);
3839 emit_label (label);
3841 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3843 default:
3844 abort ();
3847 if (quotient == 0)
3849 if (target && GET_MODE (target) != compute_mode)
3850 target = 0;
3852 if (rem_flag)
3854 /* Try to produce the remainder without producing the quotient.
3855 If we seem to have a divmod pattern that does not require widening,
3856 don't try widening here. We should really have a WIDEN argument
3857 to expand_twoval_binop, since what we'd really like to do here is
3858 1) try a mod insn in compute_mode
3859 2) try a divmod insn in compute_mode
3860 3) try a div insn in compute_mode and multiply-subtract to get
3861 remainder
3862 4) try the same things with widening allowed. */
3863 remainder
3864 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3865 op0, op1, target,
3866 unsignedp,
3867 ((optab2->handlers[(int) compute_mode].insn_code
3868 != CODE_FOR_nothing)
3869 ? OPTAB_DIRECT : OPTAB_WIDEN));
3870 if (remainder == 0)
3872 /* No luck there. Can we do remainder and divide at once
3873 without a library call? */
3874 remainder = gen_reg_rtx (compute_mode);
3875 if (! expand_twoval_binop ((unsignedp
3876 ? udivmod_optab
3877 : sdivmod_optab),
3878 op0, op1,
3879 NULL_RTX, remainder, unsignedp))
3880 remainder = 0;
3883 if (remainder)
3884 return gen_lowpart (mode, remainder);
3887 /* Produce the quotient. Try a quotient insn, but not a library call.
3888 If we have a divmod in this mode, use it in preference to widening
3889 the div (for this test we assume it will not fail). Note that optab2
3890 is set to the one of the two optabs that the call below will use. */
3891 quotient
3892 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3893 op0, op1, rem_flag ? NULL_RTX : target,
3894 unsignedp,
3895 ((optab2->handlers[(int) compute_mode].insn_code
3896 != CODE_FOR_nothing)
3897 ? OPTAB_DIRECT : OPTAB_WIDEN));
3899 if (quotient == 0)
3901 /* No luck there. Try a quotient-and-remainder insn,
3902 keeping the quotient alone. */
3903 quotient = gen_reg_rtx (compute_mode);
3904 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3905 op0, op1,
3906 quotient, NULL_RTX, unsignedp))
3908 quotient = 0;
3909 if (! rem_flag)
3910 /* Still no luck. If we are not computing the remainder,
3911 use a library call for the quotient. */
3912 quotient = sign_expand_binop (compute_mode,
3913 udiv_optab, sdiv_optab,
3914 op0, op1, target,
3915 unsignedp, OPTAB_LIB_WIDEN);
3920 if (rem_flag)
3922 if (target && GET_MODE (target) != compute_mode)
3923 target = 0;
3925 if (quotient == 0)
3926 /* No divide instruction either. Use library for remainder. */
3927 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3928 op0, op1, target,
3929 unsignedp, OPTAB_LIB_WIDEN);
3930 else
3932 /* We divided. Now finish doing X - Y * (X / Y). */
3933 remainder = expand_mult (compute_mode, quotient, op1,
3934 NULL_RTX, unsignedp);
3935 remainder = expand_binop (compute_mode, sub_optab, op0,
3936 remainder, target, unsignedp,
3937 OPTAB_LIB_WIDEN);
3941 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3944 /* Return a tree node with data type TYPE, describing the value of X.
3945 Usually this is an RTL_EXPR, if there is no obvious better choice.
3946 X may be an expression; however, we only support those expressions
3947 generated by loop.c. */
3949 tree
3950 make_tree (type, x)
3951 tree type;
3952 rtx x;
3954 tree t;
3956 switch (GET_CODE (x))
3958 case CONST_INT:
3959 t = build_int_2 (INTVAL (x),
3960 (TREE_UNSIGNED (type)
3961 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3962 || INTVAL (x) >= 0 ? 0 : -1);
3963 TREE_TYPE (t) = type;
3964 return t;
3966 case CONST_DOUBLE:
3967 if (GET_MODE (x) == VOIDmode)
3969 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3970 TREE_TYPE (t) = type;
3972 else
3974 REAL_VALUE_TYPE d;
3976 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3977 t = build_real (type, d);
3980 return t;
3982 case PLUS:
3983 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3984 make_tree (type, XEXP (x, 1))));
3986 case MINUS:
3987 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3988 make_tree (type, XEXP (x, 1))));
3990 case NEG:
3991 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3993 case MULT:
3994 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3995 make_tree (type, XEXP (x, 1))));
3997 case ASHIFT:
3998 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3999 make_tree (type, XEXP (x, 1))));
4001 case LSHIFTRT:
4002 return fold (convert (type,
4003 build (RSHIFT_EXPR, unsigned_type (type),
4004 make_tree (unsigned_type (type),
4005 XEXP (x, 0)),
4006 make_tree (type, XEXP (x, 1)))));
4008 case ASHIFTRT:
4009 return fold (convert (type,
4010 build (RSHIFT_EXPR, signed_type (type),
4011 make_tree (signed_type (type), XEXP (x, 0)),
4012 make_tree (type, XEXP (x, 1)))));
4014 case DIV:
4015 if (TREE_CODE (type) != REAL_TYPE)
4016 t = signed_type (type);
4017 else
4018 t = type;
4020 return fold (convert (type,
4021 build (TRUNC_DIV_EXPR, t,
4022 make_tree (t, XEXP (x, 0)),
4023 make_tree (t, XEXP (x, 1)))));
4024 case UDIV:
4025 t = unsigned_type (type);
4026 return fold (convert (type,
4027 build (TRUNC_DIV_EXPR, t,
4028 make_tree (t, XEXP (x, 0)),
4029 make_tree (t, XEXP (x, 1)))));
4030 default:
4031 t = make_node (RTL_EXPR);
4032 TREE_TYPE (t) = type;
4033 RTL_EXPR_RTL (t) = x;
4034 /* There are no insns to be output
4035 when this rtl_expr is used. */
4036 RTL_EXPR_SEQUENCE (t) = 0;
4037 return t;
4041 /* Return an rtx representing the value of X * MULT + ADD.
4042 TARGET is a suggestion for where to store the result (an rtx).
4043 MODE is the machine mode for the computation.
4044 X and MULT must have mode MODE. ADD may have a different mode.
4045 So can X (defaults to same as MODE).
4046 UNSIGNEDP is non-zero to do unsigned multiplication.
4047 This may emit insns. */
4049 rtx
4050 expand_mult_add (x, target, mult, add, mode, unsignedp)
4051 rtx x, target, mult, add;
4052 enum machine_mode mode;
4053 int unsignedp;
4055 tree type = type_for_mode (mode, unsignedp);
4056 tree add_type = (GET_MODE (add) == VOIDmode
4057 ? type : type_for_mode (GET_MODE (add), unsignedp));
4058 tree result = fold (build (PLUS_EXPR, type,
4059 fold (build (MULT_EXPR, type,
4060 make_tree (type, x),
4061 make_tree (type, mult))),
4062 make_tree (add_type, add)));
4064 return expand_expr (result, target, VOIDmode, 0);
4067 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4068 and returning TARGET.
4070 If TARGET is 0, a pseudo-register or constant is returned. */
4072 rtx
4073 expand_and (op0, op1, target)
4074 rtx op0, op1, target;
4076 enum machine_mode mode = VOIDmode;
4077 rtx tem;
4079 if (GET_MODE (op0) != VOIDmode)
4080 mode = GET_MODE (op0);
4081 else if (GET_MODE (op1) != VOIDmode)
4082 mode = GET_MODE (op1);
4084 if (mode != VOIDmode)
4085 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4086 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4087 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4088 else
4089 abort ();
4091 if (target == 0)
4092 target = tem;
4093 else if (tem != target)
4094 emit_move_insn (target, tem);
4095 return target;
4098 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4099 and storing in TARGET. Normally return TARGET.
4100 Return 0 if that cannot be done.
4102 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4103 it is VOIDmode, they cannot both be CONST_INT.
4105 UNSIGNEDP is for the case where we have to widen the operands
4106 to perform the operation. It says to use zero-extension.
4108 NORMALIZEP is 1 if we should convert the result to be either zero
4109 or one. Normalize is -1 if we should convert the result to be
4110 either zero or -1. If NORMALIZEP is zero, the result will be left
4111 "raw" out of the scc insn. */
4113 rtx
4114 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4115 rtx target;
4116 enum rtx_code code;
4117 rtx op0, op1;
4118 enum machine_mode mode;
4119 int unsignedp;
4120 int normalizep;
4122 rtx subtarget;
4123 enum insn_code icode;
4124 enum machine_mode compare_mode;
4125 enum machine_mode target_mode = GET_MODE (target);
4126 rtx tem;
4127 rtx last = get_last_insn ();
4128 rtx pattern, comparison;
4130 if (unsignedp)
4131 code = unsigned_condition (code);
4133 /* If one operand is constant, make it the second one. Only do this
4134 if the other operand is not constant as well. */
4136 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4137 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4139 tem = op0;
4140 op0 = op1;
4141 op1 = tem;
4142 code = swap_condition (code);
4145 if (mode == VOIDmode)
4146 mode = GET_MODE (op0);
4148 /* For some comparisons with 1 and -1, we can convert this to
4149 comparisons with zero. This will often produce more opportunities for
4150 store-flag insns. */
4152 switch (code)
4154 case LT:
4155 if (op1 == const1_rtx)
4156 op1 = const0_rtx, code = LE;
4157 break;
4158 case LE:
4159 if (op1 == constm1_rtx)
4160 op1 = const0_rtx, code = LT;
4161 break;
4162 case GE:
4163 if (op1 == const1_rtx)
4164 op1 = const0_rtx, code = GT;
4165 break;
4166 case GT:
4167 if (op1 == constm1_rtx)
4168 op1 = const0_rtx, code = GE;
4169 break;
4170 case GEU:
4171 if (op1 == const1_rtx)
4172 op1 = const0_rtx, code = NE;
4173 break;
4174 case LTU:
4175 if (op1 == const1_rtx)
4176 op1 = const0_rtx, code = EQ;
4177 break;
4178 default:
4179 break;
4182 /* From now on, we won't change CODE, so set ICODE now. */
4183 icode = setcc_gen_code[(int) code];
4185 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4186 complement of A (for GE) and shifting the sign bit to the low bit. */
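/* Editorial sketch (not in the original source), for an N-bit mode:
       (x < 0)  as 0/1:  (unsigned) x >> (N - 1)
       (x >= 0) as 0/1:  (unsigned) ~x >> (N - 1)
   and an arithmetic shift instead yields the 0/-1 forms.  */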
4187 if (op1 == const0_rtx && (code == LT || code == GE)
4188 && GET_MODE_CLASS (mode) == MODE_INT
4189 && (normalizep || STORE_FLAG_VALUE == 1
4190 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4191 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4192 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4194 subtarget = target;
4196 /* If the result is to be wider than OP0, it is best to convert it
4197 first. If it is to be narrower, it is *incorrect* to convert it
4198 first. */
4199 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4201 op0 = protect_from_queue (op0, 0);
4202 op0 = convert_modes (target_mode, mode, op0, 0);
4203 mode = target_mode;
4206 if (target_mode != mode)
4207 subtarget = 0;
4209 if (code == GE)
4210 op0 = expand_unop (mode, one_cmpl_optab, op0,
4211 ((STORE_FLAG_VALUE == 1 || normalizep)
4212 ? 0 : subtarget), 0);
4214 if (STORE_FLAG_VALUE == 1 || normalizep)
4215 /* If we are supposed to produce a 0/1 value, we want to do
4216 a logical shift from the sign bit to the low-order bit; for
4217 a -1/0 value, we do an arithmetic shift. */
4218 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4219 size_int (GET_MODE_BITSIZE (mode) - 1),
4220 subtarget, normalizep != -1);
4222 if (mode != target_mode)
4223 op0 = convert_modes (target_mode, mode, op0, 0);
4225 return op0;
4228 if (icode != CODE_FOR_nothing)
4230 insn_operand_predicate_fn pred;
4232 /* We think we may be able to do this with a scc insn. Emit the
4233 comparison and then the scc insn.
4235 compare_from_rtx may call emit_queue, which would be deleted below
4236 if the scc insn fails. So call it ourselves before setting LAST.
4237 Likewise for do_pending_stack_adjust. */
4239 emit_queue ();
4240 do_pending_stack_adjust ();
4241 last = get_last_insn ();
4243 comparison
4244 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4245 if (GET_CODE (comparison) == CONST_INT)
4246 return (comparison == const0_rtx ? const0_rtx
4247 : normalizep == 1 ? const1_rtx
4248 : normalizep == -1 ? constm1_rtx
4249 : const_true_rtx);
4251 /* If the code of COMPARISON doesn't match CODE, something is
4252 wrong; we can no longer be sure that we have the operation.
4253 We could handle this case, but it should not happen. */
4255 if (GET_CODE (comparison) != code)
4256 abort ();
4258 /* Get a reference to the target in the proper mode for this insn. */
4259 compare_mode = insn_data[(int) icode].operand[0].mode;
4260 subtarget = target;
4261 pred = insn_data[(int) icode].operand[0].predicate;
4262 if (preserve_subexpressions_p ()
4263 || ! (*pred) (subtarget, compare_mode))
4264 subtarget = gen_reg_rtx (compare_mode);
4266 pattern = GEN_FCN (icode) (subtarget);
4267 if (pattern)
4269 emit_insn (pattern);
4271 /* If we are converting to a wider mode, first convert to
4272 TARGET_MODE, then normalize. This produces better combining
4273 opportunities on machines that have a SIGN_EXTRACT when we are
4274 testing a single bit. This mostly benefits the 68k.
4276 If STORE_FLAG_VALUE does not have the sign bit set when
4277 interpreted in COMPARE_MODE, we can do this conversion as
4278 unsigned, which is usually more efficient. */
4279 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4281 convert_move (target, subtarget,
4282 (GET_MODE_BITSIZE (compare_mode)
4283 <= HOST_BITS_PER_WIDE_INT)
4284 && 0 == (STORE_FLAG_VALUE
4285 & ((HOST_WIDE_INT) 1
4286 << (GET_MODE_BITSIZE (compare_mode) -1))));
4287 op0 = target;
4288 compare_mode = target_mode;
4290 else
4291 op0 = subtarget;
4293 /* If we want to keep subexpressions around, don't reuse our
4294 last target. */
4296 if (preserve_subexpressions_p ())
4297 subtarget = 0;
4299 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4300 we don't have to do anything. */
4301 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4303 /* STORE_FLAG_VALUE might be the most negative number, so write
4304 the comparison this way to avoid a compile-time warning. */
4305 else if (- normalizep == STORE_FLAG_VALUE)
4306 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4308 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4309 makes it hard to use a value of just the sign bit due to
4310 ANSI integer constant typing rules. */
4311 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4312 && (STORE_FLAG_VALUE
4313 & ((HOST_WIDE_INT) 1
4314 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4315 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4316 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4317 subtarget, normalizep == 1);
4318 else if (STORE_FLAG_VALUE & 1)
4320 op0 = expand_and (op0, const1_rtx, subtarget);
4321 if (normalizep == -1)
4322 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4324 else
4325 abort ();
4327 /* If we were converting to a smaller mode, do the
4328 conversion now. */
4329 if (target_mode != compare_mode)
4331 convert_move (target, op0, 0);
4332 return target;
4334 else
4335 return op0;
4339 delete_insns_since (last);
4341 /* If expensive optimizations, use different pseudo registers for each
4342 insn, instead of reusing the same pseudo. This leads to better CSE,
4343 but slows down the compiler, since there are more pseudos. */
4344 subtarget = (!flag_expensive_optimizations
4345 && (target_mode == mode)) ? target : NULL_RTX;
4347 /* If we reached here, we can't do this with a scc insn. However, there
4348 are some comparisons that can be done directly. For example, if
4349 this is an equality comparison of integers, we can try to exclusive-or
4350 (or subtract) the two operands and use a recursive call to try the
4351 comparison with zero. Don't do any of these cases if branches are
4352 very cheap. */
4354 if (BRANCH_COST > 0
4355 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4356 && op1 != const0_rtx)
4358 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4359 OPTAB_WIDEN);
4361 if (tem == 0)
4362 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4363 OPTAB_WIDEN);
4364 if (tem != 0)
4365 tem = emit_store_flag (target, code, tem, const0_rtx,
4366 mode, unsignedp, normalizep);
4367 if (tem == 0)
4368 delete_insns_since (last);
4369 return tem;
4372 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4373 the constant zero. Reject all other comparisons at this point. Only
4374 do LE and GT if branches are expensive since they are expensive on
4375 2-operand machines. */
4377 if (BRANCH_COST == 0
4378 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4379 || (code != EQ && code != NE
4380 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4381 return 0;
4383 /* See what we need to return. We can only return a 1, -1, or the
4384 sign bit. */
4386 if (normalizep == 0)
4388 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4389 normalizep = STORE_FLAG_VALUE;
4391 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4392 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4393 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4395 else
4396 return 0;
4399 /* Try to put the result of the comparison in the sign bit. Assume we can't
4400 do the necessary operation below. */
4402 tem = 0;
4404 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4405 the sign bit set. */
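/* Editorial examples (illustration only): A = 0: 0 | -1 = -1 (sign
   bit set); A = 3: 3 | 2 = 3 (clear); A = -5: -5 | -6 < 0 (set).
   The borrow in A - 1 only reaches the sign bit when A is zero or
   negative.  */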
4407 if (code == LE)
4409 /* This is destructive, so SUBTARGET can't be OP0. */
4410 if (rtx_equal_p (subtarget, op0))
4411 subtarget = 0;
4413 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4414 OPTAB_WIDEN);
4415 if (tem)
4416 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4417 OPTAB_WIDEN);
4420 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4421 number of bits in the mode of OP0, minus one. */
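/* Editorial examples (illustration only): the arithmetic shift gives
   0 for A >= 0 and -1 for A < 0, so the difference is 0 - A or
   -1 - A, and its sign bit is set exactly when A > 0. E.g. A = 5:
   0 - 5 = -5 (set); A = 0: 0 (clear); A = -3: -1 + 3 = 2 (clear).  */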
4423 if (code == GT)
4425 if (rtx_equal_p (subtarget, op0))
4426 subtarget = 0;
4428 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4429 size_int (GET_MODE_BITSIZE (mode) - 1),
4430 subtarget, 0);
4431 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4432 OPTAB_WIDEN);
4435 if (code == EQ || code == NE)
4437 /* For EQ or NE, one way to do the comparison is to apply an operation
4438 that converts the operand into a positive number if it is non-zero
4439 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4440 for NE we negate. This puts the result in the sign bit. Then we
4441 normalize with a shift, if needed.
4443 Two operations that can do the above actions are ABS and FFS, so try
4444 them. If that doesn't work, and MODE is smaller than a full word,
4445 we can use zero-extension to the wider mode (an unsigned conversion)
4446 as the operation. */
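/* Editorial sketch (not in the original source): abs (x) and
   ffs (x) are both zero exactly when x is zero, so
       EQ:  abs (x) - 1   has its sign bit set iff x == 0;
       NE:  - abs (x)     has its sign bit set iff x != 0.
   (Only the sign bit matters, so abs (INT_MIN) == INT_MIN is still
   handled correctly.)  */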
4448 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4449 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4450 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4451 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4452 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4454 op0 = protect_from_queue (op0, 0);
4455 tem = convert_modes (word_mode, mode, op0, 1);
4456 mode = word_mode;
4459 if (tem != 0)
4461 if (code == EQ)
4462 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4463 0, OPTAB_WIDEN);
4464 else
4465 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4468 /* If we couldn't do it that way, for NE we can "or" the two's complement
4469 of the value with itself. For EQ, we take the one's complement of
4470 that "or", which is an extra insn, so we only handle EQ if branches
4471 are expensive. */
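/* Editorial examples (illustration only): x = 0: -0 | 0 = 0 (sign
   bit clear); x != 0: one of x and -x is negative, so -x | x has
   the sign bit set. Complementing that flips it into an EQ test.  */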
4473 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4475 if (rtx_equal_p (subtarget, op0))
4476 subtarget = 0;
4478 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4479 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4480 OPTAB_WIDEN);
4482 if (tem && code == EQ)
4483 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4487 if (tem && normalizep)
4488 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4489 size_int (GET_MODE_BITSIZE (mode) - 1),
4490 subtarget, normalizep == 1);
4492 if (tem)
4494 if (GET_MODE (tem) != target_mode)
4496 convert_move (target, tem, 0);
4497 tem = target;
4499 else if (!subtarget)
4501 emit_move_insn (target, tem);
4502 tem = target;
4505 else
4506 delete_insns_since (last);
4508 return tem;
4511 /* Like emit_store_flag, but always succeeds. */
4513 rtx
4514 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4515 rtx target;
4516 enum rtx_code code;
4517 rtx op0, op1;
4518 enum machine_mode mode;
4519 int unsignedp;
4520 int normalizep;
4522 rtx tem, label;
4524 /* First see if emit_store_flag can do the job. */
4525 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4526 if (tem != 0)
4527 return tem;
4529 if (normalizep == 0)
4530 normalizep = 1;
4532 /* If this failed, we have to do this with set/compare/jump/set code. */
4534 if (GET_CODE (target) != REG
4535 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4536 target = gen_reg_rtx (GET_MODE (target));
4538 emit_move_insn (target, const1_rtx);
4539 label = gen_label_rtx ();
4540 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
4541 NULL_RTX, label);
4543 emit_move_insn (target, const0_rtx);
4544 emit_label (label);
4546 return target;
4549 /* Perform possibly multi-word comparison and conditional jump to LABEL
4550 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4552 The algorithm is based on the code in expr.c:do_jump.
4554 Note that this does not perform a general comparison. Only variants
4555 generated within expmed.c are correctly handled; others abort (but could
4556 be handled if needed). */
4558 static void
4559 do_cmp_and_jump (arg1, arg2, op, mode, label)
4560 rtx arg1, arg2, label;
4561 enum rtx_code op;
4562 enum machine_mode mode;
4564 /* If this mode is an integer too wide to compare properly,
4565 compare word by word. Rely on cse to optimize constant cases. */
4567 if (GET_MODE_CLASS (mode) == MODE_INT
4568 && ! can_compare_p (op, mode, ccp_jump))
4570 rtx label2 = gen_label_rtx ();
4572 switch (op)
4574 case LTU:
4575 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4576 break;
4578 case LEU:
4579 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4580 break;
4582 case LT:
4583 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4584 break;
4586 case GT:
4587 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4588 break;
4590 case GE:
4591 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4592 break;
4594 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4595 those are the only equality operations we do. */
4596 case EQ:
4597 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4598 abort ();
4599 do_jump_by_parts_equality_rtx (arg1, label2, label);
4600 break;
4602 case NE:
4603 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4604 abort ();
4605 do_jump_by_parts_equality_rtx (arg1, label, label2);
4606 break;
4608 default:
4609 abort ();
4612 emit_label (label2);
4614 else
4616 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);