/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
                                          int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
                           int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
                               int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
                                   enum machine_mode, rtx));
#define CEIL(x,y) (((x) + (y) - 1) / (y))
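/* For instance, CEIL (7, 4) == 2 while 7 / 4 == 1: the macro rounds the
   quotient up, which is how a count of bits is converted below into the
   number of whole bytes or words needed to contain it.  */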
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx (REG, word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
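  /* Reading of the two flags just computed: the rtx_cost of a DIV or MOD
     by a representative power of two (the constant 32 above) is compared
     against the cost of two additions; only when the md file makes the
     operation that cheap do the expanders emit the division directly for
     a power-of-two divisor instead of an open-coded shift sequence.  */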
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT
                         (wider_mode,
                          gen_rtx_MULT (wider_mode,
                                        gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                        gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                          GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
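/* A minimal usage sketch (hypothetical caller): given an rtx X that
   holds an SImode value,

       rtx neg = negate_rtx (SImode, x);

   folds to a CONST_INT at compile time when X is constant, and otherwise
   emits a neg insn via expand_unop.  */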
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  int insv_bitsize;

  if (insn_operand_mode[(int) CODE_FOR_insv][3] == VOIDmode)
    insv_bitsize = GET_MODE_BITSIZE (word_mode);
  else
    insv_bitsize = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]);
#endif

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);

          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }

          emit_insn (GEN_FCN (icode)
                     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
        }
      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }
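  /* Worked example: storing a 64-bit field at bit 0 on a 32-bit
     little-endian target makes two passes through the loop above, first
     bits 0-31 from word 0 of VALUE, then bits 32-63 from word 1.  When
     WORDS_BIG_ENDIAN, the word numbers are reversed so that the least
     significant part of the field is still transferred first.  */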
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_operand_mode[(int) CODE_FOR_insv][3];
      if (maxmode == VOIDmode)
        maxmode = word_mode;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
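  /* For example, with MODE == SImode (TOTAL_BITS == 32), a field of
     BITSIZE 8 that started at BITPOS 8 counting from the msb on a
     big-endian machine is now at BITPOS 32 - 8 - 8 == 16 counting from
     the lsb.  */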
  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
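/* In C terms, the function above amounts to

       op0 = (op0 & ~mask) | ((value & field_mask) << bitpos);

   where MASK has BITSIZE ones at position BITPOS (built by mask_rtx
   below); the AND step is skipped when VALUE is known to be all ones,
   and the IOR step when it is known to be zero.  */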
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
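/* Example of the splitting done above: a 12-bit field at bit position 26
   in a word-aligned MEM on a 32-bit target is stored in two passes, six
   bits into the first word (THISPOS 26) and the remaining six bits into
   the following word (THISPOS 0), each pass going through
   store_fixed_bit_field.  */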
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
#ifdef HAVE_extv
  int extv_bitsize;
#endif
#ifdef HAVE_extzv
  int extzv_bitsize;
#endif

#ifdef HAVE_extv
  if (insn_operand_mode[(int) CODE_FOR_extv][0] == VOIDmode)
    extv_bitsize = GET_MODE_BITSIZE (word_mode);
  else
    extv_bitsize = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]);
#endif

#ifdef HAVE_extzv
  if (insn_operand_mode[(int) CODE_FOR_extzv][0] == VOIDmode)
    extzv_bitsize = GET_MODE_BITSIZE (word_mode);
  else
    extzv_bitsize
      = GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]);
#endif

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = change_address (op0, imode, NULL_RTX);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
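  /* E.g. for a signed 48-bit field extracted into a DImode TARGET, the
     pair of shifts just above moves the field's msb up to bit 63 and
     then arithmetic-shifts it back down the same 16 places, replicating
     the sign bit across the top 16 bits of the register.  */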
  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, offset);
        }
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (extzv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extzv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_operand_mode[(int) CODE_FOR_extzv][0];
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (extv_bitsize >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > extv_bitsize)))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode;

          maxmode = insn_operand_mode[(int) CODE_FOR_extv][0];
          if (maxmode == VOIDmode)
            maxmode = word_mode;

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 0, align);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
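/* For instance, with a 32-bit HOST_WIDE_INT, mask_rtx (SImode, 4, 8, 0)
   yields the CONST_INT 0xff0 (eight ones shifted left four places), and
   mask_rtx (SImode, 4, 8, 1) its complement 0xfffff00f.  */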
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
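/* Example: lshift_value (SImode, GEN_INT (0x2b), 8, 6) truncates 0x2b to
   its low six bits (still 0x2b) and shifts left eight, producing the
   CONST_INT 0x2b00.  */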
1724 /* Extract a bit field that is split across two words
1725 and return an RTX for the result.
1727 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1728 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1729 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
1731 ALIGN is the known alignment of OP0, measured in bytes.
1732 This is also the size of the memory objects to be used. */
1734 static rtx
1735 extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
1736 rtx op0;
1737 int bitsize, bitpos, unsignedp, align;
1739 int unit;
1740 int bitsdone = 0;
1741 rtx result = NULL_RTX;
1742 int first = 1;
1744 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1745 much at a time. */
1746 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1747 unit = BITS_PER_WORD;
1748 else
1749 unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
1751 while (bitsdone < bitsize)
1753 int thissize;
1754 rtx part, word;
1755 int thispos;
1756 int offset;
1758 offset = (bitpos + bitsdone) / unit;
1759 thispos = (bitpos + bitsdone) % unit;
1761 /* THISSIZE must not overrun a word boundary. Otherwise,
1762 extract_fixed_bit_field will call us again, and we will mutually
1763 recurse forever. */
1764 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1765 thissize = MIN (thissize, unit - thispos);
1767 /* If OP0 is a register, then handle OFFSET here.
1769 When handling multiword bitfields, extract_bit_field may pass
1770 down a word_mode SUBREG of a larger REG for a bitfield that actually
1771 crosses a word boundary. Thus, for a SUBREG, we must find
1772 the current word starting from the base register. */
1773 if (GET_CODE (op0) == SUBREG)
1775 word = operand_subword_force (SUBREG_REG (op0),
1776 SUBREG_WORD (op0) + offset,
1777 GET_MODE (SUBREG_REG (op0)));
1778 offset = 0;
1780 else if (GET_CODE (op0) == REG)
1782 word = operand_subword_force (op0, offset, GET_MODE (op0));
1783 offset = 0;
1785 else
1786 word = op0;
1788 /* Extract the parts in bit-counting order,
1789 whose meaning is determined by BYTES_BIG_ENDIAN.
1790 OFFSET is in UNITs, and UNIT is in bits.
1791 extract_fixed_bit_field wants offset in bytes. */
1792 part = extract_fixed_bit_field (word_mode, word,
1793 offset * unit / BITS_PER_UNIT,
1794 thissize, thispos, 0, 1, align);
1795 bitsdone += thissize;
1797 /* Shift this part into place for the result. */
1798 if (BYTES_BIG_ENDIAN)
1800 if (bitsize != bitsdone)
1801 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1802 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1804 else
1806 if (bitsdone != thissize)
1807 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1808 build_int_2 (bitsdone - thissize, 0), 0, 1);
1811 if (first)
1812 result = part;
1813 else
1814 /* Combine the parts with bitwise or. This works
1815 because we extracted each part as an unsigned bit field. */
1816 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1817 OPTAB_LIB_WIDEN);
1819 first = 0;
1822 /* Unsigned bit field: we are done. */
1823 if (unsignedp)
1824 return result;
1825 /* Signed bit field: sign-extend with two arithmetic shifts. */
1826 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1827 build_int_2 (BITS_PER_WORD - bitsize, 0),
1828 NULL_RTX, 0);
1829 return expand_shift (RSHIFT_EXPR, word_mode, result,
1830 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
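/* Standalone sketch of that final sign-extension step: once the BITSIZE-bit
   field sits in the low bits of a word, shifting it to the top and back down
   with an arithmetic shift replicates its sign bit.  Assumes a 32-bit int
   and an arithmetic >> on signed values, which ISO C does not guarantee but
   which holds on the hosts GCC cares about.  */

static int
sign_extend_demo (field, bitsize)
     unsigned int field;
     int bitsize;
{
  return (int) (field << (32 - bitsize)) >> (32 - bitsize);
}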
1833 /* Add INC into TARGET. */
1835 void
1836 expand_inc (target, inc)
1837 rtx target, inc;
1839 rtx value = expand_binop (GET_MODE (target), add_optab,
1840 target, inc,
1841 target, 0, OPTAB_LIB_WIDEN);
1842 if (value != target)
1843 emit_move_insn (target, value);
1846 /* Subtract DEC from TARGET. */
1848 void
1849 expand_dec (target, dec)
1850 rtx target, dec;
1852 rtx value = expand_binop (GET_MODE (target), sub_optab,
1853 target, dec,
1854 target, 0, OPTAB_LIB_WIDEN);
1855 if (value != target)
1856 emit_move_insn (target, value);
1859 /* Output a shift instruction for expression code CODE,
1860 with SHIFTED being the rtx for the value to shift,
1861 and AMOUNT the tree for the amount to shift by.
1862 Store the result in the rtx TARGET, if that is convenient.
1863 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1864 Return the rtx for where the value is. */
1866 rtx
1867 expand_shift (code, mode, shifted, amount, target, unsignedp)
1868 enum tree_code code;
1869 register enum machine_mode mode;
1870 rtx shifted;
1871 tree amount;
1872 register rtx target;
1873 int unsignedp;
1875 register rtx op1, temp = 0;
1876 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1877 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1878 int try;
1880 /* We previously detected shift counts computed by NEGATE_EXPR
1881 and shifted in the other direction, but that does not work
1882 on all machines. */
1884 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1886 #ifdef SHIFT_COUNT_TRUNCATED
1887 if (SHIFT_COUNT_TRUNCATED)
1889 if (GET_CODE (op1) == CONST_INT
1890 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1891 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1892 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1893 % GET_MODE_BITSIZE (mode));
1894 else if (GET_CODE (op1) == SUBREG
1895 && SUBREG_WORD (op1) == 0)
1896 op1 = SUBREG_REG (op1);
1898 #endif
1900 if (op1 == const0_rtx)
1901 return shifted;
1903 for (try = 0; temp == 0 && try < 3; try++)
1905 enum optab_methods methods;
1907 if (try == 0)
1908 methods = OPTAB_DIRECT;
1909 else if (try == 1)
1910 methods = OPTAB_WIDEN;
1911 else
1912 methods = OPTAB_LIB_WIDEN;
1914 if (rotate)
1916 /* Widening does not work for rotation. */
1917 if (methods == OPTAB_WIDEN)
1918 continue;
1919 else if (methods == OPTAB_LIB_WIDEN)
1921 /* If we have been unable to open-code this by a rotation,
1922 do it as the IOR of two shifts. I.e., to rotate A
1923 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1924 where C is the bitsize of A.
1926 It is theoretically possible that the target machine might
1927 not be able to perform either shift and hence we would
1928 be making two libcalls rather than just the one for the
1929 shift (similarly if IOR could not be done). We will allow
1930 this extremely unlikely lossage to avoid complicating the
1931 code below. */
1933 rtx subtarget = target == shifted ? 0 : target;
1934 rtx temp1;
1935 tree type = TREE_TYPE (amount);
1936 tree new_amount = make_tree (type, op1);
1937 tree other_amount
1938 = fold (build (MINUS_EXPR, type,
1939 convert (type,
1940 build_int_2 (GET_MODE_BITSIZE (mode),
1941 0)),
1942 amount));
1944 shifted = force_reg (mode, shifted);
1946 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1947 mode, shifted, new_amount, subtarget, 1);
1948 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1949 mode, shifted, other_amount, 0, 1);
1950 return expand_binop (mode, ior_optab, temp, temp1, target,
1951 unsignedp, methods);
1954 temp = expand_binop (mode,
1955 left ? rotl_optab : rotr_optab,
1956 shifted, op1, target, unsignedp, methods);
1958 /* If we don't have the rotate, but we are rotating by a constant
1959 that is in range, try a rotate in the opposite direction. */
1961 if (temp == 0 && GET_CODE (op1) == CONST_INT
1962 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1963 temp = expand_binop (mode,
1964 left ? rotr_optab : rotl_optab,
1965 shifted,
1966 GEN_INT (GET_MODE_BITSIZE (mode)
1967 - INTVAL (op1)),
1968 target, unsignedp, methods);
1970 else if (unsignedp)
1971 temp = expand_binop (mode,
1972 left ? ashl_optab : lshr_optab,
1973 shifted, op1, target, unsignedp, methods);
1975 /* Do arithmetic shifts.
1976 Also, if we are going to widen the operand, we can just as well
1977 use an arithmetic right-shift instead of a logical one. */
1978 if (temp == 0 && ! rotate
1979 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1981 enum optab_methods methods1 = methods;
1983 /* If trying to widen a logical shift to an arithmetic shift,
1984 don't accept an arithmetic shift of the same size. */
1985 if (unsignedp)
1986 methods1 = OPTAB_MUST_WIDEN;
1988 /* Arithmetic shift */
1990 temp = expand_binop (mode,
1991 left ? ashl_optab : ashr_optab,
1992 shifted, op1, target, unsignedp, methods1);
1995 /* We used to try extzv here for logical right shifts, but that was
1996 only useful for one machine, the VAX, and caused poor code
1997 generation there for lshrdi3, so the code was deleted and a
1998 define_expand for lshrsi3 was added to vax.md. */
2001 if (temp == 0)
2002 abort ();
2003 return temp;
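/* Standalone sketch of the IOR-of-two-shifts fallback used above for
   rotation: rotate A left by N as (A << N) | (A >> (32 - N)), assuming a
   32-bit word.  The N == 0 test avoids the undefined full-width shift; the
   RTL path never meets that case, since a const0_rtx count returns early.  */

static unsigned int
rotl32_demo (a, n)
     unsigned int a, n;
{
  n &= 31;
  return n == 0 ? a : (a << n) | (a >> (32 - n));
}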
2006 enum alg_code { alg_zero, alg_m, alg_shift,
2007 alg_add_t_m2, alg_sub_t_m2,
2008 alg_add_factor, alg_sub_factor,
2009 alg_add_t2_m, alg_sub_t2_m,
2010 alg_add, alg_subtract, alg_factor, alg_shiftop };
2012 /* This structure records a sequence of operations.
2013 `ops' is the number of operations recorded.
2014 `cost' is their total cost.
2015 The operations are stored in `op' and the corresponding
2016 logarithms of the integer coefficients in `log'.
2018 These are the operations:
2019 alg_zero total := 0;
2020 alg_m total := multiplicand;
2021 alg_shift total := total * coeff;
2022 alg_add_t_m2 total := total + multiplicand * coeff;
2023 alg_sub_t_m2 total := total - multiplicand * coeff;
2024 alg_add_factor total := total * coeff + total;
2025 alg_sub_factor total := total * coeff - total;
2026 alg_add_t2_m total := total * coeff + multiplicand;
2027 alg_sub_t2_m total := total * coeff - multiplicand;
2029 The first operation must be either alg_zero or alg_m. */
2031 struct algorithm
2033 short cost;
2034 short ops;
2035 /* The sizes of the OP and LOG fields are not directly related to the
2036 word size, but the worst case arises when we have few
2037 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2038 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2039 in total wordsize operations. */
2040 enum alg_code op[MAX_BITS_PER_WORD];
2041 char log[MAX_BITS_PER_WORD];
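/* Hypothetical helper (not in the original source) showing how a recorded
   `struct algorithm' is read back: TOTAL is rebuilt by replaying each opcode
   against the multiplicand X, exactly as the table above describes.
   expand_mult performs the same walk but emits RTL instead of a value.  */

static unsigned int
apply_alg_demo (alg, x)
     struct algorithm *alg;
     unsigned int x;
{
  unsigned int total = 0;
  int i;

  for (i = 0; i < alg->ops; i++)
    {
      int log = alg->log[i];
      switch (alg->op[i])
        {
        case alg_zero:       total = 0; break;
        case alg_m:          total = x; break;
        case alg_shift:      total <<= log; break;
        case alg_add_t_m2:   total += x << log; break;
        case alg_sub_t_m2:   total -= x << log; break;
        case alg_add_factor: total += total << log; break;
        case alg_sub_factor: total = (total << log) - total; break;
        case alg_add_t2_m:   total = (total << log) + x; break;
        case alg_sub_t2_m:   total = (total << log) - x; break;
        default:             abort ();
        }
    }
  return total;
}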
2044 static void synth_mult PROTO((struct algorithm *,
2045 unsigned HOST_WIDE_INT,
2046 int));
2047 static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
2048 int, int,
2049 unsigned HOST_WIDE_INT *,
2050 int *, int *));
2051 static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
2052 int));
2053 /* Compute and return the best algorithm for multiplying by T.
2054 The algorithm must cost less than COST_LIMIT.
2055 If retval.cost >= COST_LIMIT, no algorithm was found and all
2056 other fields of the returned struct are undefined. */
2058 static void
2059 synth_mult (alg_out, t, cost_limit)
2060 struct algorithm *alg_out;
2061 unsigned HOST_WIDE_INT t;
2062 int cost_limit;
2064 int m;
2065 struct algorithm *alg_in, *best_alg;
2066 int cost;
2067 unsigned HOST_WIDE_INT q;
2069 /* Indicate that no algorithm is yet found. If no algorithm
2070 is found, this value will be returned and indicate failure. */
2071 alg_out->cost = cost_limit;
2073 if (cost_limit <= 0)
2074 return;
2076 /* t == 1 can be done at zero cost. */
2077 if (t == 1)
2079 alg_out->ops = 1;
2080 alg_out->cost = 0;
2081 alg_out->op[0] = alg_m;
2082 return;
2085 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2086 fail now. */
2087 if (t == 0)
2089 if (zero_cost >= cost_limit)
2090 return;
2091 else
2093 alg_out->ops = 1;
2094 alg_out->cost = zero_cost;
2095 alg_out->op[0] = alg_zero;
2096 return;
2100 /* We'll be needing a couple extra algorithm structures now. */
2102 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2103 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2105 /* If we have a group of zero bits at the low-order part of T, try
2106 multiplying by the remaining bits and then doing a shift. */
2108 if ((t & 1) == 0)
2110 m = floor_log2 (t & -t); /* m = number of low zero bits */
2111 q = t >> m;
2112 cost = shift_cost[m];
2113 synth_mult (alg_in, q, cost_limit - cost);
2115 cost += alg_in->cost;
2116 if (cost < cost_limit)
2118 struct algorithm *x;
2119 x = alg_in, alg_in = best_alg, best_alg = x;
2120 best_alg->log[best_alg->ops] = m;
2121 best_alg->op[best_alg->ops] = alg_shift;
2122 cost_limit = cost;
2126 /* If we have an odd number, add or subtract one. */
2127 if ((t & 1) != 0)
2129 unsigned HOST_WIDE_INT w;
2131 for (w = 1; (w & t) != 0; w <<= 1)
2133 /* If T was -1, then W will be zero after the loop. This is another
2134 case where T ends with ...111. Handling this by multiplying by (T + 1)
2135 and subtracting 1 produces slightly better code and makes algorithm
2136 selection much faster than treating it like the ...0111 case
2137 below. */
2138 if (w == 0
2139 || (w > 2
2140 /* Reject the case where t is 3.
2141 Thus we prefer addition in that case. */
2142 && t != 3))
2144 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2146 cost = add_cost;
2147 synth_mult (alg_in, t + 1, cost_limit - cost);
2149 cost += alg_in->cost;
2150 if (cost < cost_limit)
2152 struct algorithm *x;
2153 x = alg_in, alg_in = best_alg, best_alg = x;
2154 best_alg->log[best_alg->ops] = 0;
2155 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2156 cost_limit = cost;
2159 else
2161 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2163 cost = add_cost;
2164 synth_mult (alg_in, t - 1, cost_limit - cost);
2166 cost += alg_in->cost;
2167 if (cost < cost_limit)
2169 struct algorithm *x;
2170 x = alg_in, alg_in = best_alg, best_alg = x;
2171 best_alg->log[best_alg->ops] = 0;
2172 best_alg->op[best_alg->ops] = alg_add_t_m2;
2173 cost_limit = cost;
2178 /* Look for factors of t of the form
2179 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2180 If we find such a factor, we can multiply by t using an algorithm that
2181 multiplies by q, shifts the result by m, and then adds or subtracts the unshifted result.
2183 We search for large factors first and loop down, even if large factors
2184 are less probable than small ones; if we find a large factor we will find a
2185 good sequence quickly, and therefore be able to prune (by decreasing
2186 COST_LIMIT) the search. */
2188 for (m = floor_log2 (t - 1); m >= 2; m--)
2190 unsigned HOST_WIDE_INT d;
2192 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2193 if (t % d == 0 && t > d)
2195 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2196 synth_mult (alg_in, t / d, cost_limit - cost);
2198 cost += alg_in->cost;
2199 if (cost < cost_limit)
2201 struct algorithm *x;
2202 x = alg_in, alg_in = best_alg, best_alg = x;
2203 best_alg->log[best_alg->ops] = m;
2204 best_alg->op[best_alg->ops] = alg_add_factor;
2205 cost_limit = cost;
2207 /* Other factors will have been taken care of in the recursion. */
2208 break;
2211 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2212 if (t % d == 0 && t > d)
2214 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2215 synth_mult (alg_in, t / d, cost_limit - cost);
2217 cost += alg_in->cost;
2218 if (cost < cost_limit)
2220 struct algorithm *x;
2221 x = alg_in, alg_in = best_alg, best_alg = x;
2222 best_alg->log[best_alg->ops] = m;
2223 best_alg->op[best_alg->ops] = alg_sub_factor;
2224 cost_limit = cost;
2226 break;
2230 /* Try shift-and-add (load effective address) instructions,
2231 i.e. do a*3, a*5, a*9. */
2232 if ((t & 1) != 0)
2234 q = t - 1;
2235 q = q & -q;
2236 m = exact_log2 (q);
2237 if (m >= 0)
2239 cost = shiftadd_cost[m];
2240 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2242 cost += alg_in->cost;
2243 if (cost < cost_limit)
2245 struct algorithm *x;
2246 x = alg_in, alg_in = best_alg, best_alg = x;
2247 best_alg->log[best_alg->ops] = m;
2248 best_alg->op[best_alg->ops] = alg_add_t2_m;
2249 cost_limit = cost;
2253 q = t + 1;
2254 q = q & -q;
2255 m = exact_log2 (q);
2256 if (m >= 0)
2258 cost = shiftsub_cost[m];
2259 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2261 cost += alg_in->cost;
2262 if (cost < cost_limit)
2264 struct algorithm *x;
2265 x = alg_in, alg_in = best_alg, best_alg = x;
2266 best_alg->log[best_alg->ops] = m;
2267 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2268 cost_limit = cost;
2273 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2274 we have not found any algorithm. */
2275 if (cost_limit == alg_out->cost)
2276 return;
2278 /* If we are getting too long a sequence for `struct algorithm'
2279 to record, make this search fail. */
2280 if (best_alg->ops == MAX_BITS_PER_WORD)
2281 return;
2283 /* Copy the algorithm from temporary space to the space at alg_out.
2284 We avoid using structure assignment because the majority of
2285 best_alg is normally undefined, and this is a critical function. */
2286 alg_out->ops = best_alg->ops + 1;
2287 alg_out->cost = cost_limit;
2288 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2289 alg_out->ops * sizeof *alg_out->op);
2290 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2291 alg_out->ops * sizeof *alg_out->log);
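/* Worked example: for t = 10 the search above can record the three-operation
   sequence 10*x = ((x << 2) + x) << 1 (alg_m, a shift-add, alg_shift), which
   expand_mult then turns into two shifts and an add in place of a multiply.
   A standalone straight-line equivalent:  */

static unsigned int
mult_by_10_demo (x)
     unsigned int x;
{
  unsigned int total = x;	/* alg_m */
  total = (total << 2) + x;	/* alg_add_t2_m, log = 2: 5 * x */
  total <<= 1;			/* alg_shift, log = 1: 10 * x */
  return total;
}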
2294 /* Perform a multiplication and return an rtx for the result.
2295 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2296 TARGET is a suggestion for where to store the result (an rtx).
2298 We check specially for a constant integer as OP1.
2299 If you want this check for OP0 as well, then before calling
2300 you should swap the two operands if OP0 would be constant. */
2302 rtx
2303 expand_mult (mode, op0, op1, target, unsignedp)
2304 enum machine_mode mode;
2305 register rtx op0, op1, target;
2306 int unsignedp;
2308 rtx const_op1 = op1;
2310 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2311 less than or equal in size to `unsigned int' this doesn't matter.
2312 If the mode is larger than `unsigned int', then synth_mult works only
2313 if the constant value exactly fits in an `unsigned int' without any
2314 truncation. This means that multiplying by negative values does
2315 not work; results are off by 2^32 on a 32-bit machine. */
2317 /* If we are multiplying in DImode, it may still be a win
2318 to try to work with shifts and adds. */
2319 if (GET_CODE (op1) == CONST_DOUBLE
2320 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2321 && HOST_BITS_PER_INT >= BITS_PER_WORD
2322 && CONST_DOUBLE_HIGH (op1) == 0)
2323 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2324 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2325 && GET_CODE (op1) == CONST_INT
2326 && INTVAL (op1) < 0)
2327 const_op1 = 0;
2329 /* We used to test optimize here, on the grounds that it's better to
2330 produce a smaller program when -O is not used.
2331 But this causes such a terrible slowdown sometimes
2332 that it seems better to use synth_mult always. */
2334 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2336 struct algorithm alg;
2337 struct algorithm alg2;
2338 HOST_WIDE_INT val = INTVAL (op1);
2339 HOST_WIDE_INT val_so_far;
2340 rtx insn;
2341 int mult_cost;
2342 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2344 /* Try to do the computation three ways: multiply by the negative of OP1
2345 and then negate, do the multiplication directly, or do multiplication
2346 by OP1 - 1. */
2348 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2349 mult_cost = MIN (12 * add_cost, mult_cost);
2351 synth_mult (&alg, val, mult_cost);
2353 /* This works only if the inverted value actually fits in an
2354 `unsigned int' */
2355 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2357 synth_mult (&alg2, - val,
2358 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2359 if (alg2.cost + negate_cost < alg.cost)
2360 alg = alg2, variant = negate_variant;
2363 /* This proves very useful for division-by-constant. */
2364 synth_mult (&alg2, val - 1,
2365 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2366 if (alg2.cost + add_cost < alg.cost)
2367 alg = alg2, variant = add_variant;
2369 if (alg.cost < mult_cost)
2371 /* We found something cheaper than a multiply insn. */
2372 int opno;
2373 rtx accum, tem;
2375 op0 = protect_from_queue (op0, 0);
2377 /* Avoid referencing memory over and over.
2378 For speed, but also for correctness when mem is volatile. */
2379 if (GET_CODE (op0) == MEM)
2380 op0 = force_reg (mode, op0);
2382 /* ACCUM starts out either as OP0 or as a zero, depending on
2383 the first operation. */
2385 if (alg.op[0] == alg_zero)
2387 accum = copy_to_mode_reg (mode, const0_rtx);
2388 val_so_far = 0;
2390 else if (alg.op[0] == alg_m)
2392 accum = copy_to_mode_reg (mode, op0);
2393 val_so_far = 1;
2395 else
2396 abort ();
2398 for (opno = 1; opno < alg.ops; opno++)
2400 int log = alg.log[opno];
2401 int preserve = preserve_subexpressions_p ();
2402 rtx shift_subtarget = preserve ? 0 : accum;
2403 rtx add_target
2404 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2405 && ! preserve)
2406 ? target : 0;
2407 rtx accum_target = preserve ? 0 : accum;
2409 switch (alg.op[opno])
2411 case alg_shift:
2412 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2413 build_int_2 (log, 0), NULL_RTX, 0);
2414 val_so_far <<= log;
2415 break;
2417 case alg_add_t_m2:
2418 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2419 build_int_2 (log, 0), NULL_RTX, 0);
2420 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2421 add_target ? add_target : accum_target);
2422 val_so_far += (HOST_WIDE_INT) 1 << log;
2423 break;
2425 case alg_sub_t_m2:
2426 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2427 build_int_2 (log, 0), NULL_RTX, 0);
2428 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2429 add_target ? add_target : accum_target);
2430 val_so_far -= (HOST_WIDE_INT) 1 << log;
2431 break;
2433 case alg_add_t2_m:
2434 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2435 build_int_2 (log, 0), shift_subtarget,
2437 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2438 add_target ? add_target : accum_target);
2439 val_so_far = (val_so_far << log) + 1;
2440 break;
2442 case alg_sub_t2_m:
2443 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2444 build_int_2 (log, 0), shift_subtarget,
2446 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2447 add_target ? add_target : accum_target);
2448 val_so_far = (val_so_far << log) - 1;
2449 break;
2451 case alg_add_factor:
2452 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2453 build_int_2 (log, 0), NULL_RTX, 0);
2454 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2455 add_target ? add_target : accum_target);
2456 val_so_far += val_so_far << log;
2457 break;
2459 case alg_sub_factor:
2460 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2461 build_int_2 (log, 0), NULL_RTX, 0);
2462 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2463 (add_target ? add_target
2464 : preserve ? 0 : tem));
2465 val_so_far = (val_so_far << log) - val_so_far;
2466 break;
2468 default:
2469 abort ();
2472 /* Write a REG_EQUAL note on the last insn so that we can cse
2473 multiplication sequences. */
2475 insn = get_last_insn ();
2476 REG_NOTES (insn)
2477 = gen_rtx_EXPR_LIST (REG_EQUAL,
2478 gen_rtx_MULT (mode, op0, GEN_INT (val_so_far)),
2479 REG_NOTES (insn));
2482 if (variant == negate_variant)
2484 val_so_far = - val_so_far;
2485 accum = expand_unop (mode, neg_optab, accum, target, 0);
2487 else if (variant == add_variant)
2489 val_so_far = val_so_far + 1;
2490 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2493 if (val != val_so_far)
2494 abort ();
2496 return accum;
2500 /* This used to use umul_optab if unsigned, but for non-widening multiply
2501 there is no difference between signed and unsigned. */
2502 op0 = expand_binop (mode, smul_optab,
2503 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2504 if (op0 == 0)
2505 abort ();
2506 return op0;
2509 /* Return the smallest n such that 2**n >= X. */
2511 int
2512 ceil_log2 (x)
2513 unsigned HOST_WIDE_INT x;
2515 return floor_log2 (x - 1) + 1;
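/* Loop-based standalone equivalent, for illustration: ceil_log2 (5) is 3
   since 2**3 = 8 >= 5, and ceil_log2 (8) is exactly 3.  Assumes a 32-bit
   argument.  */

static int
ceil_log2_demo (x)
     unsigned int x;
{
  int n = 0;

  while (n < 32 && x > (unsigned int) 1 << n)
    n++;
  return n;
}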
2518 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2519 replace division by D, and put the least significant N bits of the result
2520 in *MULTIPLIER_PTR and return the most significant bit.
2522 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2523 needed precision is in PRECISION (should be <= N).
2525 PRECISION should be as small as possible so this function can choose the
2526 multiplier more freely.
2528 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2529 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2531 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2532 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2534 static
2535 unsigned HOST_WIDE_INT
2536 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2537 unsigned HOST_WIDE_INT d;
2538 int n;
2539 int precision;
2540 unsigned HOST_WIDE_INT *multiplier_ptr;
2541 int *post_shift_ptr;
2542 int *lgup_ptr;
2544 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2545 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2546 int lgup, post_shift;
2547 int pow, pow2;
2548 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2550 /* lgup = ceil(log2(divisor)); */
2551 lgup = ceil_log2 (d);
2553 if (lgup > n)
2554 abort ();
2556 pow = n + lgup;
2557 pow2 = n + lgup - precision;
2559 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2561 /* We could handle this with some effort, but this case is much better
2562 handled directly with a scc insn, so rely on caller using that. */
2563 abort ();
2566 /* mlow = 2^(N + lgup)/d */
2567 if (pow >= HOST_BITS_PER_WIDE_INT)
2569 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2570 nl = 0;
2572 else
2574 nh = 0;
2575 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2577 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2578 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2580 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2581 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2582 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2583 else
2584 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2585 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2586 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2588 if (mhigh_hi && nh - d >= d)
2589 abort ();
2590 if (mhigh_hi > 1 || mlow_hi > 1)
2591 abort ();
2592 /* assert that mlow < mhigh. */
2593 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2594 abort ();
2596 /* If precision == N, then mlow, mhigh exceed 2^N
2597 (but they do not exceed 2^(N+1)). */
2599 /* Reduce to lowest terms */
2600 for (post_shift = lgup; post_shift > 0; post_shift--)
2602 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2603 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2604 if (ml_lo >= mh_lo)
2605 break;
2607 mlow_hi = 0;
2608 mlow_lo = ml_lo;
2609 mhigh_hi = 0;
2610 mhigh_lo = mh_lo;
2613 *post_shift_ptr = post_shift;
2614 *lgup_ptr = lgup;
2615 if (n < HOST_BITS_PER_WIDE_INT)
2617 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2618 *multiplier_ptr = mhigh_lo & mask;
2619 return mhigh_lo >= mask;
2621 else
2623 *multiplier_ptr = mhigh_lo;
2624 return mhigh_hi;
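/* Standalone sketch of how a multiplier from this function gets used.
   Worked out by hand for d = 7 with n = precision = 32, it yields the
   33-bit multiplier 2^32 + 0x24924925 (so the function stores 0x24924925
   and returns 1) and post_shift = 3.  Since the top bit is set, the caller
   must use the add-and-halve fixup that expand_divmod emits for this case.
   Assumes `unsigned long long' is 64 bits wide.  */

static unsigned int
udiv7_demo (x)
     unsigned int x;
{
  unsigned int t1 = (unsigned long long) x * 0x24924925 >> 32;
  unsigned int t2 = (x - t1) >> 1;	/* cannot overflow */
  return (t1 + t2) >> 2;		/* post_shift - 1 */
}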
2628 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2629 congruent to 1 (mod 2**N). */
2631 static unsigned HOST_WIDE_INT
2632 invert_mod2n (x, n)
2633 unsigned HOST_WIDE_INT x;
2634 int n;
2636 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2638 /* The algorithm notes that the choice y = x satisfies
2639 x*y == 1 mod 2^3, since x is assumed odd.
2640 Each iteration doubles the number of bits of significance in y. */
2642 unsigned HOST_WIDE_INT mask;
2643 unsigned HOST_WIDE_INT y = x;
2644 int nbit = 3;
2646 mask = (n == HOST_BITS_PER_WIDE_INT
2647 ? ~(unsigned HOST_WIDE_INT) 0
2648 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2650 while (nbit < n)
2652 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2653 nbit *= 2;
2655 return y;
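/* Standalone sketch of the same Newton iteration for a 32-bit word:
   starting from y = x (correct to 3 bits), each pass doubles the number of
   correct low-order bits, so four passes reach 48 >= 32, and the natural
   2**32 wraparound of unsigned arithmetic plays the role of the explicit
   mask above.  */

static unsigned int
invert_mod2n_demo (x)
     unsigned int x;		/* X must be odd.  */
{
  unsigned int y = x;
  int nbit;

  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);
  return y;			/* x * y == 1 (mod 2**32) */
}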
2658 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2659 of the wrong signedness flavor. ADJ_OPERAND is already the high half of the
2660 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2661 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2662 become signed.
2664 The result is put in TARGET if that is convenient.
2666 MODE is the mode of operation. */
2668 rtx
2669 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2670 enum machine_mode mode;
2671 register rtx adj_operand, op0, op1, target;
2672 int unsignedp;
2674 rtx tem;
2675 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2677 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2678 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2679 NULL_RTX, 0);
2680 tem = expand_and (tem, op1, NULL_RTX);
2681 adj_operand
2682 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2683 adj_operand);
2685 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2686 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2687 NULL_RTX, 0);
2688 tem = expand_and (tem, op0, NULL_RTX);
2689 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2690 target);
2692 return target;
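/* Standalone sketch of the adjustment for 32-bit operands, in the
   UNSIGNEDP == 0 direction (adj_code == MINUS): going from an unsigned to a
   signed high part, each negative operand has credited its partner with a
   spurious weight of 2**32, so subtract the partner back out.  Assumes a
   64-bit `unsigned long long'.  */

static int
smulhi_demo (a, b)
     int a, b;
{
  unsigned int ua = (unsigned int) a, ub = (unsigned int) b;
  unsigned int high = (unsigned long long) ua * ub >> 32;

  if (a < 0)
    high -= ub;
  if (b < 0)
    high -= ua;
  return (int) high;
}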
2695 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2696 in TARGET if that is convenient, and return where the result is. If the
2697 operation can not be performed, 0 is returned.
2699 MODE is the mode of operation and result.
2701 UNSIGNEDP nonzero means unsigned multiply.
2703 MAX_COST is the total allowed cost for the expanded RTL. */
2705 rtx
2706 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2707 enum machine_mode mode;
2708 register rtx op0, target;
2709 unsigned HOST_WIDE_INT cnst1;
2710 int unsignedp;
2711 int max_cost;
2713 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2714 optab mul_highpart_optab;
2715 optab moptab;
2716 rtx tem;
2717 int size = GET_MODE_BITSIZE (mode);
2718 rtx op1, wide_op1;
2720 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2721 if (size > HOST_BITS_PER_WIDE_INT)
2722 abort ();
2724 op1 = GEN_INT (cnst1);
2726 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2727 wide_op1 = op1;
2728 else
2729 wide_op1
2730 = immed_double_const (cnst1,
2731 (unsignedp
2732 ? (HOST_WIDE_INT) 0
2733 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2734 wider_mode);
2736 /* expand_mult handles constant multiplication of word_mode
2737 or narrower. It does a poor job for large modes. */
2738 if (size < BITS_PER_WORD
2739 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2741 /* We have to do this, since expand_binop doesn't do conversion for
2742 multiply. Maybe change expand_binop to handle widening multiply? */
2743 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2745 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2746 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2747 build_int_2 (size, 0), NULL_RTX, 1);
2748 return convert_modes (mode, wider_mode, tem, unsignedp);
2751 if (target == 0)
2752 target = gen_reg_rtx (mode);
2754 /* First, try using a multiplication insn that generates only the needed
2755 high part of the product, in the signedness flavor of UNSIGNEDP. */
2756 if (mul_highpart_cost[(int) mode] < max_cost)
2758 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2759 target = expand_binop (mode, mul_highpart_optab,
2760 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2761 if (target)
2762 return target;
2765 /* Second, the same as above, but with the signedness flavor opposite of UNSIGNEDP.
2766 Need to adjust the result after the multiplication. */
2767 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2769 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2770 target = expand_binop (mode, mul_highpart_optab,
2771 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2772 if (target)
2773 /* We used the wrong signedness. Adjust the result. */
2774 return expand_mult_highpart_adjust (mode, target, op0,
2775 op1, target, unsignedp);
2778 /* Try widening multiplication. */
2779 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2780 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2781 && mul_widen_cost[(int) wider_mode] < max_cost)
2783 op1 = force_reg (mode, op1);
2784 goto try;
2787 /* Try widening the mode and performing a non-widening multiplication. */
2788 moptab = smul_optab;
2789 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2790 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2792 op1 = wide_op1;
2793 goto try;
2796 /* Try widening multiplication of opposite signedness, and adjust. */
2797 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2798 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2799 && (mul_widen_cost[(int) wider_mode]
2800 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2802 rtx regop1 = force_reg (mode, op1);
2803 tem = expand_binop (wider_mode, moptab, op0, regop1,
2804 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2805 if (tem != 0)
2807 /* Extract the high half of the just generated product. */
2808 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2809 build_int_2 (size, 0), NULL_RTX, 1);
2810 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2811 /* We used the wrong signedness. Adjust the result. */
2812 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2813 target, unsignedp);
2817 return 0;
2819 try:
2820 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2821 tem = expand_binop (wider_mode, moptab, op0, op1,
2822 NULL_RTX, unsignedp, OPTAB_WIDEN);
2823 if (tem == 0)
2824 return 0;
2826 /* Extract the high half of the just generated product. */
2827 if (mode == word_mode)
2829 return gen_highpart (mode, tem);
2831 else
2833 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2834 build_int_2 (size, 0), NULL_RTX, 1);
2835 return convert_modes (mode, wider_mode, tem, unsignedp);
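/* The widening strategy in miniature (standalone sketch, 32-bit mode on a
   host with a 64-bit multiply): widen both operands, multiply, and take the
   upper half with a right shift by the mode's bitsize.  */

static unsigned int
umulhi_demo (a, b)
     unsigned int a, b;
{
  return (unsigned long long) a * b >> 32;
}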
2839 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2840 if that is convenient, and returning where the result is.
2841 You may request either the quotient or the remainder as the result;
2842 specify REM_FLAG nonzero to get the remainder.
2844 CODE is the expression code for which kind of division this is;
2845 it controls how rounding is done. MODE is the machine mode to use.
2846 UNSIGNEDP nonzero means do unsigned division. */
2848 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2849 and then correct it by or'ing in missing high bits
2850 if result of ANDI is nonzero.
2851 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2852 This could optimize to a bfexts instruction.
2853 But C doesn't use these operations, so their optimizations are
2854 left for later. */
2855 /* ??? For modulo, we don't actually need the highpart of the first product,
2856 the low part will do nicely. And for small divisors, the second multiply
2857 can also be a low-part only multiply or even be completely left out.
2858 E.g. to calculate the remainder of a division by 3 with a 32-bit
2859 multiply, multiply with 0x55555556 and extract the upper two bits;
2860 the result is exact for inputs up to 0x1fffffff.
2861 The input range can be reduced by using cross-sum rules.
2862 For odd divisors >= 3, the following table gives right shift counts
2863 so that if a number is shifted by an integer multiple of the given
2864 amount, the remainder stays the same:
2865 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2866 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2867 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2868 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2869 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2871 Cross-sum rules for even numbers can be derived by leaving as many bits
2872 to the right alone as the divisor has zeros to the right.
2873 E.g. if x is an unsigned 32-bit number:
2874 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28 */
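/* Standalone check of the divide-by-3 remainder trick from the note above.
   0x55555556 is (2**32 + 2) / 3, so for x = 3q + r the low 32 bits of the
   product x * 0x55555556 are 2q plus r * 0x55555556 (mod 2**32); while
   x <= 0x1fffffff the 2q term stays below 2**30, so the top two bits of
   that word are exactly r.  */

static unsigned int
urem3_demo (x)
     unsigned int x;		/* Exact for x <= 0x1fffffff.  */
{
  return x * 0x55555556 >> 30;
}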
2877 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
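/* Standalone sketch of the FLOOR_DIV/FLOOR_MOD compensation that
   expand_divmod performs below: take the truncating quotient and remainder,
   then step the quotient down (and the remainder up by the divisor) exactly
   when the remainder is nonzero and the operands' signs differ, i.e. when
   truncation rounded toward zero instead of toward minus infinity.  */

static void
floor_divmod_demo (a, b, q, r)
     int a, b, *q, *r;
{
  int tq = a / b;		/* Truncating division (guaranteed in C99).  */
  int tr = a % b;

  if (tr != 0 && (a ^ b) < 0)
    {
      tq -= 1;
      tr += b;
    }
  *q = tq;
  *r = tr;
}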
2879 rtx
2880 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2881 int rem_flag;
2882 enum tree_code code;
2883 enum machine_mode mode;
2884 register rtx op0, op1, target;
2885 int unsignedp;
2887 enum machine_mode compute_mode;
2888 register rtx tquotient;
2889 rtx quotient = 0, remainder = 0;
2890 rtx last;
2891 int size;
2892 rtx insn, set;
2893 optab optab1, optab2;
2894 int op1_is_constant, op1_is_pow2;
2895 int max_cost, extra_cost;
2896 static HOST_WIDE_INT last_div_const = 0;
2898 op1_is_constant = GET_CODE (op1) == CONST_INT;
2899 op1_is_pow2 = (op1_is_constant
2900 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2901 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2903 /*
2904 This is the structure of expand_divmod:
2906 First comes code to fix up the operands so we can perform the operations
2907 correctly and efficiently.
2909 Second comes a switch statement with code specific for each rounding mode.
2910 For some special operands this code emits all RTL for the desired
2911 operation, for other cases, it generates only a quotient and stores it in
2912 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2913 to indicate that it has not done anything.
2915 Last comes code that finishes the operation. If QUOTIENT is set and
2916 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2917 QUOTIENT is not set, it is computed using trunc rounding.
2919 We try to generate special code for division and remainder when OP1 is a
2920 constant. If |OP1| = 2**n we can use shifts and some other fast
2921 operations. For other values of OP1, we compute a carefully selected
2922 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2923 by m.
2925 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2926 half of the product. Different strategies for generating the product are
2927 implemented in expand_mult_highpart.
2929 If what we actually want is the remainder, we generate that by another
2930 by-constant multiplication and a subtraction. */
2932 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2933 code below will malfunction if we are, so check here and handle
2934 the special case if so. */
2935 if (op1 == const1_rtx)
2936 return rem_flag ? const0_rtx : op0;
2938 if (target
2939 /* Don't use the function value register as a target
2940 since we have to read it as well as write it,
2941 and function-inlining gets confused by this. */
2942 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2943 /* Don't clobber an operand while doing a multi-step calculation. */
2944 || ((rem_flag || op1_is_constant)
2945 && (reg_mentioned_p (target, op0)
2946 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2947 || reg_mentioned_p (target, op1)
2948 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2949 target = 0;
2951 /* Get the mode in which to perform this computation. Normally it will
2952 be MODE, but sometimes we can't do the desired operation in MODE.
2953 If so, pick a wider mode in which we can do the operation. Convert
2954 to that mode at the start to avoid repeated conversions.
2956 First see what operations we need. These depend on the expression
2957 we are evaluating. (We assume that divxx3 insns exist under the
2958 same conditions as modxx3 insns, and that these insns don't normally
2959 fail. If these assumptions are not correct, we may generate less
2960 efficient code in some cases.)
2962 Then see if we find a mode in which we can open-code that operation
2963 (either a division, modulus, or shift). Finally, check for the smallest
2964 mode for which we can do the operation with a library call. */
2966 /* We might want to refine this now that we have division-by-constant
2967 optimization. Since expand_mult_highpart tries so many variants, it is
2968 not straightforward to generalize this. Maybe we should make an array
2969 of possible modes in init_expmed? Save this for GCC 2.7. */
2971 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2972 : (unsignedp ? udiv_optab : sdiv_optab));
2973 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2975 for (compute_mode = mode; compute_mode != VOIDmode;
2976 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2977 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2978 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2979 break;
2981 if (compute_mode == VOIDmode)
2982 for (compute_mode = mode; compute_mode != VOIDmode;
2983 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2984 if (optab1->handlers[(int) compute_mode].libfunc
2985 || optab2->handlers[(int) compute_mode].libfunc)
2986 break;
2988 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2989 in expand_binop. */
2990 if (compute_mode == VOIDmode)
2991 compute_mode = mode;
2993 if (target && GET_MODE (target) == compute_mode)
2994 tquotient = target;
2995 else
2996 tquotient = gen_reg_rtx (compute_mode);
2998 size = GET_MODE_BITSIZE (compute_mode);
2999 #if 0
3000 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3001 (mode), and thereby get better code when OP1 is a constant. Do that
3002 later. It will require going over all usages of SIZE below. */
3003 size = GET_MODE_BITSIZE (mode);
3004 #endif
3006 /* Only deduct something for a REM if the last divide done was
3007 for a different constant. Then set the constant of the last
3008 divide. */
3009 max_cost = div_cost[(int) compute_mode]
3010 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3011 && INTVAL (op1) == last_div_const)
3012 ? mul_cost[(int) compute_mode] + add_cost : 0);
3014 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3016 /* Now convert to the best mode to use. */
3017 if (compute_mode != mode)
3019 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3020 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3022 /* convert_modes may have placed op1 into a register, so we
3023 must recompute the following. */
3024 op1_is_constant = GET_CODE (op1) == CONST_INT;
3025 op1_is_pow2 = (op1_is_constant
3026 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3027 || (! unsignedp
3028 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3031 /* If one of the operands is a volatile MEM, copy it into a register. */
3033 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3034 op0 = force_reg (compute_mode, op0);
3035 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3036 op1 = force_reg (compute_mode, op1);
3038 /* If we need the remainder or if OP1 is constant, we need to
3039 put OP0 in a register in case it has any queued subexpressions. */
3040 if (rem_flag || op1_is_constant)
3041 op0 = force_reg (compute_mode, op0);
3043 last = get_last_insn ();
3045 /* Promote floor rounding to trunc rounding for unsigned operations. */
3046 if (unsignedp)
3048 if (code == FLOOR_DIV_EXPR)
3049 code = TRUNC_DIV_EXPR;
3050 if (code == FLOOR_MOD_EXPR)
3051 code = TRUNC_MOD_EXPR;
3052 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3053 code = TRUNC_DIV_EXPR;
3056 if (op1 != const0_rtx)
3057 switch (code)
3059 case TRUNC_MOD_EXPR:
3060 case TRUNC_DIV_EXPR:
3061 if (op1_is_constant)
3063 if (unsignedp)
3065 unsigned HOST_WIDE_INT mh, ml;
3066 int pre_shift, post_shift;
3067 int dummy;
3068 unsigned HOST_WIDE_INT d = INTVAL (op1);
3070 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3072 pre_shift = floor_log2 (d);
3073 if (rem_flag)
3075 remainder
3076 = expand_binop (compute_mode, and_optab, op0,
3077 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3078 remainder, 1,
3079 OPTAB_LIB_WIDEN);
3080 if (remainder)
3081 return gen_lowpart (mode, remainder);
3083 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3084 build_int_2 (pre_shift, 0),
3085 tquotient, 1);
3087 else if (size <= HOST_BITS_PER_WIDE_INT)
3089 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3091 /* Most significant bit of divisor is set; emit an scc
3092 insn. */
3093 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3094 compute_mode, 1, 1);
3095 if (quotient == 0)
3096 goto fail1;
3098 else
3100 /* Find a suitable multiplier and right shift count
3101 instead of dividing by D. */
3103 mh = choose_multiplier (d, size, size,
3104 &ml, &post_shift, &dummy);
3106 /* If the suggested multiplier is more than SIZE bits,
3107 we can do better for even divisors, using an
3108 initial right shift. */
3109 if (mh != 0 && (d & 1) == 0)
3111 pre_shift = floor_log2 (d & -d);
3112 mh = choose_multiplier (d >> pre_shift, size,
3113 size - pre_shift,
3114 &ml, &post_shift, &dummy);
3115 if (mh)
3116 abort ();
3118 else
3119 pre_shift = 0;
3121 if (mh != 0)
3123 rtx t1, t2, t3, t4;
3125 extra_cost = (shift_cost[post_shift - 1]
3126 + shift_cost[1] + 2 * add_cost);
3127 t1 = expand_mult_highpart (compute_mode, op0, ml,
3128 NULL_RTX, 1,
3129 max_cost - extra_cost);
3130 if (t1 == 0)
3131 goto fail1;
3132 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3133 op0, t1),
3134 NULL_RTX);
3135 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3136 build_int_2 (1, 0), NULL_RTX,1);
3137 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3138 t1, t3),
3139 NULL_RTX);
3140 quotient
3141 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3142 build_int_2 (post_shift - 1, 0),
3143 tquotient, 1);
3145 else
3147 rtx t1, t2;
3149 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3150 build_int_2 (pre_shift, 0),
3151 NULL_RTX, 1);
3152 extra_cost = (shift_cost[pre_shift]
3153 + shift_cost[post_shift]);
3154 t2 = expand_mult_highpart (compute_mode, t1, ml,
3155 NULL_RTX, 1,
3156 max_cost - extra_cost);
3157 if (t2 == 0)
3158 goto fail1;
3159 quotient
3160 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3161 build_int_2 (post_shift, 0),
3162 tquotient, 1);
3166 else /* Mode is too wide to use the tricky code */
3167 break;
3169 insn = get_last_insn ();
3170 if (insn != last
3171 && (set = single_set (insn)) != 0
3172 && SET_DEST (set) == quotient)
3173 REG_NOTES (insn)
3174 = gen_rtx_EXPR_LIST (REG_EQUAL,
3175 gen_rtx_UDIV (compute_mode, op0, op1),
3176 REG_NOTES (insn));
3178 else /* TRUNC_DIV, signed */
3180 unsigned HOST_WIDE_INT ml;
3181 int lgup, post_shift;
3182 HOST_WIDE_INT d = INTVAL (op1);
3183 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3185 /* n rem d = n rem -d */
3186 if (rem_flag && d < 0)
3188 d = abs_d;
3189 op1 = GEN_INT (abs_d);
3192 if (d == 1)
3193 quotient = op0;
3194 else if (d == -1)
3195 quotient = expand_unop (compute_mode, neg_optab, op0,
3196 tquotient, 0);
3197 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3199 /* This case is not handled correctly below. */
3200 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3201 compute_mode, 1, 1);
3202 if (quotient == 0)
3203 goto fail1;
3205 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3206 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3208 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3210 lgup = floor_log2 (abs_d);
3211 if (abs_d != 2 && BRANCH_COST < 3)
3213 rtx label = gen_label_rtx ();
3214 rtx t1;
3216 t1 = copy_to_mode_reg (compute_mode, op0);
3217 do_cmp_and_jump (t1, const0_rtx, GE,
3218 compute_mode, label);
3219 expand_inc (t1, GEN_INT (abs_d - 1));
3220 emit_label (label);
3221 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3222 build_int_2 (lgup, 0),
3223 tquotient, 0);
3225 else
3227 rtx t1, t2, t3;
3228 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3229 build_int_2 (size - 1, 0),
3230 NULL_RTX, 0);
3231 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3232 build_int_2 (size - lgup, 0),
3233 NULL_RTX, 1);
3234 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3235 op0, t2),
3236 NULL_RTX);
3237 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3238 build_int_2 (lgup, 0),
3239 tquotient, 0);
3242 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3243 the quotient. */
3244 if (d < 0)
3246 insn = get_last_insn ();
3247 if (insn != last
3248 && (set = single_set (insn)) != 0
3249 && SET_DEST (set) == quotient)
3250 REG_NOTES (insn)
3251 = gen_rtx_EXPR_LIST (REG_EQUAL,
3252 gen_rtx_DIV (compute_mode,
3253 op0,
3254 GEN_INT (abs_d)),
3255 REG_NOTES (insn));
3257 quotient = expand_unop (compute_mode, neg_optab,
3258 quotient, quotient, 0);
3261 else if (size <= HOST_BITS_PER_WIDE_INT)
3263 choose_multiplier (abs_d, size, size - 1,
3264 &ml, &post_shift, &lgup);
3265 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3267 rtx t1, t2, t3;
3269 extra_cost = (shift_cost[post_shift]
3270 + shift_cost[size - 1] + add_cost);
3271 t1 = expand_mult_highpart (compute_mode, op0, ml,
3272 NULL_RTX, 0,
3273 max_cost - extra_cost);
3274 if (t1 == 0)
3275 goto fail1;
3276 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3277 build_int_2 (post_shift, 0), NULL_RTX, 0);
3278 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3279 build_int_2 (size - 1, 0), NULL_RTX, 0);
3280 if (d < 0)
3281 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2),
3282 tquotient);
3283 else
3284 quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3),
3285 tquotient);
3287 else
3289 rtx t1, t2, t3, t4;
3291 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3292 extra_cost = (shift_cost[post_shift]
3293 + shift_cost[size - 1] + 2 * add_cost);
3294 t1 = expand_mult_highpart (compute_mode, op0, ml,
3295 NULL_RTX, 0,
3296 max_cost - extra_cost);
3297 if (t1 == 0)
3298 goto fail1;
3299 t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0),
3300 NULL_RTX);
3301 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3302 build_int_2 (post_shift, 0), NULL_RTX, 0);
3303 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3304 build_int_2 (size - 1, 0), NULL_RTX, 0);
3305 if (d < 0)
3306 quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3),
3307 tquotient);
3308 else
3309 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4),
3310 tquotient);
3313 else /* Mode is too wide to use the tricky code */
3314 break;
3316 insn = get_last_insn ();
3317 if (insn != last
3318 && (set = single_set (insn)) != 0
3319 && SET_DEST (set) == quotient)
3320 REG_NOTES (insn)
3321 = gen_rtx_EXPR_LIST (REG_EQUAL,
3322 gen_rtx_DIV (compute_mode, op0, op1),
3323 REG_NOTES (insn));
3325 break;
3327 fail1:
3328 delete_insns_since (last);
3329 break;
3331 case FLOOR_DIV_EXPR:
3332 case FLOOR_MOD_EXPR:
3333 /* We will come here only for signed operations. */
3334 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3336 unsigned HOST_WIDE_INT mh, ml;
3337 int pre_shift, lgup, post_shift;
3338 HOST_WIDE_INT d = INTVAL (op1);
3340 if (d > 0)
3342 /* We could just as easily deal with negative constants here,
3343 but it does not seem worth the trouble for GCC 2.6. */
3344 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3346 pre_shift = floor_log2 (d);
3347 if (rem_flag)
3349 remainder = expand_binop (compute_mode, and_optab, op0,
3350 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3351 remainder, 0, OPTAB_LIB_WIDEN);
3352 if (remainder)
3353 return gen_lowpart (mode, remainder);
3355 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3356 build_int_2 (pre_shift, 0),
3357 tquotient, 0);
3359 else
3361 rtx t1, t2, t3, t4;
3363 mh = choose_multiplier (d, size, size - 1,
3364 &ml, &post_shift, &lgup);
3365 if (mh)
3366 abort ();
3368 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3369 build_int_2 (size - 1, 0), NULL_RTX, 0);
3370 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3371 NULL_RTX, 0, OPTAB_WIDEN);
3372 extra_cost = (shift_cost[post_shift]
3373 + shift_cost[size - 1] + 2 * add_cost);
3374 t3 = expand_mult_highpart (compute_mode, t2, ml,
3375 NULL_RTX, 1,
3376 max_cost - extra_cost);
3377 if (t3 != 0)
3379 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3380 build_int_2 (post_shift, 0),
3381 NULL_RTX, 1);
3382 quotient = expand_binop (compute_mode, xor_optab,
3383 t4, t1, tquotient, 0,
3384 OPTAB_WIDEN);
3388 else
3390 rtx nsign, t1, t2, t3, t4;
3391 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3392 op0, constm1_rtx), NULL_RTX);
3393 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3394 0, OPTAB_WIDEN);
3395 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3396 build_int_2 (size - 1, 0), NULL_RTX, 0);
3397 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3398 NULL_RTX);
3399 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3400 NULL_RTX, 0);
3401 if (t4)
3403 rtx t5;
3404 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3405 NULL_RTX, 0);
3406 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3407 t4, t5),
3408 tquotient);
3413 if (quotient != 0)
3414 break;
3415 delete_insns_since (last);
3417 /* Try using an instruction that produces both the quotient and
3418 remainder, using truncation. We can easily compensate the quotient
3419 or remainder to get floor rounding, once we have the remainder.
3420 Notice that we also compute the final remainder value here,
3421 and return the result right away. */
3422 if (target == 0 || GET_MODE (target) != compute_mode)
3423 target = gen_reg_rtx (compute_mode);
3425 if (rem_flag)
3427 remainder
3428 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3429 quotient = gen_reg_rtx (compute_mode);
3431 else
3433 quotient
3434 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3435 remainder = gen_reg_rtx (compute_mode);
3438 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3439 quotient, remainder, 0))
3441 /* This could be computed with a branch-less sequence.
3442 Save that for later. */
3443 rtx tem;
3444 rtx label = gen_label_rtx ();
3445 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3446 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3447 NULL_RTX, 0, OPTAB_WIDEN);
3448 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3449 expand_dec (quotient, const1_rtx);
3450 expand_inc (remainder, op1);
3451 emit_label (label);
3452 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3455 /* No luck with division elimination or divmod. Have to do it
3456 by conditionally adjusting op0 *and* the result. */
3458 rtx label1, label2, label3, label4, label5;
3459 rtx adjusted_op0;
3460 rtx tem;
3462 quotient = gen_reg_rtx (compute_mode);
3463 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3464 label1 = gen_label_rtx ();
3465 label2 = gen_label_rtx ();
3466 label3 = gen_label_rtx ();
3467 label4 = gen_label_rtx ();
3468 label5 = gen_label_rtx ();
3469 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3470 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3471 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3472 quotient, 0, OPTAB_LIB_WIDEN);
3473 if (tem != quotient)
3474 emit_move_insn (quotient, tem);
3475 emit_jump_insn (gen_jump (label5));
3476 emit_barrier ();
3477 emit_label (label1);
3478 expand_inc (adjusted_op0, const1_rtx);
3479 emit_jump_insn (gen_jump (label4));
3480 emit_barrier ();
3481 emit_label (label2);
3482 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3483 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3484 quotient, 0, OPTAB_LIB_WIDEN);
3485 if (tem != quotient)
3486 emit_move_insn (quotient, tem);
3487 emit_jump_insn (gen_jump (label5));
3488 emit_barrier ();
3489 emit_label (label3);
3490 expand_dec (adjusted_op0, const1_rtx);
3491 emit_label (label4);
3492 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3493 quotient, 0, OPTAB_LIB_WIDEN);
3494 if (tem != quotient)
3495 emit_move_insn (quotient, tem);
3496 expand_dec (quotient, const1_rtx);
3497 emit_label (label5);
3499 break;
3501 case CEIL_DIV_EXPR:
3502 case CEIL_MOD_EXPR:
3503 if (unsignedp)
3505 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3507 rtx t1, t2, t3;
3508 unsigned HOST_WIDE_INT d = INTVAL (op1);
3509 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3510 build_int_2 (floor_log2 (d), 0),
3511 tquotient, 1);
3512 t2 = expand_binop (compute_mode, and_optab, op0,
3513 GEN_INT (d - 1),
3514 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3515 t3 = gen_reg_rtx (compute_mode);
3516 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3517 compute_mode, 1, 1);
3518 if (t3 == 0)
3520 rtx lab;
3521 lab = gen_label_rtx ();
3522 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3523 expand_inc (t1, const1_rtx);
3524 emit_label (lab);
3525 quotient = t1;
3527 else
3528 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3529 t1, t3),
3530 tquotient);
3531 break;
3534 /* Try using an instruction that produces both the quotient and
3535 remainder, using truncation. We can easily compensate the
3536 quotient or remainder to get ceiling rounding, once we have the
3537 remainder. Notice that we also compute the final remainder
3538 value here, and return the result right away. */
3539 if (target == 0 || GET_MODE (target) != compute_mode)
3540 target = gen_reg_rtx (compute_mode);
3542 if (rem_flag)
3544 remainder = (GET_CODE (target) == REG
3545 ? target : gen_reg_rtx (compute_mode));
3546 quotient = gen_reg_rtx (compute_mode);
3548 else
3550 quotient = (GET_CODE (target) == REG
3551 ? target : gen_reg_rtx (compute_mode));
3552 remainder = gen_reg_rtx (compute_mode);
3555 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3556 remainder, 1))
3558 /* This could be computed with a branch-less sequence.
3559 Save that for later. */
3560 rtx label = gen_label_rtx ();
3561 do_cmp_and_jump (remainder, const0_rtx, EQ,
3562 compute_mode, label);
3563 expand_inc (quotient, const1_rtx);
3564 expand_dec (remainder, op1);
3565 emit_label (label);
3566 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3569 /* No luck with division elimination or divmod. Have to do it
3570 by conditionally adjusting op0 *and* the result. */
3572 rtx label1, label2;
3573 rtx adjusted_op0, tem;
3575 quotient = gen_reg_rtx (compute_mode);
3576 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3577 label1 = gen_label_rtx ();
3578 label2 = gen_label_rtx ();
3579 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3580 compute_mode, label1);
3581 emit_move_insn (quotient, const0_rtx);
3582 emit_jump_insn (gen_jump (label2));
3583 emit_barrier ();
3584 emit_label (label1);
3585 expand_dec (adjusted_op0, const1_rtx);
3586 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3587 quotient, 1, OPTAB_LIB_WIDEN);
3588 if (tem != quotient)
3589 emit_move_insn (quotient, tem);
3590 expand_inc (quotient, const1_rtx);
3591 emit_label (label2);
3594 else /* signed */
3596 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3597 && INTVAL (op1) >= 0)
3599 /* This is extremely similar to the code for the unsigned case
3600 above. For 2.7 we should merge these variants, but for
3601 2.6.1 I don't want to touch the code for unsigned since that
3602 gets used in C. The signed case will only be used by other
3603 languages (Ada). */
3605 rtx t1, t2, t3;
3606 unsigned HOST_WIDE_INT d = INTVAL (op1);
3607 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3608 build_int_2 (floor_log2 (d), 0),
3609 tquotient, 0);
3610 t2 = expand_binop (compute_mode, and_optab, op0,
3611 GEN_INT (d - 1),
3612 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3613 t3 = gen_reg_rtx (compute_mode);
3614 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3615 compute_mode, 1, 1);
3616 if (t3 == 0)
3618 rtx lab;
3619 lab = gen_label_rtx ();
3620 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3621 expand_inc (t1, const1_rtx);
3622 emit_label (lab);
3623 quotient = t1;
3625 else
3626 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3627 t1, t3),
3628 tquotient);
3629 break;
3632 /* Try using an instruction that produces both the quotient and
3633 remainder, using truncation. We can easily compensate the
3634 quotient or remainder to get ceiling rounding, once we have the
3635 remainder. Notice that we also compute the final remainder
3636 value here, and return the result right away. */
3637 if (target == 0 || GET_MODE (target) != compute_mode)
3638 target = gen_reg_rtx (compute_mode);
3639 if (rem_flag)
3641 remainder = (GET_CODE (target) == REG
3642 ? target : gen_reg_rtx (compute_mode));
3643 quotient = gen_reg_rtx (compute_mode);
3645 else
3647 quotient = (GET_CODE (target) == REG
3648 ? target : gen_reg_rtx (compute_mode));
3649 remainder = gen_reg_rtx (compute_mode);
3652 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3653 remainder, 0))
3655 /* This could be computed with a branch-less sequence.
3656 Save that for later. */
3657 rtx tem;
3658 rtx label = gen_label_rtx ();
3659 do_cmp_and_jump (remainder, const0_rtx, EQ,
3660 compute_mode, label);
3661 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3662 NULL_RTX, 0, OPTAB_WIDEN);
3663 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3664 expand_inc (quotient, const1_rtx);
3665 expand_dec (remainder, op1);
3666 emit_label (label);
3667 return gen_lowpart (mode, rem_flag ? remainder : quotient);
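/* Illustrative sketch, not part of the original source: the signed
   compensation just emitted, with an invented helper name.  Truncation
   differs from ceiling only when the remainder is nonzero and the
   quotient is positive, i.e. when the operands have the same sign,
   which the XOR sign test above detects without a multiply:

     static void
     ceil_sdivmod_sketch (int x, int y, int *q, int *r)
     {
       *q = x / y;
       *r = x % y;
       if (*r != 0 && (x ^ y) >= 0)
         {
           (*q)++;
           *r -= y;
         }
     }
*/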
3670 /* No luck with division elimination or divmod. Have to do it
3671 by conditionally adjusting op0 *and* the result. */
3673 rtx label1, label2, label3, label4, label5;
3674 rtx adjusted_op0;
3675 rtx tem;
3677 quotient = gen_reg_rtx (compute_mode);
3678 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3679 label1 = gen_label_rtx ();
3680 label2 = gen_label_rtx ();
3681 label3 = gen_label_rtx ();
3682 label4 = gen_label_rtx ();
3683 label5 = gen_label_rtx ();
3684 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3685 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3686 compute_mode, label1);
3687 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3688 quotient, 0, OPTAB_LIB_WIDEN);
3689 if (tem != quotient)
3690 emit_move_insn (quotient, tem);
3691 emit_jump_insn (gen_jump (label5));
3692 emit_barrier ();
3693 emit_label (label1);
3694 expand_dec (adjusted_op0, const1_rtx);
3695 emit_jump_insn (gen_jump (label4));
3696 emit_barrier ();
3697 emit_label (label2);
3698 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3699 compute_mode, label3);
3700 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3701 quotient, 0, OPTAB_LIB_WIDEN);
3702 if (tem != quotient)
3703 emit_move_insn (quotient, tem);
3704 emit_jump_insn (gen_jump (label5));
3705 emit_barrier ();
3706 emit_label (label3);
3707 expand_inc (adjusted_op0, const1_rtx);
3708 emit_label (label4);
3709 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3710 quotient, 0, OPTAB_LIB_WIDEN);
3711 if (tem != quotient)
3712 emit_move_insn (quotient, tem);
3713 expand_inc (quotient, const1_rtx);
3714 emit_label (label5);
3717 break;
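/* Illustrative sketch, not part of the original source: the case
   analysis behind the five-label sequence above, with an invented
   helper name.  When the operands have like signs the quotient is
   positive, so move the dividend one step toward zero, divide, and add
   one; otherwise truncation already rounds toward the ceiling:

     static int
     ceil_sdiv_sketch (int x, int y)
     {
       if (y >= 0)
         return x > 0 ? (x - 1) / y + 1 : x / y;
       else
         return x < 0 ? (x + 1) / y + 1 : x / y;
     }
*/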
3719 case EXACT_DIV_EXPR:
3720 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3722 HOST_WIDE_INT d = INTVAL (op1);
3723 unsigned HOST_WIDE_INT ml;
3724 int post_shift;
3725 rtx t1;
3727 post_shift = floor_log2 (d & -d);
3728 ml = invert_mod2n (d >> post_shift, size);
3729 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3730 unsignedp);
3731 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3732 build_int_2 (post_shift, 0),
3733 NULL_RTX, unsignedp);
3735 insn = get_last_insn ();
3736 REG_NOTES (insn)
3737 = gen_rtx_EXPR_LIST (REG_EQUAL,
3738 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3739 compute_mode,
3740 op0, op1),
3741 REG_NOTES (insn));
3743 break;
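/* Illustrative sketch, not part of the original source: why the
   multiply-and-shift above is exact, assuming a 32-bit `unsigned int'
   and an invented helper name.  Every odd D has a multiplicative
   inverse modulo 2**N, computable by Newton iteration (each step
   doubles the number of correct low bits, starting from D itself since
   D*D == 1 mod 8), and when X is known to be a multiple of D the
   inverse replaces the division outright:

     static unsigned int
     exact_udiv_sketch (unsigned int x, unsigned int d)
     {
       int s = 0;
       unsigned int m;
       while ((d & 1) == 0)
         d >>= 1, s++;
       m = d;
       m *= 2 - d * m;
       m *= 2 - d * m;
       m *= 2 - d * m;
       m *= 2 - d * m;
       return (x * m) >> s;
     }
*/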
3745 case ROUND_DIV_EXPR:
3746 case ROUND_MOD_EXPR:
3747 if (unsignedp)
3749 rtx tem;
3750 rtx label;
3751 label = gen_label_rtx ();
3752 quotient = gen_reg_rtx (compute_mode);
3753 remainder = gen_reg_rtx (compute_mode);
3754 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3756 rtx tem;
3757 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3758 quotient, 1, OPTAB_LIB_WIDEN);
3759 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3760 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3761 remainder, 1, OPTAB_LIB_WIDEN);
3763 tem = plus_constant (op1, -1);
3764 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3765 build_int_2 (1, 0), NULL_RTX, 1);
3766 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3767 expand_inc (quotient, const1_rtx);
3768 expand_dec (remainder, op1);
3769 emit_label (label);
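/* Illustrative sketch, not part of the original source: the unsigned
   round-to-nearest adjustment above, with an invented helper name.
   After a truncating divmod, round up exactly when the remainder
   exceeds (D - 1) / 2, which rounds halves upward for even D:

     static void
     round_udivmod_sketch (unsigned int x, unsigned int d,
                           unsigned int *q, unsigned int *r)
     {
       *q = x / d;
       *r = x % d;
       if (*r > (d - 1) / 2)
         {
           (*q)++;
           *r -= d;
         }
     }
*/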
3771 else
3773 rtx abs_rem, abs_op1, tem, mask;
3774 rtx label;
3775 label = gen_label_rtx ();
3776 quotient = gen_reg_rtx (compute_mode);
3777 remainder = gen_reg_rtx (compute_mode);
3778 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3780 rtx tem;
3781 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3782 quotient, 0, OPTAB_LIB_WIDEN);
3783 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3784 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3785 remainder, 0, OPTAB_LIB_WIDEN);
3787 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3788 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3789 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3790 build_int_2 (1, 0), NULL_RTX, 1);
3791 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3792 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3793 NULL_RTX, 0, OPTAB_WIDEN);
3794 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3795 build_int_2 (size - 1, 0), NULL_RTX, 0);
3796 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3797 NULL_RTX, 0, OPTAB_WIDEN);
3798 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3799 NULL_RTX, 0, OPTAB_WIDEN);
3800 expand_inc (quotient, tem);
3801 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3802 NULL_RTX, 0, OPTAB_WIDEN);
3803 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3804 NULL_RTX, 0, OPTAB_WIDEN);
3805 expand_dec (remainder, tem);
3806 emit_label (label);
3808 return gen_lowpart (mode, rem_flag ? remainder : quotient);
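/* Illustrative sketch, not part of the original source: the signed
   round-to-nearest adjustment above, including its branch-free
   conditional negation, assuming a 32-bit int with arithmetic right
   shifts and an invented helper name.  MASK is all ones when X and Y
   differ in sign, and (V ^ MASK) - MASK negates V without a branch:

     static void
     round_sdivmod_sketch (int x, int y, int *q, int *r)
     {
       unsigned int abs_r, abs_y;
       int mask;
       *q = x / y;
       *r = x % y;
       abs_r = *r < 0 ? - (unsigned int) *r : (unsigned int) *r;
       abs_y = y < 0 ? - (unsigned int) y : (unsigned int) y;
       if (2 * abs_r >= abs_y)
         {
           mask = (x ^ y) >> 31;
           *q += (1 ^ mask) - mask;
           *r -= (y ^ mask) - mask;
         }
     }
*/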
3810 default:
3811 abort ();
3814 if (quotient == 0)
3816 if (target && GET_MODE (target) != compute_mode)
3817 target = 0;
3819 if (rem_flag)
3821 /* Try to produce the remainder without producing the quotient.
3822 If we seem to have a divmod pattern that does not require widening,
3823 don't try widening here. We should really have a WIDEN argument
3824 to expand_twoval_binop, since what we'd really like to do here is
3825 1) try a mod insn in compute_mode
3826 2) try a divmod insn in compute_mode
3827 3) try a div insn in compute_mode and multiply-subtract to get
3828 remainder
3829 4) try the same things with widening allowed. */
3830 remainder
3831 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3832 op0, op1, target,
3833 unsignedp,
3834 ((optab2->handlers[(int) compute_mode].insn_code
3835 != CODE_FOR_nothing)
3836 ? OPTAB_DIRECT : OPTAB_WIDEN));
3837 if (remainder == 0)
3839 /* No luck there. Can we do remainder and divide at once
3840 without a library call? */
3841 remainder = gen_reg_rtx (compute_mode);
3842 if (! expand_twoval_binop ((unsignedp
3843 ? udivmod_optab
3844 : sdivmod_optab),
3845 op0, op1,
3846 NULL_RTX, remainder, unsignedp))
3847 remainder = 0;
3850 if (remainder)
3851 return gen_lowpart (mode, remainder);
3854 /* Produce the quotient. Try a quotient insn, but not a library call.
3855 If we have a divmod in this mode, use it in preference to widening
3856 the div (for this test we assume it will not fail). Note that optab2
3857 is set to the one of the two optabs that the call below will use. */
3858 quotient
3859 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3860 op0, op1, rem_flag ? NULL_RTX : target,
3861 unsignedp,
3862 ((optab2->handlers[(int) compute_mode].insn_code
3863 != CODE_FOR_nothing)
3864 ? OPTAB_DIRECT : OPTAB_WIDEN));
3866 if (quotient == 0)
3868 /* No luck there. Try a quotient-and-remainder insn,
3869 keeping the quotient alone. */
3870 quotient = gen_reg_rtx (compute_mode);
3871 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3872 op0, op1,
3873 quotient, NULL_RTX, unsignedp))
3875 quotient = 0;
3876 if (! rem_flag)
3877 /* Still no luck. If we are not computing the remainder,
3878 use a library call for the quotient. */
3879 quotient = sign_expand_binop (compute_mode,
3880 udiv_optab, sdiv_optab,
3881 op0, op1, target,
3882 unsignedp, OPTAB_LIB_WIDEN);
3887 if (rem_flag)
3889 if (target && GET_MODE (target) != compute_mode)
3890 target = 0;
3892 if (quotient == 0)
3893 /* No divide instruction either. Use library for remainder. */
3894 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3895 op0, op1, target,
3896 unsignedp, OPTAB_LIB_WIDEN);
3897 else
3899 /* We divided. Now finish doing X - Y * (X / Y). */
3900 remainder = expand_mult (compute_mode, quotient, op1,
3901 NULL_RTX, unsignedp);
3902 remainder = expand_binop (compute_mode, sub_optab, op0,
3903 remainder, target, unsignedp,
3904 OPTAB_LIB_WIDEN);
3908 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3911 /* Return a tree node with data type TYPE, describing the value of X.
3912 Usually this is an RTL_EXPR, if there is no obvious better choice.
3913 X may be an expression; however, we only support those expressions
3914 generated by loop.c. */
3916 tree
3917 make_tree (type, x)
3918 tree type;
3919 rtx x;
3921 tree t;
3923 switch (GET_CODE (x))
3925 case CONST_INT:
3926 t = build_int_2 (INTVAL (x),
3927 (TREE_UNSIGNED (type)
3928 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3929 || INTVAL (x) >= 0 ? 0 : -1);
3930 TREE_TYPE (t) = type;
3931 return t;
3933 case CONST_DOUBLE:
3934 if (GET_MODE (x) == VOIDmode)
3936 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3937 TREE_TYPE (t) = type;
3939 else
3941 REAL_VALUE_TYPE d;
3943 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3944 t = build_real (type, d);
3947 return t;
3949 case PLUS:
3950 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3951 make_tree (type, XEXP (x, 1))));
3953 case MINUS:
3954 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3955 make_tree (type, XEXP (x, 1))));
3957 case NEG:
3958 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3960 case MULT:
3961 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3962 make_tree (type, XEXP (x, 1))));
3964 case ASHIFT:
3965 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3966 make_tree (type, XEXP (x, 1))));
3968 case LSHIFTRT:
3969 return fold (convert (type,
3970 build (RSHIFT_EXPR, unsigned_type (type),
3971 make_tree (unsigned_type (type),
3972 XEXP (x, 0)),
3973 make_tree (type, XEXP (x, 1)))));
3975 case ASHIFTRT:
3976 return fold (convert (type,
3977 build (RSHIFT_EXPR, signed_type (type),
3978 make_tree (signed_type (type), XEXP (x, 0)),
3979 make_tree (type, XEXP (x, 1)))));
3981 case DIV:
3982 if (TREE_CODE (type) != REAL_TYPE)
3983 t = signed_type (type);
3984 else
3985 t = type;
3987 return fold (convert (type,
3988 build (TRUNC_DIV_EXPR, t,
3989 make_tree (t, XEXP (x, 0)),
3990 make_tree (t, XEXP (x, 1)))));
3991 case UDIV:
3992 t = unsigned_type (type);
3993 return fold (convert (type,
3994 build (TRUNC_DIV_EXPR, t,
3995 make_tree (t, XEXP (x, 0)),
3996 make_tree (t, XEXP (x, 1)))));
3997 default:
3998 t = make_node (RTL_EXPR);
3999 TREE_TYPE (t) = type;
4000 RTL_EXPR_RTL (t) = x;
4001 /* There are no insns to be output
4002 when this rtl_expr is used. */
4003 RTL_EXPR_SEQUENCE (t) = 0;
4004 return t;
4008 /* Return an rtx representing the value of X * MULT + ADD.
4009 TARGET is a suggestion for where to store the result (an rtx).
4010 MODE is the machine mode for the computation.
4011 X and MULT normally have mode MODE. ADD may have a different mode,
4012 and so may X (each defaults to MODE).
4013 UNSIGNEDP is non-zero to do unsigned multiplication.
4014 This may emit insns. */
4016 rtx
4017 expand_mult_add (x, target, mult, add, mode, unsignedp)
4018 rtx x, target, mult, add;
4019 enum machine_mode mode;
4020 int unsignedp;
4022 tree type = type_for_mode (mode, unsignedp);
4023 tree add_type = (GET_MODE (add) == VOIDmode
4024 ? type : type_for_mode (GET_MODE (add), unsignedp));
4025 tree result = fold (build (PLUS_EXPR, type,
4026 fold (build (MULT_EXPR, type,
4027 make_tree (type, x),
4028 make_tree (type, mult))),
4029 make_tree (add_type, add)));
4031 return expand_expr (result, target, VOIDmode, 0);
4034 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4035 and returning TARGET.
4037 If TARGET is 0, a pseudo-register or constant is returned. */
4039 rtx
4040 expand_and (op0, op1, target)
4041 rtx op0, op1, target;
4043 enum machine_mode mode = VOIDmode;
4044 rtx tem;
4046 if (GET_MODE (op0) != VOIDmode)
4047 mode = GET_MODE (op0);
4048 else if (GET_MODE (op1) != VOIDmode)
4049 mode = GET_MODE (op1);
4051 if (mode != VOIDmode)
4052 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4053 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
4054 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
4055 else
4056 abort ();
4058 if (target == 0)
4059 target = tem;
4060 else if (tem != target)
4061 emit_move_insn (target, tem);
4062 return target;
4065 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4066 and storing in TARGET. Normally return TARGET.
4067 Return 0 if that cannot be done.
4069 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4070 it is VOIDmode, they cannot both be CONST_INT.
4072 UNSIGNEDP is for the case where we have to widen the operands
4073 to perform the operation. It says to use zero-extension.
4075 NORMALIZEP is 1 if we should convert the result to be either zero
4076 or one. NORMALIZEP is -1 if we should convert the result to be
4077 either zero or -1. If NORMALIZEP is zero, the result will be left
4078 "raw" out of the scc insn. */
4080 rtx
4081 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4082 rtx target;
4083 enum rtx_code code;
4084 rtx op0, op1;
4085 enum machine_mode mode;
4086 int unsignedp;
4087 int normalizep;
4089 rtx subtarget;
4090 enum insn_code icode;
4091 enum machine_mode compare_mode;
4092 enum machine_mode target_mode = GET_MODE (target);
4093 rtx tem;
4094 rtx last = get_last_insn ();
4095 rtx pattern, comparison;
4097 /* If one operand is constant, make it the second one. Only do this
4098 if the other operand is not constant as well. */
4100 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
4101 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
4103 tem = op0;
4104 op0 = op1;
4105 op1 = tem;
4106 code = swap_condition (code);
4109 if (mode == VOIDmode)
4110 mode = GET_MODE (op0);
4112 /* For some comparisons with 1 and -1, we can convert this to
4113 comparisons with zero. This will often produce more opportunities for
4114 store-flag insns. */
4116 switch (code)
4118 case LT:
4119 if (op1 == const1_rtx)
4120 op1 = const0_rtx, code = LE;
4121 break;
4122 case LE:
4123 if (op1 == constm1_rtx)
4124 op1 = const0_rtx, code = LT;
4125 break;
4126 case GE:
4127 if (op1 == const1_rtx)
4128 op1 = const0_rtx, code = GT;
4129 break;
4130 case GT:
4131 if (op1 == constm1_rtx)
4132 op1 = const0_rtx, code = GE;
4133 break;
4134 case GEU:
4135 if (op1 == const1_rtx)
4136 op1 = const0_rtx, code = NE;
4137 break;
4138 case LTU:
4139 if (op1 == const1_rtx)
4140 op1 = const0_rtx, code = EQ;
4141 break;
4142 default:
4143 break;
4146 /* From now on, we won't change CODE, so set ICODE now. */
4147 icode = setcc_gen_code[(int) code];
4149 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4150 complement of A (for GE) and shifting the sign bit to the low bit. */
4151 if (op1 == const0_rtx && (code == LT || code == GE)
4152 && GET_MODE_CLASS (mode) == MODE_INT
4153 && (normalizep || STORE_FLAG_VALUE == 1
4154 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4155 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4156 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4158 subtarget = target;
4160 /* If the result is to be wider than OP0, it is best to convert it
4161 first. If it is to be narrower, it is *incorrect* to convert it
4162 first. */
4163 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4165 op0 = protect_from_queue (op0, 0);
4166 op0 = convert_modes (target_mode, mode, op0, 0);
4167 mode = target_mode;
4170 if (target_mode != mode)
4171 subtarget = 0;
4173 if (code == GE)
4174 op0 = expand_unop (mode, one_cmpl_optab, op0,
4175 ((STORE_FLAG_VALUE == 1 || normalizep)
4176 ? 0 : subtarget), 0);
4178 if (STORE_FLAG_VALUE == 1 || normalizep)
4179 /* If we are supposed to produce a 0/1 value, we want to do
4180 a logical shift from the sign bit to the low-order bit; for
4181 a -1/0 value, we do an arithmetic shift. */
4182 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4183 size_int (GET_MODE_BITSIZE (mode) - 1),
4184 subtarget, normalizep != -1);
4186 if (mode != target_mode)
4187 op0 = convert_modes (target_mode, mode, op0, 0);
4189 return op0;
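/* Illustrative sketch, not part of the original source: the sign-bit
   shifts used above, for a 32-bit int and invented helper names.  A < 0
   is just the sign bit, A >= 0 is the sign bit of the complement, and a
   logical shift gives the 0/1 result where an arithmetic shift would
   give 0/-1:

     static int
     lt_zero_sketch (int a)
     {
       return (unsigned int) a >> 31;
     }

     static int
     ge_zero_sketch (int a)
     {
       return ~ (unsigned int) a >> 31;
     }
*/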
4192 if (icode != CODE_FOR_nothing)
4194 /* We think we may be able to do this with a scc insn. Emit the
4195 comparison and then the scc insn.
4197 compare_from_rtx may call emit_queue, which would be deleted below
4198 if the scc insn fails. So call it ourselves before setting LAST. */
4200 emit_queue ();
4201 last = get_last_insn ();
4203 comparison
4204 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4205 if (GET_CODE (comparison) == CONST_INT)
4206 return (comparison == const0_rtx ? const0_rtx
4207 : normalizep == 1 ? const1_rtx
4208 : normalizep == -1 ? constm1_rtx
4209 : const_true_rtx);
4211 /* If the code of COMPARISON doesn't match CODE, something is
4212 wrong; we can no longer be sure that we have the operation.
4213 We could handle this case, but it should not happen. */
4215 if (GET_CODE (comparison) != code)
4216 abort ();
4218 /* Get a reference to the target in the proper mode for this insn. */
4219 compare_mode = insn_operand_mode[(int) icode][0];
4220 subtarget = target;
4221 if (preserve_subexpressions_p ()
4222 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
4223 subtarget = gen_reg_rtx (compare_mode);
4225 pattern = GEN_FCN (icode) (subtarget);
4226 if (pattern)
4228 emit_insn (pattern);
4230 /* If we are converting to a wider mode, first convert to
4231 TARGET_MODE, then normalize. This produces better combining
4232 opportunities on machines that have a SIGN_EXTRACT when we are
4233 testing a single bit. This mostly benefits the 68k.
4235 If STORE_FLAG_VALUE does not have the sign bit set when
4236 interpreted in COMPARE_MODE, we can do this conversion as
4237 unsigned, which is usually more efficient. */
4238 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4240 convert_move (target, subtarget,
4241 (GET_MODE_BITSIZE (compare_mode)
4242 <= HOST_BITS_PER_WIDE_INT)
4243 && 0 == (STORE_FLAG_VALUE
4244 & ((HOST_WIDE_INT) 1
4245 << (GET_MODE_BITSIZE (compare_mode) -1))));
4246 op0 = target;
4247 compare_mode = target_mode;
4249 else
4250 op0 = subtarget;
4252 /* If we want to keep subexpressions around, don't reuse our
4253 last target. */
4255 if (preserve_subexpressions_p ())
4256 subtarget = 0;
4258 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4259 we don't have to do anything. */
4260 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4262 else if (normalizep == - STORE_FLAG_VALUE)
4263 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4265 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4266 makes it hard to use a value of just the sign bit due to
4267 ANSI integer constant typing rules. */
4268 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4269 && (STORE_FLAG_VALUE
4270 & ((HOST_WIDE_INT) 1
4271 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4272 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4273 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4274 subtarget, normalizep == 1);
4275 else if (STORE_FLAG_VALUE & 1)
4277 op0 = expand_and (op0, const1_rtx, subtarget);
4278 if (normalizep == -1)
4279 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4281 else
4282 abort ();
4284 /* If we were converting to a smaller mode, do the
4285 conversion now. */
4286 if (target_mode != compare_mode)
4288 convert_move (target, op0, 0);
4289 return target;
4291 else
4292 return op0;
4296 delete_insns_since (last);
4298 /* If expensive optimizations are enabled, use different pseudo registers
4299 for each insn, instead of reusing the same pseudo. This leads to better
4300 CSE, but slows down the compiler, since there are more pseudos. */
4301 subtarget = (!flag_expensive_optimizations
4302 && (target_mode == mode)) ? target : NULL_RTX;
4304 /* If we reached here, we can't do this with a scc insn. However, there
4305 are some comparisons that can be done directly. For example, if
4306 this is an equality comparison of integers, we can try to exclusive-or
4307 (or subtract) the two operands and use a recursive call to try the
4308 comparison with zero. Don't do any of these cases if branches are
4309 very cheap. */
4311 if (BRANCH_COST > 0
4312 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4313 && op1 != const0_rtx)
4315 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4316 OPTAB_WIDEN);
4318 if (tem == 0)
4319 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4320 OPTAB_WIDEN);
4321 if (tem != 0)
4322 tem = emit_store_flag (target, code, tem, const0_rtx,
4323 mode, unsignedp, normalizep);
4324 if (tem == 0)
4325 delete_insns_since (last);
4326 return tem;
4329 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4330 the constant zero. Reject all other comparisons at this point. Only
4331 do LE and GT if branches are expensive since they are expensive on
4332 2-operand machines. */
4334 if (BRANCH_COST == 0
4335 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4336 || (code != EQ && code != NE
4337 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4338 return 0;
4340 /* See what we need to return. We can only return 1, -1, or the
4341 sign bit. */
4343 if (normalizep == 0)
4345 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4346 normalizep = STORE_FLAG_VALUE;
4348 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4349 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4350 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4352 else
4353 return 0;
4356 /* Try to put the result of the comparison in the sign bit. Assume we can't
4357 do the necessary operation below. */
4359 tem = 0;
4361 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4362 the sign bit set. */
4364 if (code == LE)
4366 /* This is destructive, so SUBTARGET can't be OP0. */
4367 if (rtx_equal_p (subtarget, op0))
4368 subtarget = 0;
4370 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4371 OPTAB_WIDEN);
4372 if (tem)
4373 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4374 OPTAB_WIDEN);
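/* Illustrative sketch, not part of the original source: A | (A - 1)
   has its sign bit set exactly when A <= 0 -- negative A sets it
   directly, and A == 0 makes A - 1 wrap to all ones.  A 32-bit
   wrapping int and an invented helper name are assumed:

     static int
     le_zero_sketch (int a)
     {
       return (unsigned int) (a | (a - 1)) >> 31;
     }
*/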
4377 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4378 number of bits in the mode of OP0, minus one. */
4380 if (code == GT)
4382 if (rtx_equal_p (subtarget, op0))
4383 subtarget = 0;
4385 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4386 size_int (GET_MODE_BITSIZE (mode) - 1),
4387 subtarget, 0);
4388 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4389 OPTAB_WIDEN);
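/* Illustrative sketch, not part of the original source: likewise,
   (A >> 31) - A is negative exactly when A > 0, since the arithmetic
   shift yields 0 or -1 and the subtraction then gives -A or -1 - A.
   Invented helper name, 32-bit int assumed:

     static int
     gt_zero_sketch (int a)
     {
       return (unsigned int) ((a >> 31) - a) >> 31;
     }
*/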
4392 if (code == EQ || code == NE)
4394 /* For EQ or NE, one way to do the comparison is to apply an operation
4395 that converts the operand into a positive number if it is non-zero
4396 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4397 for NE we negate. This puts the result in the sign bit. Then we
4398 normalize with a shift, if needed.
4400 Two operations that can do the above actions are ABS and FFS, so try
4401 them. If that doesn't work, and MODE is smaller than a full word,
4402 we can use zero-extension to the wider mode (an unsigned conversion)
4403 as the operation. */
4405 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4406 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4407 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4408 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4409 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4411 op0 = protect_from_queue (op0, 0);
4412 tem = convert_modes (word_mode, mode, op0, 1);
4413 mode = word_mode;
4416 if (tem != 0)
4418 if (code == EQ)
4419 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4420 0, OPTAB_WIDEN);
4421 else
4422 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4425 /* If we couldn't do it that way, for NE we can "or" the two's complement
4426 of the value with itself. For EQ, we take the one's complement of
4427 that "or", which is an extra insn, so we only handle EQ if branches
4428 are expensive. */
4430 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4432 if (rtx_equal_p (subtarget, op0))
4433 subtarget = 0;
4435 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4436 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4437 OPTAB_WIDEN);
4439 if (tem && code == EQ)
4440 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
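/* Illustrative sketch, not part of the original source: the fallback
   just emitted, with an invented helper name.  -A | A has its sign bit
   set exactly when A is nonzero, because one of A and -A is negative
   for every nonzero A (both are, for the most negative value);
   complementing the result first yields EQ instead:

     static int
     ne_zero_sketch (int a)
     {
       return (unsigned int) (- a | a) >> 31;
     }
*/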
4444 if (tem && normalizep)
4445 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4446 size_int (GET_MODE_BITSIZE (mode) - 1),
4447 subtarget, normalizep == 1);
4449 if (tem)
4451 if (GET_MODE (tem) != target_mode)
4453 convert_move (target, tem, 0);
4454 tem = target;
4456 else if (!subtarget)
4458 emit_move_insn (target, tem);
4459 tem = target;
4462 else
4463 delete_insns_since (last);
4465 return tem;
4468 /* Like emit_store_flag, but always succeeds. */
4470 rtx
4471 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4472 rtx target;
4473 enum rtx_code code;
4474 rtx op0, op1;
4475 enum machine_mode mode;
4476 int unsignedp;
4477 int normalizep;
4479 rtx tem, label;
4481 /* First see if emit_store_flag can do the job. */
4482 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4483 if (tem != 0)
4484 return tem;
4486 if (normalizep == 0)
4487 normalizep = 1;
4489 /* If this failed, we have to do this with set/compare/jump/set code. */
4491 if (GET_CODE (target) != REG
4492 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4493 target = gen_reg_rtx (GET_MODE (target));
4495 emit_move_insn (target, const1_rtx);
4496 tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4497 if (GET_CODE (tem) == CONST_INT)
4498 return tem;
4500 label = gen_label_rtx ();
4501 if (bcc_gen_fctn[(int) code] == 0)
4502 abort ();
4504 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
4505 emit_move_insn (target, const0_rtx);
4506 emit_label (label);
4508 return target;
4511 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4512 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4514 The algorithm is based on the code in expr.c:do_jump.
4516 Note that this does not perform a general comparison. Only variants
4517 generated within expmed.c are correctly handled; others abort (but could
4518 be handled if needed). */
4520 static void
4521 do_cmp_and_jump (arg1, arg2, op, mode, label)
4522 rtx arg1, arg2, label;
4523 enum rtx_code op;
4524 enum machine_mode mode;
4526 /* If this mode is an integer too wide to compare properly,
4527 compare word by word. Rely on cse to optimize constant cases. */
4529 if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
4531 rtx label2 = gen_label_rtx ();
4533 switch (op)
4535 case LTU:
4536 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4537 break;
4539 case LEU:
4540 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4541 break;
4543 case LT:
4544 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4545 break;
4547 case GT:
4548 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4549 break;
4551 case GE:
4552 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4553 break;
4555 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4556 those are the only equality operations we do. */
4557 case EQ:
4558 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4559 abort ();
4560 do_jump_by_parts_equality_rtx (arg1, label2, label);
4561 break;
4563 case NE:
4564 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4565 abort ();
4566 do_jump_by_parts_equality_rtx (arg1, label, label2);
4567 break;
4569 default:
4570 abort ();
4573 emit_label (label2);
4575 else
4577 emit_cmp_insn (arg1, arg2, op, NULL_RTX, mode, 0, 0);
4578 if (bcc_gen_fctn[(int) op] == 0)
4579 abort ();
4580 emit_jump_insn ((*bcc_gen_fctn[(int) op]) (label));