/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92, 93, 94, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
#include "config.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"

static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
                                          int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
                           int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
                               int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));

#define CEIL(x,y) (((x) + (y) - 1) / (y))
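/* E.g., CEIL (17, 8) == 3: seventeen bits occupy three whole bytes.  */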
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx (REG, word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */

  free_point = (char *) oballoc (0);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
                                   gen_rtx (ASHIFT, word_mode, reg,
                                            const0_rtx)));

  shiftadd_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
                                      gen_rtx (PLUS, word_mode,
                                               gen_rtx (MULT, word_mode,
                                                        reg, const0_rtx),
                                               reg)));

  shiftsub_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
                                      gen_rtx (MINUS, word_mode,
                                               gen_rtx (MULT, word_mode,
                                                        reg, const0_rtx),
                                               reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx (DIV, word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx (MOD, word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  if (GET_CODE (x) == CONST_INT)
    {
      HOST_WIDE_INT val = - INTVAL (x);
      if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_WIDE_INT)
        {
          /* Sign extend the value from the bits that are significant.  */
          if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
            val |= (HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (mode);
          else
            val &= ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (mode)) - 1;
        }
      return GEN_INT (val);
    }
  else
    return expand_unop (GET_MODE (x), neg_optab, x, NULL_RTX, 0);
}
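/* Illustrative sketch (not part of the original file): the sign-extension
   step above applied to a plain host integer.  BITS plays the role of
   GET_MODE_BITSIZE (mode) and is assumed smaller than the width of
   HOST_WIDE_INT.  E.g. sign_extend_example (0xFF, 8) == -1.  */
#if 0
static HOST_WIDE_INT
sign_extend_example (val, bits)
     HOST_WIDE_INT val;
     int bits;
{
  if (val & ((HOST_WIDE_INT) 1 << (bits - 1)))
    /* The sign bit of the narrow value is set: fill the high bits.  */
    val |= (HOST_WIDE_INT) (-1) << bits;
  else
    /* The sign bit is clear: discard any stray high bits.  */
    val &= ((HOST_WIDE_INT) 1 << bits) - 1;
  return val;
}
#endif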
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.  (The latter assumes that an n-bit machine
   will be able to insert bit fields up to n bits wide.)
   It isn't certain that either of these is right.
   extract_bit_field has the same quandary.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);
          emit_insn (GEN_FCN (icode)
                     (gen_rtx (SUBREG, fieldmode, op0, offset), value));
        }
      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }
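  /* Worked example (illustrative, assuming 32-bit words and
     ! WORDS_BIG_ENDIAN): a 40-bit field is stored as nwords == 2 pieces.
     Iteration 0 stores a 32-bit piece at bit_offset 0 from word 0 of
     VALUE; iteration 1 stores the remaining 8-bit piece at bit_offset 32
     from word 1.  */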
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
                       op0, offset);
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx (SUBREG, word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
          >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos
                > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode
        = insn_operand_mode[(int) CODE_FOR_insv][3];

      int save_volatile_ok = volatile_ok;
      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, or if we
         are to force MEMs into a register, copy OP0 into a register and
         save it back later.  */
      if (GET_CODE (op0) == MEM
          && (flag_force_mem
              || ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
                    (op0, VOIDmode))))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx (SUBREG, maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;
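      /* For instance (illustrative): with unit == 32, an 8-bit field whose
         msb-relative position is 4 starts at lsb-relative bit
         32 - 8 - 4 == 20.  */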
      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx (SUBREG, maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
  insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
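/* Illustrative sketch (not part of the original file): the same
   mask-and-or sequence on a plain host integer.  Assumes
   bitpos + bitsize <= 32 and bitsize < 32.
   E.g. store_field_example (0x00000000, 0x5, 4, 3) == 0x00000050.  */
#if 0
static unsigned int
store_field_example (word, value, bitpos, bitsize)
     unsigned int word, value;
     int bitpos, bitsize;
{
  unsigned int mask = ((1u << bitsize) - 1) << bitpos;

  /* Clear the field, then OR in the shifted, truncated value.  */
  return (word & ~mask) | ((value << bitpos) & mask);
}
#endif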
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value), value));
    }

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            GET_MODE_BITSIZE (GET_MODE (value))
                                            - bitsize + bitsdone,
                                            NULL_RTX, 1, align);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1, align);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
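/* Worked example (illustrative): with 32-bit words, storing a 20-bit field
   at bit position 24 spans two words.  The loop above runs twice: first
   with thispos == 24 and thissize == 8 (the piece that fits in word 0),
   then with thispos == 0 and thissize == 12 (the rest, in word 1).  */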
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      offset += SUBREG_WORD (op0);
      op0 = SUBREG_REG (op0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if ((GET_CODE (op0) == REG
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx (SUBREG, mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }

      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
                       op0, offset);
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
              >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos
                    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode
            = insn_operand_mode[(int) CODE_FOR_extzv][0];

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (flag_force_mem
                  || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
                        (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            abort ();
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
      extzv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
              >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos
                    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode
            = insn_operand_mode[(int) CODE_FOR_extv][0];

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            abort ();
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
      extv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx (SUBREG, tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
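/* Illustrative sketch (not part of the original file): the unsigned case
   above on a plain host integer, assuming bitpos + bitsize <= 32 and
   bitsize < 32.  E.g. extract_field_example (0x50, 4, 3) == 0x5.  */
#if 0
static unsigned int
extract_field_example (word, bitpos, bitsize)
     unsigned int word;
     int bitpos, bitsize;
{
  /* Shift the field down to the lsb, then mask off the upper bits.  */
  return (word >> bitpos) & ((1u << bitsize) - 1);
}
#endif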
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
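/* Illustrative sketch (not part of the original file): the single-word case
   of the mask above, on a plain host integer.  Assumes
   bitpos + bitsize <= HOST_BITS_PER_WIDE_INT.
   E.g. mask_example (4, 3) == 0x70: three ones followed by four zeros.  */
#if 0
static unsigned HOST_WIDE_INT
mask_example (bitpos, bitsize)
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT mask = (unsigned HOST_WIDE_INT) -1 << bitpos;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    mask &= ((unsigned HOST_WIDE_INT) -1
             >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  return mask;
}
#endif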
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
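/* Worked example (illustrative, assuming a 32-bit HOST_WIDE_INT):
   lshift_value with v == 0xAB, bitpos == 28, bitsize == 8 produces the
   double-word pair low == 0xB0000000, high == 0xA; the shifted value
   straddles the boundary between the two host words.  */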
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */

static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  int unit;
  int bitsdone = 0;
  rtx result;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_BIG_ENDIAN.
         OFFSET is in UNITs, and UNIT is in bits.
         extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
                                      offset * unit / BITS_PER_UNIT,
                                      thissize, thispos, 0, 1, align);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_2 (bitsize - bitsdone, 0), 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_2 (bitsdone - thissize, 0), 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_2 (BITS_PER_WORD - bitsize, 0),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
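/* Illustrative sketch (not part of the original file): the two-shift sign
   extension above on a plain host integer, assuming HOST_WIDE_INT is wider
   than BITSIZE and that >> of a negative value is an arithmetic shift
   (implementation-defined in C, but true of the hosts GCC supports).
   E.g. with HOST_BITS_PER_WIDE_INT == 32,
   sign_extend_field_example (0x7, 3) == -1.  */
#if 0
static HOST_WIDE_INT
sign_extend_field_example (result, bitsize)
     HOST_WIDE_INT result;
     int bitsize;
{
  /* Move the field's msb to the word's msb, then shift back
     arithmetically so the sign bit is replicated.  */
  result <<= HOST_BITS_PER_WIDE_INT - bitsize;
  return result >> (HOST_BITS_PER_WIDE_INT - bitsize);
}
#endif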
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
1655 /* Output a shift instruction for expression code CODE,
1656 with SHIFTED being the rtx for the value to shift,
1657 and AMOUNT the tree for the amount to shift by.
1658 Store the result in the rtx TARGET, if that is convenient.
1659 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1660 Return the rtx for where the value is. */
1663 expand_shift (code, mode, shifted, amount, target, unsignedp)
1664 enum tree_code code;
1665 register enum machine_mode mode;
1666 rtx shifted;
1667 tree amount;
1668 register rtx target;
1669 int unsignedp;
1671 register rtx op1, temp = 0;
1672 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1673 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1674 int try;
1676 /* Previously detected shift-counts computed by NEGATE_EXPR
1677 and shifted in the other direction; but that does not work
1678 on all machines. */
1680 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1682 #if SHIFT_COUNT_TRUNCATED
1683 if (SHIFT_COUNT_TRUNCATED
1684 && GET_CODE (op1) == CONST_INT
1685 && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
1686 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1687 % GET_MODE_BITSIZE (mode));
1688 #endif
1690 if (op1 == const0_rtx)
1691 return shifted;
1693 for (try = 0; temp == 0 && try < 3; try++)
1695 enum optab_methods methods;
1697 if (try == 0)
1698 methods = OPTAB_DIRECT;
1699 else if (try == 1)
1700 methods = OPTAB_WIDEN;
1701 else
1702 methods = OPTAB_LIB_WIDEN;
1704 if (rotate)
1706 /* Widening does not work for rotation. */
1707 if (methods == OPTAB_WIDEN)
1708 continue;
1709 else if (methods == OPTAB_LIB_WIDEN)
1711 /* If we have been unable to open-code this by a rotation,
1712 do it as the IOR of two shifts. I.e., to rotate A
1713 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1714 where C is the bitsize of A.
1716 It is theoretically possible that the target machine might
1717 not be able to perform either shift and hence we would
1718 be making two libcalls rather than just the one for the
1719 shift (similarly if IOR could not be done). We will allow
1720 this extremely unlikely lossage to avoid complicating the
1721 code below. */
1723 rtx subtarget = target == shifted ? 0 : target;
1724 rtx temp1;
1725 tree type = TREE_TYPE (amount);
1726 tree new_amount = make_tree (type, op1);
1727 tree other_amount
1728 = fold (build (MINUS_EXPR, type,
1729 convert (type,
1730 build_int_2 (GET_MODE_BITSIZE (mode),
1731 0)),
1732 amount));
1734 shifted = force_reg (mode, shifted);
1736 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1737 mode, shifted, new_amount, subtarget, 1);
1738 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1739 mode, shifted, other_amount, 0, 1);
1740 return expand_binop (mode, ior_optab, temp, temp1, target,
1741 unsignedp, methods);
1744 temp = expand_binop (mode,
1745 left ? rotl_optab : rotr_optab,
1746 shifted, op1, target, unsignedp, methods);
1748 /* If we don't have the rotate, but we are rotating by a constant
1749 that is in range, try a rotate in the opposite direction. */
1751 if (temp == 0 && GET_CODE (op1) == CONST_INT
1752 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1753 temp = expand_binop (mode,
1754 left ? rotr_optab : rotl_optab,
1755 shifted,
1756 GEN_INT (GET_MODE_BITSIZE (mode)
1757 - INTVAL (op1)),
1758 target, unsignedp, methods);
1760 else if (unsignedp)
1761 temp = expand_binop (mode,
1762 left ? ashl_optab : lshr_optab,
1763 shifted, op1, target, unsignedp, methods);
1765 /* Do arithmetic shifts.
1766 Also, if we are going to widen the operand, we can just as well
1767 use an arithmetic right-shift instead of a logical one. */
1768 if (temp == 0 && ! rotate
1769 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1771 enum optab_methods methods1 = methods;
1773 /* If trying to widen a log shift to an arithmetic shift,
1774 don't accept an arithmetic shift of the same size. */
1775 if (unsignedp)
1776 methods1 = OPTAB_MUST_WIDEN;
1778 /* Arithmetic shift */
1780 temp = expand_binop (mode,
1781 left ? ashl_optab : ashr_optab,
1782 shifted, op1, target, unsignedp, methods1);
1785 /* We used to try extzv here for logical right shifts, but that was
1786 only useful for one machine, the VAX, and caused poor code
1787 generation there for lshrdi3, so the code was deleted and a
1788 define_expand for lshrsi3 was added to vax.md. */
1791 if (temp == 0)
1792 abort ();
1793 return temp;
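/* Illustration (not part of GCC): the OPTAB_LIB_WIDEN rotate fallback
   above computes a rotation as two shifts IORed together.  A standalone
   sketch for a 32-bit value, assuming 0 < N < 32; compile separately.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t
rotl32 (uint32_t a, unsigned n)
{
  /* rotl (a, n) = (a << n) | ((unsigned) a >> (C - n)), C = 32.  */
  return (a << n) | (a >> (32 - n));
}

int
main (void)
{
  assert (rotl32 (0x80000001u, 1) == 0x00000003u);
  assert (rotl32 (0x12345678u, 8) == 0x34567812u);
  return 0;
}
#endif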
1796 enum alg_code { alg_zero, alg_m, alg_shift,
1797 alg_add_t_m2, alg_sub_t_m2,
1798 alg_add_factor, alg_sub_factor,
1799 alg_add_t2_m, alg_sub_t2_m,
1800 alg_add, alg_subtract, alg_factor, alg_shiftop };
1802 /* This structure records a sequence of operations.
1803 `ops' is the number of operations recorded.
1804 `cost' is their total cost.
1805 The operations are stored in `op' and the corresponding
1806 logarithms of the integer coefficients in `log'.
1808 These are the operations:
1809 alg_zero total := 0;
1810 alg_m total := multiplicand;
1811 alg_shift total := total * coeff;
1812 alg_add_t_m2 total := total + multiplicand * coeff;
1813 alg_sub_t_m2 total := total - multiplicand * coeff;
1814 alg_add_factor total := total * coeff + total;
1815 alg_sub_factor total := total * coeff - total;
1816 alg_add_t2_m total := total * coeff + multiplicand;
1817 alg_sub_t2_m total := total * coeff - multiplicand;
1819 The first operation must be either alg_zero or alg_m. */
1821 struct algorithm
1823 short cost;
1824 short ops;
1825 /* The sizes of the OP and LOG fields are not directly related to the
1826 word size, but the worst-case algorithms arise when we have few
1827 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
1828 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
1829 wordsize operations in total. */
1830 enum alg_code op[MAX_BITS_PER_WORD];
1831 char log[MAX_BITS_PER_WORD];
1834 /* Compute the best algorithm for multiplying by T, storing it in *ALG_OUT.
1835 The algorithm must cost less than COST_LIMIT.
1836 If ALG_OUT->cost >= COST_LIMIT, no algorithm was found and all
1837 other fields of *ALG_OUT are undefined. */
1839 static void
1840 synth_mult (alg_out, t, cost_limit)
1841 struct algorithm *alg_out;
1842 unsigned HOST_WIDE_INT t;
1843 int cost_limit;
1845 int m;
1846 struct algorithm *alg_in, *best_alg;
1847 unsigned int cost;
1848 unsigned HOST_WIDE_INT q;
1850 /* Indicate that no algorithm has been found yet. If none
1851 is found, this value will remain and indicates failure. */
1852 alg_out->cost = cost_limit;
1854 if (cost_limit <= 0)
1855 return;
1857 /* t == 1 can be done at zero cost. */
1858 if (t == 1)
1860 alg_out->ops = 1;
1861 alg_out->cost = 0;
1862 alg_out->op[0] = alg_m;
1863 return;
1866 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
1867 fail now. */
1868 if (t == 0)
1870 if (zero_cost >= cost_limit)
1871 return;
1872 else
1874 alg_out->ops = 1;
1875 alg_out->cost = zero_cost;
1876 alg_out->op[0] = alg_zero;
1877 return;
1881 /* We'll be needing a couple extra algorithm structures now. */
1883 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
1884 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
1886 /* If we have a group of zero bits at the low-order part of T, try
1887 multiplying by the remaining bits and then doing a shift. */
1889 if ((t & 1) == 0)
1891 m = floor_log2 (t & -t); /* m = number of low zero bits */
1892 q = t >> m;
1893 cost = shift_cost[m];
1894 synth_mult (alg_in, q, cost_limit - cost);
1896 cost += alg_in->cost;
1897 if (cost < cost_limit)
1899 struct algorithm *x;
1900 x = alg_in, alg_in = best_alg, best_alg = x;
1901 best_alg->log[best_alg->ops] = m;
1902 best_alg->op[best_alg->ops] = alg_shift;
1903 cost_limit = cost;
1907 /* If we have an odd number, add or subtract one. */
1908 if ((t & 1) != 0)
1910 unsigned HOST_WIDE_INT w;
1912 for (w = 1; (w & t) != 0; w <<= 1)
1914 if (w > 2
1915 /* Reject the case where t is 3,
1916 so that we prefer addition in that case. */
1917 && t != 3)
1919 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
1921 cost = add_cost;
1922 synth_mult (alg_in, t + 1, cost_limit - cost);
1924 cost += alg_in->cost;
1925 if (cost < cost_limit)
1927 struct algorithm *x;
1928 x = alg_in, alg_in = best_alg, best_alg = x;
1929 best_alg->log[best_alg->ops] = 0;
1930 best_alg->op[best_alg->ops] = alg_sub_t_m2;
1931 cost_limit = cost;
1934 else
1936 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
1938 cost = add_cost;
1939 synth_mult (alg_in, t - 1, cost_limit - cost);
1941 cost += alg_in->cost;
1942 if (cost < cost_limit)
1944 struct algorithm *x;
1945 x = alg_in, alg_in = best_alg, best_alg = x;
1946 best_alg->log[best_alg->ops] = 0;
1947 best_alg->op[best_alg->ops] = alg_add_t_m2;
1948 cost_limit = cost;
1953 /* Look for factors of t of the form
1954 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
1955 If we find such a factor, we can multiply by t using an algorithm that
1956 multiplies by q, shifts the result left by m, and adds or subtracts the unshifted value.
1958 We search for large factors first and loop down, even if large factors
1959 are less probable than small; if we find a large factor we will find a
1960 good sequence quickly, and therefore be able to prune (by decreasing
1961 COST_LIMIT) the search. */
1963 for (m = floor_log2 (t - 1); m >= 2; m--)
1965 unsigned HOST_WIDE_INT d;
1967 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
1968 if (t % d == 0 && t > d)
1970 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
1971 synth_mult (alg_in, t / d, cost_limit - cost);
1973 cost += alg_in->cost;
1974 if (cost < cost_limit)
1976 struct algorithm *x;
1977 x = alg_in, alg_in = best_alg, best_alg = x;
1978 best_alg->log[best_alg->ops] = m;
1979 best_alg->op[best_alg->ops] = alg_add_factor;
1980 cost_limit = cost;
1982 /* Other factors will have been taken care of in the recursion. */
1983 break;
1986 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
1987 if (t % d == 0 && t > d)
1989 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
1990 synth_mult (alg_in, t / d, cost_limit - cost);
1992 cost += alg_in->cost;
1993 if (cost < cost_limit)
1995 struct algorithm *x;
1996 x = alg_in, alg_in = best_alg, best_alg = x;
1997 best_alg->log[best_alg->ops] = m;
1998 best_alg->op[best_alg->ops] = alg_sub_factor;
1999 cost_limit = cost;
2001 break;
2005 /* Try shift-and-add (load effective address) instructions,
2006 e.g. do a*3, a*5, a*9. */
2007 if ((t & 1) != 0)
2009 q = t - 1;
2010 q = q & -q;
2011 m = exact_log2 (q);
2012 if (m >= 0)
2014 cost = shiftadd_cost[m];
2015 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2017 cost += alg_in->cost;
2018 if (cost < cost_limit)
2020 struct algorithm *x;
2021 x = alg_in, alg_in = best_alg, best_alg = x;
2022 best_alg->log[best_alg->ops] = m;
2023 best_alg->op[best_alg->ops] = alg_add_t2_m;
2024 cost_limit = cost;
2028 q = t + 1;
2029 q = q & -q;
2030 m = exact_log2 (q);
2031 if (m >= 0)
2033 cost = shiftsub_cost[m];
2034 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2036 cost += alg_in->cost;
2037 if (cost < cost_limit)
2039 struct algorithm *x;
2040 x = alg_in, alg_in = best_alg, best_alg = x;
2041 best_alg->log[best_alg->ops] = m;
2042 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2043 cost_limit = cost;
2048 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2049 we have not found any algorithm. */
2050 if (cost_limit == alg_out->cost)
2051 return;
2053 /* If we are getting too long a sequence for `struct algorithm'
2054 to record, make this search fail. */
2055 if (best_alg->ops == MAX_BITS_PER_WORD)
2056 return;
2058 /* Copy the algorithm from temporary space to the space at alg_out.
2059 We avoid using structure assignment because the majority of
2060 best_alg is normally undefined, and this is a critical function. */
2061 alg_out->ops = best_alg->ops + 1;
2062 alg_out->cost = cost_limit;
2063 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2064 alg_out->ops * sizeof *alg_out->op);
2065 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2066 alg_out->ops * sizeof *alg_out->log);
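/* Illustration (not part of GCC): one sequence synth_mult can record
   for t = 45, written out by hand with the alg_code names above.  A
   standalone sketch; compile separately.  */
#if 0
#include <assert.h>

static int
mult45 (int x)
{
  int total = x;                /* alg_m: total := multiplicand */
  total = (total << 3) + x;     /* alg_add_t2_m, log = 3: total = 9x */
  total = (total << 2) + total; /* alg_add_factor, log = 2: total = 45x */
  return total;
}

int
main (void)
{
  assert (mult45 (1) == 45);
  assert (mult45 (7) == 315);
  return 0;
}
#endif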
2069 /* Perform a multiplication and return an rtx for the result.
2070 MODE is the mode of the value; OP0 and OP1 are what to multiply (rtx's);
2071 TARGET is a suggestion for where to store the result (an rtx).
2073 We check specially for a constant integer as OP1.
2074 If you want this check for OP0 as well, then before calling
2075 you should swap the two operands if OP0 would be constant. */
2077 rtx
2078 expand_mult (mode, op0, op1, target, unsignedp)
2079 enum machine_mode mode;
2080 register rtx op0, op1, target;
2081 int unsignedp;
2083 rtx const_op1 = op1;
2085 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2086 less than or equal in size to `unsigned int', this doesn't matter.
2087 If the mode is larger than `unsigned int', then synth_mult works only
2088 if the constant value exactly fits in an `unsigned int' without any
2089 truncation. This means that multiplying by negative values does
2090 not work; results are off by 2^32 on a 32 bit machine. */
2092 /* If we are multiplying in DImode, it may still be a win
2093 to try to work with shifts and adds. */
2094 if (GET_CODE (op1) == CONST_DOUBLE
2095 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2096 && HOST_BITS_PER_INT >= BITS_PER_WORD
2097 && CONST_DOUBLE_HIGH (op1) == 0)
2098 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2099 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2100 && GET_CODE (op1) == CONST_INT
2101 && INTVAL (op1) < 0)
2102 const_op1 = 0;
2104 /* We used to test optimize here, on the grounds that it's better to
2105 produce a smaller program when -O is not used.
2106 But this causes such a terrible slowdown sometimes
2107 that it seems better to use synth_mult always. */
2109 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2111 struct algorithm alg;
2112 struct algorithm alg2;
2113 HOST_WIDE_INT val = INTVAL (op1);
2114 HOST_WIDE_INT val_so_far;
2115 rtx insn;
2116 int mult_cost;
2117 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2119 /* Try to do the computation three ways: multiply by the negative of OP1
2120 and then negate, do the multiplication directly, or do multiplication
2121 by OP1 - 1. */
2123 mult_cost = rtx_cost (gen_rtx (MULT, mode, op0, op1), SET);
2124 mult_cost = MIN (12 * add_cost, mult_cost);
2126 synth_mult (&alg, val, mult_cost);
2128 /* This works only if the inverted value actually fits in an
2129 `unsigned int' */
2130 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2132 synth_mult (&alg2, - val,
2133 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2134 if (alg2.cost + negate_cost < alg.cost)
2135 alg = alg2, variant = negate_variant;
2138 /* This proves very useful for division-by-constant. */
2139 synth_mult (&alg2, val - 1,
2140 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2141 if (alg2.cost + add_cost < alg.cost)
2142 alg = alg2, variant = add_variant;
2144 if (alg.cost < mult_cost)
2146 /* We found something cheaper than a multiply insn. */
2147 int opno;
2148 rtx accum, tem;
2150 op0 = protect_from_queue (op0, 0);
2152 /* Avoid referencing memory over and over.
2153 For speed, but also for correctness when mem is volatile. */
2154 if (GET_CODE (op0) == MEM)
2155 op0 = force_reg (mode, op0);
2157 /* ACCUM starts out either as OP0 or as a zero, depending on
2158 the first operation. */
2160 if (alg.op[0] == alg_zero)
2162 accum = copy_to_mode_reg (mode, const0_rtx);
2163 val_so_far = 0;
2165 else if (alg.op[0] == alg_m)
2167 accum = copy_to_mode_reg (mode, op0);
2168 val_so_far = 1;
2170 else
2171 abort ();
2173 for (opno = 1; opno < alg.ops; opno++)
2175 int log = alg.log[opno];
2176 int preserve = preserve_subexpressions_p ();
2177 rtx shift_subtarget = preserve ? 0 : accum;
2178 rtx add_target
2179 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2180 ? target : 0);
2181 rtx accum_target = preserve ? 0 : accum;
2183 switch (alg.op[opno])
2185 case alg_shift:
2186 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2187 build_int_2 (log, 0), NULL_RTX, 0);
2188 val_so_far <<= log;
2189 break;
2191 case alg_add_t_m2:
2192 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2193 build_int_2 (log, 0), NULL_RTX, 0);
2194 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2195 add_target ? add_target : accum_target);
2196 val_so_far += (HOST_WIDE_INT) 1 << log;
2197 break;
2199 case alg_sub_t_m2:
2200 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2201 build_int_2 (log, 0), NULL_RTX, 0);
2202 accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
2203 add_target ? add_target : accum_target);
2204 val_so_far -= (HOST_WIDE_INT) 1 << log;
2205 break;
2207 case alg_add_t2_m:
2208 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2209 build_int_2 (log, 0), shift_subtarget,
2210 0);
2211 accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
2212 add_target ? add_target : accum_target);
2213 val_so_far = (val_so_far << log) + 1;
2214 break;
2216 case alg_sub_t2_m:
2217 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2218 build_int_2 (log, 0), shift_subtarget,
2219 0);
2220 accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
2221 add_target ? add_target : accum_target);
2222 val_so_far = (val_so_far << log) - 1;
2223 break;
2225 case alg_add_factor:
2226 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2227 build_int_2 (log, 0), NULL_RTX, 0);
2228 accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
2229 add_target ? add_target : accum_target);
2230 val_so_far += val_so_far << log;
2231 break;
2233 case alg_sub_factor:
2234 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2235 build_int_2 (log, 0), NULL_RTX, 0);
2236 accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
2237 (add_target ? add_target
2238 : preserve ? 0 : tem));
2239 val_so_far = (val_so_far << log) - val_so_far;
2240 break;
2242 default:
2243 abort ();
2246 /* Write a REG_EQUAL note on the last insn so that we can cse
2247 multiplication sequences. */
2249 insn = get_last_insn ();
2250 REG_NOTES (insn)
2251 = gen_rtx (EXPR_LIST, REG_EQUAL,
2252 gen_rtx (MULT, mode, op0, GEN_INT (val_so_far)),
2253 REG_NOTES (insn));
2256 if (variant == negate_variant)
2258 val_so_far = - val_so_far;
2259 accum = expand_unop (mode, neg_optab, accum, target, 0);
2261 else if (variant == add_variant)
2263 val_so_far = val_so_far + 1;
2264 accum = force_operand (gen_rtx (PLUS, mode, accum, op0), target);
2267 if (val != val_so_far)
2268 abort ();
2270 return accum;
2274 /* This used to use umul_optab if unsigned, but for non-widening multiply
2275 there is no difference between signed and unsigned. */
2276 op0 = expand_binop (mode, smul_optab,
2277 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2278 if (op0 == 0)
2279 abort ();
2280 return op0;
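/* Illustration (not part of GCC): the negate_variant and add_variant
   tried above, written out for val = 7.  A standalone sketch; compile
   separately.  */
#if 0
#include <assert.h>

/* negate_variant: synthesize x * (-7) as x - (x << 3), then negate.  */
static int mult7_neg (int x) { return -(x - (x << 3)); }

/* add_variant: synthesize x * 6 as ((x << 1) + x) << 1, then add x.  */
static int mult7_add (int x) { return (((x << 1) + x) << 1) + x; }

int
main (void)
{
  assert (mult7_neg (5) == 35);
  assert (mult7_add (5) == 35);
  return 0;
}
#endif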
2283 /* Return the smallest n such that 2**n >= X. */
2285 int
2286 ceil_log2 (x)
2287 unsigned HOST_WIDE_INT x;
2289 return floor_log2 (x - 1) + 1;
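/* Illustration (not part of GCC): the identity above relies on
   floor_log2 (0) being -1, so ceil_log2 (1) == 0.  A standalone
   reference loop for checking a few values; compile separately.  */
#if 0
#include <assert.h>

static int
ceil_log2_ref (unsigned long x)
{
  int n = 0;
  while ((1ul << n) < x)
    n++;
  return n;
}

int
main (void)
{
  assert (ceil_log2_ref (1) == 0);
  assert (ceil_log2_ref (7) == 3);
  assert (ceil_log2_ref (8) == 3);
  assert (ceil_log2_ref (9) == 4);
  return 0;
}
#endif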
2292 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2293 replace division by D, and put the least significant N bits of the result
2294 in *MULTIPLIER_PTR and return the most significant bit.
2296 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2297 needed precision is in PRECISION (should be <= N).
2299 PRECISION should be as small as possible so this function can choose
2300 the multiplier more freely.
2302 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2303 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2305 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2306 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2308 static
2309 unsigned HOST_WIDE_INT
2310 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2311 unsigned HOST_WIDE_INT d;
2312 int n;
2313 int precision;
2314 unsigned HOST_WIDE_INT *multiplier_ptr;
2315 int *post_shift_ptr;
2316 int *lgup_ptr;
2318 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2319 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2320 int lgup, post_shift;
2321 int pow, pow2;
2322 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2324 /* lgup = ceil(log2(divisor)); */
2325 lgup = ceil_log2 (d);
2327 if (lgup > n)
2328 abort ();
2330 pow = n + lgup;
2331 pow2 = n + lgup - precision;
2333 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2335 /* We could handle this with some effort, but this case is much better
2336 handled directly with a scc insn, so rely on caller using that. */
2337 abort ();
2340 /* mlow = 2^(N + lgup)/d */
2341 if (pow >= HOST_BITS_PER_WIDE_INT)
2343 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2344 nl = 0;
2346 else
2348 nh = 0;
2349 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2351 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2352 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2354 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2355 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2356 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2357 else
2358 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2359 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2360 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2362 if (mhigh_hi && nh - d >= d)
2363 abort ();
2364 if (mhigh_hi > 1 || mlow_hi > 1)
2365 abort ();
2366 /* assert that mlow < mhigh. */
2367 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2368 abort ();
2370 /* If precision == N, then mlow, mhigh exceed 2^N
2371 (but they do not exceed 2^(N+1)). */
2373 /* Reduce to lowest terms */
2374 for (post_shift = lgup; post_shift > 0; post_shift--)
2376 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2377 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2378 if (ml_lo >= mh_lo)
2379 break;
2381 mlow_hi = 0;
2382 mlow_lo = ml_lo;
2383 mhigh_hi = 0;
2384 mhigh_lo = mh_lo;
2387 *post_shift_ptr = post_shift;
2388 *lgup_ptr = lgup;
2389 if (n < HOST_BITS_PER_WIDE_INT)
2391 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2392 *multiplier_ptr = mhigh_lo & mask;
2393 return mhigh_lo >= mask;
2395 else
2397 *multiplier_ptr = mhigh_lo;
2398 return mhigh_hi;
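/* Illustration (not part of GCC): the scheme above, scaled down to
   N = 16 so plain 64-bit host arithmetic suffices.  With
   m = ceil(2^(16+lgup) / d), (x * m) >> (16 + lgup) equals x / d for
   all 16-bit x.  A standalone sketch; compile separately.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  const uint32_t d = 7;
  int lgup = 0;
  while ((1u << lgup) < d)
    lgup++;                     /* lgup = ceil(log2(d)) = 3 */
  uint64_t m = (((uint64_t) 1 << (16 + lgup)) + d - 1) / d;
  for (uint32_t x = 0; x <= 0xffff; x++)
    assert ((uint32_t) ((x * m) >> (16 + lgup)) == x / d);
  return 0;
}
#endif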
2402 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2403 congruent to 1 (mod 2**N). */
2405 static unsigned HOST_WIDE_INT
2406 invert_mod2n (x, n)
2407 unsigned HOST_WIDE_INT x;
2408 int n;
2410 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2412 /* The algorithm notes that the choice y = x satisfies
2413 x*y == 1 mod 2^3, since x is assumed odd.
2414 Each iteration doubles the number of bits of significance in y. */
2416 unsigned HOST_WIDE_INT mask;
2417 unsigned HOST_WIDE_INT y = x;
2418 int nbit = 3;
2420 mask = (n == HOST_BITS_PER_WIDE_INT
2421 ? ~(unsigned HOST_WIDE_INT) 0
2422 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2424 while (nbit < n)
2426 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2427 nbit *= 2;
2429 return y;
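/* Illustration (not part of GCC): the same Newton/Hensel iteration for
   n = 32, checked over a sample of odd values.  A standalone sketch;
   compile separately.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t
inv_mod2_32 (uint32_t x)
{
  uint32_t y = x;               /* y == 1/x mod 2^3 for odd x */
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);        /* doubles the bits of significance */
  return y;
}

int
main (void)
{
  uint32_t x;
  for (x = 1; x < 200001; x += 2)
    assert ((uint32_t) (x * inv_mod2_32 (x)) == 1);
  return 0;
}
#endif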
2432 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2433 that was done with the wrong signedness. ADJ_OPERAND is already the high
2434 half of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed
2435 product to become unsigned; if UNSIGNEDP is zero, adjust the unsigned
2436 product to become signed.
2438 The result is put in TARGET if that is convenient.
2440 MODE is the mode of operation. */
2442 rtx
2443 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2444 enum machine_mode mode;
2445 register rtx adj_operand, op0, op1, target;
2446 int unsignedp;
2448 rtx tem;
2449 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2451 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2452 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2453 NULL_RTX, 0);
2454 tem = expand_and (tem, op1, NULL_RTX);
2455 adj_operand = force_operand (gen_rtx (adj_code, mode, adj_operand, tem),
2456 adj_operand);
2458 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2459 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2460 NULL_RTX, 0);
2461 tem = expand_and (tem, op0, NULL_RTX);
2462 target = force_operand (gen_rtx (adj_code, mode, adj_operand, tem), target);
2464 return target;
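/* Illustration (not part of GCC): the identity the adjustment above
   implements, checked for 16-bit operands.  The unsigned and signed
   high halves differ by (op0 < 0 ? op1 : 0) + (op1 < 0 ? op0 : 0),
   modulo 2^16.  Assumes >> on a negative int is arithmetic, as on the
   usual two's-complement targets.  A standalone sketch; compile
   separately.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  int32_t a, b;
  for (a = -300; a < 300; a += 7)
    for (b = -300; b < 300; b += 11)
      {
        int16_t hs = (int16_t) ((a * b) >> 16);
        uint16_t hu = (uint16_t) (((uint32_t) (uint16_t) a
                                   * (uint16_t) b) >> 16);
        int16_t adj = hs;
        if (a < 0) adj += (int16_t) b;
        if (b < 0) adj += (int16_t) a;
        assert ((uint16_t) adj == hu);
      }
  return 0;
}
#endif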
2467 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2468 in TARGET if that is convenient, and return where the result is. If the
2469 operation can not be performed, 0 is returned.
2471 MODE is the mode of operation and result.
2473 UNSIGNEDP nonzero means unsigned multiply. */
2475 rtx
2476 expand_mult_highpart (mode, op0, cnst1, target, unsignedp)
2477 enum machine_mode mode;
2478 register rtx op0, target;
2479 unsigned HOST_WIDE_INT cnst1;
2480 int unsignedp;
2482 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2483 optab mul_highpart_optab;
2484 optab moptab;
2485 rtx tem;
2486 int size = GET_MODE_BITSIZE (mode);
2487 rtx op1, wide_op1;
2489 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2490 if (size > HOST_BITS_PER_WIDE_INT)
2491 abort ();
2493 op1 = GEN_INT (cnst1);
2495 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2496 wide_op1 = op1;
2497 else
2498 wide_op1
2499 = immed_double_const (cnst1,
2500 (unsignedp
2501 ? (HOST_WIDE_INT) 0
2502 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2503 wider_mode);
2505 /* expand_mult handles constant multiplication of word_mode
2506 or narrower. It does a poor job for large modes. */
2507 if (size < BITS_PER_WORD)
2509 /* We have to do this, since expand_binop doesn't do conversion for
2510 multiply. Maybe change expand_binop to handle widening multiply? */
2511 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2513 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2514 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2515 build_int_2 (size, 0), NULL_RTX, 1);
2516 return convert_modes (mode, wider_mode, tem, unsignedp);
2519 if (target == 0)
2520 target = gen_reg_rtx (mode);
2522 /* Firstly, try using a multiplication insn that only generates the needed
2523 high part of the product, with the signedness given by UNSIGNEDP. */
2524 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2525 target = expand_binop (mode, mul_highpart_optab,
2526 op0, op1, target, unsignedp, OPTAB_DIRECT);
2527 if (target)
2528 return target;
2530 /* Secondly, same as above, but use the signedness opposite to UNSIGNEDP.
2531 Need to adjust the result after the multiplication. */
2532 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2533 target = expand_binop (mode, mul_highpart_optab,
2534 op0, op1, target, unsignedp, OPTAB_DIRECT);
2535 if (target)
2536 /* We used the wrong signedness. Adjust the result. */
2537 return expand_mult_highpart_adjust (mode, target, op0,
2538 op1, target, unsignedp);
2540 /* Thirdly, we try to use a widening multiplication, or a wider mode
2541 multiplication. */
2543 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2544 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2545 ;
2546 else if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2547 moptab = smul_optab;
2548 else
2550 /* Try widening multiplication of opposite signedness, and adjust. */
2551 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2552 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2554 tem = expand_binop (wider_mode, moptab, op0, wide_op1,
2555 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2556 if (tem != 0)
2558 /* Extract the high half of the just generated product. */
2559 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2560 build_int_2 (size, 0), NULL_RTX, 1);
2561 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2562 /* We used the wrong signedness. Adjust the result. */
2563 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2564 target, unsignedp);
2568 /* As a last resort, try widening the mode and performing a
2569 non-widening multiplication. */
2570 moptab = smul_optab;
2573 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2574 tem = expand_binop (wider_mode, moptab, op0, wide_op1,
2575 NULL_RTX, unsignedp, OPTAB_WIDEN);
2576 if (tem == 0)
2577 return 0;
2579 /* Extract the high half of the just generated product. */
2580 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2581 build_int_2 (size, 0), NULL_RTX, 1);
2582 return convert_modes (mode, wider_mode, tem, unsignedp);
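/* Illustration (not part of GCC): the wider-mode fallback above, i.e.
   widen, multiply, and shift down to get the high half, written as a
   32x32->32 high multiply.  A standalone sketch; compile separately.  */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t
umulh32 (uint32_t a, uint32_t b)
{
  return (uint32_t) (((uint64_t) a * b) >> 32);
}

int
main (void)
{
  assert (umulh32 (0xffffffffu, 0xffffffffu) == 0xfffffffeu);
  assert (umulh32 (0x10000u, 0x10000u) == 1);
  return 0;
}
#endif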
2585 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2586 if that is convenient, and returning where the result is.
2587 You may request either the quotient or the remainder as the result;
2588 specify REM_FLAG nonzero to get the remainder.
2590 CODE is the expression code for which kind of division this is;
2591 it controls how rounding is done. MODE is the machine mode to use.
2592 UNSIGNEDP nonzero means do unsigned division. */
2594 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2595 and then correct it by or'ing in missing high bits
2596 if result of ANDI is nonzero.
2597 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2598 This could optimize to a bfexts instruction.
2599 But C doesn't use these operations, so their optimizations are
2600 left for later. */
2602 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2604 rtx
2605 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2606 int rem_flag;
2607 enum tree_code code;
2608 enum machine_mode mode;
2609 register rtx op0, op1, target;
2610 int unsignedp;
2612 enum machine_mode compute_mode;
2613 register rtx tquotient;
2614 rtx quotient = 0, remainder = 0;
2615 rtx last;
2616 int size;
2617 rtx insn, set;
2618 optab optab1, optab2;
2619 int op1_is_constant, op1_is_pow2;
2621 op1_is_constant = GET_CODE (op1) == CONST_INT;
2622 op1_is_pow2 = (op1_is_constant
2623 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2624 || EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))));
2626 /*
2627 This is the structure of expand_divmod:
2629 First comes code to fix up the operands so we can perform the operations
2630 correctly and efficiently.
2632 Second comes a switch statement with code specific for each rounding mode.
2633 For some special operands this code emits all RTL for the desired
2634 operation, for other cases, it generates only a quotient and stores it in
2635 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2636 to indicate that it has not done anything.
2638 Last comes code that finishes the operation. If QUOTIENT is set and
2639 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2640 QUOTIENT is not set, it is computed using trunc rounding.
2642 We try to generate special code for division and remainder when OP1 is a
2643 constant. If |OP1| = 2**n we can use shifts and some other fast
2644 operations. For other values of OP1, we compute a carefully selected
2645 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2646 by m.
2648 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2649 half of the product. Different strategies for generating the product are
2650 implemented in expand_mult_highpart.
2652 If what we actually want is the remainder, we generate that by another
2653 by-constant multiplication and a subtraction. */
2655 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2656 code below will malfunction if we are, so check here and handle
2657 the special case if so. */
2658 if (op1 == const1_rtx)
2659 return rem_flag ? const0_rtx : op0;
2661 if (target
2662 /* Don't use the function value register as a target
2663 since we have to read it as well as write it,
2664 and function-inlining gets confused by this. */
2665 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2666 /* Don't clobber an operand while doing a multi-step calculation. */
2667 || ((rem_flag || op1_is_constant)
2668 && (reg_mentioned_p (target, op0)
2669 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2670 || reg_mentioned_p (target, op1)
2671 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2672 target = 0;
2674 /* Get the mode in which to perform this computation. Normally it will
2675 be MODE, but sometimes we can't do the desired operation in MODE.
2676 If so, pick a wider mode in which we can do the operation. Convert
2677 to that mode at the start to avoid repeated conversions.
2679 First see what operations we need. These depend on the expression
2680 we are evaluating. (We assume that divxx3 insns exist under the
2681 same conditions as modxx3 insns, and that these insns don't normally
2682 fail. If these assumptions are not correct, we may generate less
2683 efficient code in some cases.)
2685 Then see if we find a mode in which we can open-code that operation
2686 (either a division, modulus, or shift). Finally, check for the smallest
2687 mode for which we can do the operation with a library call. */
2689 /* We might want to refine this now that we have division-by-constant
2690 optimization. Since expand_mult_highpart tries so many variants, it is
2691 not straightforward to generalize this. Maybe we should make an array
2692 of possible modes in init_expmed? Save this for GCC 2.7. */
2694 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2695 : (unsignedp ? udiv_optab : sdiv_optab));
2696 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2698 for (compute_mode = mode; compute_mode != VOIDmode;
2699 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2700 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2701 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2702 break;
2704 if (compute_mode == VOIDmode)
2705 for (compute_mode = mode; compute_mode != VOIDmode;
2706 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2707 if (optab1->handlers[(int) compute_mode].libfunc
2708 || optab2->handlers[(int) compute_mode].libfunc)
2709 break;
2711 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2712 in expand_binop. */
2713 if (compute_mode == VOIDmode)
2714 compute_mode = mode;
2716 if (target && GET_MODE (target) == compute_mode)
2717 tquotient = target;
2718 else
2719 tquotient = gen_reg_rtx (compute_mode);
2721 size = GET_MODE_BITSIZE (compute_mode);
2722 #if 0
2723 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
2724 (mode), and thereby get better code when OP1 is a constant. Do that for
2725 GCC 2.7. It will require going over all usages of SIZE below. */
2726 size = GET_MODE_BITSIZE (mode);
2727 #endif
2729 /* Now convert to the best mode to use. */
2730 if (compute_mode != mode)
2732 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
2733 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
2736 /* If one of the operands is a volatile MEM, copy it into a register. */
2738 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2739 op0 = force_reg (compute_mode, op0);
2740 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2741 op1 = force_reg (compute_mode, op1);
2743 /* If we need the remainder or if OP1 is constant, we need to
2744 put OP0 in a register in case it has any queued subexpressions. */
2745 if (rem_flag || op1_is_constant)
2746 op0 = force_reg (compute_mode, op0);
2748 last = get_last_insn ();
2750 /* Promote floor rounding to trunc rounding for unsigned operations. */
2751 if (unsignedp)
2753 if (code == FLOOR_DIV_EXPR)
2754 code = TRUNC_DIV_EXPR;
2755 if (code == FLOOR_MOD_EXPR)
2756 code = TRUNC_MOD_EXPR;
2759 if (op1 != const0_rtx)
2760 switch (code)
2762 case TRUNC_MOD_EXPR:
2763 case TRUNC_DIV_EXPR:
2764 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
2766 if (unsignedp
2767 || (INTVAL (op1)
2768 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (compute_mode) - 1)))
2770 unsigned HOST_WIDE_INT mh, ml;
2771 int pre_shift, post_shift;
2772 int dummy;
2773 unsigned HOST_WIDE_INT d = INTVAL (op1);
2775 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
2777 pre_shift = floor_log2 (d);
2778 if (rem_flag)
2780 remainder = expand_binop (compute_mode, and_optab, op0,
2781 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
2782 remainder, 1,
2783 OPTAB_LIB_WIDEN);
2784 if (remainder)
2785 return gen_lowpart (mode, remainder);
2787 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2788 build_int_2 (pre_shift, 0),
2789 tquotient, 1);
2791 else if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
2793 /* Most significant bit of divisor is set, emit a scc insn.
2794 emit_store_flag needs to be passed a place for the
2795 result. */
2796 quotient = emit_store_flag (tquotient, GEU, op0, op1,
2797 compute_mode, 1, 1);
2798 /* Can emit_store_flag have failed? */
2799 if (quotient == 0)
2800 goto fail1;
2802 else
2804 /* Find a suitable multiplier and right shift count
2805 instead of dividing by D. */
2807 mh = choose_multiplier (d, size, size,
2808 &ml, &post_shift, &dummy);
2810 /* If the suggested multiplier is more than SIZE bits, we
2811 can do better for even divisors, using an initial right
2812 shift. */
2813 if (mh != 0 && (d & 1) == 0)
2815 pre_shift = floor_log2 (d & -d);
2816 mh = choose_multiplier (d >> pre_shift, size,
2817 size - pre_shift,
2818 &ml, &post_shift, &dummy);
2819 if (mh)
2820 abort ();
2822 else
2823 pre_shift = 0;
2825 if (mh != 0)
2827 rtx t1, t2, t3, t4;
2829 t1 = expand_mult_highpart (compute_mode, op0, ml,
2830 NULL_RTX, 1);
2831 if (t1 == 0)
2832 goto fail1;
2833 t2 = force_operand (gen_rtx (MINUS, compute_mode,
2834 op0, t1),
2835 NULL_RTX);
2836 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2837 build_int_2 (1, 0), NULL_RTX, 1);
2838 t4 = force_operand (gen_rtx (PLUS, compute_mode,
2839 t1, t3),
2840 NULL_RTX);
2841 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t4,
2842 build_int_2 (post_shift - 1,
2843 0),
2844 tquotient, 1);
2846 else
2848 rtx t1, t2;
2850 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2851 build_int_2 (pre_shift, 0),
2852 NULL_RTX, 1);
2853 t2 = expand_mult_highpart (compute_mode, t1, ml,
2854 NULL_RTX, 1);
2855 if (t2 == 0)
2856 goto fail1;
2857 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2858 build_int_2 (post_shift, 0),
2859 tquotient, 1);
2863 insn = get_last_insn ();
2864 if (insn != last
2865 && (set = single_set (insn)) != 0
2866 && SET_DEST (set) == quotient)
2867 REG_NOTES (insn)
2868 = gen_rtx (EXPR_LIST, REG_EQUAL,
2869 gen_rtx (UDIV, compute_mode, op0, op1),
2870 REG_NOTES (insn));
2872 else /* TRUNC_DIV, signed */
2874 unsigned HOST_WIDE_INT ml;
2875 int lgup, post_shift;
2876 HOST_WIDE_INT d = INTVAL (op1);
2877 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
2879 /* n rem d = n rem -d */
2880 if (rem_flag && d < 0)
2882 d = abs_d;
2883 op1 = GEN_INT (abs_d);
2886 if (d == 1)
2887 quotient = op0;
2888 else if (d == -1)
2889 quotient = expand_unop (compute_mode, neg_optab, op0,
2890 tquotient, 0);
2891 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
2892 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
2893 ;
2894 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
2896 lgup = floor_log2 (abs_d);
2897 if (abs_d != 2 && BRANCH_COST < 3)
2899 rtx label = gen_label_rtx ();
2900 rtx t1;
2902 t1 = copy_to_mode_reg (compute_mode, op0);
2903 emit_cmp_insn (t1, const0_rtx, GE,
2904 NULL_RTX, compute_mode, 0, 0);
2905 emit_jump_insn (gen_bge (label));
2906 expand_inc (t1, GEN_INT (abs_d - 1));
2907 emit_label (label);
2908 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
2909 build_int_2 (lgup, 0),
2910 tquotient, 0);
2912 else
2914 rtx t1, t2, t3;
2915 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2916 build_int_2 (size - 1, 0),
2917 NULL_RTX, 0);
2918 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
2919 build_int_2 (size - lgup, 0),
2920 NULL_RTX, 1);
2921 t3 = force_operand (gen_rtx (PLUS, compute_mode,
2922 op0, t2),
2923 NULL_RTX);
2924 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
2925 build_int_2 (lgup, 0),
2926 tquotient, 0);
2929 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
2930 the quotient. */
2931 if (d < 0)
2933 insn = get_last_insn ();
2934 if (insn != last
2935 && (set = single_set (insn)) != 0
2936 && SET_DEST (set) == quotient)
2937 REG_NOTES (insn)
2938 = gen_rtx (EXPR_LIST, REG_EQUAL,
2939 gen_rtx (DIV, compute_mode, op0,
2940 GEN_INT (abs_d)),
2941 REG_NOTES (insn));
2943 quotient = expand_unop (compute_mode, neg_optab,
2944 quotient, quotient, 0);
2947 else
2949 choose_multiplier (abs_d, size, size - 1,
2950 &ml, &post_shift, &lgup);
2951 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
2953 rtx t1, t2, t3;
2955 t1 = expand_mult_highpart (compute_mode, op0, ml,
2956 NULL_RTX, 0);
2957 if (t1 == 0)
2958 goto fail1;
2959 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
2960 build_int_2 (post_shift, 0), NULL_RTX, 0);
2961 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2962 build_int_2 (size - 1, 0), NULL_RTX, 0);
2963 if (d < 0)
2964 quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t2),
2965 tquotient);
2966 else
2967 quotient = force_operand (gen_rtx (MINUS, compute_mode, t2, t3),
2968 tquotient);
2970 else
2972 rtx t1, t2, t3, t4;
2974 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
2975 t1 = expand_mult_highpart (compute_mode, op0, ml,
2976 NULL_RTX, 0);
2977 if (t1 == 0)
2978 goto fail1;
2979 t2 = force_operand (gen_rtx (PLUS, compute_mode, t1, op0),
2980 NULL_RTX);
2981 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2982 build_int_2 (post_shift, 0), NULL_RTX, 0);
2983 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2984 build_int_2 (size - 1, 0), NULL_RTX, 0);
2985 if (d < 0)
2986 quotient = force_operand (gen_rtx (MINUS, compute_mode, t4, t3),
2987 tquotient);
2988 else
2989 quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t4),
2990 tquotient);
2994 insn = get_last_insn ();
2995 if (insn != last
2996 && (set = single_set (insn)) != 0
2997 && SET_DEST (set) == quotient)
2998 REG_NOTES (insn)
2999 = gen_rtx (EXPR_LIST, REG_EQUAL,
3000 gen_rtx (DIV, compute_mode, op0, op1),
3001 REG_NOTES (insn));
3003 break;
3005 fail1:
3006 delete_insns_since (last);
3007 break;
3009 case FLOOR_DIV_EXPR:
3010 case FLOOR_MOD_EXPR:
3011 /* We will come here only for signed operations. */
3012 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3014 unsigned HOST_WIDE_INT mh, ml;
3015 int pre_shift, lgup, post_shift;
3016 HOST_WIDE_INT d = INTVAL (op1);
3018 if (d > 0)
3020 /* We could just as easily deal with negative constants here,
3021 but it does not seem worth the trouble for GCC 2.6. */
3022 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3024 pre_shift = floor_log2 (d);
3025 if (rem_flag)
3027 remainder = expand_binop (compute_mode, and_optab, op0,
3028 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3029 remainder, 0, OPTAB_LIB_WIDEN);
3030 if (remainder)
3031 return gen_lowpart (mode, remainder);
3033 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3034 build_int_2 (pre_shift, 0),
3035 tquotient, 0);
3037 else
3039 rtx t1, t2, t3, t4;
3041 mh = choose_multiplier (d, size, size - 1,
3042 &ml, &post_shift, &lgup);
3043 if (mh)
3044 abort ();
3046 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3047 build_int_2 (size - 1, 0), NULL_RTX, 0);
3048 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3049 NULL_RTX, 0, OPTAB_WIDEN);
3050 t3 = expand_mult_highpart (compute_mode, t2, ml,
3051 NULL_RTX, 1);
3052 if (t3 != 0)
3054 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3055 build_int_2 (post_shift, 0),
3056 NULL_RTX, 1);
3057 quotient = expand_binop (compute_mode, xor_optab,
3058 t4, t1, tquotient, 0,
3059 OPTAB_WIDEN);
3063 else
3065 rtx nsign, t1, t2, t3, t4;
3066 t1 = force_operand (gen_rtx (PLUS, compute_mode,
3067 op0, constm1_rtx), NULL_RTX);
3068 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3069 0, OPTAB_WIDEN);
3070 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3071 build_int_2 (size - 1, 0), NULL_RTX, 0);
3072 t3 = force_operand (gen_rtx (MINUS, compute_mode, t1, nsign),
3073 NULL_RTX);
3074 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3075 NULL_RTX, 0);
3076 if (t4)
3078 rtx t5;
3079 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3080 NULL_RTX, 0);
3081 quotient = force_operand (gen_rtx (PLUS, compute_mode,
3082 t4, t5),
3083 tquotient);
3088 if (quotient != 0)
3089 break;
3090 delete_insns_since (last);
3092 /* Try using an instruction that produces both the quotient and
3093 remainder, using truncation. We can easily compensate the quotient
3094 or remainder to get floor rounding, once we have the remainder.
3095 Notice that we also compute the final remainder value here,
3096 and return the result right away. */
3097 if (target == 0)
3098 target = gen_reg_rtx (compute_mode);
3099 if (rem_flag)
3101 remainder = target;
3102 quotient = gen_reg_rtx (compute_mode);
3104 else
3106 quotient = target;
3107 remainder = gen_reg_rtx (compute_mode);
3110 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3111 quotient, remainder, 0))
3113 /* This could be computed with a branch-less sequence.
3114 Save that for later. */
3115 rtx tem;
3116 rtx label = gen_label_rtx ();
3117 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3118 compute_mode, 0, 0);
3119 emit_jump_insn (gen_beq (label));
3120 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3121 NULL_RTX, 0, OPTAB_WIDEN);
3122 emit_cmp_insn (tem, const0_rtx, GE, NULL_RTX, compute_mode, 0, 0);
3123 emit_jump_insn (gen_bge (label));
3124 expand_dec (quotient, const1_rtx);
3125 expand_inc (remainder, op1);
3126 emit_label (label);
3127 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3130 /* No luck with division elimination or divmod. Have to do it
3131 by conditionally adjusting op0 *and* the result. */
3133 rtx label1, label2, label3, label4, label5;
3134 rtx adjusted_op0;
3135 rtx tem;
3137 quotient = gen_reg_rtx (compute_mode);
3138 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3139 label1 = gen_label_rtx ();
3140 label2 = gen_label_rtx ();
3141 label3 = gen_label_rtx ();
3142 label4 = gen_label_rtx ();
3143 label5 = gen_label_rtx ();
3144 emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX, compute_mode, 0, 0);
3145 emit_jump_insn (gen_blt (label2));
3146 emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
3147 compute_mode, 0, 0);
3148 emit_jump_insn (gen_blt (label1));
3149 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3150 quotient, 0, OPTAB_LIB_WIDEN);
3151 if (tem != quotient)
3152 emit_move_insn (quotient, tem);
3153 emit_jump_insn (gen_jump (label5));
3154 emit_barrier ();
3155 emit_label (label1);
3156 expand_inc (adjusted_op0, const1_rtx);
3157 emit_jump_insn (gen_jump (label4));
3158 emit_barrier ();
3159 emit_label (label2);
3160 emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
3161 compute_mode, 0, 0);
3162 emit_jump_insn (gen_bgt (label3));
3163 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3164 quotient, 0, OPTAB_LIB_WIDEN);
3165 if (tem != quotient)
3166 emit_move_insn (quotient, tem);
3167 emit_jump_insn (gen_jump (label5));
3168 emit_barrier ();
3169 emit_label (label3);
3170 expand_dec (adjusted_op0, const1_rtx);
3171 emit_label (label4);
3172 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3173 quotient, 0, OPTAB_LIB_WIDEN);
3174 if (tem != quotient)
3175 emit_move_insn (quotient, tem);
3176 expand_dec (quotient, const1_rtx);
3177 emit_label (label5);
3179 break;
3181 case CEIL_DIV_EXPR:
3182 case CEIL_MOD_EXPR:
3183 if (unsignedp)
3185 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3187 rtx t1, t2, t3;
3188 unsigned HOST_WIDE_INT d = INTVAL (op1);
3189 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3190 build_int_2 (floor_log2 (d), 0),
3191 tquotient, 1);
3192 t2 = expand_binop (compute_mode, and_optab, op0,
3193 GEN_INT (d - 1),
3194 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3195 t3 = gen_reg_rtx (compute_mode);
3196 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3197 compute_mode, 1, 1);
3198 if (t3 == 0)
3200 rtx lab;
3201 lab = gen_label_rtx ();
3202 emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
3203 compute_mode, 0, 0);
3204 emit_jump_insn (gen_beq (lab));
3205 expand_inc (t1, const1_rtx);
3206 emit_label (lab);
3207 quotient = t1;
3209 else
3210 quotient = force_operand (gen_rtx (PLUS, compute_mode,
3211 t1, t3),
3212 tquotient);
3213 break;
3216 /* Try using an instruction that produces both the quotient and
3217 remainder, using truncation. We can easily compensate the
3218 quotient or remainder to get ceiling rounding, once we have the
3219 remainder. Notice that we also compute the final remainder
3220 value here, and return the result right away. */
3221 if (target == 0)
3222 target = gen_reg_rtx (compute_mode);
3223 if (rem_flag)
3225 remainder = target;
3226 quotient = gen_reg_rtx (compute_mode);
3228 else
3230 quotient = target;
3231 remainder = gen_reg_rtx (compute_mode);
3234 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3235 remainder, 1))
3237 /* This could be computed with a branch-less sequence.
3238 Save that for later. */
3239 rtx label = gen_label_rtx ();
3240 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3241 compute_mode, 0, 0);
3242 emit_jump_insn (gen_beq (label));
3243 expand_inc (quotient, const1_rtx);
3244 expand_dec (remainder, op1);
3245 emit_label (label);
3246 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3249 /* No luck with division elimination or divmod. Have to do it
3250 by conditionally adjusting op0 *and* the result. */
3252 rtx label1, label2;
3253 rtx adjusted_op0, tem;
3255 quotient = gen_reg_rtx (compute_mode);
3256 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3257 label1 = gen_label_rtx ();
3258 label2 = gen_label_rtx ();
3259 emit_cmp_insn (adjusted_op0, const0_rtx, NE, NULL_RTX,
3260 compute_mode, 0, 0);
3261 emit_jump_insn (gen_bne (label1));
3262 emit_move_insn (quotient, const0_rtx);
3263 emit_jump_insn (gen_jump (label2));
3264 emit_barrier ();
3265 emit_label (label1);
3266 expand_dec (adjusted_op0, const1_rtx);
3267 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3268 quotient, 1, OPTAB_LIB_WIDEN);
3269 if (tem != quotient)
3270 emit_move_insn (quotient, tem);
3271 expand_inc (quotient, const1_rtx);
3272 emit_label (label2);
3275 else /* signed */
3277 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3278 && INTVAL (op1) >= 0)
3280 /* This is extremely similar to the code for the unsigned case
3281 above. For 2.7 we should merge these variants, but for
3282 2.6.1 I don't want to touch the code for unsigned since that
3283 gets used in C. The signed case will only be used by other
3284 languages (Ada). */
3286 rtx t1, t2, t3;
3287 unsigned HOST_WIDE_INT d = INTVAL (op1);
3288 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3289 build_int_2 (floor_log2 (d), 0),
3290 tquotient, 0);
3291 t2 = expand_binop (compute_mode, and_optab, op0,
3292 GEN_INT (d - 1),
3293 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3294 t3 = gen_reg_rtx (compute_mode);
3295 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3296 compute_mode, 1, 1);
3297 if (t3 == 0)
3299 rtx lab;
3300 lab = gen_label_rtx ();
3301 emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
3302 compute_mode, 0, 0);
3303 emit_jump_insn (gen_beq (lab));
3304 expand_inc (t1, const1_rtx);
3305 emit_label (lab);
3306 quotient = t1;
3308 else
3309 quotient = force_operand (gen_rtx (PLUS, compute_mode,
3310 t1, t3),
3311 tquotient);
3312 break;
3315 /* Try using an instruction that produces both the quotient and
3316 remainder, using truncation. We can easily compensate the
3317 quotient or remainder to get ceiling rounding, once we have the
3318 remainder. Notice that we also compute the final remainder
3319 value here, and return the result right away. */
3320 if (target == 0)
3321 target = gen_reg_rtx (compute_mode);
3322 if (rem_flag)
3324 remainder = target;
3325 quotient = gen_reg_rtx (compute_mode);
3327 else
3329 quotient = target;
3330 remainder = gen_reg_rtx (compute_mode);
3333 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3334 remainder, 0))
3336 /* This could be computed with a branch-less sequence.
3337 Save that for later. */
3338 rtx tem;
3339 rtx label = gen_label_rtx ();
3340 emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
3341 compute_mode, 0, 0);
3342 emit_jump_insn (gen_beq (label));
3343 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3344 NULL_RTX, 0, OPTAB_WIDEN);
3345 emit_cmp_insn (tem, const0_rtx, LT, NULL_RTX,
3346 compute_mode, 0, 0);
3347 emit_jump_insn (gen_blt (label));
3348 expand_inc (quotient, const1_rtx);
3349 expand_dec (remainder, op1);
3350 emit_label (label);
3351 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3354 /* No luck with division elimination or divmod. Have to do it
3355 by conditionally adjusting op0 *and* the result. */
3357 rtx label1, label2, label3, label4, label5;
3358 rtx adjusted_op0;
3359 rtx tem;
3361 quotient = gen_reg_rtx (compute_mode);
3362 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3363 label1 = gen_label_rtx ();
3364 label2 = gen_label_rtx ();
3365 label3 = gen_label_rtx ();
3366 label4 = gen_label_rtx ();
3367 label5 = gen_label_rtx ();
3368 emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX,
3369 compute_mode, 0, 0);
3370 emit_jump_insn (gen_blt (label2));
3371 emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
3372 compute_mode, 0, 0);
3373 emit_jump_insn (gen_bgt (label1));
3374 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3375 quotient, 0, OPTAB_LIB_WIDEN);
3376 if (tem != quotient)
3377 emit_move_insn (quotient, tem);
3378 emit_jump_insn (gen_jump (label5));
3379 emit_barrier ();
3380 emit_label (label1);
3381 expand_dec (adjusted_op0, const1_rtx);
3382 emit_jump_insn (gen_jump (label4));
3383 emit_barrier ();
3384 emit_label (label2);
3385 emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
3386 compute_mode, 0, 0);
3387 emit_jump_insn (gen_blt (label3));
3388 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3389 quotient, 0, OPTAB_LIB_WIDEN);
3390 if (tem != quotient)
3391 emit_move_insn (quotient, tem);
3392 emit_jump_insn (gen_jump (label5));
3393 emit_barrier ();
3394 emit_label (label3);
3395 expand_inc (adjusted_op0, const1_rtx);
3396 emit_label (label4);
3397 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3398 quotient, 0, OPTAB_LIB_WIDEN);
3399 if (tem != quotient)
3400 emit_move_insn (quotient, tem);
3401 expand_inc (quotient, const1_rtx);
3402 emit_label (label5);
3405 break;
3407 case EXACT_DIV_EXPR:
3408 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3410 HOST_WIDE_INT d = INTVAL (op1);
3411 unsigned HOST_WIDE_INT ml;
3412 int post_shift;
3413 rtx t1;
3415 post_shift = floor_log2 (d & -d);
3416 ml = invert_mod2n (d >> post_shift, size);
3417 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3418 unsignedp);
3419 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3420 build_int_2 (post_shift, 0),
3421 NULL_RTX, unsignedp);
3423 insn = get_last_insn ();
3424 REG_NOTES (insn)
3425 = gen_rtx (EXPR_LIST, REG_EQUAL,
3426 gen_rtx (unsignedp ? UDIV : DIV, compute_mode,
3427 op0, op1),
3428 REG_NOTES (insn));
3430 break;
3432 case ROUND_DIV_EXPR:
3433 case ROUND_MOD_EXPR:
3434 if (unsignedp)
3436 rtx tem;
3437 rtx label;
3438 label = gen_label_rtx ();
3439 quotient = gen_reg_rtx (compute_mode);
3440 remainder = gen_reg_rtx (compute_mode);
3441 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3443 rtx tem;
3444 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3445 quotient, 1, OPTAB_LIB_WIDEN);
3446 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3447 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3448 remainder, 1, OPTAB_LIB_WIDEN);
3450 tem = plus_constant (op1, -1);
3451 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3452 build_int_2 (1, 0), NULL_RTX, 1);
3453 emit_cmp_insn (remainder, tem, LEU, NULL_RTX, compute_mode, 0, 0);
3454 emit_jump_insn (gen_bleu (label));
3455 expand_inc (quotient, const1_rtx);
3456 expand_dec (remainder, op1);
3457 emit_label (label);
3459 else
3461 rtx abs_rem, abs_op1, tem, mask;
3462 rtx label;
3463 label = gen_label_rtx ();
3464 quotient = gen_reg_rtx (compute_mode);
3465 remainder = gen_reg_rtx (compute_mode);
3466 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3468 rtx tem;
3469 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3470 quotient, 0, OPTAB_LIB_WIDEN);
3471 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3472 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3473 remainder, 0, OPTAB_LIB_WIDEN);
3475 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3476 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3477 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3478 build_int_2 (1, 0), NULL_RTX, 1);
3479 emit_cmp_insn (tem, abs_op1, LTU, NULL_RTX, compute_mode, 0, 0);
3480 emit_jump_insn (gen_bltu (label));
3481 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3482 NULL_RTX, 0, OPTAB_WIDEN);
3483 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3484 build_int_2 (size - 1, 0), NULL_RTX, 0);
3485 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3486 NULL_RTX, 0, OPTAB_WIDEN);
3487 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3488 NULL_RTX, 0, OPTAB_WIDEN);
3489 expand_inc (quotient, tem);
3490 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3491 NULL_RTX, 0, OPTAB_WIDEN);
3492 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3493 NULL_RTX, 0, OPTAB_WIDEN);
3494 expand_dec (remainder, tem);
3495 emit_label (label);
3497 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3500 if (quotient == 0)
3502 if (rem_flag)
3504 /* Try to produce the remainder directly without a library call. */
3505 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3506 op0, op1, target,
3507 unsignedp, OPTAB_WIDEN);
3508 if (remainder == 0)
3510 /* No luck there. Can we do remainder and divide at once
3511 without a library call? */
3512 remainder = gen_reg_rtx (compute_mode);
3513 if (! expand_twoval_binop ((unsignedp
3514 ? udivmod_optab
3515 : sdivmod_optab),
3516 op0, op1,
3517 NULL_RTX, remainder, unsignedp))
3518 remainder = 0;
3521 if (remainder)
3522 return gen_lowpart (mode, remainder);
3525 /* Produce the quotient. */
3526 /* Try a quotient insn, but not a library call. */
3527 quotient = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3528 op0, op1, rem_flag ? NULL_RTX : target,
3529 unsignedp, OPTAB_WIDEN);
3530 if (quotient == 0)
3532 /* No luck there. Try a quotient-and-remainder insn,
3533 keeping the quotient alone. */
3534 quotient = gen_reg_rtx (compute_mode);
3535 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3536 op0, op1,
3537 quotient, NULL_RTX, unsignedp))
3539 quotient = 0;
3540 if (! rem_flag)
3541 /* Still no luck. If we are not computing the remainder,
3542 use a library call for the quotient. */
3543 quotient = sign_expand_binop (compute_mode,
3544 udiv_optab, sdiv_optab,
3545 op0, op1, target,
3546 unsignedp, OPTAB_LIB_WIDEN);
3551 if (rem_flag)
3553 if (quotient == 0)
3554 /* No divide instruction either. Use library for remainder. */
3555 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3556 op0, op1, target,
3557 unsignedp, OPTAB_LIB_WIDEN);
3558 else
3560 /* We divided. Now finish doing X - Y * (X / Y). */
3561 remainder = expand_mult (compute_mode, quotient, op1,
3562 NULL_RTX, unsignedp);
3563 remainder = expand_binop (compute_mode, sub_optab, op0,
3564 remainder, target, unsignedp,
3565 OPTAB_LIB_WIDEN);
3569 return gen_lowpart (mode, rem_flag ? remainder : quotient);
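/* Illustration (not part of GCC): the floor-rounding compensation used
   in the FLOOR_DIV_EXPR path, derived from truncating division.  A
   standalone sketch assuming division truncates toward zero (guaranteed
   from C99 on); compile separately.  */
#if 0
#include <assert.h>

static long
floor_div (long n, long d)
{
  long q = n / d;               /* truncating division */
  long r = n % d;
  if (r != 0 && ((n < 0) != (d < 0)))
    q--;                        /* opposite signs: round down by one */
  return q;
}

int
main (void)
{
  assert (floor_div (7, 2) == 3);
  assert (floor_div (-7, 2) == -4);
  assert (floor_div (7, -2) == -4);
  assert (floor_div (-7, -2) == 3);
  return 0;
}
#endif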
3572 /* Return a tree node with data type TYPE, describing the value of X.
3573 Usually this is an RTL_EXPR, if there is no obvious better choice.
3574 X may be an expression; however, we only support those expressions
3575 generated by loop.c. */
3577 tree
3578 make_tree (type, x)
3579 tree type;
3580 rtx x;
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      t = build_int_2 (INTVAL (x),
                       TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
      TREE_TYPE (t) = type;
      return t;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        {
          t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
          TREE_TYPE (t) = type;
        }
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;
    case PLUS:
      return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case MINUS:
      return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case NEG:
      return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));

    case MULT:
      return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case ASHIFT:
      return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case LSHIFTRT:
      return fold (convert (type,
                            build (RSHIFT_EXPR, unsigned_type (type),
                                   make_tree (unsigned_type (type),
                                              XEXP (x, 0)),
                                   make_tree (type, XEXP (x, 1)))));

    case ASHIFTRT:
      return fold (convert (type,
                            build (RSHIFT_EXPR, signed_type (type),
                                   make_tree (signed_type (type), XEXP (x, 0)),
                                   make_tree (type, XEXP (x, 1)))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = signed_type (type);
      else
        t = type;

      return fold (convert (type,
                            build (TRUNC_DIV_EXPR, t,
                                   make_tree (t, XEXP (x, 0)),
                                   make_tree (t, XEXP (x, 1)))));
    case UDIV:
      t = unsigned_type (type);
      return fold (convert (type,
                            build (TRUNC_DIV_EXPR, t,
                                   make_tree (t, XEXP (x, 0)),
                                   make_tree (t, XEXP (x, 1)))));
    default:
      t = make_node (RTL_EXPR);
      TREE_TYPE (t) = type;
      RTL_EXPR_RTL (t) = x;
      /* There are no insns to be output
         when this rtl_expr is used.  */
      RTL_EXPR_SEQUENCE (t) = 0;
      return t;
    }
}
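
/* For example, an address computation such as
   (plus:SI (reg:SI 100) (const_int 4)) comes back as a PLUS_EXPR whose
   second operand is an INTEGER_CST of 4 and whose first operand is an
   RTL_EXPR wrapping the register (the default case above), leaving
   fold a chance to simplify.  (Illustrative; the exact nodes depend on
   TYPE.)  */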

/* Return an rtx representing the value of X * MULT + ADD.
   TARGET is a suggestion for where to store the result (an rtx).
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode;
   if it has no mode (VOIDmode), MODE is assumed.
   UNSIGNEDP is non-zero to do unsigned multiplication.
   This may emit insns.  */

rtx
expand_mult_add (x, target, mult, add, mode, unsignedp)
     rtx x, target, mult, add;
     enum machine_mode mode;
     int unsignedp;
{
  tree type = type_for_mode (mode, unsignedp);
  tree add_type = (GET_MODE (add) == VOIDmode
                   ? type : type_for_mode (GET_MODE (add), unsignedp));
  tree result = fold (build (PLUS_EXPR, type,
                             fold (build (MULT_EXPR, type,
                                          make_tree (type, x),
                                          make_tree (type, mult))),
                             make_tree (add_type, add)));

  return expand_expr (result, target, VOIDmode, 0);
}
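
/* For instance, with X = (reg:SI 100), MULT = (const_int 4) and
   ADD = (const_int 8), the tree built here is 4 * reg + 8; the detour
   through trees lets fold simplify constant operands before any insns
   are emitted.  (Hypothetical operand values, for illustration.)  */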

/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (op0, op1, target)
     rtx op0, op1, target;
{
  enum machine_mode mode = VOIDmode;
  rtx tem;

  if (GET_MODE (op0) != VOIDmode)
    mode = GET_MODE (op0);
  else if (GET_MODE (op1) != VOIDmode)
    mode = GET_MODE (op1);

  if (mode != VOIDmode)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
  else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
    tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
  else
    abort ();

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
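
/* For example, expand_and (GEN_INT (0x3f), GEN_INT (0x15), NULL_RTX)
   takes the CONST_INT arm above and simply returns GEN_INT (0x15)
   without emitting any insns; when either operand has a mode,
   expand_binop emits a real AND.  (Illustrative constants.)  */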

/* Emit a store-flags instruction for comparison CODE on OP0 and OP1,
   storing the result in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = 0;
  rtx pattern, comparison;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    }
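
  /* These rewrites use ordinary integer identities: for signed x,
     x < 1 is x <= 0, while for unsigned x, x < 1 can hold only when
     x == 0 and x >= 1 is exactly x != 0.  */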

  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (STORE_FLAG_VALUE
                  == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = protect_from_queue (op0, 0);
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0, subtarget, 0);

      if (normalizep || STORE_FLAG_VALUE == 1)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
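
  /* Worked example: for A < 0 in a 32-bit mode with NORMALIZEP == 1,
     this emits just a logical shift, (unsigned) A >> 31, which is 1
     exactly when the sign bit of A is set; for A >= 0 the complement
     is taken first, so (~A) >> 31 tests the opposite.  (Bit width
     assumed for illustration.)  */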

  if (icode != CODE_FOR_nothing)
    {
      /* We think we may be able to do this with a scc insn.  Emit the
         comparison and then the scc insn.

         compare_from_rtx may call emit_queue, which would be deleted below
         if the scc insn fails.  So call it ourselves before setting LAST.  */

      emit_queue ();
      last = get_last_insn ();

      comparison
        = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
      if (GET_CODE (comparison) == CONST_INT)
        return (comparison == const0_rtx ? const0_rtx
                : normalizep == 1 ? const1_rtx
                : normalizep == -1 ? constm1_rtx
                : const_true_rtx);

      /* If the code of COMPARISON doesn't match CODE, something is
         wrong; we can no longer be sure that we have the operation.
         We could handle this case, but it should not happen.  */

      if (GET_CODE (comparison) != code)
        abort ();

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_operand_mode[(int) icode][0];
      subtarget = target;
      if (preserve_subexpressions_p ()
          || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
        subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
        {
          emit_insn (pattern);

          /* If we are converting to a wider mode, first convert to
             TARGET_MODE, then normalize.  This produces better combining
             opportunities on machines that have a SIGN_EXTRACT when we are
             testing a single bit.  This mostly benefits the 68k.

             If STORE_FLAG_VALUE does not have the sign bit set when
             interpreted in COMPARE_MODE, we can do this conversion as
             unsigned, which is usually more efficient.  */
          if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
            {
              convert_move (target, subtarget,
                            (GET_MODE_BITSIZE (compare_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                            && 0 == (STORE_FLAG_VALUE
                                     & ((HOST_WIDE_INT) 1
                                        << (GET_MODE_BITSIZE (compare_mode) - 1))));
              op0 = target;
              compare_mode = target_mode;
            }
          else
            op0 = subtarget;

          /* If we want to keep subexpressions around, don't reuse our
             last target.  */

          if (preserve_subexpressions_p ())
            subtarget = 0;

          /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
             we don't have to do anything.  */
          if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
            ;
          else if (normalizep == - STORE_FLAG_VALUE)
            op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

          /* We don't want to use STORE_FLAG_VALUE < 0 below since this
             makes it hard to use a value of just the sign bit due to
             ANSI integer constant typing rules.  */
          else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
                   && (STORE_FLAG_VALUE
                       & ((HOST_WIDE_INT) 1
                          << (GET_MODE_BITSIZE (compare_mode) - 1))))
            op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
                                size_int (GET_MODE_BITSIZE (compare_mode) - 1),
                                subtarget, normalizep == 1);
          else if (STORE_FLAG_VALUE & 1)
            {
              op0 = expand_and (op0, const1_rtx, subtarget);
              if (normalizep == -1)
                op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
            }
          else
            abort ();

          /* If we were converting to a smaller mode, do the
             conversion now.  */
          if (target_mode != compare_mode)
            {
              convert_move (target, op0, 0);
              return target;
            }
          else
            return op0;
        }
    }

  if (last)
    delete_insns_since (last);

  subtarget = target_mode == mode ? target : 0;

  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem == 0)
        delete_insns_since (last);
      return tem;
    }
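
  /* This relies on the identity (x == y) == ((x ^ y) == 0), and
     likewise with subtraction: the recursive call then only has to
     handle a comparison against zero, which more targets support
     directly.  */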

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
               && (STORE_FLAG_VALUE
                   == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
        ;
      else
        return 0;
    }

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
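
  /* Worked example in a 32-bit mode: for A == 0 this computes
     0 | -1 = -1 (sign bit set, so A <= 0 holds); for A == 5 it
     computes 5 | 4 = 5 (sign bit clear); for negative A both operands
     already have the sign bit set.  (Bit width assumed for
     illustration.)  */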

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }
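
  /* Worked example in a 32-bit mode: for A == 7 this computes
     (7 >> 31) - 7 = -7 (sign bit set, so A > 0 holds); for A == 0 it
     yields 0, and for A == -3 it yields -1 - (-3) = 2 (sign bit
     clear).  (Bit width assumed for illustration.)  */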

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is non-zero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          op0 = protect_from_queue (op0, 0);
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */

      if (tem == 0 && (code == NE || BRANCH_COST > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }
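
  /* In other words: with an abs instruction, -|A| has the sign bit set
     exactly when A != 0, and |A| - 1 has it set exactly when A == 0;
     the fallback (-A) | A likewise has the sign bit set iff A is
     non-zero.  */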

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        size_int (GET_MODE_BITSIZE (mode) - 1),
                        tem, normalizep == 1);

  if (tem && GET_MODE (tem) != target_mode)
    {
      convert_move (target, tem, 0);
      tem = target;
    }

  if (tem == 0)
    delete_insns_since (last);

  return tem;
}

  emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
  emit_move_insn (target, const1_rtx);
  emit_label (label);

  return target;