/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx));
static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            rtx, int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
                             int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
                                 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT, int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
                                     enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (pattern, opno)
     enum extraction_pattern pattern;
     int opno;
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
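
/* For instance, mode_for_extraction (EP_insv, 3) yields the mode the
   target's insv pattern wants for the inserted value (falling back to
   word_mode when the pattern leaves operand 3 VOIDmode), and yields
   MAX_MACHINE_MODE when the target has no insv pattern at all.  */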

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; else, we use the mode of operand 3.  */
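
/* As an illustration (not from the original source): for a C store
   such as

     struct s { unsigned a : 3; unsigned b : 5; } *p;
     p->b = v;

   the expander arrives here with BITSIZE == 5 and BITNUM == 3 (with
   little-endian bit numbering; the exact BITNUM depends on the
   target's layout), STR_RTX the MEM for *p, and VALUE the rtx for
   v.  */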

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
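
  /* Example: on a 32-bit big-endian target, a field at BITPOS 0 of an
     HImode register lives in the low-order half of the word, so BITPOS
     becomes 0 + (32 - 16) == 16 here.  */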

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           total_size);
        }
      return value;
    }
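
  /* Example: storing a 64-bit field on a 32-bit little-endian target
     makes two trips through the loop above, I == 0 writing bits 0-31
     at BIT_OFFSET 0 and I == 1 writing bits 32-63 at BIT_OFFSET 32,
     each via a recursive word_mode store_bit_field call.  */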

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart (word_mode, value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = GEN_INT (trunc_int_for_mode (INTVAL (value), maxmode));
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
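
/* On a target with an insv pattern, the path above typically collapses
   the store into a single insn of roughly this shape (illustrative
   only):

     (set (zero_extract:SI (reg:SI op0) (const_int 5) (const_int 3))
          (reg:SI value))

   When no such pattern applies, store_fixed_bit_field below does the
   job with explicit masking and shifting.  */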

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     rtx value;
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
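
/* Worked example for the mask-and-or sequence above: storing the
   constant 5 into a 3-bit field at BITPOS 4 of a byte computes

     op0 = (op0 & ~(0x7 << 4)) | (5 << 4)
         = (op0 & 0x8f) | 0x50

   where the AND mask comes from mask_rtx and the shifted constant
   from lshift_value.  */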

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
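
/* Example: on a 32-bit little-endian target, storing a 10-bit field at
   BITPOS 28 of a register pair makes two passes through the loop
   above: first THISSIZE == 4 at THISPOS 28 of word 0 (the low four
   bits of VALUE), then THISSIZE == 6 at THISPOS 0 of word 1 (the
   remaining bits).  */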

/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
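
/* For illustration (not from the original source): reading p->b from
   the struct in the store_bit_field example above arrives here with
   BITSIZE == 5, BITNUM == 3, UNSIGNEDP == 1, MODE/TMODE integer modes
   such as SImode, and TOTAL_SIZE the structure size in bytes.  */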

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  mode1 = (VECTOR_MODE_P (tmode)
           ? mode
           : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0)))
        && GET_MODE_SIZE (mode1) != 0
        && byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
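
  /* Example of the two-shift sign extension just above: a signed
     40-bit field extracted into DImode on a 32-bit target is shifted
     left 64 - 40 == 24 bits and then arithmetic-shifted right 24 bits
     to smear the sign bit.  */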

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}

/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp)
     enum machine_mode tmode;
     rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
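
/* In scalar terms, the unsigned path above computes roughly
   (op0 >> bitpos) & ((1 << bitsize) - 1), while the signed path
   computes (op0 << left) >> left with an arithmetic right shift, the
   shift counts chosen so the field's sign bit first reaches the
   mode's msb.  */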

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
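
/* Worked example: mask_rtx (SImode, 3, 5, 0) builds the mask 0xf8
   (five ones followed by three zeros), and mask_rtx (SImode, 3, 5, 1)
   its SImode complement 0xffffff07.  */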

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
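
/* Worked example: lshift_value (SImode, GEN_INT (0x2d), 8, 4)
   truncates 0x2d to its low four bits (0xd) and shifts left eight,
   yielding the constant 0xd00.  */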
1766 /* Extract a bit field that is split across two words
1767 and return an RTX for the result.
1769 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1770 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1771 UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend. */
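/* For example, with 32-bit words, a 12-bit field starting at bit 26
   is fetched as a 6-bit part at bit 26 of the first word and a 6-bit
   part at bit 0 of the second word; the parts are then shifted into
   place and combined with IOR.  */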
1773 static rtx
1774 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1775 rtx op0;
1776 unsigned HOST_WIDE_INT bitsize, bitpos;
1777 int unsignedp;
1779 unsigned int unit;
1780 unsigned int bitsdone = 0;
1781 rtx result = NULL_RTX;
1782 int first = 1;
1784 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
1785 that much at a time. */
1786 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1787 unit = BITS_PER_WORD;
1788 else
1789 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1791 while (bitsdone < bitsize)
1793 unsigned HOST_WIDE_INT thissize;
1794 rtx part, word;
1795 unsigned HOST_WIDE_INT thispos;
1796 unsigned HOST_WIDE_INT offset;
1798 offset = (bitpos + bitsdone) / unit;
1799 thispos = (bitpos + bitsdone) % unit;
1801 /* THISSIZE must not overrun a word boundary. Otherwise,
1802 extract_fixed_bit_field will call us again, and we will mutually
1803 recurse forever. */
1804 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1805 thissize = MIN (thissize, unit - thispos);
1807 /* If OP0 is a register, then handle OFFSET here.
1809 When handling multiword bitfields, extract_bit_field may pass
1810 down a word_mode SUBREG of a larger REG for a bitfield that actually
1811 crosses a word boundary. Thus, for a SUBREG, we must find
1812 the current word starting from the base register. */
1813 if (GET_CODE (op0) == SUBREG)
1815 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1816 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1817 GET_MODE (SUBREG_REG (op0)));
1818 offset = 0;
1820 else if (GET_CODE (op0) == REG)
1822 word = operand_subword_force (op0, offset, GET_MODE (op0));
1823 offset = 0;
1825 else
1826 word = op0;
1828 /* Extract the parts in bit-counting order,
1829 whose meaning is determined by BYTES_PER_UNIT.
1830 OFFSET is in UNITs, and UNIT is in bits.
1831 extract_fixed_bit_field wants offset in bytes. */
1832 part = extract_fixed_bit_field (word_mode, word,
1833 offset * unit / BITS_PER_UNIT,
1834 thissize, thispos, 0, 1);
1835 bitsdone += thissize;
1837 /* Shift this part into place for the result. */
1838 if (BYTES_BIG_ENDIAN)
1840 if (bitsize != bitsdone)
1841 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1842 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1844 else
1846 if (bitsdone != thissize)
1847 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1848 build_int_2 (bitsdone - thissize, 0), 0, 1);
1851 if (first)
1852 result = part;
1853 else
1854 /* Combine the parts with bitwise or. This works
1855 because we extracted each part as an unsigned bit field. */
1856 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1857 OPTAB_LIB_WIDEN);
1859 first = 0;
1862 /* Unsigned bit field: we are done. */
1863 if (unsignedp)
1864 return result;
1865 /* Signed bit field: sign-extend with two arithmetic shifts. */
1866 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1867 build_int_2 (BITS_PER_WORD - bitsize, 0),
1868 NULL_RTX, 0);
1869 return expand_shift (RSHIFT_EXPR, word_mode, result,
1870 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1873 /* Add INC into TARGET. */
1875 void
1876 expand_inc (target, inc)
1877 rtx target, inc;
1879 rtx value = expand_binop (GET_MODE (target), add_optab,
1880 target, inc,
1881 target, 0, OPTAB_LIB_WIDEN);
1882 if (value != target)
1883 emit_move_insn (target, value);
1886 /* Subtract DEC from TARGET. */
1888 void
1889 expand_dec (target, dec)
1890 rtx target, dec;
1892 rtx value = expand_binop (GET_MODE (target), sub_optab,
1893 target, dec,
1894 target, 0, OPTAB_LIB_WIDEN);
1895 if (value != target)
1896 emit_move_insn (target, value);
1899 /* Output a shift instruction for expression code CODE,
1900 with SHIFTED being the rtx for the value to shift,
1901 and AMOUNT the tree for the amount to shift by.
1902 Store the result in the rtx TARGET, if that is convenient.
1903 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1904 Return the rtx for where the value is. */
1906 rtx
1907 expand_shift (code, mode, shifted, amount, target, unsignedp)
1908 enum tree_code code;
1909 enum machine_mode mode;
1910 rtx shifted;
1911 tree amount;
1912 rtx target;
1913 int unsignedp;
1915 rtx op1, temp = 0;
1916 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1917 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1918 int try;
1920 /* We used to detect shift counts computed by NEGATE_EXPR
1921 and shift in the other direction, but that does not work
1922 on all machines. */
1924 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1926 #ifdef SHIFT_COUNT_TRUNCATED
1927 if (SHIFT_COUNT_TRUNCATED)
1929 if (GET_CODE (op1) == CONST_INT
1930 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1931 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1932 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1933 % GET_MODE_BITSIZE (mode));
1934 else if (GET_CODE (op1) == SUBREG
1935 && SUBREG_BYTE (op1) == 0)
1936 op1 = SUBREG_REG (op1);
1938 #endif
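/* For example, when SHIFT_COUNT_TRUNCATED holds, a constant SImode
   shift count of 37 is reduced to 37 % 32 == 5 before any insns are
   emitted.  */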
1940 if (op1 == const0_rtx)
1941 return shifted;
1943 for (try = 0; temp == 0 && try < 3; try++)
1945 enum optab_methods methods;
1947 if (try == 0)
1948 methods = OPTAB_DIRECT;
1949 else if (try == 1)
1950 methods = OPTAB_WIDEN;
1951 else
1952 methods = OPTAB_LIB_WIDEN;
1954 if (rotate)
1956 /* Widening does not work for rotation. */
1957 if (methods == OPTAB_WIDEN)
1958 continue;
1959 else if (methods == OPTAB_LIB_WIDEN)
1961 /* If we have been unable to open-code this by a rotation,
1962 do it as the IOR of two shifts. I.e., to rotate A
1963 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1964 where C is the bitsize of A.
1966 It is theoretically possible that the target machine might
1967 not be able to perform either shift and hence we would
1968 be making two libcalls rather than just the one for the
1969 shift (similarly if IOR could not be done). We will allow
1970 this extremely unlikely lossage to avoid complicating the
1971 code below. */
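/* In C terms, a left rotate of A by N in a 32-bit mode becomes
   (A << N) | ((unsigned) A >> (32 - N)); making the right shift
   logical keeps sign bits from leaking into the result.  */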
1973 rtx subtarget = target == shifted ? 0 : target;
1974 rtx temp1;
1975 tree type = TREE_TYPE (amount);
1976 tree new_amount = make_tree (type, op1);
1977 tree other_amount
1978 = fold (build (MINUS_EXPR, type,
1979 convert (type,
1980 build_int_2 (GET_MODE_BITSIZE (mode),
1981 0)),
1982 amount));
1984 shifted = force_reg (mode, shifted);
1986 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1987 mode, shifted, new_amount, subtarget, 1);
1988 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1989 mode, shifted, other_amount, 0, 1);
1990 return expand_binop (mode, ior_optab, temp, temp1, target,
1991 unsignedp, methods);
1994 temp = expand_binop (mode,
1995 left ? rotl_optab : rotr_optab,
1996 shifted, op1, target, unsignedp, methods);
1998 /* If we don't have the rotate, but we are rotating by a constant
1999 that is in range, try a rotate in the opposite direction. */
2001 if (temp == 0 && GET_CODE (op1) == CONST_INT
2002 && INTVAL (op1) > 0
2003 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2004 temp = expand_binop (mode,
2005 left ? rotr_optab : rotl_optab,
2006 shifted,
2007 GEN_INT (GET_MODE_BITSIZE (mode)
2008 - INTVAL (op1)),
2009 target, unsignedp, methods);
2011 else if (unsignedp)
2012 temp = expand_binop (mode,
2013 left ? ashl_optab : lshr_optab,
2014 shifted, op1, target, unsignedp, methods);
2016 /* Do arithmetic shifts.
2017 Also, if we are going to widen the operand, we can just as well
2018 use an arithmetic right-shift instead of a logical one. */
2019 if (temp == 0 && ! rotate
2020 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2022 enum optab_methods methods1 = methods;
2024 /* If trying to widen a log shift to an arithmetic shift,
2025 don't accept an arithmetic shift of the same size. */
2026 if (unsignedp)
2027 methods1 = OPTAB_MUST_WIDEN;
2029 /* Arithmetic shift */
2031 temp = expand_binop (mode,
2032 left ? ashl_optab : ashr_optab,
2033 shifted, op1, target, unsignedp, methods1);
2036 /* We used to try extzv here for logical right shifts, but that was
2037 only useful for one machine, the VAX, and caused poor code
2038 generation there for lshrdi3, so the code was deleted and a
2039 define_expand for lshrsi3 was added to vax.md. */
2042 if (temp == 0)
2043 abort ();
2044 return temp;
2047 enum alg_code { alg_zero, alg_m, alg_shift,
2048 alg_add_t_m2, alg_sub_t_m2,
2049 alg_add_factor, alg_sub_factor,
2050 alg_add_t2_m, alg_sub_t2_m,
2051 alg_add, alg_subtract, alg_factor, alg_shiftop };
2053 /* This structure records a sequence of operations.
2054 `ops' is the number of operations recorded.
2055 `cost' is their total cost.
2056 The operations are stored in `op' and the corresponding
2057 logarithms of the integer coefficients in `log'.
2059 These are the operations:
2060 alg_zero total := 0;
2061 alg_m total := multiplicand;
2062 alg_shift total := total * coeff;
2063 alg_add_t_m2 total := total + multiplicand * coeff;
2064 alg_sub_t_m2 total := total - multiplicand * coeff;
2065 alg_add_factor total := total * coeff + total;
2066 alg_sub_factor total := total * coeff - total;
2067 alg_add_t2_m total := total * coeff + multiplicand;
2068 alg_sub_t2_m total := total * coeff - multiplicand;
2070 The first operand must be either alg_zero or alg_m. */
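/* For example, one sequence for multiplying by 53 is alg_m followed
   by three shift-and-add steps: t = (x << 1) + x gives 3x,
   t = (t << 2) + x gives 13x, and t = (t << 2) + x gives 53x.  */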
2072 struct algorithm
2074 short cost;
2075 short ops;
2076 /* The sizes of the OP and LOG fields are not directly related to the
2077 word size, but the worst-case algorithms will be if we have few
2078 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2079 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2080 in total wordsize operations. */
2081 enum alg_code op[MAX_BITS_PER_WORD];
2082 char log[MAX_BITS_PER_WORD];
2085 static void synth_mult PARAMS ((struct algorithm *,
2086 unsigned HOST_WIDE_INT,
2087 int));
2088 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2089 int, int,
2090 unsigned HOST_WIDE_INT *,
2091 int *, int *));
2092 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2093 int));
2094 /* Compute and return the best algorithm for multiplying by T.
2095 The algorithm must cost less than COST_LIMIT.
2096 If retval.cost >= COST_LIMIT, no algorithm was found and all
2097 other fields of the returned struct are undefined. */
2099 static void
2100 synth_mult (alg_out, t, cost_limit)
2101 struct algorithm *alg_out;
2102 unsigned HOST_WIDE_INT t;
2103 int cost_limit;
2105 int m;
2106 struct algorithm *alg_in, *best_alg;
2107 int cost;
2108 unsigned HOST_WIDE_INT q;
2110 /* Indicate that no algorithm is yet found. If no algorithm
2111 is found, this value will be returned and indicate failure. */
2112 alg_out->cost = cost_limit;
2114 if (cost_limit <= 0)
2115 return;
2117 /* t == 1 can be done in zero cost. */
2118 if (t == 1)
2120 alg_out->ops = 1;
2121 alg_out->cost = 0;
2122 alg_out->op[0] = alg_m;
2123 return;
2126 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2127 fail now. */
2128 if (t == 0)
2130 if (zero_cost >= cost_limit)
2131 return;
2132 else
2134 alg_out->ops = 1;
2135 alg_out->cost = zero_cost;
2136 alg_out->op[0] = alg_zero;
2137 return;
2141 /* We'll be needing a couple extra algorithm structures now. */
2143 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2144 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2146 /* If we have a group of zero bits at the low-order part of T, try
2147 multiplying by the remaining bits and then doing a shift. */
2149 if ((t & 1) == 0)
2151 m = floor_log2 (t & -t); /* m = number of low zero bits */
2152 if (m < BITS_PER_WORD)
2154 q = t >> m;
2155 cost = shift_cost[m];
2156 synth_mult (alg_in, q, cost_limit - cost);
2158 cost += alg_in->cost;
2159 if (cost < cost_limit)
2161 struct algorithm *x;
2162 x = alg_in, alg_in = best_alg, best_alg = x;
2163 best_alg->log[best_alg->ops] = m;
2164 best_alg->op[best_alg->ops] = alg_shift;
2165 cost_limit = cost;
2170 /* If we have an odd number, add or subtract one. */
2171 if ((t & 1) != 0)
2173 unsigned HOST_WIDE_INT w;
2175 for (w = 1; (w & t) != 0; w <<= 1)
2177 /* If T was -1, then W will be zero after the loop. This is another
2178 case where T ends with ...111. Handling this with (T + 1) and
2179 subtracting 1 produces slightly better code and results in much
2180 faster algorithm selection than treating it like the ...0111 case
2181 below. */
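/* For example, for t == 15 (binary ...1111) this synthesizes a
   multiply by 16 and a subtraction: x*15 == (x << 4) - x, which is
   shorter than any ...0111-style sequence.  */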
2182 if (w == 0
2183 || (w > 2
2184 /* Reject the case where t is 3.
2185 Thus we prefer addition in that case. */
2186 && t != 3))
2188 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2190 cost = add_cost;
2191 synth_mult (alg_in, t + 1, cost_limit - cost);
2193 cost += alg_in->cost;
2194 if (cost < cost_limit)
2196 struct algorithm *x;
2197 x = alg_in, alg_in = best_alg, best_alg = x;
2198 best_alg->log[best_alg->ops] = 0;
2199 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2200 cost_limit = cost;
2203 else
2205 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2207 cost = add_cost;
2208 synth_mult (alg_in, t - 1, cost_limit - cost);
2210 cost += alg_in->cost;
2211 if (cost < cost_limit)
2213 struct algorithm *x;
2214 x = alg_in, alg_in = best_alg, best_alg = x;
2215 best_alg->log[best_alg->ops] = 0;
2216 best_alg->op[best_alg->ops] = alg_add_t_m2;
2217 cost_limit = cost;
2222 /* Look for factors of t of the form
2223 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2224 If we find such a factor, we can multiply by t using an algorithm that
2225 multiplies by q, shift the result by m and add/subtract it to itself.
2227 We search for large factors first and loop down, even if large factors
2228 are less probable than small; if we find a large factor we will find a
2229 good sequence quickly, and therefore be able to prune (by decreasing
2230 COST_LIMIT) the search. */
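/* For example, t == 45 == 3 * 15 with 15 == 2**4 - 1, so x*45 can be
   computed as t = (x << 1) + x (3x) followed by the alg_sub_factor
   step t = (t << 4) - t (45x).  */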
2232 for (m = floor_log2 (t - 1); m >= 2; m--)
2234 unsigned HOST_WIDE_INT d;
2236 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2237 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2239 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2240 synth_mult (alg_in, t / d, cost_limit - cost);
2242 cost += alg_in->cost;
2243 if (cost < cost_limit)
2245 struct algorithm *x;
2246 x = alg_in, alg_in = best_alg, best_alg = x;
2247 best_alg->log[best_alg->ops] = m;
2248 best_alg->op[best_alg->ops] = alg_add_factor;
2249 cost_limit = cost;
2251 /* Other factors will have been taken care of in the recursion. */
2252 break;
2255 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2256 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2258 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2259 synth_mult (alg_in, t / d, cost_limit - cost);
2261 cost += alg_in->cost;
2262 if (cost < cost_limit)
2264 struct algorithm *x;
2265 x = alg_in, alg_in = best_alg, best_alg = x;
2266 best_alg->log[best_alg->ops] = m;
2267 best_alg->op[best_alg->ops] = alg_sub_factor;
2268 cost_limit = cost;
2270 break;
2274 /* Try shift-and-add (load effective address) instructions,
2275 i.e. do a*3, a*5, a*9. */
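/* For example, for t == 9 this gives x*9 == (x << 3) + x, a single
   shift-and-add that many targets can emit as one
   load-effective-address instruction.  */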
2276 if ((t & 1) != 0)
2278 q = t - 1;
2279 q = q & -q;
2280 m = exact_log2 (q);
2281 if (m >= 0 && m < BITS_PER_WORD)
2283 cost = shiftadd_cost[m];
2284 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2286 cost += alg_in->cost;
2287 if (cost < cost_limit)
2289 struct algorithm *x;
2290 x = alg_in, alg_in = best_alg, best_alg = x;
2291 best_alg->log[best_alg->ops] = m;
2292 best_alg->op[best_alg->ops] = alg_add_t2_m;
2293 cost_limit = cost;
2297 q = t + 1;
2298 q = q & -q;
2299 m = exact_log2 (q);
2300 if (m >= 0 && m < BITS_PER_WORD)
2302 cost = shiftsub_cost[m];
2303 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2305 cost += alg_in->cost;
2306 if (cost < cost_limit)
2308 struct algorithm *x;
2309 x = alg_in, alg_in = best_alg, best_alg = x;
2310 best_alg->log[best_alg->ops] = m;
2311 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2312 cost_limit = cost;
2317 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2318 we have not found any algorithm. */
2319 if (cost_limit == alg_out->cost)
2320 return;
2322 /* If the sequence is too long for `struct algorithm'
2323 to record, make this search fail. */
2324 if (best_alg->ops == MAX_BITS_PER_WORD)
2325 return;
2327 /* Copy the algorithm from temporary space to the space at alg_out.
2328 We avoid using structure assignment because the majority of
2329 best_alg is normally undefined, and this is a critical function. */
2330 alg_out->ops = best_alg->ops + 1;
2331 alg_out->cost = cost_limit;
2332 memcpy (alg_out->op, best_alg->op,
2333 alg_out->ops * sizeof *alg_out->op);
2334 memcpy (alg_out->log, best_alg->log,
2335 alg_out->ops * sizeof *alg_out->log);
2338 /* Perform a multiplication and return an rtx for the result.
2339 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2340 TARGET is a suggestion for where to store the result (an rtx).
2342 We check specially for a constant integer as OP1.
2343 If you want this check for OP0 as well, then before calling
2344 you should swap the two operands if OP0 would be constant. */
2346 rtx
2347 expand_mult (mode, op0, op1, target, unsignedp)
2348 enum machine_mode mode;
2349 rtx op0, op1, target;
2350 int unsignedp;
2352 rtx const_op1 = op1;
2354 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2355 less than or equal in size to `unsigned int' this doesn't matter.
2356 If the mode is larger than `unsigned int', then synth_mult works only
2357 if the constant value exactly fits in an `unsigned int' without any
2358 truncation. This means that multiplying by negative values does
2359 not work; results are off by 2^32 on a 32-bit machine. */
2361 /* If we are multiplying in DImode, it may still be a win
2362 to try to work with shifts and adds. */
2363 if (GET_CODE (op1) == CONST_DOUBLE
2364 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2365 && HOST_BITS_PER_INT >= BITS_PER_WORD
2366 && CONST_DOUBLE_HIGH (op1) == 0)
2367 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2368 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2369 && GET_CODE (op1) == CONST_INT
2370 && INTVAL (op1) < 0)
2371 const_op1 = 0;
2373 /* We used to test optimize here, on the grounds that it's better to
2374 produce a smaller program when -O is not used.
2375 But this causes such a terrible slowdown sometimes
2376 that it seems better to use synth_mult always. */
2378 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2379 && (unsignedp || ! flag_trapv))
2381 struct algorithm alg;
2382 struct algorithm alg2;
2383 HOST_WIDE_INT val = INTVAL (op1);
2384 HOST_WIDE_INT val_so_far;
2385 rtx insn;
2386 int mult_cost;
2387 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2389 /* op0 must be a register to make mult_cost match the precomputed
2390 shiftadd_cost array. */
2391 op0 = force_reg (mode, op0);
2393 /* Try to do the computation three ways: multiply by the negative of OP1
2394 and then negate, do the multiplication directly, or do multiplication
2395 by OP1 - 1. */
2397 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2398 mult_cost = MIN (12 * add_cost, mult_cost);
2400 synth_mult (&alg, val, mult_cost);
2402 /* This works only if the inverted value actually fits in an
2403 `unsigned int'. */
2404 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2406 synth_mult (&alg2, - val,
2407 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2408 if (alg2.cost + negate_cost < alg.cost)
2409 alg = alg2, variant = negate_variant;
2412 /* This proves very useful for division-by-constant. */
2413 synth_mult (&alg2, val - 1,
2414 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2415 if (alg2.cost + add_cost < alg.cost)
2416 alg = alg2, variant = add_variant;
2418 if (alg.cost < mult_cost)
2420 /* We found something cheaper than a multiply insn. */
2421 int opno;
2422 rtx accum, tem;
2423 enum machine_mode nmode;
2425 op0 = protect_from_queue (op0, 0);
2427 /* Avoid referencing memory over and over.
2428 For speed, but also for correctness when mem is volatile. */
2429 if (GET_CODE (op0) == MEM)
2430 op0 = force_reg (mode, op0);
2432 /* ACCUM starts out either as OP0 or as a zero, depending on
2433 the first operation. */
2435 if (alg.op[0] == alg_zero)
2437 accum = copy_to_mode_reg (mode, const0_rtx);
2438 val_so_far = 0;
2440 else if (alg.op[0] == alg_m)
2442 accum = copy_to_mode_reg (mode, op0);
2443 val_so_far = 1;
2445 else
2446 abort ();
2448 for (opno = 1; opno < alg.ops; opno++)
2450 int log = alg.log[opno];
2451 int preserve = preserve_subexpressions_p ();
2452 rtx shift_subtarget = preserve ? 0 : accum;
2453 rtx add_target
2454 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2455 && ! preserve)
2456 ? target : 0;
2457 rtx accum_target = preserve ? 0 : accum;
2459 switch (alg.op[opno])
2461 case alg_shift:
2462 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2463 build_int_2 (log, 0), NULL_RTX, 0);
2464 val_so_far <<= log;
2465 break;
2467 case alg_add_t_m2:
2468 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2469 build_int_2 (log, 0), NULL_RTX, 0);
2470 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2471 add_target
2472 ? add_target : accum_target);
2473 val_so_far += (HOST_WIDE_INT) 1 << log;
2474 break;
2476 case alg_sub_t_m2:
2477 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2478 build_int_2 (log, 0), NULL_RTX, 0);
2479 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2480 add_target
2481 ? add_target : accum_target);
2482 val_so_far -= (HOST_WIDE_INT) 1 << log;
2483 break;
2485 case alg_add_t2_m:
2486 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2487 build_int_2 (log, 0), shift_subtarget,
2488 0);
2489 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2490 add_target
2491 ? add_target : accum_target);
2492 val_so_far = (val_so_far << log) + 1;
2493 break;
2495 case alg_sub_t2_m:
2496 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2497 build_int_2 (log, 0), shift_subtarget,
2498 0);
2499 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2500 add_target
2501 ? add_target : accum_target);
2502 val_so_far = (val_so_far << log) - 1;
2503 break;
2505 case alg_add_factor:
2506 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2507 build_int_2 (log, 0), NULL_RTX, 0);
2508 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2509 add_target
2510 ? add_target : accum_target);
2511 val_so_far += val_so_far << log;
2512 break;
2514 case alg_sub_factor:
2515 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2516 build_int_2 (log, 0), NULL_RTX, 0);
2517 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2518 (add_target ? add_target
2519 : preserve ? 0 : tem));
2520 val_so_far = (val_so_far << log) - val_so_far;
2521 break;
2523 default:
2524 abort ();
2527 /* Write a REG_EQUAL note on the last insn so that we can cse
2528 multiplication sequences. Note that if ACCUM is a SUBREG,
2529 we've set the inner register and must properly indicate
2530 that. */
2532 tem = op0, nmode = mode;
2533 if (GET_CODE (accum) == SUBREG)
2535 nmode = GET_MODE (SUBREG_REG (accum));
2536 tem = gen_lowpart (nmode, op0);
2539 insn = get_last_insn ();
2540 set_unique_reg_note (insn,
2541 REG_EQUAL,
2542 gen_rtx_MULT (nmode, tem,
2543 GEN_INT (val_so_far)));
2546 if (variant == negate_variant)
2548 val_so_far = - val_so_far;
2549 accum = expand_unop (mode, neg_optab, accum, target, 0);
2551 else if (variant == add_variant)
2553 val_so_far = val_so_far + 1;
2554 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2557 if (val != val_so_far)
2558 abort ();
2560 return accum;
2564 /* This used to use umul_optab if unsigned, but for non-widening multiply
2565 there is no difference between signed and unsigned. */
2566 op0 = expand_binop (mode,
2567 ! unsignedp
2568 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2569 ? smulv_optab : smul_optab,
2570 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2571 if (op0 == 0)
2572 abort ();
2573 return op0;
2576 /* Return the smallest n such that 2**n >= X. */
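/* For example, ceil_log2 (5) == 3 and ceil_log2 (8) == 3.  */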
2578 int
2579 ceil_log2 (x)
2580 unsigned HOST_WIDE_INT x;
2582 return floor_log2 (x - 1) + 1;
2585 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2586 replace division by D, and put the least significant N bits of the result
2587 in *MULTIPLIER_PTR and return the most significant bit.
2589 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2590 needed precision is in PRECISION (should be <= N).
2592 PRECISION should be as small as possible so this function can choose a
2593 multiplier more freely.
2595 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2596 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2598 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2599 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
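/* For example, for d == 3 with N == PRECISION == 32 this chooses the
   multiplier 0xAAAAAAAB with *POST_SHIFT_PTR == 1 and returns 0, so
   an unsigned 32-bit x/3 is the high 32 bits of x * 0xAAAAAAAB,
   shifted right by 1.  */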
2601 static
2602 unsigned HOST_WIDE_INT
2603 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2604 unsigned HOST_WIDE_INT d;
2605 int n;
2606 int precision;
2607 unsigned HOST_WIDE_INT *multiplier_ptr;
2608 int *post_shift_ptr;
2609 int *lgup_ptr;
2611 HOST_WIDE_INT mhigh_hi, mlow_hi;
2612 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2613 int lgup, post_shift;
2614 int pow, pow2;
2615 unsigned HOST_WIDE_INT nl, dummy1;
2616 HOST_WIDE_INT nh, dummy2;
2618 /* lgup = ceil(log2(divisor)); */
2619 lgup = ceil_log2 (d);
2621 if (lgup > n)
2622 abort ();
2624 pow = n + lgup;
2625 pow2 = n + lgup - precision;
2627 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2629 /* We could handle this with some effort, but this case is much better
2630 handled directly with a scc insn, so rely on caller using that. */
2631 abort ();
2634 /* mlow = 2^(N + lgup)/d */
2635 if (pow >= HOST_BITS_PER_WIDE_INT)
2637 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2638 nl = 0;
2640 else
2642 nh = 0;
2643 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2645 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2646 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2648 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2649 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2650 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2651 else
2652 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2653 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2654 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2656 if (mhigh_hi && nh - d >= d)
2657 abort ();
2658 if (mhigh_hi > 1 || mlow_hi > 1)
2659 abort ();
2660 /* assert that mlow < mhigh. */
2661 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2662 abort ();
2664 /* If precision == N, then mlow, mhigh exceed 2^N
2665 (but they do not exceed 2^(N+1)). */
2667 /* Reduce to lowest terms */
2668 for (post_shift = lgup; post_shift > 0; post_shift--)
2670 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2671 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2672 if (ml_lo >= mh_lo)
2673 break;
2675 mlow_hi = 0;
2676 mlow_lo = ml_lo;
2677 mhigh_hi = 0;
2678 mhigh_lo = mh_lo;
2681 *post_shift_ptr = post_shift;
2682 *lgup_ptr = lgup;
2683 if (n < HOST_BITS_PER_WIDE_INT)
2685 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2686 *multiplier_ptr = mhigh_lo & mask;
2687 return mhigh_lo >= mask;
2689 else
2691 *multiplier_ptr = mhigh_lo;
2692 return mhigh_hi;
2696 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2697 congruent to 1 (mod 2**N). */
2699 static unsigned HOST_WIDE_INT
2700 invert_mod2n (x, n)
2701 unsigned HOST_WIDE_INT x;
2702 int n;
2704 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2706 /* The algorithm notes that the choice y = x satisfies
2707 x*y == 1 mod 2^3, since x is assumed odd.
2708 Each iteration doubles the number of bits of significance in y. */
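/* For example, invert_mod2n (7, 8) == 183: starting from y = 7
   (correct mod 8, since 7*7 == 49 == 6*8 + 1), the iteration
   y = y * (2 - 7*y) & 0xff reaches 183, and indeed
   7*183 == 1281 == 5*256 + 1.  */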
2710 unsigned HOST_WIDE_INT mask;
2711 unsigned HOST_WIDE_INT y = x;
2712 int nbit = 3;
2714 mask = (n == HOST_BITS_PER_WIDE_INT
2715 ? ~(unsigned HOST_WIDE_INT) 0
2716 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2718 while (nbit < n)
2720 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2721 nbit *= 2;
2723 return y;
2726 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2727 with the wrong signedness flavor. ADJ_OPERAND is already the high half of the
2728 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2729 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2730 become signed.
2732 The result is put in TARGET if that is convenient.
2734 MODE is the mode of operation. */
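/* The identity used below: for N-bit operands,
   uhigh (x, y) == shigh (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)
   modulo 2**N. Each conditional addend is formed by arithmetic
   right-shifting one operand by N-1 bits and ANDing with the other.  */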
2736 rtx
2737 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2738 enum machine_mode mode;
2739 rtx adj_operand, op0, op1, target;
2740 int unsignedp;
2742 rtx tem;
2743 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2745 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2746 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2747 NULL_RTX, 0);
2748 tem = expand_and (mode, tem, op1, NULL_RTX);
2749 adj_operand
2750 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2751 adj_operand);
2753 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2754 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2755 NULL_RTX, 0);
2756 tem = expand_and (mode, tem, op0, NULL_RTX);
2757 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2758 target);
2760 return target;
2763 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2764 in TARGET if that is convenient, and return where the result is. If the
2765 operation cannot be performed, 0 is returned.
2767 MODE is the mode of operation and result.
2769 UNSIGNEDP nonzero means unsigned multiply.
2771 MAX_COST is the total allowed cost for the expanded RTL. */
2773 rtx
2774 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2775 enum machine_mode mode;
2776 rtx op0, target;
2777 unsigned HOST_WIDE_INT cnst1;
2778 int unsignedp;
2779 int max_cost;
2781 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2782 optab mul_highpart_optab;
2783 optab moptab;
2784 rtx tem;
2785 int size = GET_MODE_BITSIZE (mode);
2786 rtx op1, wide_op1;
2788 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2789 if (size > HOST_BITS_PER_WIDE_INT)
2790 abort ();
2792 op1 = GEN_INT (trunc_int_for_mode (cnst1, mode));
2794 wide_op1
2795 = immed_double_const (cnst1,
2796 (unsignedp
2797 ? (HOST_WIDE_INT) 0
2798 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2799 wider_mode);
2801 /* expand_mult handles constant multiplication of word_mode
2802 or narrower. It does a poor job for large modes. */
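/* In effect, for a 16-bit mode on a 32-bit host this computes
   (unsigned short) (((unsigned int) x * (unsigned int) c) >> 16).  */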
2803 if (size < BITS_PER_WORD
2804 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2806 /* We have to do this, since expand_binop doesn't do conversion for
2807 multiply. Maybe change expand_binop to handle widening multiply? */
2808 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2810 /* We know that this can't have signed overflow, so pretend this is
2811 an unsigned multiply. */
2812 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2813 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2814 build_int_2 (size, 0), NULL_RTX, 1);
2815 return convert_modes (mode, wider_mode, tem, unsignedp);
2818 if (target == 0)
2819 target = gen_reg_rtx (mode);
2821 /* Firstly, try using a multiplication insn that only generates the needed
2822 high part of the product, and in the sign flavor of unsignedp. */
2823 if (mul_highpart_cost[(int) mode] < max_cost)
2825 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2826 target = expand_binop (mode, mul_highpart_optab,
2827 op0, op1, target, unsignedp, OPTAB_DIRECT);
2828 if (target)
2829 return target;
2832 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2833 Need to adjust the result after the multiplication. */
2834 if (size - 1 < BITS_PER_WORD
2835 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2836 < max_cost))
2838 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2839 target = expand_binop (mode, mul_highpart_optab,
2840 op0, op1, target, unsignedp, OPTAB_DIRECT);
2841 if (target)
2842 /* We used the wrong signedness. Adjust the result. */
2843 return expand_mult_highpart_adjust (mode, target, op0,
2844 op1, target, unsignedp);
2847 /* Try widening multiplication. */
2848 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2849 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2850 && mul_widen_cost[(int) wider_mode] < max_cost)
2852 op1 = force_reg (mode, op1);
2853 goto try;
2856 /* Try widening the mode and perform a non-widening multiplication. */
2857 moptab = smul_optab;
2858 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2859 && size - 1 < BITS_PER_WORD
2860 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2862 op1 = wide_op1;
2863 goto try;
2866 /* Try widening multiplication of opposite signedness, and adjust. */
2867 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2868 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2869 && size - 1 < BITS_PER_WORD
2870 && (mul_widen_cost[(int) wider_mode]
2871 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2873 rtx regop1 = force_reg (mode, op1);
2874 tem = expand_binop (wider_mode, moptab, op0, regop1,
2875 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2876 if (tem != 0)
2878 /* Extract the high half of the just generated product. */
2879 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2880 build_int_2 (size, 0), NULL_RTX, 1);
2881 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2882 /* We used the wrong signedness. Adjust the result. */
2883 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2884 target, unsignedp);
2888 return 0;
2890 try:
2891 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2892 tem = expand_binop (wider_mode, moptab, op0, op1,
2893 NULL_RTX, unsignedp, OPTAB_WIDEN);
2894 if (tem == 0)
2895 return 0;
2897 /* Extract the high half of the just generated product. */
2898 if (mode == word_mode)
2900 return gen_highpart (mode, tem);
2902 else
2904 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2905 build_int_2 (size, 0), NULL_RTX, 1);
2906 return convert_modes (mode, wider_mode, tem, unsignedp);
2910 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2911 if that is convenient, and returning where the result is.
2912 You may request either the quotient or the remainder as the result;
2913 specify REM_FLAG nonzero to get the remainder.
2915 CODE is the expression code for which kind of division this is;
2916 it controls how rounding is done. MODE is the machine mode to use.
2917 UNSIGNEDP nonzero means do unsigned division. */
2919 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2920 and then correct it by or'ing in missing high bits
2921 if result of ANDI is nonzero.
2922 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2923 This could optimize to a bfexts instruction.
2924 But C doesn't use these operations, so their optimizations are
2925 left for later. */
2926 /* ??? For modulo, we don't actually need the highpart of the first product;
2927 the low part will do nicely. And for small divisors, the second multiply
2928 can also be a low-part only multiply or even be completely left out.
2929 E.g. to calculate the remainder of a division by 3 with a 32 bit
2930 multiply, multiply with 0x55555556 and extract the upper two bits;
2931 the result is exact for inputs up to 0x1fffffff.
2932 The input range can be reduced by using cross-sum rules.
2933 For odd divisors >= 3, the following table gives right shift counts
2934 so that if a number is shifted by an integer multiple of the given
2935 amount, the remainder stays the same:
2936 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2937 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2938 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2939 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2940 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2942 Cross-sum rules for even numbers can be derived by leaving as many bits
2943 to the right alone as the divisor has zeros to the right.
2944 E.g. if x is an unsigned 32 bit number:
2945 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2946 */
2948 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2950 rtx
2951 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2952 int rem_flag;
2953 enum tree_code code;
2954 enum machine_mode mode;
2955 rtx op0, op1, target;
2956 int unsignedp;
2958 enum machine_mode compute_mode;
2959 rtx tquotient;
2960 rtx quotient = 0, remainder = 0;
2961 rtx last;
2962 int size;
2963 rtx insn, set;
2964 optab optab1, optab2;
2965 int op1_is_constant, op1_is_pow2;
2966 int max_cost, extra_cost;
2967 static HOST_WIDE_INT last_div_const = 0;
2969 op1_is_constant = GET_CODE (op1) == CONST_INT;
2970 op1_is_pow2 = (op1_is_constant
2971 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2972 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2974 /*
2975 This is the structure of expand_divmod:
2977 First comes code to fix up the operands so we can perform the operations
2978 correctly and efficiently.
2980 Second comes a switch statement with code specific for each rounding mode.
2981 For some special operands this code emits all RTL for the desired
2982 operation, for other cases, it generates only a quotient and stores it in
2983 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2984 to indicate that it has not done anything.
2986 Last comes code that finishes the operation. If QUOTIENT is set and
2987 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2988 QUOTIENT is not set, it is computed using trunc rounding.
2990 We try to generate special code for division and remainder when OP1 is a
2991 constant. If |OP1| = 2**n we can use shifts and some other fast
2992 operations. For other values of OP1, we compute a carefully selected
2993 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2994 by m.
2996 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2997 half of the product. Different strategies for generating the product are
2998 implemented in expand_mult_highpart.
3000 If what we actually want is the remainder, we generate that by another
3001 by-constant multiplication and a subtraction. */
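/* For example, x % 3 is computed as x - (x/3)*3, with x/3 itself
   obtained by a multiply-by-reciprocal sequence.  */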
3003 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3004 code below will malfunction if we are, so check here and handle
3005 the special case if so. */
3006 if (op1 == const1_rtx)
3007 return rem_flag ? const0_rtx : op0;
3009 /* When dividing by -1, we could get an overflow.
3010 negv_optab can handle overflows. */
3011 if (! unsignedp && op1 == constm1_rtx)
3013 if (rem_flag)
3014 return const0_rtx;
3015 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3016 ? negv_optab : neg_optab, op0, target, 0);
3019 if (target
3020 /* Don't use the function value register as a target
3021 since we have to read it as well as write it,
3022 and function-inlining gets confused by this. */
3023 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3024 /* Don't clobber an operand while doing a multi-step calculation. */
3025 || ((rem_flag || op1_is_constant)
3026 && (reg_mentioned_p (target, op0)
3027 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3028 || reg_mentioned_p (target, op1)
3029 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3030 target = 0;
3032 /* Get the mode in which to perform this computation. Normally it will
3033 be MODE, but sometimes we can't do the desired operation in MODE.
3034 If so, pick a wider mode in which we can do the operation. Convert
3035 to that mode at the start to avoid repeated conversions.
3037 First see what operations we need. These depend on the expression
3038 we are evaluating. (We assume that divxx3 insns exist under the
3039 same conditions that modxx3 insns and that these insns don't normally
3040 fail. If these assumptions are not correct, we may generate less
3041 efficient code in some cases.)
3043 Then see if we find a mode in which we can open-code that operation
3044 (either a division, modulus, or shift). Finally, check for the smallest
3045 mode for which we can do the operation with a library call. */
3047 /* We might want to refine this now that we have division-by-constant
3048 optimization. Since expand_mult_highpart tries so many variants, it is
3049 not straightforward to generalize this. Maybe we should make an array
3050 of possible modes in init_expmed? Save this for GCC 2.7. */
3052 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3053 ? (unsignedp ? lshr_optab : ashr_optab)
3054 : (unsignedp ? udiv_optab : sdiv_optab));
3055 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3056 ? optab1
3057 : (unsignedp ? udivmod_optab : sdivmod_optab));
3059 for (compute_mode = mode; compute_mode != VOIDmode;
3060 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3061 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3062 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3063 break;
3065 if (compute_mode == VOIDmode)
3066 for (compute_mode = mode; compute_mode != VOIDmode;
3067 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3068 if (optab1->handlers[(int) compute_mode].libfunc
3069 || optab2->handlers[(int) compute_mode].libfunc)
3070 break;
3072 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3073 in expand_binop. */
3074 if (compute_mode == VOIDmode)
3075 compute_mode = mode;
3077 if (target && GET_MODE (target) == compute_mode)
3078 tquotient = target;
3079 else
3080 tquotient = gen_reg_rtx (compute_mode);
3082 size = GET_MODE_BITSIZE (compute_mode);
3083 #if 0
3084 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3085 (mode), and thereby get better code when OP1 is a constant. Do that
3086 later. It will require going over all usages of SIZE below. */
3087 size = GET_MODE_BITSIZE (mode);
3088 #endif
3090 /* Only deduct something for a REM if the last divide done was
3091 for a different constant. Then set the constant of the last
3092 divide. */
3093 max_cost = div_cost[(int) compute_mode]
3094 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3095 && INTVAL (op1) == last_div_const)
3096 ? mul_cost[(int) compute_mode] + add_cost : 0);
3098 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3100 /* Now convert to the best mode to use. */
3101 if (compute_mode != mode)
3103 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3104 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3106 /* convert_modes may have placed op1 into a register, so we
3107 must recompute the following. */
3108 op1_is_constant = GET_CODE (op1) == CONST_INT;
3109 op1_is_pow2 = (op1_is_constant
3110 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3111 || (! unsignedp
3112 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3115 /* If one of the operands is a volatile MEM, copy it into a register. */
3117 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3118 op0 = force_reg (compute_mode, op0);
3119 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3120 op1 = force_reg (compute_mode, op1);
3122 /* If we need the remainder or if OP1 is constant, we need to
3123 put OP0 in a register in case it has any queued subexpressions. */
3124 if (rem_flag || op1_is_constant)
3125 op0 = force_reg (compute_mode, op0);
3127 last = get_last_insn ();
3129 /* Promote floor rounding to trunc rounding for unsigned operations. */
3130 if (unsignedp)
3132 if (code == FLOOR_DIV_EXPR)
3133 code = TRUNC_DIV_EXPR;
3134 if (code == FLOOR_MOD_EXPR)
3135 code = TRUNC_MOD_EXPR;
3136 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3137 code = TRUNC_DIV_EXPR;
3140 if (op1 != const0_rtx)
3141 switch (code)
3143 case TRUNC_MOD_EXPR:
3144 case TRUNC_DIV_EXPR:
3145 if (op1_is_constant)
3147 if (unsignedp)
3149 unsigned HOST_WIDE_INT mh, ml;
3150 int pre_shift, post_shift;
3151 int dummy;
3152 unsigned HOST_WIDE_INT d = INTVAL (op1);
3154 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3156 pre_shift = floor_log2 (d);
3157 if (rem_flag)
3159 remainder
3160 = expand_binop (compute_mode, and_optab, op0,
3161 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3162 remainder, 1,
3163 OPTAB_LIB_WIDEN);
3164 if (remainder)
3165 return gen_lowpart (mode, remainder);
3167 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3168 build_int_2 (pre_shift, 0),
3169 tquotient, 1);
3171 else if (size <= HOST_BITS_PER_WIDE_INT)
3173 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3175 /* Most significant bit of divisor is set; emit an scc
3176 insn. */
3177 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3178 compute_mode, 1, 1);
3179 if (quotient == 0)
3180 goto fail1;
3182 else
3184 /* Find a suitable multiplier and right shift count
3185 instead of multiplying with D. */
3187 mh = choose_multiplier (d, size, size,
3188 &ml, &post_shift, &dummy);
3190 /* If the suggested multiplier is more than SIZE bits,
3191 we can do better for even divisors, using an
3192 initial right shift. */
3193 if (mh != 0 && (d & 1) == 0)
3195 pre_shift = floor_log2 (d & -d);
3196 mh = choose_multiplier (d >> pre_shift, size,
3197 size - pre_shift,
3198 &ml, &post_shift, &dummy);
3199 if (mh)
3200 abort ();
3202 else
3203 pre_shift = 0;
3205 if (mh != 0)
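/* The multiplier needs SIZE + 1 bits here; the missing top bit of
   the product is reconstructed as t1 + ((op0 - t1) >> 1). E.g. for
   d == 7 and SIZE == 32: ml == 0x24924925, post_shift == 3, and the
   quotient is (((x - t1) >> 1) + t1) >> 2, where t1 is the high half
   of x * 0x24924925.  */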
3207 rtx t1, t2, t3, t4;
3209 if (post_shift - 1 >= BITS_PER_WORD)
3210 goto fail1;
3212 extra_cost = (shift_cost[post_shift - 1]
3213 + shift_cost[1] + 2 * add_cost);
3214 t1 = expand_mult_highpart (compute_mode, op0, ml,
3215 NULL_RTX, 1,
3216 max_cost - extra_cost);
3217 if (t1 == 0)
3218 goto fail1;
3219 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3220 op0, t1),
3221 NULL_RTX);
3222 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3223 build_int_2 (1, 0), NULL_RTX, 1);
3224 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3225 t1, t3),
3226 NULL_RTX);
3227 quotient
3228 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3229 build_int_2 (post_shift - 1, 0),
3230 tquotient, 1);
3232 else
3234 rtx t1, t2;
3236 if (pre_shift >= BITS_PER_WORD
3237 || post_shift >= BITS_PER_WORD)
3238 goto fail1;
3240 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3241 build_int_2 (pre_shift, 0),
3242 NULL_RTX, 1);
3243 extra_cost = (shift_cost[pre_shift]
3244 + shift_cost[post_shift]);
3245 t2 = expand_mult_highpart (compute_mode, t1, ml,
3246 NULL_RTX, 1,
3247 max_cost - extra_cost);
3248 if (t2 == 0)
3249 goto fail1;
3250 quotient
3251 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3252 build_int_2 (post_shift, 0),
3253 tquotient, 1);
3257 else /* Mode too wide for the tricky code */
3258 break;
3260 insn = get_last_insn ();
3261 if (insn != last
3262 && (set = single_set (insn)) != 0
3263 && SET_DEST (set) == quotient)
3264 set_unique_reg_note (insn,
3265 REG_EQUAL,
3266 gen_rtx_UDIV (compute_mode, op0, op1));
3268 else /* TRUNC_DIV, signed */
3270 unsigned HOST_WIDE_INT ml;
3271 int lgup, post_shift;
3272 HOST_WIDE_INT d = INTVAL (op1);
3273 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3275 /* n rem d = n rem -d */
3276 if (rem_flag && d < 0)
3278 d = abs_d;
3279 op1 = GEN_INT (trunc_int_for_mode (abs_d, compute_mode));
3282 if (d == 1)
3283 quotient = op0;
3284 else if (d == -1)
3285 quotient = expand_unop (compute_mode, neg_optab, op0,
3286 tquotient, 0);
3287 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3289 /* This case is not handled correctly below. */
3290 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3291 compute_mode, 1, 1);
3292 if (quotient == 0)
3293 goto fail1;
3295 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3296 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3297 /* ??? The cheap metric is computed only for
3298 word_mode. If this operation is wider, this may
3299 not be so. Assume true if the optab has an
3300 expander for this mode. */
3301 && (((rem_flag ? smod_optab : sdiv_optab)
3302 ->handlers[(int) compute_mode].insn_code
3303 != CODE_FOR_nothing)
3304 || (sdivmod_optab->handlers[(int) compute_mode]
3305 .insn_code != CODE_FOR_nothing)))
3306 ;
3307 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
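/* Signed division by 2**lgup rounds toward zero by adding abs_d - 1
   to negative dividends before the arithmetic shift. Branch-free,
   e.g. for x/4 on a 32-bit machine:
   q = (x + ((unsigned) (x >> 31) >> 30)) >> 2.  */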
3309 lgup = floor_log2 (abs_d);
3310 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3312 rtx label = gen_label_rtx ();
3313 rtx t1;
3315 t1 = copy_to_mode_reg (compute_mode, op0);
3316 do_cmp_and_jump (t1, const0_rtx, GE,
3317 compute_mode, label);
3318 expand_inc (t1, GEN_INT (trunc_int_for_mode
3319 (abs_d - 1, compute_mode)));
3320 emit_label (label);
3321 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3322 build_int_2 (lgup, 0),
3323 tquotient, 0);
3325 else
3327 rtx t1, t2, t3;
3328 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3329 build_int_2 (size - 1, 0),
3330 NULL_RTX, 0);
3331 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3332 build_int_2 (size - lgup, 0),
3333 NULL_RTX, 1);
3334 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3335 op0, t2),
3336 NULL_RTX);
3337 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3338 build_int_2 (lgup, 0),
3339 tquotient, 0);
3342 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3343 the quotient. */
3344 if (d < 0)
3346 insn = get_last_insn ();
3347 if (insn != last
3348 && (set = single_set (insn)) != 0
3349 && SET_DEST (set) == quotient
3350 && abs_d < ((unsigned HOST_WIDE_INT) 1
3351 << (HOST_BITS_PER_WIDE_INT - 1)))
3352 set_unique_reg_note (insn,
3353 REG_EQUAL,
3354 gen_rtx_DIV (compute_mode,
3355 op0,
3356 GEN_INT
3357 (trunc_int_for_mode
3358 (abs_d,
3359 compute_mode))));
3361 quotient = expand_unop (compute_mode, neg_optab,
3362 quotient, quotient, 0);
3365 else if (size <= HOST_BITS_PER_WIDE_INT)
3367 choose_multiplier (abs_d, size, size - 1,
3368 &ml, &post_shift, &lgup);
3369 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3371 rtx t1, t2, t3;
3373 if (post_shift >= BITS_PER_WORD
3374 || size - 1 >= BITS_PER_WORD)
3375 goto fail1;
3377 extra_cost = (shift_cost[post_shift]
3378 + shift_cost[size - 1] + add_cost);
3379 t1 = expand_mult_highpart (compute_mode, op0, ml,
3380 NULL_RTX, 0,
3381 max_cost - extra_cost);
3382 if (t1 == 0)
3383 goto fail1;
3384 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3385 build_int_2 (post_shift, 0), NULL_RTX, 0);
3386 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3387 build_int_2 (size - 1, 0), NULL_RTX, 0);
3388 if (d < 0)
3389 quotient
3390 = force_operand (gen_rtx_MINUS (compute_mode,
3391 t3, t2),
3392 tquotient);
3393 else
3394 quotient
3395 = force_operand (gen_rtx_MINUS (compute_mode,
3396 t2, t3),
3397 tquotient);
3399 else
3401 rtx t1, t2, t3, t4;
3403 if (post_shift >= BITS_PER_WORD
3404 || size - 1 >= BITS_PER_WORD)
3405 goto fail1;
3407 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3408 extra_cost = (shift_cost[post_shift]
3409 + shift_cost[size - 1] + 2 * add_cost);
3410 t1 = expand_mult_highpart (compute_mode, op0, ml,
3411 NULL_RTX, 0,
3412 max_cost - extra_cost);
3413 if (t1 == 0)
3414 goto fail1;
3415 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3416 t1, op0),
3417 NULL_RTX);
3418 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3419 build_int_2 (post_shift, 0),
3420 NULL_RTX, 0);
3421 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3422 build_int_2 (size - 1, 0),
3423 NULL_RTX, 0);
3424 if (d < 0)
3425 quotient
3426 = force_operand (gen_rtx_MINUS (compute_mode,
3427 t4, t3),
3428 tquotient);
3429 else
3430 quotient
3431 = force_operand (gen_rtx_MINUS (compute_mode,
3432 t3, t4),
3433 tquotient);
3436 else /* Mode too wide for the tricky code */
3437 break;
3439 insn = get_last_insn ();
3440 if (insn != last
3441 && (set = single_set (insn)) != 0
3442 && SET_DEST (set) == quotient)
3443 set_unique_reg_note (insn,
3444 REG_EQUAL,
3445 gen_rtx_DIV (compute_mode, op0, op1));
3447 break;
3449 fail1:
3450 delete_insns_since (last);
3451 break;
3453 case FLOOR_DIV_EXPR:
3454 case FLOOR_MOD_EXPR:
3455 /* We will come here only for signed operations. */
3456 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3458 unsigned HOST_WIDE_INT mh, ml;
3459 int pre_shift, lgup, post_shift;
3460 HOST_WIDE_INT d = INTVAL (op1);
3462 if (d > 0)
3464 /* We could just as easily deal with negative constants here,
3465 but it does not seem worth the trouble for GCC 2.6. */
3466 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3468 pre_shift = floor_log2 (d);
3469 if (rem_flag)
3471 remainder = expand_binop (compute_mode, and_optab, op0,
3472 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3473 remainder, 0, OPTAB_LIB_WIDEN);
3474 if (remainder)
3475 return gen_lowpart (mode, remainder);
3477 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3478 build_int_2 (pre_shift, 0),
3479 tquotient, 0);
3481 else
3483 rtx t1, t2, t3, t4;
3485 mh = choose_multiplier (d, size, size - 1,
3486 &ml, &post_shift, &lgup);
3487 if (mh)
3488 abort ();
3490 if (post_shift < BITS_PER_WORD
3491 && size - 1 < BITS_PER_WORD)
3493 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3494 build_int_2 (size - 1, 0),
3495 NULL_RTX, 0);
3496 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3497 NULL_RTX, 0, OPTAB_WIDEN);
3498 extra_cost = (shift_cost[post_shift]
3499 + shift_cost[size - 1] + 2 * add_cost);
3500 t3 = expand_mult_highpart (compute_mode, t2, ml,
3501 NULL_RTX, 1,
3502 max_cost - extra_cost);
3503 if (t3 != 0)
3505 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3506 build_int_2 (post_shift, 0),
3507 NULL_RTX, 1);
3508 quotient = expand_binop (compute_mode, xor_optab,
3509 t4, t1, tquotient, 0,
3510 OPTAB_WIDEN);
3515 else
3517 rtx nsign, t1, t2, t3, t4;
3518 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3519 op0, constm1_rtx), NULL_RTX);
3520 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3521 0, OPTAB_WIDEN);
3522 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3523 build_int_2 (size - 1, 0), NULL_RTX, 0);
3524 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3525 NULL_RTX);
3526 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3527 NULL_RTX, 0);
3528 if (t4)
3530 rtx t5;
3531 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3532 NULL_RTX, 0);
3533 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3534 t4, t5),
3535 tquotient);
3540 if (quotient != 0)
3541 break;
3542 delete_insns_since (last);
3544 /* Try using an instruction that produces both the quotient and
3545 remainder, using truncation. We can easily compensate the quotient
3546 or remainder to get floor rounding, once we have the remainder.
3547 Notice that we also compute the final remainder value here,
3548 and return the result right away. */
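/* For example, -7 / 2 truncates to -3 with remainder -1; since the
   remainder is nonzero and the operand signs differ, the adjustment
   below yields the floor result -4 with remainder 1.  */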
3549 if (target == 0 || GET_MODE (target) != compute_mode)
3550 target = gen_reg_rtx (compute_mode);
3552 if (rem_flag)
3554 remainder
3555 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3556 quotient = gen_reg_rtx (compute_mode);
3558 else
3560 quotient
3561 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3562 remainder = gen_reg_rtx (compute_mode);
3565 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3566 quotient, remainder, 0))
3568 /* This could be computed with a branch-less sequence.
3569 Save that for later. */
3570 rtx tem;
3571 rtx label = gen_label_rtx ();
3572 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3573 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3574 NULL_RTX, 0, OPTAB_WIDEN);
3575 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3576 expand_dec (quotient, const1_rtx);
3577 expand_inc (remainder, op1);
3578 emit_label (label);
3579 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3582 /* No luck with division elimination or divmod. Have to do it
3583 by conditionally adjusting op0 *and* the result. */
3585 rtx label1, label2, label3, label4, label5;
3586 rtx adjusted_op0;
3587 rtx tem;
3589 quotient = gen_reg_rtx (compute_mode);
3590 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3591 label1 = gen_label_rtx ();
3592 label2 = gen_label_rtx ();
3593 label3 = gen_label_rtx ();
3594 label4 = gen_label_rtx ();
3595 label5 = gen_label_rtx ();
3596 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3597 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3598 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3599 quotient, 0, OPTAB_LIB_WIDEN);
3600 if (tem != quotient)
3601 emit_move_insn (quotient, tem);
3602 emit_jump_insn (gen_jump (label5));
3603 emit_barrier ();
3604 emit_label (label1);
3605 expand_inc (adjusted_op0, const1_rtx);
3606 emit_jump_insn (gen_jump (label4));
3607 emit_barrier ();
3608 emit_label (label2);
3609 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3610 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3611 quotient, 0, OPTAB_LIB_WIDEN);
3612 if (tem != quotient)
3613 emit_move_insn (quotient, tem);
3614 emit_jump_insn (gen_jump (label5));
3615 emit_barrier ();
3616 emit_label (label3);
3617 expand_dec (adjusted_op0, const1_rtx);
3618 emit_label (label4);
3619 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3620 quotient, 0, OPTAB_LIB_WIDEN);
3621 if (tem != quotient)
3622 emit_move_insn (quotient, tem);
3623 expand_dec (quotient, const1_rtx);
3624 emit_label (label5);
3626 break;
3628 case CEIL_DIV_EXPR:
3629 case CEIL_MOD_EXPR:
3630 if (unsignedp)
3632 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3634 rtx t1, t2, t3;
3635 unsigned HOST_WIDE_INT d = INTVAL (op1);
3636 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3637 build_int_2 (floor_log2 (d), 0),
3638 tquotient, 1);
3639 t2 = expand_binop (compute_mode, and_optab, op0,
3640 GEN_INT (d - 1),
3641 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3642 t3 = gen_reg_rtx (compute_mode);
3643 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3644 compute_mode, 1, 1);
3645 if (t3 == 0)
3647 rtx lab;
3648 lab = gen_label_rtx ();
3649 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3650 expand_inc (t1, const1_rtx);
3651 emit_label (lab);
3652 quotient = t1;
3654 else
3655 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3656 t1, t3),
3657 tquotient);
3658 break;
3661 /* Try using an instruction that produces both the quotient and
3662 remainder, using truncation. We can easily adjust the
3663 quotient or remainder to get ceiling rounding, once we have the
3664 remainder. Notice that we also compute the final remainder
3665 value here, and return the result right away. */
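/* The compensation emitted below is, in plain C (illustrative sketch;
   the names are invented):

     q = a / b;
     r = a % b;
     if (r != 0)
       q += 1, r -= b;     round the quotient up whenever inexact

   E.g. a = 7, b = 2: q = 3, r = 1 becomes q = 4, r = -1, and the
   invariant a == q*b + r still holds.  */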
3666 if (target == 0 || GET_MODE (target) != compute_mode)
3667 target = gen_reg_rtx (compute_mode);
3669 if (rem_flag)
3671 remainder = (GET_CODE (target) == REG
3672 ? target : gen_reg_rtx (compute_mode));
3673 quotient = gen_reg_rtx (compute_mode);
3675 else
3677 quotient = (GET_CODE (target) == REG
3678 ? target : gen_reg_rtx (compute_mode));
3679 remainder = gen_reg_rtx (compute_mode);
3682 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3683 remainder, 1))
3685 /* This could be computed with a branch-less sequence.
3686 Save that for later. */
3687 rtx label = gen_label_rtx ();
3688 do_cmp_and_jump (remainder, const0_rtx, EQ,
3689 compute_mode, label);
3690 expand_inc (quotient, const1_rtx);
3691 expand_dec (remainder, op1);
3692 emit_label (label);
3693 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3696 /* No luck with division elimination or divmod. Have to do it
3697 by conditionally adjusting op0 *and* the result. */
3699 rtx label1, label2;
3700 rtx adjusted_op0, tem;
3702 quotient = gen_reg_rtx (compute_mode);
3703 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3704 label1 = gen_label_rtx ();
3705 label2 = gen_label_rtx ();
3706 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3707 compute_mode, label1);
3708 emit_move_insn (quotient, const0_rtx);
3709 emit_jump_insn (gen_jump (label2));
3710 emit_barrier ();
3711 emit_label (label1);
3712 expand_dec (adjusted_op0, const1_rtx);
3713 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3714 quotient, 1, OPTAB_LIB_WIDEN);
3715 if (tem != quotient)
3716 emit_move_insn (quotient, tem);
3717 expand_inc (quotient, const1_rtx);
3718 emit_label (label2);
3721 else /* signed */
3723 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3724 && INTVAL (op1) >= 0)
3726 /* This is extremely similar to the code for the unsigned case
3727 above. For 2.7 we should merge these variants, but for
3728 2.6.1 I don't want to touch the code for unsigned since that
3729 gets used in C. The signed case will only be used by other
3730 languages (Ada). */
3732 rtx t1, t2, t3;
3733 unsigned HOST_WIDE_INT d = INTVAL (op1);
3734 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3735 build_int_2 (floor_log2 (d), 0),
3736 tquotient, 0);
3737 t2 = expand_binop (compute_mode, and_optab, op0,
3738 GEN_INT (d - 1),
3739 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3740 t3 = gen_reg_rtx (compute_mode);
3741 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3742 compute_mode, 1, 1);
3743 if (t3 == 0)
3745 rtx lab;
3746 lab = gen_label_rtx ();
3747 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3748 expand_inc (t1, const1_rtx);
3749 emit_label (lab);
3750 quotient = t1;
3752 else
3753 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3754 t1, t3),
3755 tquotient);
3756 break;
3759 /* Try using an instruction that produces both the quotient and
3760 remainder, using truncation. We can easily adjust the
3761 quotient or remainder to get ceiling rounding, once we have the
3762 remainder. Notice that we also compute the final remainder
3763 value here, and return the result right away. */
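/* For the signed case the compensation below also checks the signs:
   in plain C (illustrative sketch),

     if (r != 0 && (a ^ b) >= 0)
       q += 1, r -= b;

   since the true quotient is rounded up only when it is positive,
   i.e. when the operands have the same sign.  E.g. a = -7, b = 2:
   the signs differ, so q stays -3, which is ceil(-3.5).  */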
3764 if (target == 0 || GET_MODE (target) != compute_mode)
3765 target = gen_reg_rtx (compute_mode);
3766 if (rem_flag)
3768 remainder = (GET_CODE (target) == REG
3769 ? target : gen_reg_rtx (compute_mode));
3770 quotient = gen_reg_rtx (compute_mode);
3772 else
3774 quotient = (GET_CODE (target) == REG
3775 ? target : gen_reg_rtx (compute_mode));
3776 remainder = gen_reg_rtx (compute_mode);
3779 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3780 remainder, 0))
3782 /* This could be computed with a branch-less sequence.
3783 Save that for later. */
3784 rtx tem;
3785 rtx label = gen_label_rtx ();
3786 do_cmp_and_jump (remainder, const0_rtx, EQ,
3787 compute_mode, label);
3788 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3789 NULL_RTX, 0, OPTAB_WIDEN);
3790 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3791 expand_inc (quotient, const1_rtx);
3792 expand_dec (remainder, op1);
3793 emit_label (label);
3794 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3797 /* No luck with division elimination or divmod. Have to do it
3798 by conditionally adjusting op0 *and* the result. */
3800 rtx label1, label2, label3, label4, label5;
3801 rtx adjusted_op0;
3802 rtx tem;
3804 quotient = gen_reg_rtx (compute_mode);
3805 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3806 label1 = gen_label_rtx ();
3807 label2 = gen_label_rtx ();
3808 label3 = gen_label_rtx ();
3809 label4 = gen_label_rtx ();
3810 label5 = gen_label_rtx ();
3811 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3812 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3813 compute_mode, label1);
3814 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3815 quotient, 0, OPTAB_LIB_WIDEN);
3816 if (tem != quotient)
3817 emit_move_insn (quotient, tem);
3818 emit_jump_insn (gen_jump (label5));
3819 emit_barrier ();
3820 emit_label (label1);
3821 expand_dec (adjusted_op0, const1_rtx);
3822 emit_jump_insn (gen_jump (label4));
3823 emit_barrier ();
3824 emit_label (label2);
3825 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3826 compute_mode, label3);
3827 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3828 quotient, 0, OPTAB_LIB_WIDEN);
3829 if (tem != quotient)
3830 emit_move_insn (quotient, tem);
3831 emit_jump_insn (gen_jump (label5));
3832 emit_barrier ();
3833 emit_label (label3);
3834 expand_inc (adjusted_op0, const1_rtx);
3835 emit_label (label4);
3836 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3837 quotient, 0, OPTAB_LIB_WIDEN);
3838 if (tem != quotient)
3839 emit_move_insn (quotient, tem);
3840 expand_inc (quotient, const1_rtx);
3841 emit_label (label5);
3844 break;
3846 case EXACT_DIV_EXPR:
3847 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3849 HOST_WIDE_INT d = INTVAL (op1);
3850 unsigned HOST_WIDE_INT ml;
3851 int pre_shift;
3852 rtx t1;
3854 pre_shift = floor_log2 (d & -d);
3855 ml = invert_mod2n (d >> pre_shift, size);
3856 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3857 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3858 quotient = expand_mult (compute_mode, t1,
3859 GEN_INT (trunc_int_for_mode
3860 (ml, compute_mode)),
3861 NULL_RTX, 0);
3863 insn = get_last_insn ();
3864 set_unique_reg_note (insn,
3865 REG_EQUAL,
3866 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3867 compute_mode,
3868 op0, op1));
3870 break;
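/* Exact division works by modular multiplication: if op0 = q * d with
   d odd, then q = op0 * d^-1 (mod 2^N), where d * d^-1 == 1 (mod 2^N);
   an even divisor first sheds its power-of-two factor via the shift by
   pre_shift above.  For example, with N = 32 and d = 5 the inverse is
   0xCCCCCCCD, and 35 * 0xCCCCCCCD == 7 (mod 2^32).  As an illustrative
   sketch (not necessarily how invert_mod2n computes it), Newton's
   iteration finds such an inverse:

     unsigned int
     invert_odd (unsigned int d)
     {
       unsigned int inv = d;
       int i;
       for (i = 0; i < 4; i++)
         inv *= 2 - d * inv;
       return inv;
     }

   The seed d is already an inverse mod 2^3 (d*d == 1 mod 8 for odd d),
   and each step squares the error term, doubling the correct low bits:
   3 -> 6 -> 12 -> 24 -> 48, enough for a 32-bit mode.  */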
3872 case ROUND_DIV_EXPR:
3873 case ROUND_MOD_EXPR:
3874 if (unsignedp)
3876 rtx tem;
3877 rtx label;
3878 label = gen_label_rtx ();
3879 quotient = gen_reg_rtx (compute_mode);
3880 remainder = gen_reg_rtx (compute_mode);
3881 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3883 rtx tem;
3884 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3885 quotient, 1, OPTAB_LIB_WIDEN);
3886 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3887 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3888 remainder, 1, OPTAB_LIB_WIDEN);
3890 tem = plus_constant (op1, -1);
3891 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3892 build_int_2 (1, 0), NULL_RTX, 1);
3893 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3894 expand_inc (quotient, const1_rtx);
3895 expand_dec (remainder, op1);
3896 emit_label (label);
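/* The test above bumps the quotient when the remainder exceeds half
   the divisor: tem = (d - 1) >> 1 is floor((d-1)/2), so r > tem means
   2*r > d - 1, i.e. the fraction r/d is at least one half (halves
   round up).  E.g. d = 5: tem = 2; a = 12 gives r = 2, no bump (2.4
   rounds to 2), while a = 13 gives r = 3, bump (2.6 rounds to 3).  */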
3898 else
3900 rtx abs_rem, abs_op1, tem, mask;
3901 rtx label;
3902 label = gen_label_rtx ();
3903 quotient = gen_reg_rtx (compute_mode);
3904 remainder = gen_reg_rtx (compute_mode);
3905 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3907 rtx tem;
3908 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3909 quotient, 0, OPTAB_LIB_WIDEN);
3910 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3911 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3912 remainder, 0, OPTAB_LIB_WIDEN);
3914 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3915 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3916 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3917 build_int_2 (1, 0), NULL_RTX, 1);
3918 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3919 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3920 NULL_RTX, 0, OPTAB_WIDEN);
3921 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3922 build_int_2 (size - 1, 0), NULL_RTX, 0);
3923 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3924 NULL_RTX, 0, OPTAB_WIDEN);
3925 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3926 NULL_RTX, 0, OPTAB_WIDEN);
3927 expand_inc (quotient, tem);
3928 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3929 NULL_RTX, 0, OPTAB_WIDEN);
3930 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3931 NULL_RTX, 0, OPTAB_WIDEN);
3932 expand_dec (remainder, tem);
3933 emit_label (label);
3935 return gen_lowpart (mode, rem_flag ? remainder : quotient);
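/* The mask arithmetic in the signed case above uses the branch-free
   conditional negation (v ^ mask) - mask, which yields v when mask is
   0 and -v when mask is -1.  With mask the sign of op0 ^ op1, the
   quotient is therefore adjusted by +1 or -1 and the remainder by
   -op1 or +op1, matching the sign of the true quotient.  */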
3937 default:
3938 abort ();
3941 if (quotient == 0)
3943 if (target && GET_MODE (target) != compute_mode)
3944 target = 0;
3946 if (rem_flag)
3948 /* Try to produce the remainder without producing the quotient.
3949 If we seem to have a divmod pattern that does not require widening,
3950 don't try widening here. We should really have a WIDEN argument
3951 to expand_twoval_binop, since what we'd really like to do here is
3952 1) try a mod insn in compute_mode
3953 2) try a divmod insn in compute_mode
3954 3) try a div insn in compute_mode and multiply-subtract to get
3955 remainder
3956 4) try the same things with widening allowed. */
3957 remainder
3958 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3959 op0, op1, target,
3960 unsignedp,
3961 ((optab2->handlers[(int) compute_mode].insn_code
3962 != CODE_FOR_nothing)
3963 ? OPTAB_DIRECT : OPTAB_WIDEN));
3964 if (remainder == 0)
3966 /* No luck there. Can we do remainder and divide at once
3967 without a library call? */
3968 remainder = gen_reg_rtx (compute_mode);
3969 if (! expand_twoval_binop ((unsignedp
3970 ? udivmod_optab
3971 : sdivmod_optab),
3972 op0, op1,
3973 NULL_RTX, remainder, unsignedp))
3974 remainder = 0;
3977 if (remainder)
3978 return gen_lowpart (mode, remainder);
3981 /* Produce the quotient. Try a quotient insn, but not a library call.
3982 If we have a divmod in this mode, use it in preference to widening
3983 the div (for this test we assume it will not fail). Note that optab2
3984 is set to whichever of the two optabs the call below will use. */
3985 quotient
3986 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3987 op0, op1, rem_flag ? NULL_RTX : target,
3988 unsignedp,
3989 ((optab2->handlers[(int) compute_mode].insn_code
3990 != CODE_FOR_nothing)
3991 ? OPTAB_DIRECT : OPTAB_WIDEN));
3993 if (quotient == 0)
3995 /* No luck there. Try a quotient-and-remainder insn,
3996 keeping the quotient alone. */
3997 quotient = gen_reg_rtx (compute_mode);
3998 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3999 op0, op1,
4000 quotient, NULL_RTX, unsignedp))
4002 quotient = 0;
4003 if (! rem_flag)
4004 /* Still no luck. If we are not computing the remainder,
4005 use a library call for the quotient. */
4006 quotient = sign_expand_binop (compute_mode,
4007 udiv_optab, sdiv_optab,
4008 op0, op1, target,
4009 unsignedp, OPTAB_LIB_WIDEN);
4014 if (rem_flag)
4016 if (target && GET_MODE (target) != compute_mode)
4017 target = 0;
4019 if (quotient == 0)
4020 /* No divide instruction either. Use library for remainder. */
4021 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4022 op0, op1, target,
4023 unsignedp, OPTAB_LIB_WIDEN);
4024 else
4026 /* We divided. Now finish doing X - Y * (X / Y). */
4027 remainder = expand_mult (compute_mode, quotient, op1,
4028 NULL_RTX, unsignedp);
4029 remainder = expand_binop (compute_mode, sub_optab, op0,
4030 remainder, target, unsignedp,
4031 OPTAB_LIB_WIDEN);
4035 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4038 /* Return a tree node with data type TYPE, describing the value of X.
4039 Usually this is an RTL_EXPR, if there is no obviously better choice.
4040 X may be an expression; however, we only support those expressions
4041 generated by loop.c. */
4043 tree
4044 make_tree (type, x)
4045 tree type;
4046 rtx x;
4048 tree t;
4050 switch (GET_CODE (x))
4052 case CONST_INT:
4053 t = build_int_2 (INTVAL (x),
4054 (TREE_UNSIGNED (type)
4055 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4056 || INTVAL (x) >= 0 ? 0 : -1);
4057 TREE_TYPE (t) = type;
4058 return t;
4060 case CONST_DOUBLE:
4061 if (GET_MODE (x) == VOIDmode)
4063 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4064 TREE_TYPE (t) = type;
4066 else
4068 REAL_VALUE_TYPE d;
4070 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4071 t = build_real (type, d);
4074 return t;
4076 case CONST_VECTOR:
4078 int i, units;
4079 rtx elt;
4080 tree t = NULL_TREE;
4082 units = CONST_VECTOR_NUNITS (x);
4084 /* Build a tree with vector elements. */
4085 for (i = units - 1; i >= 0; --i)
4087 elt = CONST_VECTOR_ELT (x, i);
4088 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4091 return build_vector (type, t);
4094 case PLUS:
4095 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4096 make_tree (type, XEXP (x, 1))));
4098 case MINUS:
4099 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4100 make_tree (type, XEXP (x, 1))));
4102 case NEG:
4103 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4105 case MULT:
4106 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4107 make_tree (type, XEXP (x, 1))));
4109 case ASHIFT:
4110 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4111 make_tree (type, XEXP (x, 1))));
4113 case LSHIFTRT:
4114 return fold (convert (type,
4115 build (RSHIFT_EXPR, unsigned_type (type),
4116 make_tree (unsigned_type (type),
4117 XEXP (x, 0)),
4118 make_tree (type, XEXP (x, 1)))));
4120 case ASHIFTRT:
4121 return fold (convert (type,
4122 build (RSHIFT_EXPR, signed_type (type),
4123 make_tree (signed_type (type), XEXP (x, 0)),
4124 make_tree (type, XEXP (x, 1)))));
4126 case DIV:
4127 if (TREE_CODE (type) != REAL_TYPE)
4128 t = signed_type (type);
4129 else
4130 t = type;
4132 return fold (convert (type,
4133 build (TRUNC_DIV_EXPR, t,
4134 make_tree (t, XEXP (x, 0)),
4135 make_tree (t, XEXP (x, 1)))));
4136 case UDIV:
4137 t = unsigned_type (type);
4138 return fold (convert (type,
4139 build (TRUNC_DIV_EXPR, t,
4140 make_tree (t, XEXP (x, 0)),
4141 make_tree (t, XEXP (x, 1)))));
4143 case SIGN_EXTEND:
4144 case ZERO_EXTEND:
4145 t = type_for_mode (GET_MODE (XEXP (x, 0)), GET_CODE (x) == ZERO_EXTEND);
4146 return fold (convert (type, make_tree (t, XEXP (x, 0))));
4148 default:
4149 t = make_node (RTL_EXPR);
4150 TREE_TYPE (t) = type;
4152 #ifdef POINTERS_EXTEND_UNSIGNED
4153 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4154 ptr_mode. So convert. */
4155 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4156 x = convert_memory_address (TYPE_MODE (type), x);
4157 #endif
4159 RTL_EXPR_RTL (t) = x;
4160 /* There are no insns to be output
4161 when this rtl_expr is used. */
4162 RTL_EXPR_SEQUENCE (t) = 0;
4163 return t;
4167 /* Return an rtx representing the value of X * MULT + ADD.
4168 TARGET is a suggestion for where to store the result (an rtx).
4169 MODE is the machine mode for the computation.
4170 X and MULT must have mode MODE. ADD may have a different mode;
4171 a VOIDmode ADD is treated as having mode MODE.
4172 UNSIGNEDP is non-zero to do unsigned multiplication.
4173 This may emit insns. */
4176 expand_mult_add (x, target, mult, add, mode, unsignedp)
4177 rtx x, target, mult, add;
4178 enum machine_mode mode;
4179 int unsignedp;
4181 tree type = type_for_mode (mode, unsignedp);
4182 tree add_type = (GET_MODE (add) == VOIDmode
4183 ? type : type_for_mode (GET_MODE (add), unsignedp));
4184 tree result = fold (build (PLUS_EXPR, type,
4185 fold (build (MULT_EXPR, type,
4186 make_tree (type, x),
4187 make_tree (type, mult))),
4188 make_tree (add_type, add)));
4190 return expand_expr (result, target, VOIDmode, 0);
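/* Illustrative use (hypothetical caller; x and y are SImode rtx):

     rtx val = expand_mult_add (x, NULL_RTX, GEN_INT (4), y, SImode, 0);

   computes x*4 + y; the tree built above lets expand_expr choose the
   cheapest sequence, e.g. a shift-and-add.  */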
4193 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4194 and returning TARGET.
4196 If TARGET is 0, a pseudo-register or constant is returned. */
4199 expand_and (mode, op0, op1, target)
4200 enum machine_mode mode;
4201 rtx op0, op1, target;
4203 rtx tem = 0;
4205 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4206 tem = simplify_binary_operation (AND, mode, op0, op1);
4207 if (tem == 0)
4208 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4210 if (target == 0)
4211 target = tem;
4212 else if (tem != target)
4213 emit_move_insn (target, tem);
4214 return target;
4217 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4218 and storing in TARGET. Normally return TARGET.
4219 Return 0 if that cannot be done.
4221 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4222 it is VOIDmode, they cannot both be CONST_INT.
4224 UNSIGNEDP is for the case where we have to widen the operands
4225 to perform the operation. It says to use zero-extension.
4227 NORMALIZEP is 1 if we should convert the result to be either zero
4228 or one. NORMALIZEP is -1 if we should convert the result to be
4229 either zero or -1. If NORMALIZEP is zero, the result will be left
4230 "raw" out of the scc insn. */
4233 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4234 rtx target;
4235 enum rtx_code code;
4236 rtx op0, op1;
4237 enum machine_mode mode;
4238 int unsignedp;
4239 int normalizep;
4241 rtx subtarget;
4242 enum insn_code icode;
4243 enum machine_mode compare_mode;
4244 enum machine_mode target_mode = GET_MODE (target);
4245 rtx tem;
4246 rtx last = get_last_insn ();
4247 rtx pattern, comparison;
4249 /* ??? Ok to do this and then fail? */
4250 op0 = protect_from_queue (op0, 0);
4251 op1 = protect_from_queue (op1, 0);
4253 if (unsignedp)
4254 code = unsigned_condition (code);
4256 /* If one operand is constant, make it the second one. Only do this
4257 if the other operand is not constant as well. */
4259 if (swap_commutative_operands_p (op0, op1))
4261 tem = op0;
4262 op0 = op1;
4263 op1 = tem;
4264 code = swap_condition (code);
4267 if (mode == VOIDmode)
4268 mode = GET_MODE (op0);
4270 /* For some comparisons with 1 and -1, we can convert this to
4271 comparisons with zero. This will often produce more opportunities for
4272 store-flag insns. */
4274 switch (code)
4276 case LT:
4277 if (op1 == const1_rtx)
4278 op1 = const0_rtx, code = LE;
4279 break;
4280 case LE:
4281 if (op1 == constm1_rtx)
4282 op1 = const0_rtx, code = LT;
4283 break;
4284 case GE:
4285 if (op1 == const1_rtx)
4286 op1 = const0_rtx, code = GT;
4287 break;
4288 case GT:
4289 if (op1 == constm1_rtx)
4290 op1 = const0_rtx, code = GE;
4291 break;
4292 case GEU:
4293 if (op1 == const1_rtx)
4294 op1 = const0_rtx, code = NE;
4295 break;
4296 case LTU:
4297 if (op1 == const1_rtx)
4298 op1 = const0_rtx, code = EQ;
4299 break;
4300 default:
4301 break;
4304 /* If we are comparing a double-word integer with zero, we can convert
4305 the comparison into one involving a single word. */
4306 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4307 && GET_MODE_CLASS (mode) == MODE_INT
4308 && op1 == const0_rtx
4309 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4311 if (code == EQ || code == NE)
4313 /* Do a logical OR of the two words and compare the result. */
4314 rtx op0h = gen_highpart (word_mode, op0);
4315 rtx op0l = gen_lowpart (word_mode, op0);
4316 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4317 NULL_RTX, unsignedp, OPTAB_DIRECT);
4318 if (op0both != 0)
4319 return emit_store_flag (target, code, op0both, op1, word_mode,
4320 unsignedp, normalizep);
4322 else if (code == LT || code == GE)
4323 /* If testing the sign bit, we can just test the high word. */
4324 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4325 op1, word_mode, unsignedp, normalizep);
4328 /* From now on, we won't change CODE, so set ICODE now. */
4329 icode = setcc_gen_code[(int) code];
4331 /* If this is A < 0 or A >= 0, we can do this by taking the one's
4332 complement of A (for GE) and shifting the sign bit to the low bit. */
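/* In other words (illustrative, for an N-bit A): A < 0 as a 0/1 value
   is just (unsigned) A >> (N-1), and A >= 0 is (unsigned) ~A >> (N-1);
   for a 0/-1 value the final shift is arithmetic instead of logical.  */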
4333 if (op1 == const0_rtx && (code == LT || code == GE)
4334 && GET_MODE_CLASS (mode) == MODE_INT
4335 && (normalizep || STORE_FLAG_VALUE == 1
4336 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4337 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4338 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4340 subtarget = target;
4342 /* If the result is to be wider than OP0, it is best to convert it
4343 first. If it is to be narrower, it is *incorrect* to convert it
4344 first. */
4345 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4347 op0 = protect_from_queue (op0, 0);
4348 op0 = convert_modes (target_mode, mode, op0, 0);
4349 mode = target_mode;
4352 if (target_mode != mode)
4353 subtarget = 0;
4355 if (code == GE)
4356 op0 = expand_unop (mode, one_cmpl_optab, op0,
4357 ((STORE_FLAG_VALUE == 1 || normalizep)
4358 ? 0 : subtarget), 0);
4360 if (STORE_FLAG_VALUE == 1 || normalizep)
4361 /* If we are supposed to produce a 0/1 value, we want to do
4362 a logical shift from the sign bit to the low-order bit; for
4363 a -1/0 value, we do an arithmetic shift. */
4364 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4365 size_int (GET_MODE_BITSIZE (mode) - 1),
4366 subtarget, normalizep != -1);
4368 if (mode != target_mode)
4369 op0 = convert_modes (target_mode, mode, op0, 0);
4371 return op0;
4374 if (icode != CODE_FOR_nothing)
4376 insn_operand_predicate_fn pred;
4378 /* We think we may be able to do this with a scc insn. Emit the
4379 comparison and then the scc insn.
4381 compare_from_rtx may call emit_queue, which would be deleted below
4382 if the scc insn fails. So call it ourselves before setting LAST.
4383 Likewise for do_pending_stack_adjust. */
4385 emit_queue ();
4386 do_pending_stack_adjust ();
4387 last = get_last_insn ();
4389 comparison
4390 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4391 if (GET_CODE (comparison) == CONST_INT)
4392 return (comparison == const0_rtx ? const0_rtx
4393 : normalizep == 1 ? const1_rtx
4394 : normalizep == -1 ? constm1_rtx
4395 : const_true_rtx);
4397 /* The code of COMPARISON may not match CODE if compare_from_rtx
4398 decided to swap its operands and reverse the original code.
4400 We know that compare_from_rtx returns either a CONST_INT or
4401 a new comparison code, so it is safe to just extract the
4402 code from COMPARISON. */
4403 code = GET_CODE (comparison);
4405 /* Get a reference to the target in the proper mode for this insn. */
4406 compare_mode = insn_data[(int) icode].operand[0].mode;
4407 subtarget = target;
4408 pred = insn_data[(int) icode].operand[0].predicate;
4409 if (preserve_subexpressions_p ()
4410 || ! (*pred) (subtarget, compare_mode))
4411 subtarget = gen_reg_rtx (compare_mode);
4413 pattern = GEN_FCN (icode) (subtarget);
4414 if (pattern)
4416 emit_insn (pattern);
4418 /* If we are converting to a wider mode, first convert to
4419 TARGET_MODE, then normalize. This produces better combining
4420 opportunities on machines that have a SIGN_EXTRACT when we are
4421 testing a single bit. This mostly benefits the 68k.
4423 If STORE_FLAG_VALUE does not have the sign bit set when
4424 interpreted in COMPARE_MODE, we can do this conversion as
4425 unsigned, which is usually more efficient. */
4426 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4428 convert_move (target, subtarget,
4429 (GET_MODE_BITSIZE (compare_mode)
4430 <= HOST_BITS_PER_WIDE_INT)
4431 && 0 == (STORE_FLAG_VALUE
4432 & ((HOST_WIDE_INT) 1
4433 << (GET_MODE_BITSIZE (compare_mode) -1))));
4434 op0 = target;
4435 compare_mode = target_mode;
4437 else
4438 op0 = subtarget;
4440 /* If we want to keep subexpressions around, don't reuse our
4441 last target. */
4443 if (preserve_subexpressions_p ())
4444 subtarget = 0;
4446 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4447 we don't have to do anything. */
4448 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4450 /* STORE_FLAG_VALUE might be the most negative number, so write
4451 the comparison this way to avoid a compile-time warning. */
4452 else if (- normalizep == STORE_FLAG_VALUE)
4453 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4455 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4456 makes it hard to use a value of just the sign bit due to
4457 ANSI integer constant typing rules. */
4458 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4459 && (STORE_FLAG_VALUE
4460 & ((HOST_WIDE_INT) 1
4461 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4462 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4463 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4464 subtarget, normalizep == 1);
4465 else if (STORE_FLAG_VALUE & 1)
4467 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4468 if (normalizep == -1)
4469 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4471 else
4472 abort ();
4474 /* If we were converting to a smaller mode, do the
4475 conversion now. */
4476 if (target_mode != compare_mode)
4478 convert_move (target, op0, 0);
4479 return target;
4481 else
4482 return op0;
4486 delete_insns_since (last);
4488 /* If expensive optimizations are enabled, use a different pseudo
4489 register for each insn, instead of reusing the same pseudo. This leads
4490 to better CSE, but slows down the compiler, since there are more pseudos. */
4491 subtarget = (!flag_expensive_optimizations
4492 && (target_mode == mode)) ? target : NULL_RTX;
4494 /* If we reached here, we can't do this with a scc insn. However, there
4495 are some comparisons that can be done directly. For example, if
4496 this is an equality comparison of integers, we can try to exclusive-or
4497 (or subtract) the two operands and use a recursive call to try the
4498 comparison with zero. Don't do any of these cases if branches are
4499 very cheap. */
4501 if (BRANCH_COST > 0
4502 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4503 && op1 != const0_rtx)
4505 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4506 OPTAB_WIDEN);
4508 if (tem == 0)
4509 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4510 OPTAB_WIDEN);
4511 if (tem != 0)
4512 tem = emit_store_flag (target, code, tem, const0_rtx,
4513 mode, unsignedp, normalizep);
4514 if (tem == 0)
4515 delete_insns_since (last);
4516 return tem;
4519 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4520 the constant zero. Reject all other comparisons at this point. Only
4521 do LE and GT if branches are expensive since they are expensive on
4522 2-operand machines. */
4524 if (BRANCH_COST == 0
4525 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4526 || (code != EQ && code != NE
4527 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4528 return 0;
4530 /* See what we need to return. We can only return a 1, -1, or the
4531 sign bit. */
4533 if (normalizep == 0)
4535 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4536 normalizep = STORE_FLAG_VALUE;
4538 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4539 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4540 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4542 else
4543 return 0;
4546 /* Try to put the result of the comparison in the sign bit. Assume we can't
4547 do the necessary operation below. */
4549 tem = 0;
4551 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4552 the sign bit set. */
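/* For instance, with 32-bit A: A = 3 gives 3 | 2 = 3 (sign clear),
   A = 0 gives 0 | -1 = -1 (sign set), and A = -5 gives -5 | -6 = -5
   (sign set).  A - 1 may wrap when A is the most negative value, but
   then A itself supplies the sign bit, so the result is still right.  */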
4554 if (code == LE)
4556 /* This is destructive, so SUBTARGET can't be OP0. */
4557 if (rtx_equal_p (subtarget, op0))
4558 subtarget = 0;
4560 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4561 OPTAB_WIDEN);
4562 if (tem)
4563 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4564 OPTAB_WIDEN);
4567 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4568 number of bits in the mode of OP0, minus one. */
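/* With the arithmetic shift, A >> BITS is 0 for A >= 0 and -1 for
   A < 0, so the subtraction leaves the sign bit set exactly when
   A > 0: A = 3 gives 0 - 3 = -3 (set); A = 0 gives 0 (clear);
   A = -5 gives -1 + 5 = 4 (clear); even the most negative A gives
   -1 - A, which is nonnegative (clear).  */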
4570 if (code == GT)
4572 if (rtx_equal_p (subtarget, op0))
4573 subtarget = 0;
4575 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4576 size_int (GET_MODE_BITSIZE (mode) - 1),
4577 subtarget, 0);
4578 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4579 OPTAB_WIDEN);
4582 if (code == EQ || code == NE)
4584 /* For EQ or NE, one way to do the comparison is to apply an operation
4585 that converts the operand into a positive number if it is non-zero
4586 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4587 for NE we negate. This puts the result in the sign bit. Then we
4588 normalize with a shift, if needed.
4590 Two operations that can do the above actions are ABS and FFS, so try
4591 them. If that doesn't work, and MODE is smaller than a full word,
4592 we can use zero-extension to the wider mode (an unsigned conversion)
4593 as the operation. */
4595 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4596 that is compensated by the subsequent overflow when subtracting
4597 one / negating. */
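/* Concretely: for NE, the sign of -abs(x) is set exactly when x != 0
   (for the most negative x, abs overflows back to x, which already has
   the sign bit set); for EQ, the sign of abs(x) - 1 is set only for
   x == 0, since abs of the most negative x minus one overflows to the
   most positive value, whose sign is clear.  FFS works the same way,
   as its result is positive iff x != 0.  */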
4599 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4600 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4601 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4602 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4603 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4605 op0 = protect_from_queue (op0, 0);
4606 tem = convert_modes (word_mode, mode, op0, 1);
4607 mode = word_mode;
4610 if (tem != 0)
4612 if (code == EQ)
4613 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4614 0, OPTAB_WIDEN);
4615 else
4616 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4619 /* If we couldn't do it that way, for NE we can "or" the two's complement
4620 of the value with itself. For EQ, we take the one's complement of
4621 that "or", which is an extra insn, so we only handle EQ if branches
4622 are expensive. */
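/* That is, (-x | x) has its sign bit set iff x != 0, since for any
   nonzero x at least one of x and -x is negative (for the most
   negative x, both are).  The extra one's complement then flips the
   sign for EQ, at the cost of one more insn.  */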
4624 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4626 if (rtx_equal_p (subtarget, op0))
4627 subtarget = 0;
4629 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4630 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4631 OPTAB_WIDEN);
4633 if (tem && code == EQ)
4634 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4638 if (tem && normalizep)
4639 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4640 size_int (GET_MODE_BITSIZE (mode) - 1),
4641 subtarget, normalizep == 1);
4643 if (tem)
4645 if (GET_MODE (tem) != target_mode)
4647 convert_move (target, tem, 0);
4648 tem = target;
4650 else if (!subtarget)
4652 emit_move_insn (target, tem);
4653 tem = target;
4656 else
4657 delete_insns_since (last);
4659 return tem;
4662 /* Like emit_store_flag, but always succeeds. */
4665 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4666 rtx target;
4667 enum rtx_code code;
4668 rtx op0, op1;
4669 enum machine_mode mode;
4670 int unsignedp;
4671 int normalizep;
4673 rtx tem, label;
4675 /* First see if emit_store_flag can do the job. */
4676 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4677 if (tem != 0)
4678 return tem;
4680 if (normalizep == 0)
4681 normalizep = 1;
4683 /* If this failed, we have to do this with set/compare/jump/set code. */
4685 if (GET_CODE (target) != REG
4686 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4687 target = gen_reg_rtx (GET_MODE (target));
4689 emit_move_insn (target, const1_rtx);
4690 label = gen_label_rtx ();
4691 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4692 NULL_RTX, label);
4694 emit_move_insn (target, const0_rtx);
4695 emit_label (label);
4697 return target;
4700 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4701 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4703 The algorithm is based on the code in expr.c:do_jump.
4705 Note that this does not perform a general comparison. Only variants
4706 generated within expmed.c are correctly handled; others abort (but could
4707 be handled if needed). */
4709 static void
4710 do_cmp_and_jump (arg1, arg2, op, mode, label)
4711 rtx arg1, arg2, label;
4712 enum rtx_code op;
4713 enum machine_mode mode;
4715 /* If this mode is an integer too wide to compare properly,
4716 compare word by word. Rely on cse to optimize constant cases. */
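/* Conceptually, a two-word unsigned "arg1 < arg2" comes out as a chain
   of single-word compares (a sketch only; the helpers called below emit
   the actual rtl):

     if (arg1.high < arg2.high) goto label;    comparison is true
     if (arg1.high > arg2.high) goto label2;   comparison is false
     if (arg1.low < arg2.low) goto label;      high words were equal
     label2:                                   fall through, false

   The other variants differ only in operand order and in whether the
   top-word compare is signed.  */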
4718 if (GET_MODE_CLASS (mode) == MODE_INT
4719 && ! can_compare_p (op, mode, ccp_jump))
4721 rtx label2 = gen_label_rtx ();
4723 switch (op)
4725 case LTU:
4726 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4727 break;
4729 case LEU:
4730 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4731 break;
4733 case LT:
4734 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4735 break;
4737 case GT:
4738 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4739 break;
4741 case GE:
4742 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4743 break;
4745 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4746 those are the only equality operations we do. */
4747 case EQ:
4748 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4749 abort ();
4750 do_jump_by_parts_equality_rtx (arg1, label2, label);
4751 break;
4753 case NE:
4754 if (arg2 != const0_rtx || mode != GET_MODE(arg1))
4755 abort ();
4756 do_jump_by_parts_equality_rtx (arg1, label, label2);
4757 break;
4759 default:
4760 abort ();
4763 emit_label (label2);
4765 else
4766 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);