gcc/expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "tm_p.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "expr.h"
35 #include "optabs.h"
36 #include "recog.h"
37 #include "langhooks.h"
38 #include "df.h"
39 #include "target.h"
41 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
42 unsigned HOST_WIDE_INT,
43 unsigned HOST_WIDE_INT, rtx);
44 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
45 unsigned HOST_WIDE_INT, rtx);
46 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
47 unsigned HOST_WIDE_INT,
48 unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT, rtx, int);
50 static rtx mask_rtx (enum machine_mode, int, int, int);
51 static rtx lshift_value (enum machine_mode, rtx, int, int);
52 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
53 unsigned HOST_WIDE_INT, int);
54 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
55 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
56 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
58 /* Test whether a value is zero or a power of two.  */
59 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
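/* For illustration: x & (x - 1) clears the lowest set bit of x, so the
   test is true exactly when x has at most one bit set, e.g.

     EXACT_POWER_OF_2_OR_ZERO_P (0)  -> true   (0 & -1 == 0)
     EXACT_POWER_OF_2_OR_ZERO_P (8)  -> true   (8 & 7 == 0)
     EXACT_POWER_OF_2_OR_ZERO_P (12) -> false  (12 & 11 == 8).  */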
61 /* Nonzero means divides or modulus operations are relatively cheap for
62 powers of two, so don't use branches; emit the operation instead.
63 Usually, this will mean that the MD file will emit non-branch
64 sequences. */
66 static bool sdiv_pow2_cheap[2][NUM_MACHINE_MODES];
67 static bool smod_pow2_cheap[2][NUM_MACHINE_MODES];
69 #ifndef SLOW_UNALIGNED_ACCESS
70 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
71 #endif
73 /* For compilers that support multiple targets with different word sizes,
74 MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
75 is the H8/300(H) compiler. */
77 #ifndef MAX_BITS_PER_WORD
78 #define MAX_BITS_PER_WORD BITS_PER_WORD
79 #endif
81 /* Reduce conditional compilation elsewhere. */
82 #ifndef HAVE_insv
83 #define HAVE_insv 0
84 #define CODE_FOR_insv CODE_FOR_nothing
85 #define gen_insv(a,b,c,d) NULL_RTX
86 #endif
87 #ifndef HAVE_extv
88 #define HAVE_extv 0
89 #define CODE_FOR_extv CODE_FOR_nothing
90 #define gen_extv(a,b,c,d) NULL_RTX
91 #endif
92 #ifndef HAVE_extzv
93 #define HAVE_extzv 0
94 #define CODE_FOR_extzv CODE_FOR_nothing
95 #define gen_extzv(a,b,c,d) NULL_RTX
96 #endif
98 /* Cost of various pieces of RTL. Note that some of these are indexed by
99 shift count and some by mode. */
100 static int zero_cost[2];
101 static int add_cost[2][NUM_MACHINE_MODES];
102 static int neg_cost[2][NUM_MACHINE_MODES];
103 static int shift_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
104 static int shiftadd_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
105 static int shiftsub0_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
106 static int shiftsub1_cost[2][NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
107 static int mul_cost[2][NUM_MACHINE_MODES];
108 static int sdiv_cost[2][NUM_MACHINE_MODES];
109 static int udiv_cost[2][NUM_MACHINE_MODES];
110 static int mul_widen_cost[2][NUM_MACHINE_MODES];
111 static int mul_highpart_cost[2][NUM_MACHINE_MODES];
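/* As a rough illustration of how these tables are read elsewhere: the cost
   of shifting an SImode value left by 3 bits when optimizing for speed is
   shift_cost[1][SImode][3], where the first index is 0 for size and 1 for
   speed, matching the SPEED loop in init_expmed below.  */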
113 void
114 init_expmed (void)
116 struct
118 struct rtx_def reg; rtunion reg_fld[2];
119 struct rtx_def plus; rtunion plus_fld1;
120 struct rtx_def neg;
121 struct rtx_def mult; rtunion mult_fld1;
122 struct rtx_def sdiv; rtunion sdiv_fld1;
123 struct rtx_def udiv; rtunion udiv_fld1;
124 struct rtx_def zext;
125 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
126 struct rtx_def smod_32; rtunion smod_32_fld1;
127 struct rtx_def wide_mult; rtunion wide_mult_fld1;
128 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
129 struct rtx_def wide_trunc;
130 struct rtx_def shift; rtunion shift_fld1;
131 struct rtx_def shift_mult; rtunion shift_mult_fld1;
132 struct rtx_def shift_add; rtunion shift_add_fld1;
133 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
134 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
135 } all;
137 rtx pow2[MAX_BITS_PER_WORD];
138 rtx cint[MAX_BITS_PER_WORD];
139 int m, n;
140 enum machine_mode mode, wider_mode;
141 int speed;
144 for (m = 1; m < MAX_BITS_PER_WORD; m++)
146 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
147 cint[m] = GEN_INT (m);
149 memset (&all, 0, sizeof all);
151 PUT_CODE (&all.reg, REG);
152 /* Avoid using hard regs in ways which may be unsupported. */
153 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
155 PUT_CODE (&all.plus, PLUS);
156 XEXP (&all.plus, 0) = &all.reg;
157 XEXP (&all.plus, 1) = &all.reg;
159 PUT_CODE (&all.neg, NEG);
160 XEXP (&all.neg, 0) = &all.reg;
162 PUT_CODE (&all.mult, MULT);
163 XEXP (&all.mult, 0) = &all.reg;
164 XEXP (&all.mult, 1) = &all.reg;
166 PUT_CODE (&all.sdiv, DIV);
167 XEXP (&all.sdiv, 0) = &all.reg;
168 XEXP (&all.sdiv, 1) = &all.reg;
170 PUT_CODE (&all.udiv, UDIV);
171 XEXP (&all.udiv, 0) = &all.reg;
172 XEXP (&all.udiv, 1) = &all.reg;
174 PUT_CODE (&all.sdiv_32, DIV);
175 XEXP (&all.sdiv_32, 0) = &all.reg;
176 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
178 PUT_CODE (&all.smod_32, MOD);
179 XEXP (&all.smod_32, 0) = &all.reg;
180 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
182 PUT_CODE (&all.zext, ZERO_EXTEND);
183 XEXP (&all.zext, 0) = &all.reg;
185 PUT_CODE (&all.wide_mult, MULT);
186 XEXP (&all.wide_mult, 0) = &all.zext;
187 XEXP (&all.wide_mult, 1) = &all.zext;
189 PUT_CODE (&all.wide_lshr, LSHIFTRT);
190 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
192 PUT_CODE (&all.wide_trunc, TRUNCATE);
193 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
195 PUT_CODE (&all.shift, ASHIFT);
196 XEXP (&all.shift, 0) = &all.reg;
198 PUT_CODE (&all.shift_mult, MULT);
199 XEXP (&all.shift_mult, 0) = &all.reg;
201 PUT_CODE (&all.shift_add, PLUS);
202 XEXP (&all.shift_add, 0) = &all.shift_mult;
203 XEXP (&all.shift_add, 1) = &all.reg;
205 PUT_CODE (&all.shift_sub0, MINUS);
206 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
207 XEXP (&all.shift_sub0, 1) = &all.reg;
209 PUT_CODE (&all.shift_sub1, MINUS);
210 XEXP (&all.shift_sub1, 0) = &all.reg;
211 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
213 for (speed = 0; speed < 2; speed++)
215 crtl->maybe_hot_insn_p = speed;
216 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
218 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
219 mode != VOIDmode;
220 mode = GET_MODE_WIDER_MODE (mode))
222 PUT_MODE (&all.reg, mode);
223 PUT_MODE (&all.plus, mode);
224 PUT_MODE (&all.neg, mode);
225 PUT_MODE (&all.mult, mode);
226 PUT_MODE (&all.sdiv, mode);
227 PUT_MODE (&all.udiv, mode);
228 PUT_MODE (&all.sdiv_32, mode);
229 PUT_MODE (&all.smod_32, mode);
230 PUT_MODE (&all.wide_trunc, mode);
231 PUT_MODE (&all.shift, mode);
232 PUT_MODE (&all.shift_mult, mode);
233 PUT_MODE (&all.shift_add, mode);
234 PUT_MODE (&all.shift_sub0, mode);
235 PUT_MODE (&all.shift_sub1, mode);
237 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
238 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
239 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
240 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
241 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
243 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
244 <= 2 * add_cost[speed][mode]);
245 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
246 <= 4 * add_cost[speed][mode]);
248 wider_mode = GET_MODE_WIDER_MODE (mode);
249 if (wider_mode != VOIDmode)
251 PUT_MODE (&all.zext, wider_mode);
252 PUT_MODE (&all.wide_mult, wider_mode);
253 PUT_MODE (&all.wide_lshr, wider_mode);
254 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
256 mul_widen_cost[speed][wider_mode]
257 = rtx_cost (&all.wide_mult, SET, speed);
258 mul_highpart_cost[speed][mode]
259 = rtx_cost (&all.wide_trunc, SET, speed);
262 shift_cost[speed][mode][0] = 0;
263 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
264 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
266 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
267 for (m = 1; m < n; m++)
269 XEXP (&all.shift, 1) = cint[m];
270 XEXP (&all.shift_mult, 1) = pow2[m];
272 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
273 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
274 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
275 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
279 default_rtl_profile ();
282 /* Return an rtx representing minus the value of X.
283 MODE is the intended mode of the result,
284 useful if X is a CONST_INT. */
287 negate_rtx (enum machine_mode mode, rtx x)
289 rtx result = simplify_unary_operation (NEG, mode, x, mode);
291 if (result == 0)
292 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
294 return result;
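/* A small usage sketch (illustrative only): negate_rtx (SImode, const1_rtx)
   simplifies directly to (const_int -1), while negating a pseudo register
   goes through expand_unop with neg_optab and returns the register holding
   the negated value.  */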
297 /* Report on the availability of insv/extv/extzv and the desired mode
298 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
299 is false; else the mode of the specified operand. If OPNO is -1,
300 all the caller cares about is whether the insn is available. */
301 enum machine_mode
302 mode_for_extraction (enum extraction_pattern pattern, int opno)
304 const struct insn_data_d *data;
306 switch (pattern)
308 case EP_insv:
309 if (HAVE_insv)
311 data = &insn_data[CODE_FOR_insv];
312 break;
314 return MAX_MACHINE_MODE;
316 case EP_extv:
317 if (HAVE_extv)
319 data = &insn_data[CODE_FOR_extv];
320 break;
322 return MAX_MACHINE_MODE;
324 case EP_extzv:
325 if (HAVE_extzv)
327 data = &insn_data[CODE_FOR_extzv];
328 break;
330 return MAX_MACHINE_MODE;
332 default:
333 gcc_unreachable ();
336 if (opno == -1)
337 return VOIDmode;
339 /* Everyone who uses this function used to follow it with
340 if (result == VOIDmode) result = word_mode; */
341 if (data->operand[opno].mode == VOIDmode)
342 return word_mode;
343 return data->operand[opno].mode;
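/* Usage sketch (illustrative only): on a target whose insv pattern takes its
   value operand in SImode, mode_for_extraction (EP_insv, 3) returns SImode;
   if that operand's mode is left unspecified the result is word_mode, and if
   the target has no insv pattern at all the result is MAX_MACHINE_MODE.  */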
346 /* Return true if X, of mode MODE, matches the predicate for operand
347 OPNO of instruction ICODE. Allow volatile memories, regardless of
348 the ambient volatile_ok setting. */
350 static bool
351 check_predicate_volatile_ok (enum insn_code icode, int opno,
352 rtx x, enum machine_mode mode)
354 bool save_volatile_ok, result;
356 save_volatile_ok = volatile_ok;
357 result = insn_data[(int) icode].operand[opno].predicate (x, mode);
358 volatile_ok = save_volatile_ok;
359 return result;
362 /* A subroutine of store_bit_field, with the same arguments. Return true
363 if the operation could be implemented.
365 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
366 no other way of implementing the operation. If FALLBACK_P is false,
367 return false instead. */
369 static bool
370 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
371 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
372 rtx value, bool fallback_p)
374 unsigned int unit
375 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
376 unsigned HOST_WIDE_INT offset, bitpos;
377 rtx op0 = str_rtx;
378 int byte_offset;
379 rtx orig_value;
381 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
383 while (GET_CODE (op0) == SUBREG)
385 /* The following line once was done only if WORDS_BIG_ENDIAN,
386 but I think that is a mistake. WORDS_BIG_ENDIAN is
387 meaningful at a much higher level; when structures are copied
388 between memory and regs, the higher-numbered regs
389 always get higher addresses. */
390 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
391 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
393 byte_offset = 0;
395 /* Paradoxical subregs need special handling on big endian machines. */
396 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
398 int difference = inner_mode_size - outer_mode_size;
400 if (WORDS_BIG_ENDIAN)
401 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
402 if (BYTES_BIG_ENDIAN)
403 byte_offset += difference % UNITS_PER_WORD;
405 else
406 byte_offset = SUBREG_BYTE (op0);
408 bitnum += byte_offset * BITS_PER_UNIT;
409 op0 = SUBREG_REG (op0);
412 /* No action is needed if the target is a register and if the field
413 lies completely outside that register. This can occur if the source
414 code contains an out-of-bounds access to a small array. */
415 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
416 return true;
418 /* Use vec_set patterns for inserting parts of vectors whenever
419 available. */
420 if (VECTOR_MODE_P (GET_MODE (op0))
421 && !MEM_P (op0)
422 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
423 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
424 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
425 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
427 enum machine_mode outermode = GET_MODE (op0);
428 enum machine_mode innermode = GET_MODE_INNER (outermode);
429 int icode = (int) optab_handler (vec_set_optab, outermode);
430 int pos = bitnum / GET_MODE_BITSIZE (innermode);
431 rtx rtxpos = GEN_INT (pos);
432 rtx src = value;
433 rtx dest = op0;
434 rtx pat, seq;
435 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
436 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
437 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
439 start_sequence ();
441 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
442 src = copy_to_mode_reg (mode1, src);
444 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
445 rtxpos = copy_to_mode_reg (mode1, rtxpos);
447 /* We could handle this, but we should always be called with a pseudo
448 for our targets and all insns should take them as outputs. */
449 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
450 && (*insn_data[icode].operand[1].predicate) (src, mode1)
451 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
452 pat = GEN_FCN (icode) (dest, src, rtxpos);
453 seq = get_insns ();
454 end_sequence ();
455 if (pat)
457 emit_insn (seq);
458 emit_insn (pat);
459 return true;
463 /* If the target is a register, overwriting the entire object, or storing
464 a full-word or multi-word field can be done with just a SUBREG.
466 If the target is memory, storing any naturally aligned field can be
467 done with a simple store. For targets that support fast unaligned
468 memory, any naturally sized, unit aligned field can be done directly. */
470 offset = bitnum / unit;
471 bitpos = bitnum % unit;
472 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
473 + (offset * UNITS_PER_WORD);
475 if (bitpos == 0
476 && bitsize == GET_MODE_BITSIZE (fieldmode)
477 && (!MEM_P (op0)
478 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
479 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
480 && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
481 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
482 || (offset * BITS_PER_UNIT % bitsize == 0
483 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
485 if (MEM_P (op0))
486 op0 = adjust_address (op0, fieldmode, offset);
487 else if (GET_MODE (op0) != fieldmode)
488 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
489 byte_offset);
490 emit_move_insn (op0, value);
491 return true;
494 /* Make sure we are playing with integral modes. Pun with subregs
495 if we aren't. This must come after the entire register case above,
496 since that case is valid for any mode. The following cases are only
497 valid for integral modes. */
499 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
500 if (imode != GET_MODE (op0))
502 if (MEM_P (op0))
503 op0 = adjust_address (op0, imode, 0);
504 else
506 gcc_assert (imode != BLKmode);
507 op0 = gen_lowpart (imode, op0);
512 /* We may be accessing data outside the field, which means
513 we can alias adjacent data. */
514 if (MEM_P (op0))
516 op0 = shallow_copy_rtx (op0);
517 set_mem_alias_set (op0, 0);
518 set_mem_expr (op0, 0);
521 /* If OP0 is a register, BITPOS must count within a word.
522 But as we have it, it counts within whatever size OP0 now has.
523 On a bigendian machine, these are not the same, so convert. */
524 if (BYTES_BIG_ENDIAN
525 && !MEM_P (op0)
526 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
527 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
529 /* Storing an lsb-aligned field in a register
530 can be done with a movestrict instruction. */
532 if (!MEM_P (op0)
533 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
534 && bitsize == GET_MODE_BITSIZE (fieldmode)
535 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
537 int icode = optab_handler (movstrict_optab, fieldmode);
538 rtx insn;
539 rtx start = get_last_insn ();
540 rtx arg0 = op0;
542 /* Get appropriate low part of the value being stored. */
543 if (CONST_INT_P (value) || REG_P (value))
544 value = gen_lowpart (fieldmode, value);
545 else if (!(GET_CODE (value) == SYMBOL_REF
546 || GET_CODE (value) == LABEL_REF
547 || GET_CODE (value) == CONST))
548 value = convert_to_mode (fieldmode, value, 0);
550 if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
551 value = copy_to_mode_reg (fieldmode, value);
553 if (GET_CODE (op0) == SUBREG)
555 /* Else we've got some float mode source being extracted into
556 a different float mode destination -- this combination of
557 subregs results in Severe Tire Damage. */
558 gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
559 || GET_MODE_CLASS (fieldmode) == MODE_INT
560 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
561 arg0 = SUBREG_REG (op0);
564 insn = (GEN_FCN (icode)
565 (gen_rtx_SUBREG (fieldmode, arg0,
566 (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
567 + (offset * UNITS_PER_WORD)),
568 value));
569 if (insn)
571 emit_insn (insn);
572 return true;
574 delete_insns_since (start);
577 /* Handle fields bigger than a word. */
579 if (bitsize > BITS_PER_WORD)
581 /* Here we transfer the words of the field
582 in the order least significant first.
583 This is because the most significant word is the one which may
584 be less than full.
585 However, only do that if the value is not BLKmode. */
587 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
588 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
589 unsigned int i;
590 rtx last;
592 /* This is the mode we must force value to, so that there will be enough
593 subwords to extract. Note that fieldmode will often (always?) be
594 VOIDmode, because that is what store_field uses to indicate that this
595 is a bit field, but passing VOIDmode to operand_subword_force
596 is not allowed. */
597 fieldmode = GET_MODE (value);
598 if (fieldmode == VOIDmode)
599 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
601 last = get_last_insn ();
602 for (i = 0; i < nwords; i++)
604 /* If I is 0, use the low-order word in both field and target;
605 if I is 1, use the next to lowest word; and so on. */
606 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
607 unsigned int bit_offset = (backwards
608 ? MAX ((int) bitsize - ((int) i + 1)
609 * BITS_PER_WORD,
610 0)
611 : (int) i * BITS_PER_WORD);
612 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
614 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
615 bitsize - i * BITS_PER_WORD),
616 bitnum + bit_offset, word_mode,
617 value_word, fallback_p))
619 delete_insns_since (last);
620 return false;
623 return true;
626 /* From here on we can assume that the field to be stored in lies
627 within a single word, since it is shorter than a word. */
629 /* OFFSET is the number of words or bytes (UNIT says which)
630 from STR_RTX to the first word or byte containing part of the field. */
632 if (!MEM_P (op0))
634 if (offset != 0
635 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
637 if (!REG_P (op0))
639 /* Since this is a destination (lvalue), we can't copy
640 it to a pseudo. We can remove a SUBREG that does not
641 change the size of the operand. Such a SUBREG may
642 have been added above. */
643 gcc_assert (GET_CODE (op0) == SUBREG
644 && (GET_MODE_SIZE (GET_MODE (op0))
645 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
646 op0 = SUBREG_REG (op0);
648 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
649 op0, (offset * UNITS_PER_WORD));
651 offset = 0;
654 /* If VALUE has a floating-point or complex mode, access it as an
655 integer of the corresponding size. This can occur on a machine
656 with 64 bit registers that uses SFmode for float. It can also
657 occur for unaligned float or complex fields. */
658 orig_value = value;
659 if (GET_MODE (value) != VOIDmode
660 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
661 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
663 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
664 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
667 /* Now OFFSET is nonzero only if OP0 is memory
668 and is therefore always measured in bytes. */
670 if (HAVE_insv
671 && GET_MODE (value) != BLKmode
672 && bitsize > 0
673 && GET_MODE_BITSIZE (op_mode) >= bitsize
674 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
675 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
676 && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
677 VOIDmode)
678 && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode))
680 int xbitpos = bitpos;
681 rtx value1;
682 rtx xop0 = op0;
683 rtx last = get_last_insn ();
684 rtx pat;
685 bool copy_back = false;
687 /* Add OFFSET into OP0's address. */
688 if (MEM_P (xop0))
689 xop0 = adjust_address (xop0, byte_mode, offset);
691 /* If xop0 is a register, we need it in OP_MODE
692 to make it acceptable to the format of insv. */
693 if (GET_CODE (xop0) == SUBREG)
694 /* We can't just change the mode, because this might clobber op0,
695 and we will need the original value of op0 if insv fails. */
696 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
697 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
698 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
700 /* If the destination is a paradoxical subreg such that we need a
701 truncate to the inner mode, perform the insertion on a temporary and
702 truncate the result to the original destination. Note that we can't
703 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
704 X) 0)) is (reg:N X). */
705 if (GET_CODE (xop0) == SUBREG
706 && REG_P (SUBREG_REG (xop0))
707 && (!TRULY_NOOP_TRUNCATION
708 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
709 GET_MODE_BITSIZE (op_mode))))
711 rtx tem = gen_reg_rtx (op_mode);
712 emit_move_insn (tem, xop0);
713 xop0 = tem;
714 copy_back = true;
717 /* On big-endian machines, we count bits from the most significant.
718 If the bit field insn does not, we must invert. */
720 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
721 xbitpos = unit - bitsize - xbitpos;
723 /* We have been counting XBITPOS within UNIT.
724 Count instead within the size of the register. */
725 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
726 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
728 unit = GET_MODE_BITSIZE (op_mode);
730 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
731 value1 = value;
732 if (GET_MODE (value) != op_mode)
734 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
736 /* Optimization: Don't bother really extending VALUE
737 if it has all the bits we will actually use. However,
738 if we must narrow it, be sure we do it correctly. */
740 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
742 rtx tmp;
744 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
745 if (! tmp)
746 tmp = simplify_gen_subreg (op_mode,
747 force_reg (GET_MODE (value),
748 value1),
749 GET_MODE (value), 0);
750 value1 = tmp;
752 else
753 value1 = gen_lowpart (op_mode, value1);
755 else if (CONST_INT_P (value))
756 value1 = gen_int_mode (INTVAL (value), op_mode);
757 else
758 /* Parse phase is supposed to make VALUE's data type
759 match that of the component reference, which is a type
760 at least as wide as the field; so VALUE should have
761 a mode that corresponds to that type. */
762 gcc_assert (CONSTANT_P (value));
765 /* If this machine's insv insists on a register,
766 get VALUE1 into a register. */
767 if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
768 (value1, op_mode)))
769 value1 = force_reg (op_mode, value1);
771 pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
772 if (pat)
774 emit_insn (pat);
776 if (copy_back)
777 convert_move (op0, xop0, true);
778 return true;
780 delete_insns_since (last);
783 /* If OP0 is a memory, try copying it to a register and seeing if a
784 cheap register alternative is available. */
785 if (HAVE_insv && MEM_P (op0))
787 enum machine_mode bestmode;
789 /* Get the mode to use for inserting into this field. If OP0 is
790 BLKmode, get the smallest mode consistent with the alignment. If
791 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
792 mode. Otherwise, use the smallest mode containing the field. */
794 if (GET_MODE (op0) == BLKmode
795 || (op_mode != MAX_MACHINE_MODE
796 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
797 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
798 (op_mode == MAX_MACHINE_MODE
799 ? VOIDmode : op_mode),
800 MEM_VOLATILE_P (op0));
801 else
802 bestmode = GET_MODE (op0);
804 if (bestmode != VOIDmode
805 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
806 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
807 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
809 rtx last, tempreg, xop0;
810 unsigned HOST_WIDE_INT xoffset, xbitpos;
812 last = get_last_insn ();
814 /* Adjust address to point to the containing unit of
815 that mode. Compute the offset as a multiple of this unit,
816 counting in bytes. */
817 unit = GET_MODE_BITSIZE (bestmode);
818 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
819 xbitpos = bitnum % unit;
820 xop0 = adjust_address (op0, bestmode, xoffset);
822 /* Fetch that unit, store the bitfield in it, then store
823 the unit. */
824 tempreg = copy_to_reg (xop0);
825 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
826 fieldmode, orig_value, false))
828 emit_move_insn (xop0, tempreg);
829 return true;
831 delete_insns_since (last);
835 if (!fallback_p)
836 return false;
838 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
839 return true;
842 /* Generate code to store value from rtx VALUE
843 into a bit-field within structure STR_RTX
844 containing BITSIZE bits starting at bit BITNUM.
845 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
847 void
848 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
849 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
850 rtx value)
852 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
853 gcc_unreachable ();
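/* A minimal usage sketch (the numbers are only illustrative): to store VALUE
   into a 3-bit field that begins 10 bits into STR_RTX one would call

     store_bit_field (str_rtx, 3, 10, fieldmode, value);

   with FIELDMODE taken from the field's FIELD_DECL.  The call either succeeds
   through one of the strategies tried in store_bit_field_1 or ends up in
   store_fixed_bit_field.  */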
856 /* Use shifts and boolean operations to store VALUE
857 into a bit field of width BITSIZE
858 in a memory location specified by OP0 except offset by OFFSET bytes.
859 (OFFSET must be 0 if OP0 is a register.)
860 The field starts at position BITPOS within the byte.
861 (If OP0 is a register, it may be a full word or a narrower mode,
862 but BITPOS still counts within a full word,
863 which is significant on bigendian machines.) */
865 static void
866 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
867 unsigned HOST_WIDE_INT bitsize,
868 unsigned HOST_WIDE_INT bitpos, rtx value)
870 enum machine_mode mode;
871 unsigned int total_bits = BITS_PER_WORD;
872 rtx temp;
873 int all_zero = 0;
874 int all_one = 0;
876 /* There is a case not handled here:
877 a structure with a known alignment of just a halfword
878 and a field split across two aligned halfwords within the structure.
879 Or likewise a structure with a known alignment of just a byte
880 and a field split across two bytes.
881 Such cases are not supposed to be able to occur. */
883 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
885 gcc_assert (!offset);
886 /* Special treatment for a bit field split across two registers. */
887 if (bitsize + bitpos > BITS_PER_WORD)
889 store_split_bit_field (op0, bitsize, bitpos, value);
890 return;
893 else
895 /* Get the proper mode to use for this field. We want a mode that
896 includes the entire field. If such a mode would be larger than
897 a word, we won't be doing the extraction the normal way.
898 We don't want a mode bigger than the destination. */
900 mode = GET_MODE (op0);
901 if (GET_MODE_BITSIZE (mode) == 0
902 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
903 mode = word_mode;
905 if (MEM_VOLATILE_P (op0)
906 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
907 && flag_strict_volatile_bitfields > 0)
908 mode = GET_MODE (op0);
909 else
910 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
911 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
913 if (mode == VOIDmode)
915 /* The only way this should occur is if the field spans word
916 boundaries. */
917 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
918 value);
919 return;
922 total_bits = GET_MODE_BITSIZE (mode);
924 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
925 be in the range 0 to total_bits-1, and put any excess bytes in
926 OFFSET. */
927 if (bitpos >= total_bits)
929 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
930 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
931 * BITS_PER_UNIT);
934 /* Get ref to an aligned byte, halfword, or word containing the field.
935 Adjust BITPOS to be position within a word,
936 and OFFSET to be the offset of that word.
937 Then alter OP0 to refer to that word. */
938 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
939 offset -= (offset % (total_bits / BITS_PER_UNIT));
940 op0 = adjust_address (op0, mode, offset);
943 mode = GET_MODE (op0);
945 /* Now MODE is either some integral mode for a MEM as OP0,
946 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
947 The bit field is contained entirely within OP0.
948 BITPOS is the starting bit number within OP0.
949 (OP0's mode may actually be narrower than MODE.) */
951 if (BYTES_BIG_ENDIAN)
952 /* BITPOS is the distance between our msb
953 and that of the containing datum.
954 Convert it to the distance from the lsb. */
955 bitpos = total_bits - bitsize - bitpos;
957 /* Now BITPOS is always the distance between our lsb
958 and that of OP0. */
960 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
961 we must first convert its mode to MODE. */
963 if (CONST_INT_P (value))
965 HOST_WIDE_INT v = INTVAL (value);
967 if (bitsize < HOST_BITS_PER_WIDE_INT)
968 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
970 if (v == 0)
971 all_zero = 1;
972 else if ((bitsize < HOST_BITS_PER_WIDE_INT
973 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
974 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
975 all_one = 1;
977 value = lshift_value (mode, value, bitpos, bitsize);
979 else
981 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
982 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
984 if (GET_MODE (value) != mode)
985 value = convert_to_mode (mode, value, 1);
987 if (must_and)
988 value = expand_binop (mode, and_optab, value,
989 mask_rtx (mode, 0, bitsize, 0),
990 NULL_RTX, 1, OPTAB_LIB_WIDEN);
991 if (bitpos > 0)
992 value = expand_shift (LSHIFT_EXPR, mode, value,
993 build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
996 /* Now clear the chosen bits in OP0,
997 except that if VALUE is -1 we need not bother. */
998 /* We keep the intermediates in registers to allow CSE to combine
999 consecutive bitfield assignments. */
1001 temp = force_reg (mode, op0);
1003 if (! all_one)
1005 temp = expand_binop (mode, and_optab, temp,
1006 mask_rtx (mode, bitpos, bitsize, 1),
1007 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1008 temp = force_reg (mode, temp);
1011 /* Now logical-or VALUE into OP0, unless it is zero. */
1013 if (! all_zero)
1015 temp = expand_binop (mode, ior_optab, temp, value,
1016 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1017 temp = force_reg (mode, temp);
1020 if (op0 != temp)
1022 op0 = copy_rtx (op0);
1023 emit_move_insn (op0, temp);
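/* In scalar terms, the sequence above computes (sketch only), for a field of
   BITSIZE bits starting BITPOS bits from the lsb,

     mask = ((1 << bitsize) - 1) << bitpos;
     op0  = (op0 & ~mask) | ((value << bitpos) & mask);

   where the AND is skipped when VALUE is known to be all ones and the IOR is
   skipped when it is zero.  */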
1027 /* Store a bit field that is split across multiple accessible memory objects.
1029 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1030 BITSIZE is the field width; BITPOS the position of its first bit
1031 (within the word).
1032 VALUE is the value to store.
1034 This does not yet handle fields wider than BITS_PER_WORD. */
1036 static void
1037 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1038 unsigned HOST_WIDE_INT bitpos, rtx value)
1040 unsigned int unit;
1041 unsigned int bitsdone = 0;
1043 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1044 much at a time. */
1045 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1046 unit = BITS_PER_WORD;
1047 else
1048 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1050 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1051 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1052 that VALUE might be a floating-point constant. */
1053 if (CONSTANT_P (value) && !CONST_INT_P (value))
1055 rtx word = gen_lowpart_common (word_mode, value);
1057 if (word && (value != word))
1058 value = word;
1059 else
1060 value = gen_lowpart_common (word_mode,
1061 force_reg (GET_MODE (value) != VOIDmode
1062 ? GET_MODE (value)
1063 : word_mode, value));
1066 while (bitsdone < bitsize)
1068 unsigned HOST_WIDE_INT thissize;
1069 rtx part, word;
1070 unsigned HOST_WIDE_INT thispos;
1071 unsigned HOST_WIDE_INT offset;
1073 offset = (bitpos + bitsdone) / unit;
1074 thispos = (bitpos + bitsdone) % unit;
1076 /* THISSIZE must not overrun a word boundary. Otherwise,
1077 store_fixed_bit_field will call us again, and we will mutually
1078 recurse forever. */
1079 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1080 thissize = MIN (thissize, unit - thispos);
1082 if (BYTES_BIG_ENDIAN)
1084 int total_bits;
1086 /* We must do an endian conversion exactly the same way as it is
1087 done in extract_bit_field, so that the two calls to
1088 extract_fixed_bit_field will have comparable arguments. */
1089 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1090 total_bits = BITS_PER_WORD;
1091 else
1092 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1094 /* Fetch successively less significant portions. */
1095 if (CONST_INT_P (value))
1096 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1097 >> (bitsize - bitsdone - thissize))
1098 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1099 else
1100 /* The args are chosen so that the last part includes the
1101 lsb. Give extract_bit_field the value it needs (with
1102 endianness compensation) to fetch the piece we want. */
1103 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1104 total_bits - bitsize + bitsdone,
1105 NULL_RTX, 1);
1107 else
1109 /* Fetch successively more significant portions. */
1110 if (CONST_INT_P (value))
1111 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1112 >> bitsdone)
1113 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1114 else
1115 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1116 bitsdone, NULL_RTX, 1);
1119 /* If OP0 is a register, then handle OFFSET here.
1121 When handling multiword bitfields, extract_bit_field may pass
1122 down a word_mode SUBREG of a larger REG for a bitfield that actually
1123 crosses a word boundary. Thus, for a SUBREG, we must find
1124 the current word starting from the base register. */
1125 if (GET_CODE (op0) == SUBREG)
1127 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1128 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1129 GET_MODE (SUBREG_REG (op0)));
1130 offset = 0;
1132 else if (REG_P (op0))
1134 word = operand_subword_force (op0, offset, GET_MODE (op0));
1135 offset = 0;
1137 else
1138 word = op0;
1140 /* OFFSET is in UNITs, and UNIT is in bits.
1141 store_fixed_bit_field wants offset in bytes. */
1142 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1143 thispos, part);
1144 bitsdone += thissize;
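/* For example (illustrative numbers), with 32-bit words a 16-bit field that
   starts at bit position 24 of its word is stored in two pieces: 8 bits in
   the first word and the remaining 8 bits in the next word, each piece going
   through store_fixed_bit_field; the endianness checks above only decide
   which part of VALUE feeds which piece.  */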
1148 /* A subroutine of extract_bit_field_1 that converts return value X
1149 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1150 to extract_bit_field. */
1152 static rtx
1153 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1154 enum machine_mode tmode, bool unsignedp)
1156 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1157 return x;
1159 /* If the x mode is not a scalar integral, first convert to the
1160 integer mode of that size and then access it as a floating-point
1161 value via a SUBREG. */
1162 if (!SCALAR_INT_MODE_P (tmode))
1164 enum machine_mode smode;
1166 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1167 x = convert_to_mode (smode, x, unsignedp);
1168 x = force_reg (smode, x);
1169 return gen_lowpart (tmode, x);
1172 return convert_to_mode (tmode, x, unsignedp);
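/* For instance, on a target where SFmode is 32 bits wide, extracting into
   SFmode first converts the extracted value to SImode and then reinterprets
   it as SFmode via gen_lowpart, since a real int-to-float conversion would
   change the bits rather than just relabel them.  */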
1175 /* A subroutine of extract_bit_field, with the same arguments.
1176 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1177 if we can find no other means of implementing the operation.
1178 If FALLBACK_P is false, return NULL instead. */
1180 static rtx
1181 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1182 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1183 enum machine_mode mode, enum machine_mode tmode,
1184 bool fallback_p)
1186 unsigned int unit
1187 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1188 unsigned HOST_WIDE_INT offset, bitpos;
1189 rtx op0 = str_rtx;
1190 enum machine_mode int_mode;
1191 enum machine_mode ext_mode;
1192 enum machine_mode mode1;
1193 enum insn_code icode;
1194 int byte_offset;
1196 if (tmode == VOIDmode)
1197 tmode = mode;
1199 while (GET_CODE (op0) == SUBREG)
1201 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1202 op0 = SUBREG_REG (op0);
1205 /* If we have an out-of-bounds access to a register, just return an
1206 uninitialized register of the required mode. This can occur if the
1207 source code contains an out-of-bounds access to a small array. */
1208 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1209 return gen_reg_rtx (tmode);
1211 if (REG_P (op0)
1212 && mode == GET_MODE (op0)
1213 && bitnum == 0
1214 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1216 /* We're trying to extract a full register from itself. */
1217 return op0;
1220 /* See if we can get a better vector mode before extracting. */
1221 if (VECTOR_MODE_P (GET_MODE (op0))
1222 && !MEM_P (op0)
1223 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1225 enum machine_mode new_mode;
1226 int nunits = GET_MODE_NUNITS (GET_MODE (op0));
1228 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1229 new_mode = MIN_MODE_VECTOR_FLOAT;
1230 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1231 new_mode = MIN_MODE_VECTOR_FRACT;
1232 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1233 new_mode = MIN_MODE_VECTOR_UFRACT;
1234 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1235 new_mode = MIN_MODE_VECTOR_ACCUM;
1236 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1237 new_mode = MIN_MODE_VECTOR_UACCUM;
1238 else
1239 new_mode = MIN_MODE_VECTOR_INT;
1241 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1242 if (GET_MODE_NUNITS (new_mode) == nunits
1243 && GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1244 && targetm.vector_mode_supported_p (new_mode))
1245 break;
1246 if (new_mode != VOIDmode)
1247 op0 = gen_lowpart (new_mode, op0);
1250 /* Use vec_extract patterns for extracting parts of vectors whenever
1251 available. */
1252 if (VECTOR_MODE_P (GET_MODE (op0))
1253 && !MEM_P (op0)
1254 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1255 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1256 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1258 enum machine_mode outermode = GET_MODE (op0);
1259 enum machine_mode innermode = GET_MODE_INNER (outermode);
1260 int icode = (int) optab_handler (vec_extract_optab, outermode);
1261 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1262 rtx rtxpos = GEN_INT (pos);
1263 rtx src = op0;
1264 rtx dest = NULL, pat, seq;
1265 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1266 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1267 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1269 if (innermode == tmode || innermode == mode)
1270 dest = target;
1272 if (!dest)
1273 dest = gen_reg_rtx (innermode);
1275 start_sequence ();
1277 if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1278 dest = copy_to_mode_reg (mode0, dest);
1280 if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1281 src = copy_to_mode_reg (mode1, src);
1283 if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
1284 rtxpos = copy_to_mode_reg (mode1, rtxpos);
1286 /* We could handle this, but we should always be called with a pseudo
1287 for our targets and all insns should take them as outputs. */
1288 gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
1289 && (*insn_data[icode].operand[1].predicate) (src, mode1)
1290 && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
1292 pat = GEN_FCN (icode) (dest, src, rtxpos);
1293 seq = get_insns ();
1294 end_sequence ();
1295 if (pat)
1297 emit_insn (seq);
1298 emit_insn (pat);
1299 if (mode0 != mode)
1300 return gen_lowpart (tmode, dest);
1301 return dest;
1305 /* Make sure we are playing with integral modes. Pun with subregs
1306 if we aren't. */
1308 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1309 if (imode != GET_MODE (op0))
1311 if (MEM_P (op0))
1312 op0 = adjust_address (op0, imode, 0);
1313 else if (imode != BLKmode)
1315 op0 = gen_lowpart (imode, op0);
1317 /* If we got a SUBREG, force it into a register since we
1318 aren't going to be able to do another SUBREG on it. */
1319 if (GET_CODE (op0) == SUBREG)
1320 op0 = force_reg (imode, op0);
1322 else if (REG_P (op0))
1324 rtx reg, subreg;
1325 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1326 MODE_INT);
1327 reg = gen_reg_rtx (imode);
1328 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1329 emit_move_insn (subreg, op0);
1330 op0 = reg;
1331 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1333 else
1335 rtx mem = assign_stack_temp (GET_MODE (op0),
1336 GET_MODE_SIZE (GET_MODE (op0)), 0);
1337 emit_move_insn (mem, op0);
1338 op0 = adjust_address (mem, BLKmode, 0);
1343 /* We may be accessing data outside the field, which means
1344 we can alias adjacent data. */
1345 if (MEM_P (op0))
1347 op0 = shallow_copy_rtx (op0);
1348 set_mem_alias_set (op0, 0);
1349 set_mem_expr (op0, 0);
1352 /* Extraction of a full-word or multi-word value from a structure
1353 in a register or aligned memory can be done with just a SUBREG.
1354 A subword value in the least significant part of a register
1355 can also be extracted with a SUBREG. For this, we need the
1356 byte offset of the value in op0. */
1358 bitpos = bitnum % unit;
1359 offset = bitnum / unit;
1360 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1362 /* If OP0 is a register, BITPOS must count within a word.
1363 But as we have it, it counts within whatever size OP0 now has.
1364 On a bigendian machine, these are not the same, so convert. */
1365 if (BYTES_BIG_ENDIAN
1366 && !MEM_P (op0)
1367 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1368 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1370 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1371 If that's wrong, the solution is to test for it and set TARGET to 0
1372 if needed. */
1374 /* Only scalar integer modes can be converted via subregs. There is an
1375 additional problem for FP modes here in that they can have a precision
1376 which is different from the size. mode_for_size uses precision, but
1377 we want a mode based on the size, so we must avoid calling it for FP
1378 modes. */
1379 mode1 = (SCALAR_INT_MODE_P (tmode)
1380 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1381 : mode);
1383 /* If the bitfield is volatile, we need to make sure the access
1384 remains on a type-aligned boundary. */
1385 if (GET_CODE (op0) == MEM
1386 && MEM_VOLATILE_P (op0)
1387 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1388 && flag_strict_volatile_bitfields > 0)
1389 goto no_subreg_mode_swap;
1391 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1392 && bitpos % BITS_PER_WORD == 0)
1393 || (mode1 != BLKmode
1394 /* ??? The big endian test here is wrong. This is correct
1395 if the value is in a register, and if mode_for_size is not
1396 the same mode as op0. This causes us to get unnecessarily
1397 inefficient code from the Thumb port when -mbig-endian. */
1398 && (BYTES_BIG_ENDIAN
1399 ? bitpos + bitsize == BITS_PER_WORD
1400 : bitpos == 0)))
1401 && ((!MEM_P (op0)
1402 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1403 GET_MODE_BITSIZE (GET_MODE (op0)))
1404 && GET_MODE_SIZE (mode1) != 0
1405 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1406 || (MEM_P (op0)
1407 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1408 || (offset * BITS_PER_UNIT % bitsize == 0
1409 && MEM_ALIGN (op0) % bitsize == 0)))))
1411 if (MEM_P (op0))
1412 op0 = adjust_address (op0, mode1, offset);
1413 else if (mode1 != GET_MODE (op0))
1415 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1416 byte_offset);
1417 if (sub == NULL)
1418 goto no_subreg_mode_swap;
1419 op0 = sub;
1421 if (mode1 != mode)
1422 return convert_to_mode (tmode, op0, unsignedp);
1423 return op0;
1425 no_subreg_mode_swap:
1427 /* Handle fields bigger than a word. */
1429 if (bitsize > BITS_PER_WORD)
1431 /* Here we transfer the words of the field
1432 in the order least significant first.
1433 This is because the most significant word is the one which may
1434 be less than full. */
1436 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1437 unsigned int i;
1439 if (target == 0 || !REG_P (target))
1440 target = gen_reg_rtx (mode);
1442 /* Indicate for flow that the entire target reg is being set. */
1443 emit_clobber (target);
1445 for (i = 0; i < nwords; i++)
1447 /* If I is 0, use the low-order word in both field and target;
1448 if I is 1, use the next to lowest word; and so on. */
1449 /* Word number in TARGET to use. */
1450 unsigned int wordnum
1451 = (WORDS_BIG_ENDIAN
1452 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1453 : i);
1454 /* Offset from start of field in OP0. */
1455 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1456 ? MAX (0, ((int) bitsize - ((int) i + 1)
1457 * (int) BITS_PER_WORD))
1458 : (int) i * BITS_PER_WORD);
1459 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1460 rtx result_part
1461 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1462 bitsize - i * BITS_PER_WORD),
1463 bitnum + bit_offset, 1, target_part, mode,
1464 word_mode);
1466 gcc_assert (target_part);
1468 if (result_part != target_part)
1469 emit_move_insn (target_part, result_part);
1472 if (unsignedp)
1474 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1475 need to be zero'd out. */
1476 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1478 unsigned int i, total_words;
1480 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1481 for (i = nwords; i < total_words; i++)
1482 emit_move_insn
1483 (operand_subword (target,
1484 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1485 1, VOIDmode),
1486 const0_rtx);
1488 return target;
1491 /* Signed bit field: sign-extend with two arithmetic shifts. */
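/* In C terms the two shifts below compute, roughly,

     target = ((signed) (target << (width - bitsize))) >> (width - bitsize)

   with WIDTH = GET_MODE_BITSIZE (mode): the left shift moves the field's sign
   bit into the msb and the arithmetic right shift replicates it downwards.  */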
1492 target = expand_shift (LSHIFT_EXPR, mode, target,
1493 build_int_cst (NULL_TREE,
1494 GET_MODE_BITSIZE (mode) - bitsize),
1495 NULL_RTX, 0);
1496 return expand_shift (RSHIFT_EXPR, mode, target,
1497 build_int_cst (NULL_TREE,
1498 GET_MODE_BITSIZE (mode) - bitsize),
1499 NULL_RTX, 0);
1502 /* From here on we know the desired field is smaller than a word. */
1504 /* Check if there is a correspondingly-sized integer field, so we can
1505 safely extract it as one size of integer, if necessary; then
1506 truncate or extend to the size that is wanted; then use SUBREGs or
1507 convert_to_mode to get one of the modes we really wanted. */
1509 int_mode = int_mode_for_mode (tmode);
1510 if (int_mode == BLKmode)
1511 int_mode = int_mode_for_mode (mode);
1512 /* Should probably push op0 out to memory and then do a load. */
1513 gcc_assert (int_mode != BLKmode);
1515 /* OFFSET is the number of words or bytes (UNIT says which)
1516 from STR_RTX to the first word or byte containing part of the field. */
1517 if (!MEM_P (op0))
1519 if (offset != 0
1520 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1522 if (!REG_P (op0))
1523 op0 = copy_to_reg (op0);
1524 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1525 op0, (offset * UNITS_PER_WORD));
1527 offset = 0;
1530 /* Now OFFSET is nonzero only for memory operands. */
1531 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1532 icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv;
1533 if (ext_mode != MAX_MACHINE_MODE
1534 && bitsize > 0
1535 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1536 /* If op0 is a register, we need it in EXT_MODE to make it
1537 acceptable to the format of ext(z)v. */
1538 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1539 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1540 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode)))
1541 && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0)))
1543 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1544 rtx bitsize_rtx, bitpos_rtx;
1545 rtx last = get_last_insn ();
1546 rtx xop0 = op0;
1547 rtx xtarget = target;
1548 rtx xspec_target = target;
1549 rtx xspec_target_subreg = 0;
1550 rtx pat;
1552 /* If op0 is a register, we need it in EXT_MODE to make it
1553 acceptable to the format of ext(z)v. */
1554 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1555 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1556 if (MEM_P (xop0))
1557 /* Get ref to first byte containing part of the field. */
1558 xop0 = adjust_address (xop0, byte_mode, xoffset);
1560 /* On big-endian machines, we count bits from the most significant.
1561 If the bit field insn does not, we must invert. */
1562 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1563 xbitpos = unit - bitsize - xbitpos;
1565 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1566 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1567 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1569 unit = GET_MODE_BITSIZE (ext_mode);
1571 if (xtarget == 0)
1572 xtarget = xspec_target = gen_reg_rtx (tmode);
1574 if (GET_MODE (xtarget) != ext_mode)
1576 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1577 between the mode of the extraction (word_mode) and the target
1578 mode. Instead, create a temporary and use convert_move to set
1579 the target. */
1580 if (REG_P (xtarget)
1581 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1582 GET_MODE_BITSIZE (ext_mode)))
1584 xtarget = gen_lowpart (ext_mode, xtarget);
1585 if (GET_MODE_SIZE (ext_mode)
1586 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1587 xspec_target_subreg = xtarget;
1589 else
1590 xtarget = gen_reg_rtx (ext_mode);
1593 /* If this machine's ext(z)v insists on a register target,
1594 make sure we have one. */
1595 if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode))
1596 xtarget = gen_reg_rtx (ext_mode);
1598 bitsize_rtx = GEN_INT (bitsize);
1599 bitpos_rtx = GEN_INT (xbitpos);
1601 pat = (unsignedp
1602 ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx)
1603 : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx));
1604 if (pat)
1606 emit_insn (pat);
1607 if (xtarget == xspec_target)
1608 return xtarget;
1609 if (xtarget == xspec_target_subreg)
1610 return xspec_target;
1611 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1613 delete_insns_since (last);
1616 /* If OP0 is a memory, try copying it to a register and seeing if a
1617 cheap register alternative is available. */
1618 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1620 enum machine_mode bestmode;
1622 /* Get the mode to use for inserting into this field. If
1623 OP0 is BLKmode, get the smallest mode consistent with the
1624 alignment. If OP0 is a non-BLKmode object that is no
1625 wider than EXT_MODE, use its mode. Otherwise, use the
1626 smallest mode containing the field. */
1628 if (GET_MODE (op0) == BLKmode
1629 || (ext_mode != MAX_MACHINE_MODE
1630 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1631 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1632 (ext_mode == MAX_MACHINE_MODE
1633 ? VOIDmode : ext_mode),
1634 MEM_VOLATILE_P (op0));
1635 else
1636 bestmode = GET_MODE (op0);
1638 if (bestmode != VOIDmode
1639 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1640 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1642 unsigned HOST_WIDE_INT xoffset, xbitpos;
1644 /* Compute the offset as a multiple of this unit,
1645 counting in bytes. */
1646 unit = GET_MODE_BITSIZE (bestmode);
1647 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1648 xbitpos = bitnum % unit;
1650 /* Make sure the register is big enough for the whole field. */
1651 if (xoffset * BITS_PER_UNIT + unit
1652 >= offset * BITS_PER_UNIT + bitsize)
1654 rtx last, result, xop0;
1656 last = get_last_insn ();
1658 /* Fetch it to a register in that size. */
1659 xop0 = adjust_address (op0, bestmode, xoffset);
1660 xop0 = force_reg (bestmode, xop0);
1661 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1662 unsignedp, target,
1663 mode, tmode, false);
1664 if (result)
1665 return result;
1667 delete_insns_since (last);
1672 if (!fallback_p)
1673 return NULL;
1675 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1676 bitpos, target, unsignedp);
1677 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1680 /* Generate code to extract a byte-field from STR_RTX
1681 containing BITSIZE bits, starting at BITNUM,
1682 and put it in TARGET if possible (if TARGET is nonzero).
1683 Regardless of TARGET, we return the rtx for where the value is placed.
1685 STR_RTX is the structure containing the byte (a REG or MEM).
1686 UNSIGNEDP is nonzero if this is an unsigned bit field.
1687 MODE is the natural mode of the field value once extracted.
1688 TMODE is the mode the caller would like the value to have;
1689 but the value may be returned with type MODE instead.
1691 If a TARGET is specified and we can store in it at no extra cost,
1692 we do so, and return TARGET.
1693 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1694 if they are equally easy. */
1697 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1698 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1699 enum machine_mode mode, enum machine_mode tmode)
1701 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1702 target, mode, tmode, true);
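/* A minimal usage sketch (the numbers are only illustrative): to read an
   unsigned 3-bit field starting 10 bits into STR_RTX as an SImode value,

     x = extract_bit_field (str_rtx, 3, 10, 1, NULL_RTX, SImode, SImode);

   a preferred destination may be passed instead of NULL_RTX, in which case
   the result may (but need not) be stored there.  */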
1705 /* Extract a bit field using shifts and boolean operations.
1706 Returns an rtx to represent the value.
1707 OP0 addresses a register (word) or memory (byte).
1708 BITPOS says which bit within the word or byte the bit field starts in.
1709 OFFSET says how many bytes farther the bit field starts;
1710 it is 0 if OP0 is a register.
1711 BITSIZE says how many bits long the bit field is.
1712 (If OP0 is a register, it may be narrower than a full word,
1713 but BITPOS still counts within a full word,
1714 which is significant on bigendian machines.)
1716 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1717 If TARGET is nonzero, attempts to store the value there
1718 and return TARGET, but this is not guaranteed.
1719 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
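/* Added illustration (not part of the original source): on a target
   where BYTES_BIG_ENDIAN is false, extracting an 8-bit field that
   starts at bit 4 of a 32-bit word W reduces, in the unsigned case,
   to

       (W >> 4) & 0xff

   and, in the signed case, to a left shift that moves the field's
   msb into the word's msb followed by an arithmetic right shift:

       ((int) (W << 20)) >> 24  */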
1721 static rtx
1722 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1723 unsigned HOST_WIDE_INT offset,
1724 unsigned HOST_WIDE_INT bitsize,
1725 unsigned HOST_WIDE_INT bitpos, rtx target,
1726 int unsignedp)
1728 unsigned int total_bits = BITS_PER_WORD;
1729 enum machine_mode mode;
1731 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1733 /* Special treatment for a bit field split across two registers. */
1734 if (bitsize + bitpos > BITS_PER_WORD)
1735 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1737 else
1739 /* Get the proper mode to use for this field. We want a mode that
1740 includes the entire field. If such a mode would be larger than
1741 a word, we won't be doing the extraction the normal way. */
1743 if (MEM_VOLATILE_P (op0)
1744 && flag_strict_volatile_bitfields > 0)
1746 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1747 mode = GET_MODE (op0);
1748 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1749 mode = GET_MODE (target);
1750 else
1751 mode = tmode;
1753 else
1754 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1755 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1757 if (mode == VOIDmode)
1758 /* The only way this should occur is if the field spans word
1759 boundaries. */
1760 return extract_split_bit_field (op0, bitsize,
1761 bitpos + offset * BITS_PER_UNIT,
1762 unsignedp);
1764 total_bits = GET_MODE_BITSIZE (mode);
1766 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1767 be in the range 0 to total_bits-1, and put any excess bytes in
1768 OFFSET. */
1769 if (bitpos >= total_bits)
1771 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1772 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1773 * BITS_PER_UNIT);
1776 /* If we're accessing a volatile MEM, we can't do the next
1777 alignment step if it results in a multi-word access where we
1778 otherwise wouldn't have one. So, check for that case
1779 here. */
1780 if (MEM_P (op0)
1781 && MEM_VOLATILE_P (op0)
1782 && flag_strict_volatile_bitfields > 0
1783 && bitpos + bitsize <= total_bits
1784 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1786 if (STRICT_ALIGNMENT)
1788 static bool informed_about_misalignment = false;
1789 bool warned;
1791 if (bitsize == total_bits)
1792 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1793 "mis-aligned access used for structure member");
1794 else
1795 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1796 "mis-aligned access used for structure bitfield");
1798 if (! informed_about_misalignment && warned)
1800 informed_about_misalignment = true;
1801 inform (input_location,
1802 "When a volatile object spans multiple type-sized locations,"
1803 " the compiler must choose between using a single mis-aligned access to"
1804 " preserve the volatility, or using multiple aligned accesses to avoid"
1805 " runtime faults. This code may fail at runtime if the hardware does"
1806 " not allow this access.");
1810 else
1813 /* Get ref to an aligned byte, halfword, or word containing the field.
1814 Adjust BITPOS to be position within a word,
1815 and OFFSET to be the offset of that word.
1816 Then alter OP0 to refer to that word. */
1817 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1818 offset -= (offset % (total_bits / BITS_PER_UNIT));
1821 op0 = adjust_address (op0, mode, offset);
1824 mode = GET_MODE (op0);
1826 if (BYTES_BIG_ENDIAN)
1827 /* BITPOS is the distance between our msb and that of OP0.
1828 Convert it to the distance from the lsb. */
1829 bitpos = total_bits - bitsize - bitpos;
1831 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1832 We have reduced the big-endian case to the little-endian case. */
1834 if (unsignedp)
1836 if (bitpos)
1838 /* If the field does not already start at the lsb,
1839 shift it so it does. */
1840 tree amount = build_int_cst (NULL_TREE, bitpos);
1841 /* Maybe propagate the target for the shift. */
1842 /* But not if we will return it--could confuse integrate.c. */
1843 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1844 if (tmode != mode) subtarget = 0;
1845 op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1847 /* Convert the value to the desired mode. */
1848 if (mode != tmode)
1849 op0 = convert_to_mode (tmode, op0, 1);
1851 /* Unless the msb of the field used to be the msb when we shifted,
1852 mask out the upper bits. */
1854 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1855 return expand_binop (GET_MODE (op0), and_optab, op0,
1856 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1857 target, 1, OPTAB_LIB_WIDEN);
1858 return op0;
1861 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1862 then arithmetic-shift its lsb to the lsb of the word. */
1863 op0 = force_reg (mode, op0);
1864 if (mode != tmode)
1865 target = 0;
1867 /* Find the narrowest integer mode that contains the field. */
1869 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1870 mode = GET_MODE_WIDER_MODE (mode))
1871 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1873 op0 = convert_to_mode (mode, op0, 0);
1874 break;
1877 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1879 tree amount
1880 = build_int_cst (NULL_TREE,
1881 GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
1882 /* Maybe propagate the target for the shift. */
1883 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1884 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1887 return expand_shift (RSHIFT_EXPR, mode, op0,
1888 build_int_cst (NULL_TREE,
1889 GET_MODE_BITSIZE (mode) - bitsize),
1890 target, 0);
1893 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1894 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1895 complement of that if COMPLEMENT. The mask is truncated if
1896 necessary to the width of mode MODE. The mask is zero-extended if
1897 BITSIZE+BITPOS is too small for MODE. */
1899 static rtx
1900 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1902 double_int mask;
1904 mask = double_int_mask (bitsize);
1905 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1907 if (complement)
1908 mask = double_int_not (mask);
1910 return immed_double_int_const (mask, mode);
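/* Added example for mask_rtx above: mask_rtx (SImode, 4, 8, 0) builds
   the constant 0x00000ff0 (eight ones followed by four zeros), and
   mask_rtx (SImode, 4, 8, 1) builds its SImode complement,
   0xfffff00f.  */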
1913 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1914 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1916 static rtx
1917 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1919 double_int val;
1921 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1922 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1924 return immed_double_int_const (val, mode);
1927 /* Extract a bit field that is split across two words
1928 and return an RTX for the result.
1930 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1931 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1932 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1934 static rtx
1935 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1936 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1938 unsigned int unit;
1939 unsigned int bitsdone = 0;
1940 rtx result = NULL_RTX;
1941 int first = 1;
1943 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1944 much at a time. */
1945 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1946 unit = BITS_PER_WORD;
1947 else
1948 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1950 while (bitsdone < bitsize)
1952 unsigned HOST_WIDE_INT thissize;
1953 rtx part, word;
1954 unsigned HOST_WIDE_INT thispos;
1955 unsigned HOST_WIDE_INT offset;
1957 offset = (bitpos + bitsdone) / unit;
1958 thispos = (bitpos + bitsdone) % unit;
1960 /* THISSIZE must not overrun a word boundary. Otherwise,
1961 extract_fixed_bit_field will call us again, and we will mutually
1962 recurse forever. */
1963 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1964 thissize = MIN (thissize, unit - thispos);
1966 /* If OP0 is a register, then handle OFFSET here.
1968 When handling multiword bitfields, extract_bit_field may pass
1969 down a word_mode SUBREG of a larger REG for a bitfield that actually
1970 crosses a word boundary. Thus, for a SUBREG, we must find
1971 the current word starting from the base register. */
1972 if (GET_CODE (op0) == SUBREG)
1974 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1975 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1976 GET_MODE (SUBREG_REG (op0)));
1977 offset = 0;
1979 else if (REG_P (op0))
1981 word = operand_subword_force (op0, offset, GET_MODE (op0));
1982 offset = 0;
1984 else
1985 word = op0;
1987 /* Extract the parts in bit-counting order,
1988 whose meaning is determined by BYTES_PER_UNIT.
1989 OFFSET is in UNITs, and UNIT is in bits.
1990 extract_fixed_bit_field wants offset in bytes. */
1991 part = extract_fixed_bit_field (word_mode, word,
1992 offset * unit / BITS_PER_UNIT,
1993 thissize, thispos, 0, 1);
1994 bitsdone += thissize;
1996 /* Shift this part into place for the result. */
1997 if (BYTES_BIG_ENDIAN)
1999 if (bitsize != bitsdone)
2000 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2001 build_int_cst (NULL_TREE, bitsize - bitsdone),
2002 0, 1);
2004 else
2006 if (bitsdone != thissize)
2007 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2008 build_int_cst (NULL_TREE,
2009 bitsdone - thissize), 0, 1);
2012 if (first)
2013 result = part;
2014 else
2015 /* Combine the parts with bitwise or. This works
2016 because we extracted each part as an unsigned bit field. */
2017 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2018 OPTAB_LIB_WIDEN);
2020 first = 0;
2023 /* Unsigned bit field: we are done. */
2024 if (unsignedp)
2025 return result;
2026 /* Signed bit field: sign-extend with two arithmetic shifts. */
2027 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2028 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2029 NULL_RTX, 0);
2030 return expand_shift (RSHIFT_EXPR, word_mode, result,
2031 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
2032 NULL_RTX, 0);
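/* Worked example for extract_split_bit_field above (added commentary):
   with BITS_PER_WORD == 32 and BYTES_BIG_ENDIAN false, a 10-bit field
   at bit position 28 of a register pair is assembled from two parts:
   4 bits taken at position 28 of word 0, then 6 bits taken at
   position 0 of word 1, the latter shifted left by 4 before being
   IORed in; a signed field is then sign-extended by the pair of
   shifts just above.  */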
2035 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2036 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2037 MODE, fill the upper bits with zeros. Fail if the layout of either
2038 mode is unknown (as for CC modes) or if the extraction would involve
2039 unprofitable mode punning. Return the value on success, otherwise
2040 return null.
2042 This is different from gen_lowpart* in these respects:
2044 - the returned value must always be considered an rvalue
2046 - when MODE is wider than SRC_MODE, the extraction involves
2047 a zero extension
2049 - when MODE is smaller than SRC_MODE, the extraction involves
2050 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2052 In other words, this routine performs a computation, whereas the
2053 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2054 operations. */
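/* Added illustration: extract_low_bits (QImode, SImode, x) yields the
   low byte of X as an rvalue; extract_low_bits (SImode, QImode, x)
   yields X zero-extended to SImode; and, when the modes are tieable,
   extract_low_bits (SImode, SFmode, x) reinterprets the float's bit
   pattern as a 32-bit integer.  */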
2057 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2059 enum machine_mode int_mode, src_int_mode;
2061 if (mode == src_mode)
2062 return src;
2064 if (CONSTANT_P (src))
2066 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2067 fails, it will happily create (subreg (symbol_ref)) or similar
2068 invalid SUBREGs. */
2069 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2070 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2071 if (ret)
2072 return ret;
2074 if (GET_MODE (src) == VOIDmode
2075 || !validate_subreg (mode, src_mode, src, byte))
2076 return NULL_RTX;
2078 src = force_reg (GET_MODE (src), src);
2079 return gen_rtx_SUBREG (mode, src, byte);
2082 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2083 return NULL_RTX;
2085 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2086 && MODES_TIEABLE_P (mode, src_mode))
2088 rtx x = gen_lowpart_common (mode, src);
2089 if (x)
2090 return x;
2093 src_int_mode = int_mode_for_mode (src_mode);
2094 int_mode = int_mode_for_mode (mode);
2095 if (src_int_mode == BLKmode || int_mode == BLKmode)
2096 return NULL_RTX;
2098 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2099 return NULL_RTX;
2100 if (!MODES_TIEABLE_P (int_mode, mode))
2101 return NULL_RTX;
2103 src = gen_lowpart (src_int_mode, src);
2104 src = convert_modes (int_mode, src_int_mode, src, true);
2105 src = gen_lowpart (mode, src);
2106 return src;
2109 /* Add INC into TARGET. */
2111 void
2112 expand_inc (rtx target, rtx inc)
2114 rtx value = expand_binop (GET_MODE (target), add_optab,
2115 target, inc,
2116 target, 0, OPTAB_LIB_WIDEN);
2117 if (value != target)
2118 emit_move_insn (target, value);
2121 /* Subtract DEC from TARGET. */
2123 void
2124 expand_dec (rtx target, rtx dec)
2126 rtx value = expand_binop (GET_MODE (target), sub_optab,
2127 target, dec,
2128 target, 0, OPTAB_LIB_WIDEN);
2129 if (value != target)
2130 emit_move_insn (target, value);
2133 /* Output a shift instruction for expression code CODE,
2134 with SHIFTED being the rtx for the value to shift,
2135 and AMOUNT the tree for the amount to shift by.
2136 Store the result in the rtx TARGET, if that is convenient.
2137 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2138 Return the rtx for where the value is. */
2141 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2142 tree amount, rtx target, int unsignedp)
2144 rtx op1, temp = 0;
2145 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2146 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2147 optab lshift_optab = ashl_optab;
2148 optab rshift_arith_optab = ashr_optab;
2149 optab rshift_uns_optab = lshr_optab;
2150 optab lrotate_optab = rotl_optab;
2151 optab rrotate_optab = rotr_optab;
2152 enum machine_mode op1_mode;
2153 int attempt;
2154 bool speed = optimize_insn_for_speed_p ();
2156 op1 = expand_normal (amount);
2157 op1_mode = GET_MODE (op1);
2159 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2160 shift amount is a vector, use the vector/vector shift patterns. */
2161 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2163 lshift_optab = vashl_optab;
2164 rshift_arith_optab = vashr_optab;
2165 rshift_uns_optab = vlshr_optab;
2166 lrotate_optab = vrotl_optab;
2167 rrotate_optab = vrotr_optab;
2170 /* Previously detected shift-counts computed by NEGATE_EXPR
2171 and shifted in the other direction; but that does not work
2172 on all machines. */
2174 if (SHIFT_COUNT_TRUNCATED)
2176 if (CONST_INT_P (op1)
2177 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2178 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2179 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2180 % GET_MODE_BITSIZE (mode));
2181 else if (GET_CODE (op1) == SUBREG
2182 && subreg_lowpart_p (op1)
2183 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2184 op1 = SUBREG_REG (op1);
2187 if (op1 == const0_rtx)
2188 return shifted;
2190 /* Check whether it's cheaper to implement a left shift by a constant
2191 bit count by a sequence of additions. */
2192 if (code == LSHIFT_EXPR
2193 && CONST_INT_P (op1)
2194 && INTVAL (op1) > 0
2195 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2196 && INTVAL (op1) < MAX_BITS_PER_WORD
2197 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2198 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2200 int i;
2201 for (i = 0; i < INTVAL (op1); i++)
2203 temp = force_reg (mode, shifted);
2204 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2205 unsignedp, OPTAB_LIB_WIDEN);
2207 return shifted;
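/* Added note: on a target whose cost tables make shifts expensive,
   the loop above turns e.g. X << 2 into two self-additions:
   T = X + X;  T = T + T;  */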
2210 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2212 enum optab_methods methods;
2214 if (attempt == 0)
2215 methods = OPTAB_DIRECT;
2216 else if (attempt == 1)
2217 methods = OPTAB_WIDEN;
2218 else
2219 methods = OPTAB_LIB_WIDEN;
2221 if (rotate)
2223 /* Widening does not work for rotation. */
2224 if (methods == OPTAB_WIDEN)
2225 continue;
2226 else if (methods == OPTAB_LIB_WIDEN)
2228 /* If we have been unable to open-code this by a rotation,
2229 do it as the IOR of two shifts. I.e., to rotate A
2230 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2231 where C is the bitsize of A.
2233 It is theoretically possible that the target machine might
2234 not be able to perform either shift and hence we would
2235 be making two libcalls rather than just the one for the
2236 shift (similarly if IOR could not be done). We will allow
2237 this extremely unlikely lossage to avoid complicating the
2238 code below. */
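/* Added illustration: for a 32-bit mode, rotating A left by 5 is
   emitted as (A << 5) | ((unsigned) A >> 27), and rotating right by
   5 as ((unsigned) A >> 5) | (A << 27).  */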
2240 rtx subtarget = target == shifted ? 0 : target;
2241 tree new_amount, other_amount;
2242 rtx temp1;
2243 tree type = TREE_TYPE (amount);
2244 if (GET_MODE (op1) != TYPE_MODE (type)
2245 && GET_MODE (op1) != VOIDmode)
2246 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2247 new_amount = make_tree (type, op1);
2248 other_amount
2249 = fold_build2 (MINUS_EXPR, type,
2250 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2251 new_amount);
2253 shifted = force_reg (mode, shifted);
2255 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2256 mode, shifted, new_amount, 0, 1);
2257 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2258 mode, shifted, other_amount, subtarget, 1);
2259 return expand_binop (mode, ior_optab, temp, temp1, target,
2260 unsignedp, methods);
2263 temp = expand_binop (mode,
2264 left ? lrotate_optab : rrotate_optab,
2265 shifted, op1, target, unsignedp, methods);
2267 else if (unsignedp)
2268 temp = expand_binop (mode,
2269 left ? lshift_optab : rshift_uns_optab,
2270 shifted, op1, target, unsignedp, methods);
2272 /* Do arithmetic shifts.
2273 Also, if we are going to widen the operand, we can just as well
2274 use an arithmetic right-shift instead of a logical one. */
2275 if (temp == 0 && ! rotate
2276 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2278 enum optab_methods methods1 = methods;
2280 /* If trying to widen a log shift to an arithmetic shift,
2281 don't accept an arithmetic shift of the same size. */
2282 if (unsignedp)
2283 methods1 = OPTAB_MUST_WIDEN;
2285 /* Arithmetic shift */
2287 temp = expand_binop (mode,
2288 left ? lshift_optab : rshift_arith_optab,
2289 shifted, op1, target, unsignedp, methods1);
2292 /* We used to try extzv here for logical right shifts, but that was
2293 only useful for one machine, the VAX, and caused poor code
2294 generation there for lshrdi3, so the code was deleted and a
2295 define_expand for lshrsi3 was added to vax.md. */
2298 gcc_assert (temp);
2299 return temp;
2302 enum alg_code {
2303 alg_unknown,
2304 alg_zero,
2305 alg_m, alg_shift,
2306 alg_add_t_m2,
2307 alg_sub_t_m2,
2308 alg_add_factor,
2309 alg_sub_factor,
2310 alg_add_t2_m,
2311 alg_sub_t2_m,
2312 alg_impossible
2315 /* This structure holds the "cost" of a multiply sequence. The
2316 "cost" field holds the total rtx_cost of every operator in the
2317 synthetic multiplication sequence, hence cost(a op b) is defined
2318 as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
2319 The "latency" field holds the minimum possible latency of the
2320 synthetic multiply, on a hypothetical infinitely parallel CPU.
2321 This is the critical path, or the maximum height, of the expression
2322 tree which is the sum of rtx_costs on the most expensive path from
2323 any leaf to the root. Hence latency(a op b) is defined as zero for
2324 leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
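/* Added example: for the sequence T1 = X << 3; T2 = X << 5;
   T3 = T1 + T2 the "cost" is 2 * shift + add (three operations in
   total), while the "latency" is only shift + add, because the two
   shifts are independent and could issue in parallel.  */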
2326 struct mult_cost {
2327 short cost; /* Total rtx_cost of the multiplication sequence. */
2328 short latency; /* The latency of the multiplication sequence. */
2331 /* This macro is used to compare a pointer to a mult_cost against a
2332 single integer "rtx_cost" value. This is equivalent to the macro
2333 CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
2334 #define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
2335 || ((X)->cost == (Y) && (X)->latency < (Y)))
2337 /* This macro is used to compare two pointers to mult_costs against
2338 each other. The macro returns true if X is cheaper than Y.
2339 Currently, the cheaper of two mult_costs is the one with the
2340 lower "cost". If "cost"s are tied, the lower latency is cheaper. */
2341 #define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
2342 || ((X)->cost == (Y)->cost \
2343 && (X)->latency < (Y)->latency))
2345 /* This structure records a sequence of operations.
2346 `ops' is the number of operations recorded.
2347 `cost' is their total cost.
2348 The operations are stored in `op' and the corresponding
2349 logarithms of the integer coefficients in `log'.
2351 These are the operations:
2352 alg_zero total := 0;
2353 alg_m total := multiplicand;
2354 alg_shift total := total * coeff
2355 alg_add_t_m2 total := total + multiplicand * coeff;
2356 alg_sub_t_m2 total := total - multiplicand * coeff;
2357 alg_add_factor total := total * coeff + total;
2358 alg_sub_factor total := total * coeff - total;
2359 alg_add_t2_m total := total * coeff + multiplicand;
2360 alg_sub_t2_m total := total * coeff - multiplicand;
2362 The first operand must be either alg_zero or alg_m. */
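/* Added illustration: a multiplication by 10 can be recorded as
       alg_m                 total := x
       alg_add_t2_m (log 2)  total := total * 4 + x   (= 5*x)
       alg_shift    (log 1)  total := total * 2       (= 10*x)
   i.e. ((x << 2) + x) << 1; whether synth_mult below actually picks
   this sequence depends on the target's add and shift costs.  */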
2364 struct algorithm
2366 struct mult_cost cost;
2367 short ops;
2368 /* The size of the OP and LOG fields are not directly related to the
2369 word size, but the worst-case algorithms will be if we have few
2370 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2371 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2372 in total wordsize operations. */
2373 enum alg_code op[MAX_BITS_PER_WORD];
2374 char log[MAX_BITS_PER_WORD];
2377 /* The entry for our multiplication cache/hash table. */
2378 struct alg_hash_entry {
2379 /* The number we are multiplying by. */
2380 unsigned HOST_WIDE_INT t;
2382 /* The mode in which we are multiplying something by T. */
2383 enum machine_mode mode;
2385 /* The best multiplication algorithm for t. */
2386 enum alg_code alg;
2388 /* The cost of multiplication if ALG_CODE is not alg_impossible.
2389 Otherwise, the cost within which multiplication by T is
2390 impossible. */
2391 struct mult_cost cost;
2393 /* Optimized for speed? */
2394 bool speed;
2397 /* The number of cache/hash entries. */
2398 #if HOST_BITS_PER_WIDE_INT == 64
2399 #define NUM_ALG_HASH_ENTRIES 1031
2400 #else
2401 #define NUM_ALG_HASH_ENTRIES 307
2402 #endif
2404 /* Each entry of ALG_HASH caches alg_code for some integer. This is
2405 actually a hash table. If we have a collision, the older
2406 entry is kicked out. */
2407 static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];
2409 /* Indicates the type of fixup needed after a constant multiplication.
2410 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2411 the result should be negated, and ADD_VARIANT means that the
2412 multiplicand should be added to the result. */
2413 enum mult_variant {basic_variant, negate_variant, add_variant};
2415 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2416 const struct mult_cost *, enum machine_mode mode);
2417 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2418 struct algorithm *, enum mult_variant *, int);
2419 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2420 const struct algorithm *, enum mult_variant);
2421 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2422 int, rtx *, int *, int *);
2423 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2424 static rtx extract_high_half (enum machine_mode, rtx);
2425 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2426 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2427 int, int);
2428 /* Compute and return the best algorithm for multiplying by T.
2429 The algorithm must cost less than cost_limit.
2430 If retval.cost >= COST_LIMIT, no algorithm was found and all
2431 other fields of the returned struct are undefined.
2432 MODE is the machine mode of the multiplication. */
2434 static void
2435 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2436 const struct mult_cost *cost_limit, enum machine_mode mode)
2438 int m;
2439 struct algorithm *alg_in, *best_alg;
2440 struct mult_cost best_cost;
2441 struct mult_cost new_limit;
2442 int op_cost, op_latency;
2443 unsigned HOST_WIDE_INT orig_t = t;
2444 unsigned HOST_WIDE_INT q;
2445 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2446 int hash_index;
2447 bool cache_hit = false;
2448 enum alg_code cache_alg = alg_zero;
2449 bool speed = optimize_insn_for_speed_p ();
2451 /* Indicate that no algorithm is yet found. If no algorithm
2452 is found, this value will be returned and indicate failure. */
2453 alg_out->cost.cost = cost_limit->cost + 1;
2454 alg_out->cost.latency = cost_limit->latency + 1;
2456 if (cost_limit->cost < 0
2457 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2458 return;
2460 /* Restrict the bits of "t" to the multiplication's mode. */
2461 t &= GET_MODE_MASK (mode);
2463 /* t == 1 can be done in zero cost. */
2464 if (t == 1)
2466 alg_out->ops = 1;
2467 alg_out->cost.cost = 0;
2468 alg_out->cost.latency = 0;
2469 alg_out->op[0] = alg_m;
2470 return;
2473 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2474 fail now. */
2475 if (t == 0)
2477 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2478 return;
2479 else
2481 alg_out->ops = 1;
2482 alg_out->cost.cost = zero_cost[speed];
2483 alg_out->cost.latency = zero_cost[speed];
2484 alg_out->op[0] = alg_zero;
2485 return;
2489 /* We'll be needing a couple extra algorithm structures now. */
2491 alg_in = XALLOCA (struct algorithm);
2492 best_alg = XALLOCA (struct algorithm);
2493 best_cost = *cost_limit;
2495 /* Compute the hash index. */
2496 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2498 /* See if we already know what to do for T. */
2499 if (alg_hash[hash_index].t == t
2500 && alg_hash[hash_index].mode == mode
2502 && alg_hash[hash_index].speed == speed
2503 && alg_hash[hash_index].alg != alg_unknown)
2505 cache_alg = alg_hash[hash_index].alg;
2507 if (cache_alg == alg_impossible)
2509 /* The cache tells us that it's impossible to synthesize
2510 multiplication by T within alg_hash[hash_index].cost. */
2511 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2512 /* COST_LIMIT is at least as restrictive as the one
2513 recorded in the hash table, in which case we have no
2514 hope of synthesizing a multiplication. Just
2515 return. */
2516 return;
2518 /* If we get here, COST_LIMIT is less restrictive than the
2519 one recorded in the hash table, so we may be able to
2520 synthesize a multiplication. Proceed as if we didn't
2521 have the cache entry. */
2523 else
2525 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2526 /* The cached algorithm shows that this multiplication
2527 requires more cost than COST_LIMIT. Just return. This
2528 way, we don't clobber this cache entry with
2529 alg_impossible but retain useful information. */
2530 return;
2532 cache_hit = true;
2534 switch (cache_alg)
2536 case alg_shift:
2537 goto do_alg_shift;
2539 case alg_add_t_m2:
2540 case alg_sub_t_m2:
2541 goto do_alg_addsub_t_m2;
2543 case alg_add_factor:
2544 case alg_sub_factor:
2545 goto do_alg_addsub_factor;
2547 case alg_add_t2_m:
2548 goto do_alg_add_t2_m;
2550 case alg_sub_t2_m:
2551 goto do_alg_sub_t2_m;
2553 default:
2554 gcc_unreachable ();
2559 /* If we have a group of zero bits at the low-order part of T, try
2560 multiplying by the remaining bits and then doing a shift. */
2562 if ((t & 1) == 0)
2564 do_alg_shift:
2565 m = floor_log2 (t & -t); /* m = number of low zero bits */
2566 if (m < maxm)
2568 q = t >> m;
2569 /* The function expand_shift will choose between a shift and
2570 a sequence of additions, so the observed cost is given as
2571 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2572 op_cost = m * add_cost[speed][mode];
2573 if (shift_cost[speed][mode][m] < op_cost)
2574 op_cost = shift_cost[speed][mode][m];
2575 new_limit.cost = best_cost.cost - op_cost;
2576 new_limit.latency = best_cost.latency - op_cost;
2577 synth_mult (alg_in, q, &new_limit, mode);
2579 alg_in->cost.cost += op_cost;
2580 alg_in->cost.latency += op_cost;
2581 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2583 struct algorithm *x;
2584 best_cost = alg_in->cost;
2585 x = alg_in, alg_in = best_alg, best_alg = x;
2586 best_alg->log[best_alg->ops] = m;
2587 best_alg->op[best_alg->ops] = alg_shift;
2590 /* See if treating ORIG_T as a signed number yields a better
2591 sequence. Try this sequence only for a negative ORIG_T
2592 as it would be useless for a non-negative ORIG_T. */
2593 if ((HOST_WIDE_INT) orig_t < 0)
2595 /* Shift ORIG_T as follows because a right shift of a
2596 negative-valued signed type is implementation
2597 defined. */
2598 q = ~(~orig_t >> m);
2599 /* The function expand_shift will choose between a shift
2600 and a sequence of additions, so the observed cost is
2601 given as MIN (m * add_cost[speed][mode],
2602 shift_cost[speed][mode][m]). */
2603 op_cost = m * add_cost[speed][mode];
2604 if (shift_cost[speed][mode][m] < op_cost)
2605 op_cost = shift_cost[speed][mode][m];
2606 new_limit.cost = best_cost.cost - op_cost;
2607 new_limit.latency = best_cost.latency - op_cost;
2608 synth_mult (alg_in, q, &new_limit, mode);
2610 alg_in->cost.cost += op_cost;
2611 alg_in->cost.latency += op_cost;
2612 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2614 struct algorithm *x;
2615 best_cost = alg_in->cost;
2616 x = alg_in, alg_in = best_alg, best_alg = x;
2617 best_alg->log[best_alg->ops] = m;
2618 best_alg->op[best_alg->ops] = alg_shift;
2622 if (cache_hit)
2623 goto done;
2626 /* If we have an odd number, add or subtract one. */
2627 if ((t & 1) != 0)
2629 unsigned HOST_WIDE_INT w;
2631 do_alg_addsub_t_m2:
2632 for (w = 1; (w & t) != 0; w <<= 1)
2634 /* If T was -1, then W will be zero after the loop. This is another
2635 case where T ends with ...111. Handling this with (T + 1) and
2636 subtract 1 produces slightly better code and results in algorithm
2637 selection much faster than treating it like the ...0111 case
2638 below. */
2639 if (w == 0
2640 || (w > 2
2641 /* Reject the case where t is 3.
2642 Thus we prefer addition in that case. */
2643 && t != 3))
2645 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2647 op_cost = add_cost[speed][mode];
2648 new_limit.cost = best_cost.cost - op_cost;
2649 new_limit.latency = best_cost.latency - op_cost;
2650 synth_mult (alg_in, t + 1, &new_limit, mode);
2652 alg_in->cost.cost += op_cost;
2653 alg_in->cost.latency += op_cost;
2654 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2656 struct algorithm *x;
2657 best_cost = alg_in->cost;
2658 x = alg_in, alg_in = best_alg, best_alg = x;
2659 best_alg->log[best_alg->ops] = 0;
2660 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2663 else
2665 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2667 op_cost = add_cost[speed][mode];
2668 new_limit.cost = best_cost.cost - op_cost;
2669 new_limit.latency = best_cost.latency - op_cost;
2670 synth_mult (alg_in, t - 1, &new_limit, mode);
2672 alg_in->cost.cost += op_cost;
2673 alg_in->cost.latency += op_cost;
2674 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2676 struct algorithm *x;
2677 best_cost = alg_in->cost;
2678 x = alg_in, alg_in = best_alg, best_alg = x;
2679 best_alg->log[best_alg->ops] = 0;
2680 best_alg->op[best_alg->ops] = alg_add_t_m2;
2684 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2685 quickly with a - a * n for some appropriate constant n. */
2686 m = exact_log2 (-orig_t + 1);
2687 if (m >= 0 && m < maxm)
2689 op_cost = shiftsub1_cost[speed][mode][m];
2690 new_limit.cost = best_cost.cost - op_cost;
2691 new_limit.latency = best_cost.latency - op_cost;
2692 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2694 alg_in->cost.cost += op_cost;
2695 alg_in->cost.latency += op_cost;
2696 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2698 struct algorithm *x;
2699 best_cost = alg_in->cost;
2700 x = alg_in, alg_in = best_alg, best_alg = x;
2701 best_alg->log[best_alg->ops] = m;
2702 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2706 if (cache_hit)
2707 goto done;
2710 /* Look for factors of t of the form
2711 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2712 If we find such a factor, we can multiply by t using an algorithm that
2713 multiplies by q, shift the result by m and add/subtract it to itself.
2715 We search for large factors first and loop down, even if large factors
2716 are less probable than small; if we find a large factor we will find a
2717 good sequence quickly, and therefore be able to prune (by decreasing
2718 COST_LIMIT) the search. */
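/* Added example: t = 45 factors as 5 * (2**3 + 1), so the loop below
   can recurse on q = 5 and finish with alg_add_factor, log = 3:
   total := (total << 3) + total.  */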
2720 do_alg_addsub_factor:
2721 for (m = floor_log2 (t - 1); m >= 2; m--)
2723 unsigned HOST_WIDE_INT d;
2725 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2726 if (t % d == 0 && t > d && m < maxm
2727 && (!cache_hit || cache_alg == alg_add_factor))
2729 /* If the target has a cheap shift-and-add instruction use
2730 that in preference to a shift insn followed by an add insn.
2731 Assume that the shift-and-add is "atomic" with a latency
2732 equal to its cost, otherwise assume that on superscalar
2733 hardware the shift may be executed concurrently with the
2734 earlier steps in the algorithm. */
2735 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2736 if (shiftadd_cost[speed][mode][m] < op_cost)
2738 op_cost = shiftadd_cost[speed][mode][m];
2739 op_latency = op_cost;
2741 else
2742 op_latency = add_cost[speed][mode];
2744 new_limit.cost = best_cost.cost - op_cost;
2745 new_limit.latency = best_cost.latency - op_latency;
2746 synth_mult (alg_in, t / d, &new_limit, mode);
2748 alg_in->cost.cost += op_cost;
2749 alg_in->cost.latency += op_latency;
2750 if (alg_in->cost.latency < op_cost)
2751 alg_in->cost.latency = op_cost;
2752 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2754 struct algorithm *x;
2755 best_cost = alg_in->cost;
2756 x = alg_in, alg_in = best_alg, best_alg = x;
2757 best_alg->log[best_alg->ops] = m;
2758 best_alg->op[best_alg->ops] = alg_add_factor;
2760 /* Other factors will have been taken care of in the recursion. */
2761 break;
2764 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2765 if (t % d == 0 && t > d && m < maxm
2766 && (!cache_hit || cache_alg == alg_sub_factor))
2768 /* If the target has a cheap shift-and-subtract insn use
2769 that in preference to a shift insn followed by a sub insn.
2770 Assume that the shift-and-sub is "atomic" with a latency
2771 equal to its cost, otherwise assume that on superscalar
2772 hardware the shift may be executed concurrently with the
2773 earlier steps in the algorithm. */
2774 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2775 if (shiftsub0_cost[speed][mode][m] < op_cost)
2777 op_cost = shiftsub0_cost[speed][mode][m];
2778 op_latency = op_cost;
2780 else
2781 op_latency = add_cost[speed][mode];
2783 new_limit.cost = best_cost.cost - op_cost;
2784 new_limit.latency = best_cost.latency - op_latency;
2785 synth_mult (alg_in, t / d, &new_limit, mode);
2787 alg_in->cost.cost += op_cost;
2788 alg_in->cost.latency += op_latency;
2789 if (alg_in->cost.latency < op_cost)
2790 alg_in->cost.latency = op_cost;
2791 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2793 struct algorithm *x;
2794 best_cost = alg_in->cost;
2795 x = alg_in, alg_in = best_alg, best_alg = x;
2796 best_alg->log[best_alg->ops] = m;
2797 best_alg->op[best_alg->ops] = alg_sub_factor;
2799 break;
2802 if (cache_hit)
2803 goto done;
2805 /* Try shift-and-add (load effective address) instructions,
2806 i.e. do a*3, a*5, a*9. */
2807 if ((t & 1) != 0)
2809 do_alg_add_t2_m:
2810 q = t - 1;
2811 q = q & -q;
2812 m = exact_log2 (q);
2813 if (m >= 0 && m < maxm)
2815 op_cost = shiftadd_cost[speed][mode][m];
2816 new_limit.cost = best_cost.cost - op_cost;
2817 new_limit.latency = best_cost.latency - op_cost;
2818 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2820 alg_in->cost.cost += op_cost;
2821 alg_in->cost.latency += op_cost;
2822 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2824 struct algorithm *x;
2825 best_cost = alg_in->cost;
2826 x = alg_in, alg_in = best_alg, best_alg = x;
2827 best_alg->log[best_alg->ops] = m;
2828 best_alg->op[best_alg->ops] = alg_add_t2_m;
2831 if (cache_hit)
2832 goto done;
2834 do_alg_sub_t2_m:
2835 q = t + 1;
2836 q = q & -q;
2837 m = exact_log2 (q);
2838 if (m >= 0 && m < maxm)
2840 op_cost = shiftsub0_cost[speed][mode][m];
2841 new_limit.cost = best_cost.cost - op_cost;
2842 new_limit.latency = best_cost.latency - op_cost;
2843 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2845 alg_in->cost.cost += op_cost;
2846 alg_in->cost.latency += op_cost;
2847 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2849 struct algorithm *x;
2850 best_cost = alg_in->cost;
2851 x = alg_in, alg_in = best_alg, best_alg = x;
2852 best_alg->log[best_alg->ops] = m;
2853 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2856 if (cache_hit)
2857 goto done;
2860 done:
2861 /* If best_cost has not decreased, we have not found any algorithm. */
2862 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2864 /* We failed to find an algorithm. Record alg_impossible for
2865 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2866 we are asked to find an algorithm for T within the same or
2867 lower COST_LIMIT, we can immediately return to the
2868 caller. */
2869 alg_hash[hash_index].t = t;
2870 alg_hash[hash_index].mode = mode;
2871 alg_hash[hash_index].speed = speed;
2872 alg_hash[hash_index].alg = alg_impossible;
2873 alg_hash[hash_index].cost = *cost_limit;
2874 return;
2877 /* Cache the result. */
2878 if (!cache_hit)
2880 alg_hash[hash_index].t = t;
2881 alg_hash[hash_index].mode = mode;
2882 alg_hash[hash_index].speed = speed;
2883 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2884 alg_hash[hash_index].cost.cost = best_cost.cost;
2885 alg_hash[hash_index].cost.latency = best_cost.latency;
2888 /* If the sequence is getting too long for `struct algorithm'
2889 to record, make this search fail. */
2890 if (best_alg->ops == MAX_BITS_PER_WORD)
2891 return;
2893 /* Copy the algorithm from temporary space to the space at alg_out.
2894 We avoid using structure assignment because the majority of
2895 best_alg is normally undefined, and this is a critical function. */
2896 alg_out->ops = best_alg->ops + 1;
2897 alg_out->cost = best_cost;
2898 memcpy (alg_out->op, best_alg->op,
2899 alg_out->ops * sizeof *alg_out->op);
2900 memcpy (alg_out->log, best_alg->log,
2901 alg_out->ops * sizeof *alg_out->log);
2904 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2905 Try three variations:
2907 - a shift/add sequence based on VAL itself
2908 - a shift/add sequence based on -VAL, followed by a negation
2909 - a shift/add sequence based on VAL - 1, followed by an addition.
2911 Return true if the cheapest of these cost less than MULT_COST,
2912 describing the algorithm in *ALG and final fixup in *VARIANT. */
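/* Added illustration: for VAL = -3 the second variation can apply:
   synthesize x * 3 as x + (x << 1) and negate the result; for
   VAL = 9 the third variation would synthesize x * 8 as x << 3 and
   then add the multiplicand.  Which variation wins depends on the
   cost tables.  */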
2914 static bool
2915 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2916 struct algorithm *alg, enum mult_variant *variant,
2917 int mult_cost)
2919 struct algorithm alg2;
2920 struct mult_cost limit;
2921 int op_cost;
2922 bool speed = optimize_insn_for_speed_p ();
2924 /* Fail quickly for impossible bounds. */
2925 if (mult_cost < 0)
2926 return false;
2928 /* Ensure that mult_cost provides a reasonable upper bound.
2929 Any constant multiplication can be performed with less
2930 than 2 * bits additions. */
2931 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2932 if (mult_cost > op_cost)
2933 mult_cost = op_cost;
2935 *variant = basic_variant;
2936 limit.cost = mult_cost;
2937 limit.latency = mult_cost;
2938 synth_mult (alg, val, &limit, mode);
2940 /* This works only if the inverted value actually fits in an
2941 `unsigned int' */
2942 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2944 op_cost = neg_cost[speed][mode];
2945 if (MULT_COST_LESS (&alg->cost, mult_cost))
2947 limit.cost = alg->cost.cost - op_cost;
2948 limit.latency = alg->cost.latency - op_cost;
2950 else
2952 limit.cost = mult_cost - op_cost;
2953 limit.latency = mult_cost - op_cost;
2956 synth_mult (&alg2, -val, &limit, mode);
2957 alg2.cost.cost += op_cost;
2958 alg2.cost.latency += op_cost;
2959 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2960 *alg = alg2, *variant = negate_variant;
2963 /* This proves very useful for division-by-constant. */
2964 op_cost = add_cost[speed][mode];
2965 if (MULT_COST_LESS (&alg->cost, mult_cost))
2967 limit.cost = alg->cost.cost - op_cost;
2968 limit.latency = alg->cost.latency - op_cost;
2970 else
2972 limit.cost = mult_cost - op_cost;
2973 limit.latency = mult_cost - op_cost;
2976 synth_mult (&alg2, val - 1, &limit, mode);
2977 alg2.cost.cost += op_cost;
2978 alg2.cost.latency += op_cost;
2979 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2980 *alg = alg2, *variant = add_variant;
2982 return MULT_COST_LESS (&alg->cost, mult_cost);
2985 /* A subroutine of expand_mult, used for constant multiplications.
2986 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2987 convenient. Use the shift/add sequence described by ALG and apply
2988 the final fixup specified by VARIANT. */
2990 static rtx
2991 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2992 rtx target, const struct algorithm *alg,
2993 enum mult_variant variant)
2995 HOST_WIDE_INT val_so_far;
2996 rtx insn, accum, tem;
2997 int opno;
2998 enum machine_mode nmode;
3000 /* Avoid referencing memory over and over and invalid sharing
3001 on SUBREGs. */
3002 op0 = force_reg (mode, op0);
3004 /* ACCUM starts out either as OP0 or as a zero, depending on
3005 the first operation. */
3007 if (alg->op[0] == alg_zero)
3009 accum = copy_to_mode_reg (mode, const0_rtx);
3010 val_so_far = 0;
3012 else if (alg->op[0] == alg_m)
3014 accum = copy_to_mode_reg (mode, op0);
3015 val_so_far = 1;
3017 else
3018 gcc_unreachable ();
3020 for (opno = 1; opno < alg->ops; opno++)
3022 int log = alg->log[opno];
3023 rtx shift_subtarget = optimize ? 0 : accum;
3024 rtx add_target
3025 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
3026 && !optimize)
3027 ? target : 0;
3028 rtx accum_target = optimize ? 0 : accum;
3030 switch (alg->op[opno])
3032 case alg_shift:
3033 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3034 build_int_cst (NULL_TREE, log),
3035 NULL_RTX, 0);
3036 val_so_far <<= log;
3037 break;
3039 case alg_add_t_m2:
3040 tem = expand_shift (LSHIFT_EXPR, mode, op0,
3041 build_int_cst (NULL_TREE, log),
3042 NULL_RTX, 0);
3043 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3044 add_target ? add_target : accum_target);
3045 val_so_far += (HOST_WIDE_INT) 1 << log;
3046 break;
3048 case alg_sub_t_m2:
3049 tem = expand_shift (LSHIFT_EXPR, mode, op0,
3050 build_int_cst (NULL_TREE, log),
3051 NULL_RTX, 0);
3052 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3053 add_target ? add_target : accum_target);
3054 val_so_far -= (HOST_WIDE_INT) 1 << log;
3055 break;
3057 case alg_add_t2_m:
3058 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3059 build_int_cst (NULL_TREE, log),
3060 shift_subtarget,
3061 0);
3062 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3063 add_target ? add_target : accum_target);
3064 val_so_far = (val_so_far << log) + 1;
3065 break;
3067 case alg_sub_t2_m:
3068 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3069 build_int_cst (NULL_TREE, log),
3070 shift_subtarget, 0);
3071 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3072 add_target ? add_target : accum_target);
3073 val_so_far = (val_so_far << log) - 1;
3074 break;
3076 case alg_add_factor:
3077 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3078 build_int_cst (NULL_TREE, log),
3079 NULL_RTX, 0);
3080 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3081 add_target ? add_target : accum_target);
3082 val_so_far += val_so_far << log;
3083 break;
3085 case alg_sub_factor:
3086 tem = expand_shift (LSHIFT_EXPR, mode, accum,
3087 build_int_cst (NULL_TREE, log),
3088 NULL_RTX, 0);
3089 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3090 (add_target
3091 ? add_target : (optimize ? 0 : tem)));
3092 val_so_far = (val_so_far << log) - val_so_far;
3093 break;
3095 default:
3096 gcc_unreachable ();
3099 /* Write a REG_EQUAL note on the last insn so that we can cse
3100 multiplication sequences. Note that if ACCUM is a SUBREG,
3101 we've set the inner register and must properly indicate
3102 that. */
3104 tem = op0, nmode = mode;
3105 if (GET_CODE (accum) == SUBREG)
3107 nmode = GET_MODE (SUBREG_REG (accum));
3108 tem = gen_lowpart (nmode, op0);
3111 insn = get_last_insn ();
3112 set_unique_reg_note (insn, REG_EQUAL,
3113 gen_rtx_MULT (nmode, tem,
3114 GEN_INT (val_so_far)));
3117 if (variant == negate_variant)
3119 val_so_far = -val_so_far;
3120 accum = expand_unop (mode, neg_optab, accum, target, 0);
3122 else if (variant == add_variant)
3124 val_so_far = val_so_far + 1;
3125 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3128 /* Compare only the bits of val and val_so_far that are significant
3129 in the result mode, to avoid sign-/zero-extension confusion. */
3130 val &= GET_MODE_MASK (mode);
3131 val_so_far &= GET_MODE_MASK (mode);
3132 gcc_assert (val == val_so_far);
3134 return accum;
3137 /* Perform a multiplication and return an rtx for the result.
3138 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3139 TARGET is a suggestion for where to store the result (an rtx).
3141 We check specially for a constant integer as OP1.
3142 If you want this check for OP0 as well, then before calling
3143 you should swap the two operands if OP0 would be constant. */
3146 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3147 int unsignedp)
3149 enum mult_variant variant;
3150 struct algorithm algorithm;
3151 int max_cost;
3152 bool speed = optimize_insn_for_speed_p ();
3154 /* Handling const0_rtx here allows us to use zero as a rogue value for
3155 coeff below. */
3156 if (op1 == const0_rtx)
3157 return const0_rtx;
3158 if (op1 == const1_rtx)
3159 return op0;
3160 if (op1 == constm1_rtx)
3161 return expand_unop (mode,
3162 GET_MODE_CLASS (mode) == MODE_INT
3163 && !unsignedp && flag_trapv
3164 ? negv_optab : neg_optab,
3165 op0, target, 0);
3167 /* These are the operations that are potentially turned into a sequence
3168 of shifts and additions. */
3169 if (SCALAR_INT_MODE_P (mode)
3170 && (unsignedp || !flag_trapv))
3172 HOST_WIDE_INT coeff = 0;
3173 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3175 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3176 less than or equal in size to `unsigned int' this doesn't matter.
3177 If the mode is larger than `unsigned int', then synth_mult works
3178 only if the constant value exactly fits in an `unsigned int' without
3179 any truncation. This means that multiplying by negative values does
3180 not work; results are off by 2^32 on a 32 bit machine. */
3182 if (CONST_INT_P (op1))
3184 /* Attempt to handle multiplication of DImode values by negative
3185 coefficients, by performing the multiplication by a positive
3186 multiplier and then inverting the result. */
3187 if (INTVAL (op1) < 0
3188 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3190 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3191 result is interpreted as an unsigned coefficient.
3192 Exclude cost of op0 from max_cost to match the cost
3193 calculation of the synth_mult. */
3194 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3195 - neg_cost[speed][mode];
3196 if (max_cost > 0
3197 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3198 &variant, max_cost))
3200 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3201 NULL_RTX, &algorithm,
3202 variant);
3203 return expand_unop (mode, neg_optab, temp, target, 0);
3206 else coeff = INTVAL (op1);
3208 else if (GET_CODE (op1) == CONST_DOUBLE)
3210 /* If we are multiplying in DImode, it may still be a win
3211 to try to work with shifts and adds. */
3212 if (CONST_DOUBLE_HIGH (op1) == 0
3213 && CONST_DOUBLE_LOW (op1) > 0)
3214 coeff = CONST_DOUBLE_LOW (op1);
3215 else if (CONST_DOUBLE_LOW (op1) == 0
3216 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3218 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3219 + HOST_BITS_PER_WIDE_INT;
3220 return expand_shift (LSHIFT_EXPR, mode, op0,
3221 build_int_cst (NULL_TREE, shift),
3222 target, unsignedp);
3226 /* We used to test optimize here, on the grounds that it's better to
3227 produce a smaller program when -O is not used. But this causes
3228 such a terrible slowdown sometimes that it seems better to always
3229 use synth_mult. */
3230 if (coeff != 0)
3232 /* Special case powers of two. */
3233 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3234 return expand_shift (LSHIFT_EXPR, mode, op0,
3235 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3236 target, unsignedp);
3238 /* Exclude cost of op0 from max_cost to match the cost
3239 calculation of the synth_mult. */
3240 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3241 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3242 max_cost))
3243 return expand_mult_const (mode, op0, coeff, target,
3244 &algorithm, variant);
3248 if (GET_CODE (op0) == CONST_DOUBLE)
3250 rtx temp = op0;
3251 op0 = op1;
3252 op1 = temp;
3255 /* Expand x*2.0 as x+x. */
3256 if (GET_CODE (op1) == CONST_DOUBLE
3257 && SCALAR_FLOAT_MODE_P (mode))
3259 REAL_VALUE_TYPE d;
3260 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3262 if (REAL_VALUES_EQUAL (d, dconst2))
3264 op0 = force_reg (GET_MODE (op0), op0);
3265 return expand_binop (mode, add_optab, op0, op0,
3266 target, unsignedp, OPTAB_LIB_WIDEN);
3270 /* This used to use umul_optab if unsigned, but for non-widening multiply
3271 there is no difference between signed and unsigned. */
3272 op0 = expand_binop (mode,
3273 ! unsignedp
3274 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3275 ? smulv_optab : smul_optab,
3276 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3277 gcc_assert (op0);
3278 return op0;
3281 /* Perform a widening multiplication and return an rtx for the result.
3282 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3283 TARGET is a suggestion for where to store the result (an rtx).
3284 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3285 or smul_widen_optab.
3287 We check specially for a constant integer as OP1, comparing the
3288 cost of a widening multiply against the cost of a sequence of shifts
3289 and adds. */
3292 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3293 int unsignedp, optab this_optab)
3295 bool speed = optimize_insn_for_speed_p ();
3297 if (CONST_INT_P (op1)
3298 && (INTVAL (op1) >= 0
3299 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3301 HOST_WIDE_INT coeff = INTVAL (op1);
3302 int max_cost;
3303 enum mult_variant variant;
3304 struct algorithm algorithm;
3306 /* Special case powers of two. */
3307 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3309 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3310 return expand_shift (LSHIFT_EXPR, mode, op0,
3311 build_int_cst (NULL_TREE, floor_log2 (coeff)),
3312 target, unsignedp);
3315 /* Exclude cost of op0 from max_cost to match the cost
3316 calculation of the synth_mult. */
3317 max_cost = mul_widen_cost[speed][mode];
3318 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3319 max_cost))
3321 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3322 return expand_mult_const (mode, op0, coeff, target,
3323 &algorithm, variant);
3326 return expand_binop (mode, this_optab, op0, op1, target,
3327 unsignedp, OPTAB_LIB_WIDEN);
3330 /* Return the smallest n such that 2**n >= X. */
3333 ceil_log2 (unsigned HOST_WIDE_INT x)
3335 return floor_log2 (x - 1) + 1;
3338 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3339 replace division by D, and put the least significant N bits of the result
3340 in *MULTIPLIER_PTR and return the most significant bit.
3342 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3343 needed precision is in PRECISION (should be <= N).
3345 PRECISION should be as small as possible so this function can choose
3346 the multiplier more freely.
3348 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3349 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3351 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3352 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3354 static
3355 unsigned HOST_WIDE_INT
3356 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3357 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3359 HOST_WIDE_INT mhigh_hi, mlow_hi;
3360 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3361 int lgup, post_shift;
3362 int pow, pow2;
3363 unsigned HOST_WIDE_INT nl, dummy1;
3364 HOST_WIDE_INT nh, dummy2;
3366 /* lgup = ceil(log2(divisor)); */
3367 lgup = ceil_log2 (d);
3369 gcc_assert (lgup <= n);
3371 pow = n + lgup;
3372 pow2 = n + lgup - precision;
3374 /* We could handle this with some effort, but this case is much
3375 better handled directly with a scc insn, so rely on caller using
3376 that. */
3377 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3379 /* mlow = 2^(N + lgup)/d */
3380 if (pow >= HOST_BITS_PER_WIDE_INT)
3382 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3383 nl = 0;
3385 else
3387 nh = 0;
3388 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3390 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3391 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3393 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3394 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3395 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3396 else
3397 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3398 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3399 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3401 gcc_assert (!mhigh_hi || nh - d < d);
3402 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3403 /* Assert that mlow < mhigh. */
3404 gcc_assert (mlow_hi < mhigh_hi
3405 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3407 /* If precision == N, then mlow, mhigh exceed 2^N
3408 (but they do not exceed 2^(N+1)). */
3410 /* Reduce to lowest terms. */
3411 for (post_shift = lgup; post_shift > 0; post_shift--)
3413 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3414 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3415 if (ml_lo >= mh_lo)
3416 break;
3418 mlow_hi = 0;
3419 mlow_lo = ml_lo;
3420 mhigh_hi = 0;
3421 mhigh_lo = mh_lo;
3424 *post_shift_ptr = post_shift;
3425 *lgup_ptr = lgup;
3426 if (n < HOST_BITS_PER_WIDE_INT)
3428 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3429 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3430 return mhigh_lo >= mask;
3432 else
3434 *multiplier_ptr = GEN_INT (mhigh_lo);
3435 return mhigh_hi;
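/* Worked example for choose_multiplier above (added, computed by hand,
   so treat as illustrative): for D = 7 with N = PRECISION = 32 this
   yields the well-known magic constant 0x24924925 with
   *POST_SHIFT_PTR = 3 and a nonzero return value, i.e. the true
   multiplier is 2**32 + 0x24924925 and the caller must fold the
   extra 2**32 term in (the "add" fixup) before shifting right
   by 3.  */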
3439 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3440 congruent to 1 (mod 2**N). */
3442 static unsigned HOST_WIDE_INT
3443 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3445 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3447 /* The algorithm notes that the choice y = x satisfies
3448 x*y == 1 mod 2^3, since x is assumed odd.
3449 Each iteration doubles the number of bits of significance in y. */
3451 unsigned HOST_WIDE_INT mask;
3452 unsigned HOST_WIDE_INT y = x;
3453 int nbit = 3;
3455 mask = (n == HOST_BITS_PER_WIDE_INT
3456 ? ~(unsigned HOST_WIDE_INT) 0
3457 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3459 while (nbit < n)
3461 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3462 nbit *= 2;
3464 return y;
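/* Added example for invert_mod2n above: invert_mod2n (3, 8) returns
   171, and indeed 3 * 171 == 513 == 2 * 256 + 1, i.e. 1 (mod 2**8).
   The first iteration already gives a value correct mod 2**6, the
   second one mod 2**12, at which point the loop stops.  */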
3467 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3468 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3469 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3470 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3471 become signed.
3473 The result is put in TARGET if that is convenient.
3475 MODE is the mode of operation. */
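/* Added note on the identity being used: for N-bit operands,
   highpart_unsigned (a, b) == highpart_signed (a, b)
                               + (a < 0 ? b : 0) + (b < 0 ? a : 0)
   modulo 2**N; the conditional terms are formed below by arithmetic-
   shifting one operand right by N - 1 bits and ANDing the result
   (all zeros or all ones) with the other operand.  */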
3478 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3479 rtx op1, rtx target, int unsignedp)
3481 rtx tem;
3482 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3484 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3485 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3486 NULL_RTX, 0);
3487 tem = expand_and (mode, tem, op1, NULL_RTX);
3488 adj_operand
3489 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3490 adj_operand);
3492 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3493 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
3494 NULL_RTX, 0);
3495 tem = expand_and (mode, tem, op0, NULL_RTX);
3496 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3497 target);
3499 return target;
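/* Illustrative sketch, standalone plain C rather than part of the surrounding
   file (the function name is invented; it assumes 32-bit int, 64-bit long long
   and GCC-style arithmetic right shift of negative values): the identity the
   code above applies in the UNSIGNEDP direction.  The unsigned high product is
   the signed high product plus OP1 when OP0 is negative plus OP0 when OP1 is
   negative, all taken modulo 2^32; the signed direction subtracts instead.  */
static unsigned int
umulh_from_smulh_sketch (int a, int b)
{
  unsigned int shigh = (unsigned int) (((long long) a * b) >> 32);
  unsigned int adj = shigh;
  if (a < 0) adj += (unsigned int) b;
  if (b < 0) adj += (unsigned int) a;
  return adj;  /* high word of (unsigned) a * (unsigned) b */
}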
3502 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3504 static rtx
3505 extract_high_half (enum machine_mode mode, rtx op)
3507 enum machine_mode wider_mode;
3509 if (mode == word_mode)
3510 return gen_highpart (mode, op);
3512 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3514 wider_mode = GET_MODE_WIDER_MODE (mode);
3515 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3516 build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
3517 return convert_modes (mode, wider_mode, op, 0);
3520 /* Like expand_mult_highpart, but only consider using a multiplication
3521 optab. OP1 is an rtx for the constant operand. */
3523 static rtx
3524 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3525 rtx target, int unsignedp, int max_cost)
3527 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3528 enum machine_mode wider_mode;
3529 optab moptab;
3530 rtx tem;
3531 int size;
3532 bool speed = optimize_insn_for_speed_p ();
3534 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3536 wider_mode = GET_MODE_WIDER_MODE (mode);
3537 size = GET_MODE_BITSIZE (mode);
3539 /* Firstly, try using a multiplication insn that only generates the needed
3540 high part of the product, and in the sign flavor of unsignedp. */
3541 if (mul_highpart_cost[speed][mode] < max_cost)
3543 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3544 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3545 unsignedp, OPTAB_DIRECT);
3546 if (tem)
3547 return tem;
3550 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3551 Need to adjust the result after the multiplication. */
3552 if (size - 1 < BITS_PER_WORD
3553 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3554 + 4 * add_cost[speed][mode] < max_cost))
3556 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3557 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3558 unsignedp, OPTAB_DIRECT);
3559 if (tem)
3560 /* We used the wrong signedness. Adjust the result. */
3561 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3562 tem, unsignedp);
3565 /* Try widening multiplication. */
3566 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3567 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3568 && mul_widen_cost[speed][wider_mode] < max_cost)
3570 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3571 unsignedp, OPTAB_WIDEN);
3572 if (tem)
3573 return extract_high_half (mode, tem);
3576 /* Try widening the mode and performing a non-widening multiplication. */
3577 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3578 && size - 1 < BITS_PER_WORD
3579 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3581 rtx insns, wop0, wop1;
3583 /* We need to widen the operands, for example to ensure the
3584 constant multiplier is correctly sign or zero extended.
3585 Use a sequence to clean-up any instructions emitted by
3586 the conversions if things don't work out. */
3587 start_sequence ();
3588 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3589 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3590 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3591 unsignedp, OPTAB_WIDEN);
3592 insns = get_insns ();
3593 end_sequence ();
3595 if (tem)
3597 emit_insn (insns);
3598 return extract_high_half (mode, tem);
3602 /* Try widening multiplication of opposite signedness, and adjust. */
3603 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3604 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3605 && size - 1 < BITS_PER_WORD
3606 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3607 + 4 * add_cost[speed][mode] < max_cost))
3609 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3610 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3611 if (tem != 0)
3613 tem = extract_high_half (mode, tem);
3614 /* We used the wrong signedness. Adjust the result. */
3615 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3616 target, unsignedp);
3620 return 0;
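/* Illustrative sketch, standalone plain C rather than part of the surrounding
   file (the function name is invented): the widening fallback tried above.
   When no highpart-multiply pattern exists, the product is formed in a mode
   twice as wide and extract_high_half keeps the upper word.  */
static unsigned int
mulhi_by_widening_sketch (unsigned int a, unsigned int b)
{
  unsigned long long wide = (unsigned long long) a * b;  /* widening multiply */
  return (unsigned int) (wide >> 32);                    /* high half */
}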
3623 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3624 putting the high half of the result in TARGET if that is convenient,
3625 and return where the result is. If the operation can not be performed,
3626 0 is returned.
3628 MODE is the mode of operation and result.
3630 UNSIGNEDP nonzero means unsigned multiply.
3632 MAX_COST is the total allowed cost for the expanded RTL. */
3634 static rtx
3635 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3636 rtx target, int unsignedp, int max_cost)
3638 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3639 unsigned HOST_WIDE_INT cnst1;
3640 int extra_cost;
3641 bool sign_adjust = false;
3642 enum mult_variant variant;
3643 struct algorithm alg;
3644 rtx tem;
3645 bool speed = optimize_insn_for_speed_p ();
3647 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3648 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3649 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3651 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3653 /* We can't optimize modes wider than BITS_PER_WORD.
3654 ??? We might be able to perform double-word arithmetic if
3655 mode == word_mode, however all the cost calculations in
3656 synth_mult etc. assume single-word operations. */
3657 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3658 return expand_mult_highpart_optab (mode, op0, op1, target,
3659 unsignedp, max_cost);
3661 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3663 /* Check whether we try to multiply by a negative constant. */
3664 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3666 sign_adjust = true;
3667 extra_cost += add_cost[speed][mode];
3670 /* See whether shift/add multiplication is cheap enough. */
3671 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3672 max_cost - extra_cost))
3674 /* See whether the specialized multiplication optabs are
3675 cheaper than the shift/add version. */
3676 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3677 alg.cost.cost + extra_cost);
3678 if (tem)
3679 return tem;
3681 tem = convert_to_mode (wider_mode, op0, unsignedp);
3682 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3683 tem = extract_high_half (mode, tem);
3685 /* Adjust result for signedness. */
3686 if (sign_adjust)
3687 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3689 return tem;
3691 return expand_mult_highpart_optab (mode, op0, op1, target,
3692 unsignedp, max_cost);
3696 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3698 static rtx
3699 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3701 unsigned HOST_WIDE_INT masklow, maskhigh;
3702 rtx result, temp, shift, label;
3703 int logd;
3705 logd = floor_log2 (d);
3706 result = gen_reg_rtx (mode);
3708 /* Avoid conditional branches when they're expensive. */
3709 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3710 && optimize_insn_for_speed_p ())
3712 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3713 mode, 0, -1);
3714 if (signmask)
3716 signmask = force_reg (mode, signmask);
3717 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3718 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3720 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3721 which instruction sequence to use. If logical right shifts
3722 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3723 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3725 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3726 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3727 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3729 temp = expand_binop (mode, xor_optab, op0, signmask,
3730 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3731 temp = expand_binop (mode, sub_optab, temp, signmask,
3732 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3733 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3734 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3735 temp = expand_binop (mode, xor_optab, temp, signmask,
3736 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3737 temp = expand_binop (mode, sub_optab, temp, signmask,
3738 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3740 else
3742 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3743 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3744 signmask = force_reg (mode, signmask);
3746 temp = expand_binop (mode, add_optab, op0, signmask,
3747 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3748 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3749 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3750 temp = expand_binop (mode, sub_optab, temp, signmask,
3751 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3753 return temp;
3757 /* Mask contains the mode's signbit and the significant bits of the
3758 modulus. By including the signbit in the operation, many targets
3759 can avoid an explicit compare operation in the following comparison
3760 against zero. */
3762 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3763 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3765 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3766 maskhigh = -1;
3768 else
3769 maskhigh = (HOST_WIDE_INT) -1
3770 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3772 temp = expand_binop (mode, and_optab, op0,
3773 immed_double_const (masklow, maskhigh, mode),
3774 result, 1, OPTAB_LIB_WIDEN);
3775 if (temp != result)
3776 emit_move_insn (result, temp);
3778 label = gen_label_rtx ();
3779 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3781 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3782 0, OPTAB_LIB_WIDEN);
3783 masklow = (HOST_WIDE_INT) -1 << logd;
3784 maskhigh = -1;
3785 temp = expand_binop (mode, ior_optab, temp,
3786 immed_double_const (masklow, maskhigh, mode),
3787 result, 1, OPTAB_LIB_WIDEN);
3788 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3789 0, OPTAB_LIB_WIDEN);
3790 if (temp != result)
3791 emit_move_insn (result, temp);
3792 emit_label (label);
3793 return result;
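/* Illustrative sketch, standalone plain C rather than part of the surrounding
   file (the function name is invented; it assumes 32-bit int, 0 < k < 31 and
   GCC-style arithmetic right shift of negative values): one of the branch-free
   sequences emitted above, the variant used when logical shifts are cheap.
   It gives the C-style, truncated signed remainder by d = 2^k.  */
static int
smod_pow2_sketch (int op0, int k)
{
  int mask = (1 << k) - 1;
  int signmask = op0 >> 31;                                /* -1 if op0 < 0, else 0 */
  int bias = (int) ((unsigned int) signmask >> (32 - k));  /* d - 1 or 0 */
  return ((op0 + bias) & mask) - bias;                     /* == op0 % (1 << k) */
}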
3796 /* Expand signed division of OP0 by a power of two D in mode MODE.
3797 This routine is only called for positive values of D. */
3799 static rtx
3800 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3802 rtx temp, label;
3803 tree shift;
3804 int logd;
3806 logd = floor_log2 (d);
3807 shift = build_int_cst (NULL_TREE, logd);
3809 if (d == 2
3810 && BRANCH_COST (optimize_insn_for_speed_p (),
3811 false) >= 1)
3813 temp = gen_reg_rtx (mode);
3814 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3815 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3816 0, OPTAB_LIB_WIDEN);
3817 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3820 #ifdef HAVE_conditional_move
3821 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3822 >= 2)
3824 rtx temp2;
3826 /* ??? emit_conditional_move forces a stack adjustment via
3827 compare_from_rtx so, if the sequence is discarded, it will
3828 be lost. Do it now instead. */
3829 do_pending_stack_adjust ();
3831 start_sequence ();
3832 temp2 = copy_to_mode_reg (mode, op0);
3833 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3834 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3835 temp = force_reg (mode, temp);
3837 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3838 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3839 mode, temp, temp2, mode, 0);
3840 if (temp2)
3842 rtx seq = get_insns ();
3843 end_sequence ();
3844 emit_insn (seq);
3845 return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
3847 end_sequence ();
3849 #endif
3851 if (BRANCH_COST (optimize_insn_for_speed_p (),
3852 false) >= 2)
3854 int ushift = GET_MODE_BITSIZE (mode) - logd;
3856 temp = gen_reg_rtx (mode);
3857 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3858 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3859 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3860 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3861 else
3862 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3863 build_int_cst (NULL_TREE, ushift),
3864 NULL_RTX, 1);
3865 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3866 0, OPTAB_LIB_WIDEN);
3867 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
3870 label = gen_label_rtx ();
3871 temp = copy_to_mode_reg (mode, op0);
3872 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3873 expand_inc (temp, GEN_INT (d - 1));
3874 emit_label (label);
3875 return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
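/* Illustrative sketch, standalone plain C rather than part of the surrounding
   file (the function name is invented; it assumes 32-bit int, 0 < k < 31 and
   GCC-style arithmetic right shift of negative values): what expand_sdiv_pow2
   boils down to.  Signed division by d = 2^k with truncation toward zero is an
   arithmetic shift after adding d - 1 to negative dividends; the store-flag
   and conditional-move variants above merely compute that bias without a
   branch.  */
static int
sdiv_pow2_sketch (int op0, int k)
{
  int bias = (op0 < 0) ? (1 << k) - 1 : 0;
  return (op0 + bias) >> k;            /* == op0 / (1 << k) */
}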
3878 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3879 if that is convenient, and returning where the result is.
3880 You may request either the quotient or the remainder as the result;
3881 specify REM_FLAG nonzero to get the remainder.
3883 CODE is the expression code for which kind of division this is;
3884 it controls how rounding is done. MODE is the machine mode to use.
3885 UNSIGNEDP nonzero means do unsigned division. */
3887 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3888 and then correct it by or'ing in missing high bits
3889 if result of ANDI is nonzero.
3890 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3891 This could optimize to a bfexts instruction.
3892 But C doesn't use these operations, so their optimizations are
3893 left for later. */
3894 /* ??? For modulo, we don't actually need the highpart of the first product,
3895 the low part will do nicely. And for small divisors, the second multiply
3896 can also be a low-part only multiply or even be completely left out.
3897 E.g. to calculate the remainder of a division by 3 with a 32 bit
3898 multiply, multiply with 0x55555556 and extract the upper two bits;
3899 the result is exact for inputs up to 0x1fffffff.
3900 The input range can be reduced by using cross-sum rules.
3901 For odd divisors >= 3, the following table gives right shift counts
3902 so that if a number is shifted by an integer multiple of the given
3903 amount, the remainder stays the same:
3904 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3905 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3906 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3907 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3908 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3910 Cross-sum rules for even numbers can be derived by leaving as many bits
3911 to the right alone as the divisor has zeros to the right.
3912 E.g. if x is an unsigned 32 bit number:
3913 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3914 */
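/* Illustrative sketch, standalone plain C rather than part of the surrounding
   file (the function name is invented): the remainder-by-3 trick mentioned in
   the note above.  Since 0x55555556 = (2^32 + 2) / 3, the top two bits of the
   low 32-bit product x * 0x55555556 are x mod 3 for every x <= 0x1fffffff.  */
static unsigned int
rem3_sketch (unsigned int x)  /* valid for x <= 0x1fffffff */
{
  return (x * 0x55555556u) >> 30;
}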
3916 rtx
3917 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3918 rtx op0, rtx op1, rtx target, int unsignedp)
3920 enum machine_mode compute_mode;
3921 rtx tquotient;
3922 rtx quotient = 0, remainder = 0;
3923 rtx last;
3924 int size;
3925 rtx insn, set;
3926 optab optab1, optab2;
3927 int op1_is_constant, op1_is_pow2 = 0;
3928 int max_cost, extra_cost;
3929 static HOST_WIDE_INT last_div_const = 0;
3930 static HOST_WIDE_INT ext_op1;
3931 bool speed = optimize_insn_for_speed_p ();
3933 op1_is_constant = CONST_INT_P (op1);
3934 if (op1_is_constant)
3936 ext_op1 = INTVAL (op1);
3937 if (unsignedp)
3938 ext_op1 &= GET_MODE_MASK (mode);
3939 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3940 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3943 /*
3944 This is the structure of expand_divmod:
3946 First comes code to fix up the operands so we can perform the operations
3947 correctly and efficiently.
3949 Second comes a switch statement with code specific for each rounding mode.
3950 For some special operands this code emits all RTL for the desired
3951 operation, for other cases, it generates only a quotient and stores it in
3952 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3953 to indicate that it has not done anything.
3955 Last comes code that finishes the operation. If QUOTIENT is set and
3956 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3957 QUOTIENT is not set, it is computed using trunc rounding.
3959 We try to generate special code for division and remainder when OP1 is a
3960 constant. If |OP1| = 2**n we can use shifts and some other fast
3961 operations. For other values of OP1, we compute a carefully selected
3962 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3963 by m.
3965 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3966 half of the product. Different strategies for generating the product are
3967 implemented in expand_mult_highpart.
3969 If what we actually want is the remainder, we generate that by another
3970 by-constant multiplication and a subtraction. */
3972 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3973 code below will malfunction if we are, so check here and handle
3974 the special case if so. */
3975 if (op1 == const1_rtx)
3976 return rem_flag ? const0_rtx : op0;
3978 /* When dividing by -1, we could get an overflow.
3979 negv_optab can handle overflows. */
3980 if (! unsignedp && op1 == constm1_rtx)
3982 if (rem_flag)
3983 return const0_rtx;
3984 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3985 ? negv_optab : neg_optab, op0, target, 0);
3988 if (target
3989 /* Don't use the function value register as a target
3990 since we have to read it as well as write it,
3991 and function-inlining gets confused by this. */
3992 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3993 /* Don't clobber an operand while doing a multi-step calculation. */
3994 || ((rem_flag || op1_is_constant)
3995 && (reg_mentioned_p (target, op0)
3996 || (MEM_P (op0) && MEM_P (target))))
3997 || reg_mentioned_p (target, op1)
3998 || (MEM_P (op1) && MEM_P (target))))
3999 target = 0;
4001 /* Get the mode in which to perform this computation. Normally it will
4002 be MODE, but sometimes we can't do the desired operation in MODE.
4003 If so, pick a wider mode in which we can do the operation. Convert
4004 to that mode at the start to avoid repeated conversions.
4006 First see what operations we need. These depend on the expression
4007 we are evaluating. (We assume that divxx3 insns exist under the
4008 same conditions that modxx3 insns and that these insns don't normally
4009 fail. If these assumptions are not correct, we may generate less
4010 efficient code in some cases.)
4012 Then see if we find a mode in which we can open-code that operation
4013 (either a division, modulus, or shift). Finally, check for the smallest
4014 mode for which we can do the operation with a library call. */
4016 /* We might want to refine this now that we have division-by-constant
4017 optimization. Since expand_mult_highpart tries so many variants, it is
4018 not straightforward to generalize this. Maybe we should make an array
4019 of possible modes in init_expmed? Save this for GCC 2.7. */
4021 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
4022 ? (unsignedp ? lshr_optab : ashr_optab)
4023 : (unsignedp ? udiv_optab : sdiv_optab));
4024 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
4025 ? optab1
4026 : (unsignedp ? udivmod_optab : sdivmod_optab));
4028 for (compute_mode = mode; compute_mode != VOIDmode;
4029 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
4030 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4031 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4032 break;
4034 if (compute_mode == VOIDmode)
4035 for (compute_mode = mode; compute_mode != VOIDmode;
4036 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
4037 if (optab_libfunc (optab1, compute_mode)
4038 || optab_libfunc (optab2, compute_mode))
4039 break;
4041 /* If we still couldn't find a mode, use MODE, but expand_binop will
4042 probably die. */
4043 if (compute_mode == VOIDmode)
4044 compute_mode = mode;
4046 if (target && GET_MODE (target) == compute_mode)
4047 tquotient = target;
4048 else
4049 tquotient = gen_reg_rtx (compute_mode);
4051 size = GET_MODE_BITSIZE (compute_mode);
4052 #if 0
4053 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4054 (mode), and thereby get better code when OP1 is a constant. Do that
4055 later. It will require going over all usages of SIZE below. */
4056 size = GET_MODE_BITSIZE (mode);
4057 #endif
4059 /* Only deduct something for a REM if the last divide done was
4060 for a different constant. Then set the constant of the last
4061 divide. */
4062 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
4063 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4064 && INTVAL (op1) == last_div_const))
4065 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
4067 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4069 /* Now convert to the best mode to use. */
4070 if (compute_mode != mode)
4072 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4073 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4075 /* convert_modes may have placed op1 into a register, so we
4076 must recompute the following. */
4077 op1_is_constant = CONST_INT_P (op1);
4078 op1_is_pow2 = (op1_is_constant
4079 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4080 || (! unsignedp
4081 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
4084 /* If one of the operands is a volatile MEM, copy it into a register. */
4086 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4087 op0 = force_reg (compute_mode, op0);
4088 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4089 op1 = force_reg (compute_mode, op1);
4091 /* If we need the remainder or if OP1 is constant, we need to
4092 put OP0 in a register in case it has any queued subexpressions. */
4093 if (rem_flag || op1_is_constant)
4094 op0 = force_reg (compute_mode, op0);
4096 last = get_last_insn ();
4098 /* Promote floor rounding to trunc rounding for unsigned operations. */
4099 if (unsignedp)
4101 if (code == FLOOR_DIV_EXPR)
4102 code = TRUNC_DIV_EXPR;
4103 if (code == FLOOR_MOD_EXPR)
4104 code = TRUNC_MOD_EXPR;
4105 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4106 code = TRUNC_DIV_EXPR;
4109 if (op1 != const0_rtx)
4110 switch (code)
4112 case TRUNC_MOD_EXPR:
4113 case TRUNC_DIV_EXPR:
4114 if (op1_is_constant)
4116 if (unsignedp)
4118 unsigned HOST_WIDE_INT mh;
4119 int pre_shift, post_shift;
4120 int dummy;
4121 rtx ml;
4122 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4123 & GET_MODE_MASK (compute_mode));
4125 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4127 pre_shift = floor_log2 (d);
4128 if (rem_flag)
4130 remainder
4131 = expand_binop (compute_mode, and_optab, op0,
4132 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4133 remainder, 1,
4134 OPTAB_LIB_WIDEN);
4135 if (remainder)
4136 return gen_lowpart (mode, remainder);
4138 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4139 build_int_cst (NULL_TREE,
4140 pre_shift),
4141 tquotient, 1);
4143 else if (size <= HOST_BITS_PER_WIDE_INT)
4145 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4147 /* Most significant bit of divisor is set; emit an scc
4148 insn. */
4149 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4150 compute_mode, 1, 1);
4152 else
4154 /* Find a suitable multiplier and right shift count
4155 instead of multiplying with D. */
4157 mh = choose_multiplier (d, size, size,
4158 &ml, &post_shift, &dummy);
4160 /* If the suggested multiplier is more than SIZE bits,
4161 we can do better for even divisors, using an
4162 initial right shift. */
4163 if (mh != 0 && (d & 1) == 0)
4165 pre_shift = floor_log2 (d & -d);
4166 mh = choose_multiplier (d >> pre_shift, size,
4167 size - pre_shift,
4168 &ml, &post_shift, &dummy);
4169 gcc_assert (!mh);
4171 else
4172 pre_shift = 0;
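/* When MH is nonzero the exact multiplier needs SIZE + 1 bits, so the
   sequence below folds the implicit top bit in by hand: with
   hi = high (op0 * ml), it computes
   (((op0 - hi) >> 1) + hi) >> (post_shift - 1), which equals
   op0 * (2^SIZE + ml) >> (SIZE + post_shift) without ever forming a
   SIZE + 1 bit product.  For example (32 bits, d = 7, ml = 0x24924925,
   post_shift = 3): op0 = 100 gives hi = 14, (86 >> 1) + 14 = 57, and
   57 >> 2 = 14 = 100 / 7.  */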
4174 if (mh != 0)
4176 rtx t1, t2, t3, t4;
4178 if (post_shift - 1 >= BITS_PER_WORD)
4179 goto fail1;
4181 extra_cost
4182 = (shift_cost[speed][compute_mode][post_shift - 1]
4183 + shift_cost[speed][compute_mode][1]
4184 + 2 * add_cost[speed][compute_mode]);
4185 t1 = expand_mult_highpart (compute_mode, op0, ml,
4186 NULL_RTX, 1,
4187 max_cost - extra_cost);
4188 if (t1 == 0)
4189 goto fail1;
4190 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4191 op0, t1),
4192 NULL_RTX);
4193 t3 = expand_shift
4194 (RSHIFT_EXPR, compute_mode, t2,
4195 build_int_cst (NULL_TREE, 1),
4196 NULL_RTX,1);
4197 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4198 t1, t3),
4199 NULL_RTX);
4200 quotient = expand_shift
4201 (RSHIFT_EXPR, compute_mode, t4,
4202 build_int_cst (NULL_TREE, post_shift - 1),
4203 tquotient, 1);
4205 else
4207 rtx t1, t2;
4209 if (pre_shift >= BITS_PER_WORD
4210 || post_shift >= BITS_PER_WORD)
4211 goto fail1;
4213 t1 = expand_shift
4214 (RSHIFT_EXPR, compute_mode, op0,
4215 build_int_cst (NULL_TREE, pre_shift),
4216 NULL_RTX, 1);
4217 extra_cost
4218 = (shift_cost[speed][compute_mode][pre_shift]
4219 + shift_cost[speed][compute_mode][post_shift]);
4220 t2 = expand_mult_highpart (compute_mode, t1, ml,
4221 NULL_RTX, 1,
4222 max_cost - extra_cost);
4223 if (t2 == 0)
4224 goto fail1;
4225 quotient = expand_shift
4226 (RSHIFT_EXPR, compute_mode, t2,
4227 build_int_cst (NULL_TREE, post_shift),
4228 tquotient, 1);
4232 else /* Too wide mode to use tricky code */
4233 break;
4235 insn = get_last_insn ();
4236 if (insn != last
4237 && (set = single_set (insn)) != 0
4238 && SET_DEST (set) == quotient)
4239 set_unique_reg_note (insn,
4240 REG_EQUAL,
4241 gen_rtx_UDIV (compute_mode, op0, op1));
4243 else /* TRUNC_DIV, signed */
4245 unsigned HOST_WIDE_INT ml;
4246 int lgup, post_shift;
4247 rtx mlr;
4248 HOST_WIDE_INT d = INTVAL (op1);
4249 unsigned HOST_WIDE_INT abs_d;
4251 /* Since d might be INT_MIN, we have to cast to
4252 unsigned HOST_WIDE_INT before negating to avoid
4253 undefined signed overflow. */
4254 abs_d = (d >= 0
4255 ? (unsigned HOST_WIDE_INT) d
4256 : - (unsigned HOST_WIDE_INT) d);
4258 /* n rem d = n rem -d */
4259 if (rem_flag && d < 0)
4261 d = abs_d;
4262 op1 = gen_int_mode (abs_d, compute_mode);
4265 if (d == 1)
4266 quotient = op0;
4267 else if (d == -1)
4268 quotient = expand_unop (compute_mode, neg_optab, op0,
4269 tquotient, 0);
4270 else if (HOST_BITS_PER_WIDE_INT >= size
4271 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4273 /* This case is not handled correctly below. */
4274 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4275 compute_mode, 1, 1);
4276 if (quotient == 0)
4277 goto fail1;
4279 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4280 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4281 : sdiv_pow2_cheap[speed][compute_mode])
4282 /* We assume that cheap metric is true if the
4283 optab has an expander for this mode. */
4284 && ((optab_handler ((rem_flag ? smod_optab
4285 : sdiv_optab),
4286 compute_mode)
4287 != CODE_FOR_nothing)
4288 || (optab_handler (sdivmod_optab,
4289 compute_mode)
4290 != CODE_FOR_nothing)))
4292 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4294 if (rem_flag)
4296 remainder = expand_smod_pow2 (compute_mode, op0, d);
4297 if (remainder)
4298 return gen_lowpart (mode, remainder);
4301 if (sdiv_pow2_cheap[speed][compute_mode]
4302 && ((optab_handler (sdiv_optab, compute_mode)
4303 != CODE_FOR_nothing)
4304 || (optab_handler (sdivmod_optab, compute_mode)
4305 != CODE_FOR_nothing)))
4306 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4307 compute_mode, op0,
4308 gen_int_mode (abs_d,
4309 compute_mode),
4310 NULL_RTX, 0);
4311 else
4312 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4314 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4315 negate the quotient. */
4316 if (d < 0)
4318 insn = get_last_insn ();
4319 if (insn != last
4320 && (set = single_set (insn)) != 0
4321 && SET_DEST (set) == quotient
4322 && abs_d < ((unsigned HOST_WIDE_INT) 1
4323 << (HOST_BITS_PER_WIDE_INT - 1)))
4324 set_unique_reg_note (insn,
4325 REG_EQUAL,
4326 gen_rtx_DIV (compute_mode,
4327 op0,
4328 GEN_INT
4329 (trunc_int_for_mode
4330 (abs_d,
4331 compute_mode))));
4333 quotient = expand_unop (compute_mode, neg_optab,
4334 quotient, quotient, 0);
4337 else if (size <= HOST_BITS_PER_WIDE_INT)
4339 choose_multiplier (abs_d, size, size - 1,
4340 &mlr, &post_shift, &lgup);
4341 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
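/* Both branches below are the standard signed magic-number division.
   The quotient is the arithmetically shifted high half of
   op0 * multiplier, with op0 added back first when the multiplier's top
   bit had to be set, minus op0 >> (size - 1); that term is 0 or -1, so
   subtracting it adds 1 for negative op0 and keeps rounding toward
   zero.  For negative d the subtraction is reversed to negate the
   result.  For example (32 bits, d = 3): multiplier 0x55555556,
   post_shift 0, and op0 = -7 gives high half -3 minus sign -1, i.e.
   quotient -2.  */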
4342 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4344 rtx t1, t2, t3;
4346 if (post_shift >= BITS_PER_WORD
4347 || size - 1 >= BITS_PER_WORD)
4348 goto fail1;
4350 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4351 + shift_cost[speed][compute_mode][size - 1]
4352 + add_cost[speed][compute_mode]);
4353 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4354 NULL_RTX, 0,
4355 max_cost - extra_cost);
4356 if (t1 == 0)
4357 goto fail1;
4358 t2 = expand_shift
4359 (RSHIFT_EXPR, compute_mode, t1,
4360 build_int_cst (NULL_TREE, post_shift),
4361 NULL_RTX, 0);
4362 t3 = expand_shift
4363 (RSHIFT_EXPR, compute_mode, op0,
4364 build_int_cst (NULL_TREE, size - 1),
4365 NULL_RTX, 0);
4366 if (d < 0)
4367 quotient
4368 = force_operand (gen_rtx_MINUS (compute_mode,
4369 t3, t2),
4370 tquotient);
4371 else
4372 quotient
4373 = force_operand (gen_rtx_MINUS (compute_mode,
4374 t2, t3),
4375 tquotient);
4377 else
4379 rtx t1, t2, t3, t4;
4381 if (post_shift >= BITS_PER_WORD
4382 || size - 1 >= BITS_PER_WORD)
4383 goto fail1;
4385 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4386 mlr = gen_int_mode (ml, compute_mode);
4387 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4388 + shift_cost[speed][compute_mode][size - 1]
4389 + 2 * add_cost[speed][compute_mode]);
4390 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4391 NULL_RTX, 0,
4392 max_cost - extra_cost);
4393 if (t1 == 0)
4394 goto fail1;
4395 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4396 t1, op0),
4397 NULL_RTX);
4398 t3 = expand_shift
4399 (RSHIFT_EXPR, compute_mode, t2,
4400 build_int_cst (NULL_TREE, post_shift),
4401 NULL_RTX, 0);
4402 t4 = expand_shift
4403 (RSHIFT_EXPR, compute_mode, op0,
4404 build_int_cst (NULL_TREE, size - 1),
4405 NULL_RTX, 0);
4406 if (d < 0)
4407 quotient
4408 = force_operand (gen_rtx_MINUS (compute_mode,
4409 t4, t3),
4410 tquotient);
4411 else
4412 quotient
4413 = force_operand (gen_rtx_MINUS (compute_mode,
4414 t3, t4),
4415 tquotient);
4418 else /* Too wide mode to use tricky code */
4419 break;
4421 insn = get_last_insn ();
4422 if (insn != last
4423 && (set = single_set (insn)) != 0
4424 && SET_DEST (set) == quotient)
4425 set_unique_reg_note (insn,
4426 REG_EQUAL,
4427 gen_rtx_DIV (compute_mode, op0, op1));
4429 break;
4431 fail1:
4432 delete_insns_since (last);
4433 break;
4435 case FLOOR_DIV_EXPR:
4436 case FLOOR_MOD_EXPR:
4437 /* We will come here only for signed operations. */
4438 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4440 unsigned HOST_WIDE_INT mh;
4441 int pre_shift, lgup, post_shift;
4442 HOST_WIDE_INT d = INTVAL (op1);
4443 rtx ml;
4445 if (d > 0)
4447 /* We could just as easily deal with negative constants here,
4448 but it does not seem worth the trouble for GCC 2.6. */
4449 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4451 pre_shift = floor_log2 (d);
4452 if (rem_flag)
4454 remainder = expand_binop (compute_mode, and_optab, op0,
4455 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4456 remainder, 0, OPTAB_LIB_WIDEN);
4457 if (remainder)
4458 return gen_lowpart (mode, remainder);
4460 quotient = expand_shift
4461 (RSHIFT_EXPR, compute_mode, op0,
4462 build_int_cst (NULL_TREE, pre_shift),
4463 tquotient, 0);
4465 else
4467 rtx t1, t2, t3, t4;
4469 mh = choose_multiplier (d, size, size - 1,
4470 &ml, &post_shift, &lgup);
4471 gcc_assert (!mh);
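/* T1 below is the sign mask of op0 (0 or -1), so op0 ^ t1 is op0 for
   nonnegative op0 and ~op0 otherwise.  Because
   floor (x / d) = ~(~x / d) for negative x and positive d, the unsigned
   multiply-high sequence divides a nonnegative value and the final xor
   with t1 restores floor rounding without a branch.  For example
   op0 = -7, d = 3: ~op0 = 6, 6 / 3 = 2, ~2 = -3 = floor (-7 / 3).  */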
4473 if (post_shift < BITS_PER_WORD
4474 && size - 1 < BITS_PER_WORD)
4476 t1 = expand_shift
4477 (RSHIFT_EXPR, compute_mode, op0,
4478 build_int_cst (NULL_TREE, size - 1),
4479 NULL_RTX, 0);
4480 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4481 NULL_RTX, 0, OPTAB_WIDEN);
4482 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4483 + shift_cost[speed][compute_mode][size - 1]
4484 + 2 * add_cost[speed][compute_mode]);
4485 t3 = expand_mult_highpart (compute_mode, t2, ml,
4486 NULL_RTX, 1,
4487 max_cost - extra_cost);
4488 if (t3 != 0)
4490 t4 = expand_shift
4491 (RSHIFT_EXPR, compute_mode, t3,
4492 build_int_cst (NULL_TREE, post_shift),
4493 NULL_RTX, 1);
4494 quotient = expand_binop (compute_mode, xor_optab,
4495 t4, t1, tquotient, 0,
4496 OPTAB_WIDEN);
4501 else
4503 rtx nsign, t1, t2, t3, t4;
4504 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4505 op0, constm1_rtx), NULL_RTX);
4506 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4507 0, OPTAB_WIDEN);
4508 nsign = expand_shift
4509 (RSHIFT_EXPR, compute_mode, t2,
4510 build_int_cst (NULL_TREE, size - 1),
4511 NULL_RTX, 0);
4512 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4513 NULL_RTX);
4514 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4515 NULL_RTX, 0);
4516 if (t4)
4518 rtx t5;
4519 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4520 NULL_RTX, 0);
4521 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4522 t4, t5),
4523 tquotient);
4528 if (quotient != 0)
4529 break;
4530 delete_insns_since (last);
4532 /* Try using an instruction that produces both the quotient and
4533 remainder, using truncation. We can easily compensate the quotient
4534 or remainder to get floor rounding, once we have the remainder.
4535 Notice that we compute also the final remainder value here,
4536 and return the result right away. */
4537 if (target == 0 || GET_MODE (target) != compute_mode)
4538 target = gen_reg_rtx (compute_mode);
4540 if (rem_flag)
4542 remainder
4543 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4544 quotient = gen_reg_rtx (compute_mode);
4546 else
4548 quotient
4549 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4550 remainder = gen_reg_rtx (compute_mode);
4553 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4554 quotient, remainder, 0))
4556 /* This could be computed with a branch-less sequence.
4557 Save that for later. */
4558 rtx tem;
4559 rtx label = gen_label_rtx ();
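/* The code below converts the truncating divmod result to floor
   rounding.  Floor division differs from truncation only when the
   remainder is nonzero and op0 and op1 have opposite signs (their xor
   is negative), and then by exactly one: decrement the quotient and
   add op1 back into the remainder.  */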
4560 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4561 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4562 NULL_RTX, 0, OPTAB_WIDEN);
4563 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4564 expand_dec (quotient, const1_rtx);
4565 expand_inc (remainder, op1);
4566 emit_label (label);
4567 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4570 /* No luck with division elimination or divmod. Have to do it
4571 by conditionally adjusting op0 *and* the result. */
4573 rtx label1, label2, label3, label4, label5;
4574 rtx adjusted_op0;
4575 rtx tem;
4577 quotient = gen_reg_rtx (compute_mode);
4578 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4579 label1 = gen_label_rtx ();
4580 label2 = gen_label_rtx ();
4581 label3 = gen_label_rtx ();
4582 label4 = gen_label_rtx ();
4583 label5 = gen_label_rtx ();
4584 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4585 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4586 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4587 quotient, 0, OPTAB_LIB_WIDEN);
4588 if (tem != quotient)
4589 emit_move_insn (quotient, tem);
4590 emit_jump_insn (gen_jump (label5));
4591 emit_barrier ();
4592 emit_label (label1);
4593 expand_inc (adjusted_op0, const1_rtx);
4594 emit_jump_insn (gen_jump (label4));
4595 emit_barrier ();
4596 emit_label (label2);
4597 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4598 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4599 quotient, 0, OPTAB_LIB_WIDEN);
4600 if (tem != quotient)
4601 emit_move_insn (quotient, tem);
4602 emit_jump_insn (gen_jump (label5));
4603 emit_barrier ();
4604 emit_label (label3);
4605 expand_dec (adjusted_op0, const1_rtx);
4606 emit_label (label4);
4607 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4608 quotient, 0, OPTAB_LIB_WIDEN);
4609 if (tem != quotient)
4610 emit_move_insn (quotient, tem);
4611 expand_dec (quotient, const1_rtx);
4612 emit_label (label5);
4614 break;
4616 case CEIL_DIV_EXPR:
4617 case CEIL_MOD_EXPR:
4618 if (unsignedp)
4620 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4622 rtx t1, t2, t3;
4623 unsigned HOST_WIDE_INT d = INTVAL (op1);
4624 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4625 build_int_cst (NULL_TREE, floor_log2 (d)),
4626 tquotient, 1);
4627 t2 = expand_binop (compute_mode, and_optab, op0,
4628 GEN_INT (d - 1),
4629 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4630 t3 = gen_reg_rtx (compute_mode);
4631 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4632 compute_mode, 1, 1);
4633 if (t3 == 0)
4635 rtx lab;
4636 lab = gen_label_rtx ();
4637 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4638 expand_inc (t1, const1_rtx);
4639 emit_label (lab);
4640 quotient = t1;
4642 else
4643 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4644 t1, t3),
4645 tquotient);
4646 break;
4649 /* Try using an instruction that produces both the quotient and
4650 remainder, using truncation. We can easily compensate the
4651 quotient or remainder to get ceiling rounding, once we have the
4652 remainder. Notice that we compute also the final remainder
4653 value here, and return the result right away. */
4654 if (target == 0 || GET_MODE (target) != compute_mode)
4655 target = gen_reg_rtx (compute_mode);
4657 if (rem_flag)
4659 remainder = (REG_P (target)
4660 ? target : gen_reg_rtx (compute_mode));
4661 quotient = gen_reg_rtx (compute_mode);
4663 else
4665 quotient = (REG_P (target)
4666 ? target : gen_reg_rtx (compute_mode));
4667 remainder = gen_reg_rtx (compute_mode);
4670 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4671 remainder, 1))
4673 /* This could be computed with a branch-less sequence.
4674 Save that for later. */
4675 rtx label = gen_label_rtx ();
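/* For unsigned operands the truncated result needs a ceiling
   adjustment only when the remainder is nonzero, and then by exactly
   one: increment the quotient and subtract op1 from the remainder.  */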
4676 do_cmp_and_jump (remainder, const0_rtx, EQ,
4677 compute_mode, label);
4678 expand_inc (quotient, const1_rtx);
4679 expand_dec (remainder, op1);
4680 emit_label (label);
4681 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4684 /* No luck with division elimination or divmod. Have to do it
4685 by conditionally adjusting op0 *and* the result. */
4687 rtx label1, label2;
4688 rtx adjusted_op0, tem;
4690 quotient = gen_reg_rtx (compute_mode);
4691 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4692 label1 = gen_label_rtx ();
4693 label2 = gen_label_rtx ();
4694 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4695 compute_mode, label1);
4696 emit_move_insn (quotient, const0_rtx);
4697 emit_jump_insn (gen_jump (label2));
4698 emit_barrier ();
4699 emit_label (label1);
4700 expand_dec (adjusted_op0, const1_rtx);
4701 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4702 quotient, 1, OPTAB_LIB_WIDEN);
4703 if (tem != quotient)
4704 emit_move_insn (quotient, tem);
4705 expand_inc (quotient, const1_rtx);
4706 emit_label (label2);
4709 else /* signed */
4711 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4712 && INTVAL (op1) >= 0)
4714 /* This is extremely similar to the code for the unsigned case
4715 above. For 2.7 we should merge these variants, but for
4716 2.6.1 I don't want to touch the code for unsigned since that
4717 gets used in C. The signed case will only be used by other
4718 languages (Ada). */
4720 rtx t1, t2, t3;
4721 unsigned HOST_WIDE_INT d = INTVAL (op1);
4722 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4723 build_int_cst (NULL_TREE, floor_log2 (d)),
4724 tquotient, 0);
4725 t2 = expand_binop (compute_mode, and_optab, op0,
4726 GEN_INT (d - 1),
4727 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4728 t3 = gen_reg_rtx (compute_mode);
4729 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4730 compute_mode, 1, 1);
4731 if (t3 == 0)
4733 rtx lab;
4734 lab = gen_label_rtx ();
4735 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4736 expand_inc (t1, const1_rtx);
4737 emit_label (lab);
4738 quotient = t1;
4740 else
4741 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4742 t1, t3),
4743 tquotient);
4744 break;
4747 /* Try using an instruction that produces both the quotient and
4748 remainder, using truncation. We can easily compensate the
4749 quotient or remainder to get ceiling rounding, once we have the
4750 remainder. Notice that we compute also the final remainder
4751 value here, and return the result right away. */
4752 if (target == 0 || GET_MODE (target) != compute_mode)
4753 target = gen_reg_rtx (compute_mode);
4754 if (rem_flag)
4756 remainder = (REG_P (target)
4757 ? target : gen_reg_rtx (compute_mode));
4758 quotient = gen_reg_rtx (compute_mode);
4760 else
4762 quotient = (REG_P (target)
4763 ? target : gen_reg_rtx (compute_mode));
4764 remainder = gen_reg_rtx (compute_mode);
4767 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4768 remainder, 0))
4770 /* This could be computed with a branch-less sequence.
4771 Save that for later. */
4772 rtx tem;
4773 rtx label = gen_label_rtx ();
4774 do_cmp_and_jump (remainder, const0_rtx, EQ,
4775 compute_mode, label);
4776 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4777 NULL_RTX, 0, OPTAB_WIDEN);
4778 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4779 expand_inc (quotient, const1_rtx);
4780 expand_dec (remainder, op1);
4781 emit_label (label);
4782 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4785 /* No luck with division elimination or divmod. Have to do it
4786 by conditionally adjusting op0 *and* the result. */
4788 rtx label1, label2, label3, label4, label5;
4789 rtx adjusted_op0;
4790 rtx tem;
4792 quotient = gen_reg_rtx (compute_mode);
4793 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4794 label1 = gen_label_rtx ();
4795 label2 = gen_label_rtx ();
4796 label3 = gen_label_rtx ();
4797 label4 = gen_label_rtx ();
4798 label5 = gen_label_rtx ();
4799 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4800 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4801 compute_mode, label1);
4802 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4803 quotient, 0, OPTAB_LIB_WIDEN);
4804 if (tem != quotient)
4805 emit_move_insn (quotient, tem);
4806 emit_jump_insn (gen_jump (label5));
4807 emit_barrier ();
4808 emit_label (label1);
4809 expand_dec (adjusted_op0, const1_rtx);
4810 emit_jump_insn (gen_jump (label4));
4811 emit_barrier ();
4812 emit_label (label2);
4813 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4814 compute_mode, label3);
4815 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4816 quotient, 0, OPTAB_LIB_WIDEN);
4817 if (tem != quotient)
4818 emit_move_insn (quotient, tem);
4819 emit_jump_insn (gen_jump (label5));
4820 emit_barrier ();
4821 emit_label (label3);
4822 expand_inc (adjusted_op0, const1_rtx);
4823 emit_label (label4);
4824 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4825 quotient, 0, OPTAB_LIB_WIDEN);
4826 if (tem != quotient)
4827 emit_move_insn (quotient, tem);
4828 expand_inc (quotient, const1_rtx);
4829 emit_label (label5);
4832 break;
4834 case EXACT_DIV_EXPR:
4835 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4837 HOST_WIDE_INT d = INTVAL (op1);
4838 unsigned HOST_WIDE_INT ml;
4839 int pre_shift;
4840 rtx t1;
4842 pre_shift = floor_log2 (d & -d);
4843 ml = invert_mod2n (d >> pre_shift, size);
4844 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4845 build_int_cst (NULL_TREE, pre_shift),
4846 NULL_RTX, unsignedp);
4847 quotient = expand_mult (compute_mode, t1,
4848 gen_int_mode (ml, compute_mode),
4849 NULL_RTX, 1);
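/* The quotient just computed is op0, shifted right past the divisor's
   trailing zero bits, times the multiplicative inverse of the odd part
   of the divisor modulo 2^size.  For example (32 bits, d = 5):
   invert_mod2n (5, 32) = 0xCCCCCCCD, and 35 * 0xCCCCCCCD = 7 modulo
   2^32, the exact quotient.  */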
4851 insn = get_last_insn ();
4852 set_unique_reg_note (insn,
4853 REG_EQUAL,
4854 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4855 compute_mode,
4856 op0, op1));
4858 break;
4860 case ROUND_DIV_EXPR:
4861 case ROUND_MOD_EXPR:
4862 if (unsignedp)
4864 rtx tem;
4865 rtx label;
4866 label = gen_label_rtx ();
4867 quotient = gen_reg_rtx (compute_mode);
4868 remainder = gen_reg_rtx (compute_mode);
4869 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4871 rtx tem;
4872 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4873 quotient, 1, OPTAB_LIB_WIDEN);
4874 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4875 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4876 remainder, 1, OPTAB_LIB_WIDEN);
4878 tem = plus_constant (op1, -1);
4879 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4880 build_int_cst (NULL_TREE, 1),
4881 NULL_RTX, 1);
4882 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4883 expand_inc (quotient, const1_rtx);
4884 expand_dec (remainder, op1);
4885 emit_label (label);
4887 else
4889 rtx abs_rem, abs_op1, tem, mask;
4890 rtx label;
4891 label = gen_label_rtx ();
4892 quotient = gen_reg_rtx (compute_mode);
4893 remainder = gen_reg_rtx (compute_mode);
4894 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4896 rtx tem;
4897 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4898 quotient, 0, OPTAB_LIB_WIDEN);
4899 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4900 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4901 remainder, 0, OPTAB_LIB_WIDEN);
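/* The rest of this block rounds the truncated result to nearest, away
   from zero on ties.  When 2 * |remainder| >= |op1| the quotient moves
   one step in the direction of the operands' signs: mask is
   (op0 ^ op1) >> (size - 1), so (1 ^ mask) - mask is +1 or -1 and is
   added to the quotient, while (op1 ^ mask) - mask is op1 or -op1 and
   is subtracted from the remainder.  For example 7 / 2: quotient 3,
   remainder 1, 2 * 1 >= 2, so the rounded quotient is 4.  */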
4903 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4904 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4905 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4906 build_int_cst (NULL_TREE, 1),
4907 NULL_RTX, 1);
4908 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4909 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4910 NULL_RTX, 0, OPTAB_WIDEN);
4911 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4912 build_int_cst (NULL_TREE, size - 1),
4913 NULL_RTX, 0);
4914 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4915 NULL_RTX, 0, OPTAB_WIDEN);
4916 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4917 NULL_RTX, 0, OPTAB_WIDEN);
4918 expand_inc (quotient, tem);
4919 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4920 NULL_RTX, 0, OPTAB_WIDEN);
4921 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4922 NULL_RTX, 0, OPTAB_WIDEN);
4923 expand_dec (remainder, tem);
4924 emit_label (label);
4926 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4928 default:
4929 gcc_unreachable ();
4932 if (quotient == 0)
4934 if (target && GET_MODE (target) != compute_mode)
4935 target = 0;
4937 if (rem_flag)
4939 /* Try to produce the remainder without producing the quotient.
4940 If we seem to have a divmod pattern that does not require widening,
4941 don't try widening here. We should really have a WIDEN argument
4942 to expand_twoval_binop, since what we'd really like to do here is
4943 1) try a mod insn in compute_mode
4944 2) try a divmod insn in compute_mode
4945 3) try a div insn in compute_mode and multiply-subtract to get
4946 remainder
4947 4) try the same things with widening allowed. */
4948 remainder
4949 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4950 op0, op1, target,
4951 unsignedp,
4952 ((optab_handler (optab2, compute_mode)
4953 != CODE_FOR_nothing)
4954 ? OPTAB_DIRECT : OPTAB_WIDEN));
4955 if (remainder == 0)
4957 /* No luck there. Can we do remainder and divide at once
4958 without a library call? */
4959 remainder = gen_reg_rtx (compute_mode);
4960 if (! expand_twoval_binop ((unsignedp
4961 ? udivmod_optab
4962 : sdivmod_optab),
4963 op0, op1,
4964 NULL_RTX, remainder, unsignedp))
4965 remainder = 0;
4968 if (remainder)
4969 return gen_lowpart (mode, remainder);
4972 /* Produce the quotient. Try a quotient insn, but not a library call.
4973 If we have a divmod in this mode, use it in preference to widening
4974 the div (for this test we assume it will not fail). Note that optab2
4975 is set to the one of the two optabs that the call below will use. */
4976 quotient
4977 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4978 op0, op1, rem_flag ? NULL_RTX : target,
4979 unsignedp,
4980 ((optab_handler (optab2, compute_mode)
4981 != CODE_FOR_nothing)
4982 ? OPTAB_DIRECT : OPTAB_WIDEN));
4984 if (quotient == 0)
4986 /* No luck there. Try a quotient-and-remainder insn,
4987 keeping the quotient alone. */
4988 quotient = gen_reg_rtx (compute_mode);
4989 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4990 op0, op1,
4991 quotient, NULL_RTX, unsignedp))
4993 quotient = 0;
4994 if (! rem_flag)
4995 /* Still no luck. If we are not computing the remainder,
4996 use a library call for the quotient. */
4997 quotient = sign_expand_binop (compute_mode,
4998 udiv_optab, sdiv_optab,
4999 op0, op1, target,
5000 unsignedp, OPTAB_LIB_WIDEN);
5005 if (rem_flag)
5007 if (target && GET_MODE (target) != compute_mode)
5008 target = 0;
5010 if (quotient == 0)
5012 /* No divide instruction either. Use library for remainder. */
5013 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5014 op0, op1, target,
5015 unsignedp, OPTAB_LIB_WIDEN);
5016 /* No remainder function. Try a quotient-and-remainder
5017 function, keeping the remainder. */
5018 if (!remainder)
5020 remainder = gen_reg_rtx (compute_mode);
5021 if (!expand_twoval_binop_libfunc
5022 (unsignedp ? udivmod_optab : sdivmod_optab,
5023 op0, op1,
5024 NULL_RTX, remainder,
5025 unsignedp ? UMOD : MOD))
5026 remainder = NULL_RTX;
5029 else
5031 /* We divided. Now finish doing X - Y * (X / Y). */
5032 remainder = expand_mult (compute_mode, quotient, op1,
5033 NULL_RTX, unsignedp);
5034 remainder = expand_binop (compute_mode, sub_optab, op0,
5035 remainder, target, unsignedp,
5036 OPTAB_LIB_WIDEN);
5040 return gen_lowpart (mode, rem_flag ? remainder : quotient);
5043 /* Return a tree node with data type TYPE, describing the value of X.
5044 Usually this is a VAR_DECL, if there is no obvious better choice.
5045 X may be an expression; however, we only support those expressions
5046 generated by loop.c. */
5048 tree
5049 make_tree (tree type, rtx x)
5051 tree t;
5053 switch (GET_CODE (x))
5055 case CONST_INT:
5057 HOST_WIDE_INT hi = 0;
5059 if (INTVAL (x) < 0
5060 && !(TYPE_UNSIGNED (type)
5061 && (GET_MODE_BITSIZE (TYPE_MODE (type))
5062 < HOST_BITS_PER_WIDE_INT)))
5063 hi = -1;
5065 t = build_int_cst_wide (type, INTVAL (x), hi);
5067 return t;
5070 case CONST_DOUBLE:
5071 if (GET_MODE (x) == VOIDmode)
5072 t = build_int_cst_wide (type,
5073 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
5074 else
5076 REAL_VALUE_TYPE d;
5078 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
5079 t = build_real (type, d);
5082 return t;
5084 case CONST_VECTOR:
5086 int units = CONST_VECTOR_NUNITS (x);
5087 tree itype = TREE_TYPE (type);
5088 tree t = NULL_TREE;
5089 int i;
5092 /* Build a tree with vector elements. */
5093 for (i = units - 1; i >= 0; --i)
5095 rtx elt = CONST_VECTOR_ELT (x, i);
5096 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
5099 return build_vector (type, t);
5102 case PLUS:
5103 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5104 make_tree (type, XEXP (x, 1)));
5106 case MINUS:
5107 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5108 make_tree (type, XEXP (x, 1)));
5110 case NEG:
5111 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5113 case MULT:
5114 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5115 make_tree (type, XEXP (x, 1)));
5117 case ASHIFT:
5118 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5119 make_tree (type, XEXP (x, 1)));
5121 case LSHIFTRT:
5122 t = unsigned_type_for (type);
5123 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5124 make_tree (t, XEXP (x, 0)),
5125 make_tree (type, XEXP (x, 1))));
5127 case ASHIFTRT:
5128 t = signed_type_for (type);
5129 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5130 make_tree (t, XEXP (x, 0)),
5131 make_tree (type, XEXP (x, 1))));
5133 case DIV:
5134 if (TREE_CODE (type) != REAL_TYPE)
5135 t = signed_type_for (type);
5136 else
5137 t = type;
5139 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5140 make_tree (t, XEXP (x, 0)),
5141 make_tree (t, XEXP (x, 1))));
5142 case UDIV:
5143 t = unsigned_type_for (type);
5144 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5145 make_tree (t, XEXP (x, 0)),
5146 make_tree (t, XEXP (x, 1))));
5148 case SIGN_EXTEND:
5149 case ZERO_EXTEND:
5150 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5151 GET_CODE (x) == ZERO_EXTEND);
5152 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5154 case CONST:
5155 return make_tree (type, XEXP (x, 0));
5157 case SYMBOL_REF:
5158 t = SYMBOL_REF_DECL (x);
5159 if (t)
5160 return fold_convert (type, build_fold_addr_expr (t));
5161 /* else fall through. */
5163 default:
5164 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5166 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5167 address mode to pointer mode. */
5168 if (POINTER_TYPE_P (type))
5169 x = convert_memory_address_addr_space
5170 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5172 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5173 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5174 t->decl_with_rtl.rtl = x;
5176 return t;
5180 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5181 and returning TARGET.
5183 If TARGET is 0, a pseudo-register or constant is returned. */
5185 rtx
5186 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5188 rtx tem = 0;
5190 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5191 tem = simplify_binary_operation (AND, mode, op0, op1);
5192 if (tem == 0)
5193 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5195 if (target == 0)
5196 target = tem;
5197 else if (tem != target)
5198 emit_move_insn (target, tem);
5199 return target;
5202 /* Helper function for emit_store_flag. */
5203 static rtx
5204 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5205 enum machine_mode mode, enum machine_mode compare_mode,
5206 int unsignedp, rtx x, rtx y, int normalizep,
5207 enum machine_mode target_mode)
5209 rtx op0, last, comparison, subtarget, pattern;
5210 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5212 last = get_last_insn ();
5213 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5214 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5215 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5216 if (!x || !y
5217 || !insn_data[icode].operand[2].predicate
5218 (x, insn_data[icode].operand[2].mode)
5219 || !insn_data[icode].operand[3].predicate
5220 (y, insn_data[icode].operand[3].mode)
5221 || !insn_data[icode].operand[1].predicate (comparison, VOIDmode))
5223 delete_insns_since (last);
5224 return NULL_RTX;
5227 if (target_mode == VOIDmode)
5228 target_mode = result_mode;
5229 if (!target)
5230 target = gen_reg_rtx (target_mode);
5232 if (optimize
5233 || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
5234 subtarget = gen_reg_rtx (result_mode);
5235 else
5236 subtarget = target;
5238 pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
5239 if (!pattern)
5240 return NULL_RTX;
5241 emit_insn (pattern);
5243 /* If we are converting to a wider mode, first convert to
5244 TARGET_MODE, then normalize. This produces better combining
5245 opportunities on machines that have a SIGN_EXTRACT when we are
5246 testing a single bit. This mostly benefits the 68k.
5248 If STORE_FLAG_VALUE does not have the sign bit set when
5249 interpreted in MODE, we can do this conversion as unsigned, which
5250 is usually more efficient. */
5251 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5253 convert_move (target, subtarget,
5254 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5255 && 0 == (STORE_FLAG_VALUE
5256 & ((HOST_WIDE_INT) 1
5257 << (GET_MODE_BITSIZE (result_mode) -1))));
5258 op0 = target;
5259 result_mode = target_mode;
5261 else
5262 op0 = subtarget;
5264 /* If we want to keep subexpressions around, don't reuse our last
5265 target. */
5266 if (optimize)
5267 subtarget = 0;
5269 /* Now normalize to the proper value in MODE. Sometimes we don't
5270 have to do anything. */
5271 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5272 ;
5273 /* STORE_FLAG_VALUE might be the most negative number, so write
5274 the comparison this way to avoid a compile-time warning. */
5275 else if (- normalizep == STORE_FLAG_VALUE)
5276 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5278 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5279 it hard to use a value of just the sign bit due to ANSI integer
5280 constant typing rules. */
5281 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5282 && (STORE_FLAG_VALUE
5283 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5284 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5285 size_int (GET_MODE_BITSIZE (result_mode) - 1), subtarget,
5286 normalizep == 1);
5287 else
5289 gcc_assert (STORE_FLAG_VALUE & 1);
5291 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5292 if (normalizep == -1)
5293 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5296 /* If we were converting to a smaller mode, do the conversion now. */
5297 if (target_mode != result_mode)
5299 convert_move (target, op0, 0);
5300 return target;
5302 else
5303 return op0;
5307 /* A subroutine of emit_store_flag that includes only "tricks" which do not
5308 need a recursive call. These are kept separate to avoid infinite
5309 loops. */
5311 static rtx
5312 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5313 enum machine_mode mode, int unsignedp, int normalizep,
5314 enum machine_mode target_mode)
5316 rtx subtarget;
5317 enum insn_code icode;
5318 enum machine_mode compare_mode;
5319 enum mode_class mclass;
5320 enum rtx_code scode;
5321 rtx tem;
5323 if (unsignedp)
5324 code = unsigned_condition (code);
5325 scode = swap_condition (code);
5327 /* If one operand is constant, make it the second one. Only do this
5328 if the other operand is not constant as well. */
5330 if (swap_commutative_operands_p (op0, op1))
5332 tem = op0;
5333 op0 = op1;
5334 op1 = tem;
5335 code = swap_condition (code);
5338 if (mode == VOIDmode)
5339 mode = GET_MODE (op0);
5341 /* For some comparisons with 1 and -1, we can convert this to
5342 comparisons with zero. This will often produce more opportunities for
5343 store-flag insns. */
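/* For example, "X < 1" becomes "X <= 0", "X > -1" becomes "X >= 0",
   and the unsigned test "X >=u 1" becomes "X != 0".  */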
5345 switch (code)
5347 case LT:
5348 if (op1 == const1_rtx)
5349 op1 = const0_rtx, code = LE;
5350 break;
5351 case LE:
5352 if (op1 == constm1_rtx)
5353 op1 = const0_rtx, code = LT;
5354 break;
5355 case GE:
5356 if (op1 == const1_rtx)
5357 op1 = const0_rtx, code = GT;
5358 break;
5359 case GT:
5360 if (op1 == constm1_rtx)
5361 op1 = const0_rtx, code = GE;
5362 break;
5363 case GEU:
5364 if (op1 == const1_rtx)
5365 op1 = const0_rtx, code = NE;
5366 break;
5367 case LTU:
5368 if (op1 == const1_rtx)
5369 op1 = const0_rtx, code = EQ;
5370 break;
5371 default:
5372 break;
5375 /* If we are comparing a double-word integer with zero or -1, we can
5376 convert the comparison into one involving a single word. */
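/* For example, on a 32-bit target a DImode "X == 0" becomes
   "(lowpart (X) | highpart (X)) == 0", "X == -1" becomes
   "(lowpart (X) & highpart (X)) == -1", and "X < 0" only needs to
   test the sign bit of the high word.  */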
5377 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5378 && GET_MODE_CLASS (mode) == MODE_INT
5379 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5381 if ((code == EQ || code == NE)
5382 && (op1 == const0_rtx || op1 == constm1_rtx))
5384 rtx op00, op01;
5386 /* Do a logical OR or AND of the two words and compare the
5387 result. */
5388 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5389 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5390 tem = expand_binop (word_mode,
5391 op1 == const0_rtx ? ior_optab : and_optab,
5392 op00, op01, NULL_RTX, unsignedp,
5393 OPTAB_DIRECT);
5395 if (tem != 0)
5396 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5397 unsignedp, normalizep);
5399 else if ((code == LT || code == GE) && op1 == const0_rtx)
5401 rtx op0h;
5403 /* If testing the sign bit, we can just test the high word. */
5404 op0h = simplify_gen_subreg (word_mode, op0, mode,
5405 subreg_highpart_offset (word_mode,
5406 mode));
5407 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5408 unsignedp, normalizep);
5410 else
5411 tem = NULL_RTX;
5413 if (tem)
5415 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5416 return tem;
5417 if (!target)
5418 target = gen_reg_rtx (target_mode);
5420 convert_move (target, tem,
5421 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5422 & ((HOST_WIDE_INT) 1
5423 << (GET_MODE_BITSIZE (word_mode) -1))));
5424 return target;
5428 /* If this is A < 0 or A >= 0, we can do this by taking the one's
5429 complement of A (for GE) and shifting the sign bit to the low bit. */
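/* For example, in a 32-bit mode with STORE_FLAG_VALUE == 1, "A < 0"
   becomes the logical shift "(unsigned) A >> 31" and "A >= 0"
   becomes "(unsigned) ~A >> 31"; with NORMALIZEP == -1 an
   arithmetic shift is used instead, producing 0 or -1.  */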
5430 if (op1 == const0_rtx && (code == LT || code == GE)
5431 && GET_MODE_CLASS (mode) == MODE_INT
5432 && (normalizep || STORE_FLAG_VALUE == 1
5433 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5434 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5435 == ((unsigned HOST_WIDE_INT) 1
5436 << (GET_MODE_BITSIZE (mode) - 1))))))
5438 subtarget = target;
5440 if (!target)
5441 target_mode = mode;
5443 /* If the result is to be wider than OP0, it is best to convert it
5444 first. If it is to be narrower, it is *incorrect* to convert it
5445 first. */
5446 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5448 op0 = convert_modes (target_mode, mode, op0, 0);
5449 mode = target_mode;
5452 if (target_mode != mode)
5453 subtarget = 0;
5455 if (code == GE)
5456 op0 = expand_unop (mode, one_cmpl_optab, op0,
5457 ((STORE_FLAG_VALUE == 1 || normalizep)
5458 ? 0 : subtarget), 0);
5460 if (STORE_FLAG_VALUE == 1 || normalizep)
5461 /* If we are supposed to produce a 0/1 value, we want to do
5462 a logical shift from the sign bit to the low-order bit; for
5463 a -1/0 value, we do an arithmetic shift. */
5464 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5465 size_int (GET_MODE_BITSIZE (mode) - 1),
5466 subtarget, normalizep != -1);
5468 if (mode != target_mode)
5469 op0 = convert_modes (target_mode, mode, op0, 0);
5471 return op0;
5474 mclass = GET_MODE_CLASS (mode);
5475 for (compare_mode = mode; compare_mode != VOIDmode;
5476 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5478 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5479 icode = optab_handler (cstore_optab, optab_mode);
5480 if (icode != CODE_FOR_nothing)
5482 do_pending_stack_adjust ();
5483 tem = emit_cstore (target, icode, code, mode, compare_mode,
5484 unsignedp, op0, op1, normalizep, target_mode);
5485 if (tem)
5486 return tem;
5488 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5490 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5491 unsignedp, op1, op0, normalizep, target_mode);
5492 if (tem)
5493 return tem;
5495 break;
5499 return 0;
5502 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5503 and storing in TARGET. Normally return TARGET.
5504 Return 0 if that cannot be done.
5506 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5507 it is VOIDmode, they cannot both be CONST_INT.
5509 UNSIGNEDP is for the case where we have to widen the operands
5510 to perform the operation. It says to use zero-extension.
5512 NORMALIZEP is 1 if we should convert the result to be either zero
5513 or one. NORMALIZEP is -1 if we should convert the result to be
5514 either zero or -1. If NORMALIZEP is zero, the result will be left
5515 "raw" out of the scc insn. */
5517 rtx
5518 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5519 enum machine_mode mode, int unsignedp, int normalizep)
5521 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5522 enum rtx_code rcode;
5523 rtx subtarget;
5524 rtx tem, last, trueval;
5526 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5527 target_mode);
5528 if (tem)
5529 return tem;
5531 /* If we reached here, we can't do this with a scc insn, however there
5532 are some comparisons that can be done in other ways. Don't do any
5533 of these cases if branches are very cheap. */
5534 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5535 return 0;
5537 /* See what we need to return. We can only return a 1, -1, or the
5538 sign bit. */
5540 if (normalizep == 0)
5542 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5543 normalizep = STORE_FLAG_VALUE;
5545 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5546 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5547 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5548 ;
5549 else
5550 return 0;
5553 last = get_last_insn ();
5555 /* If optimizing, use different pseudo registers for each insn, instead
5556 of reusing the same pseudo. This leads to better CSE, but slows
5557 down the compiler, since there are more pseudos. */
5558 subtarget = (!optimize
5559 && (target_mode == mode)) ? target : NULL_RTX;
5560 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5562 /* For floating-point comparisons, try the reverse comparison or try
5563 changing the "orderedness" of the comparison. */
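/* For example, if the reverse comparison is available and both
   STORE_FLAG_VALUE and NORMALIZEP are 1, we can compute the reverse
   and XOR its 0/1 result with 1; if NORMALIZEP is -1 we instead add
   -1 to the reverse's 0/1 result to get the desired 0/-1 value.  */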
5564 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5566 enum rtx_code first_code;
5567 bool and_them;
5569 rcode = reverse_condition_maybe_unordered (code);
5570 if (can_compare_p (rcode, mode, ccp_store_flag)
5571 && (code == ORDERED || code == UNORDERED
5572 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5573 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5575 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5576 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5578 /* For the reverse comparison, use either an addition or a XOR. */
5579 if (want_add
5580 && rtx_cost (GEN_INT (normalizep), PLUS,
5581 optimize_insn_for_speed_p ()) == 0)
5583 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5584 STORE_FLAG_VALUE, target_mode);
5585 if (tem)
5586 return expand_binop (target_mode, add_optab, tem,
5587 GEN_INT (normalizep),
5588 target, 0, OPTAB_WIDEN);
5590 else if (!want_add
5591 && rtx_cost (trueval, XOR,
5592 optimize_insn_for_speed_p ()) == 0)
5594 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5595 normalizep, target_mode);
5596 if (tem)
5597 return expand_binop (target_mode, xor_optab, tem, trueval,
5598 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5602 delete_insns_since (last);
5604 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5605 if (code == ORDERED || code == UNORDERED)
5606 return 0;
5608 and_them = split_comparison (code, mode, &first_code, &code);
5610 /* If there are no NaNs, the first comparison should always fall through.
5611 Effectively change the comparison to the other one. */
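/* For example, when NaNs can be ignored an UNLT request splits
   roughly into "UNORDERED || LT"; the UNORDERED half can never be
   true, so only the LT store-flag needs to be emitted.  */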
5612 if (!HONOR_NANS (mode))
5614 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5615 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5616 target_mode);
5619 #ifdef HAVE_conditional_move
5620 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5621 conditional move. */
5622 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5623 normalizep, target_mode);
5624 if (tem == 0)
5625 return 0;
5627 if (and_them)
5628 tem = emit_conditional_move (target, code, op0, op1, mode,
5629 tem, const0_rtx, GET_MODE (tem), 0);
5630 else
5631 tem = emit_conditional_move (target, code, op0, op1, mode,
5632 trueval, tem, GET_MODE (tem), 0);
5634 if (tem == 0)
5635 delete_insns_since (last);
5636 return tem;
5637 #else
5638 return 0;
5639 #endif
5642 /* The remaining tricks only apply to integer comparisons. */
5644 if (GET_MODE_CLASS (mode) != MODE_INT)
5645 return 0;
5647 /* If this is an equality comparison of integers, we can try to exclusive-or
5648 (or subtract) the two operands and use a recursive call to try the
5649 comparison with zero. Don't do any of these cases if branches are
5650 very cheap. */
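/* For example, "X == Y" is rewritten as "(X ^ Y) == 0" (or as
   "(X - Y) == 0" if no XOR pattern is available), and the zero
   comparison is retried, where the single-word and sign-bit tricks
   above may apply.  */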
5652 if ((code == EQ || code == NE) && op1 != const0_rtx)
5654 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5655 OPTAB_WIDEN);
5657 if (tem == 0)
5658 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5659 OPTAB_WIDEN);
5660 if (tem != 0)
5661 tem = emit_store_flag (target, code, tem, const0_rtx,
5662 mode, unsignedp, normalizep);
5663 if (tem != 0)
5664 return tem;
5666 delete_insns_since (last);
5669 /* For integer comparisons, try the reverse comparison. However, for
5670 small X, and if we would have to extend it anyway, implementing "X != 0"
5671 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5672 rcode = reverse_condition (code);
5673 if (can_compare_p (rcode, mode, ccp_store_flag)
5674 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5675 && code == NE
5676 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5677 && op1 == const0_rtx))
5679 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5680 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5682 /* Again, for the reverse comparison, use either an addition or a XOR. */
5683 if (want_add
5684 && rtx_cost (GEN_INT (normalizep), PLUS,
5685 optimize_insn_for_speed_p ()) == 0)
5687 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5688 STORE_FLAG_VALUE, target_mode);
5689 if (tem != 0)
5690 tem = expand_binop (target_mode, add_optab, tem,
5691 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5693 else if (!want_add
5694 && rtx_cost (trueval, XOR,
5695 optimize_insn_for_speed_p ()) == 0)
5697 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5698 normalizep, target_mode);
5699 if (tem != 0)
5700 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5701 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5704 if (tem != 0)
5705 return tem;
5706 delete_insns_since (last);
5709 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5710 the constant zero. Reject all other comparisons at this point. Only
5711 do LE and GT if branches are expensive since they are expensive on
5712 2-operand machines. */
5714 if (op1 != const0_rtx
5715 || (code != EQ && code != NE
5716 && (BRANCH_COST (optimize_insn_for_speed_p (),
5717 false) <= 1 || (code != LE && code != GT))))
5718 return 0;
5720 /* Try to put the result of the comparison in the sign bit. Assume we can't
5721 do the necessary operation below. */
5723 tem = 0;
5725 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5726 the sign bit set. */
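/* For example, in a 32-bit mode: A == 0 gives 0 | -1 == -1 (sign bit
   set), A == 5 gives 5 | 4 == 5 (sign bit clear), and A == -3 gives
   -3 | -4 == -3 (sign bit set), matching A <= 0 in each case.  */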
5728 if (code == LE)
5730 /* This is destructive, so SUBTARGET can't be OP0. */
5731 if (rtx_equal_p (subtarget, op0))
5732 subtarget = 0;
5734 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5735 OPTAB_WIDEN);
5736 if (tem)
5737 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5738 OPTAB_WIDEN);
5741 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5742 number of bits in the mode of OP0, minus one. */
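/* For example, in a 32-bit mode: A == 5 gives (5 >> 31) - 5 == -5
   (sign bit set), A == 0 gives 0 - 0 == 0, and A == -3 gives
   -1 - (-3) == 2 (sign bit clear), so the sign bit is set exactly
   when A > 0.  */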
5744 if (code == GT)
5746 if (rtx_equal_p (subtarget, op0))
5747 subtarget = 0;
5749 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5750 size_int (GET_MODE_BITSIZE (mode) - 1),
5751 subtarget, 0);
5752 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5753 OPTAB_WIDEN);
5756 if (code == EQ || code == NE)
5758 /* For EQ or NE, one way to do the comparison is to apply an operation
5759 that converts the operand into a positive number if it is nonzero
5760 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5761 for NE we negate. This puts the result in the sign bit. Then we
5762 normalize with a shift, if needed.
5764 Two operations that can do the above actions are ABS and FFS, so try
5765 them. If that doesn't work, and MODE is smaller than a full word,
5766 we can use zero-extension to the wider mode (an unsigned conversion)
5767 as the operation. */
5769 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5770 that is compensated by the subsequent overflow when subtracting
5771 one / negating. */
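/* For example, using ABS in a 32-bit mode: for EQ, A == 7 gives
   abs (7) - 1 == 6 (sign bit clear) while A == 0 gives 0 - 1 == -1
   (sign bit set); for NE, A == 7 gives -abs (7) == -7 (sign bit set)
   while A == 0 stays 0.  For A == INT_MIN, abs yields INT_MIN
   itself, but INT_MIN - 1 wraps to INT_MAX and -INT_MIN wraps back
   to INT_MIN, so the sign bit still comes out right.  */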
5773 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5774 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5775 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5776 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5777 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5779 tem = convert_modes (word_mode, mode, op0, 1);
5780 mode = word_mode;
5783 if (tem != 0)
5785 if (code == EQ)
5786 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5787 0, OPTAB_WIDEN);
5788 else
5789 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5792 /* If we couldn't do it that way, for NE we can "or" the two's complement
5793 of the value with itself. For EQ, we take the one's complement of
5794 that "or", which is an extra insn, so we only handle EQ if branches
5795 are expensive. */
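/* For example, in a 32-bit mode: A == 5 gives -5 | 5 == -1 (sign bit
   set, so A != 0), while A == 0 gives 0 | 0 == 0; for EQ the extra
   one's complement turns those results into 0 and -1 respectively.  */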
5797 if (tem == 0
5798 && (code == NE
5799 || BRANCH_COST (optimize_insn_for_speed_p (),
5800 false) > 1))
5802 if (rtx_equal_p (subtarget, op0))
5803 subtarget = 0;
5805 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5806 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5807 OPTAB_WIDEN);
5809 if (tem && code == EQ)
5810 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5814 if (tem && normalizep)
5815 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5816 size_int (GET_MODE_BITSIZE (mode) - 1),
5817 subtarget, normalizep == 1);
5819 if (tem)
5821 if (!target)
5822 ;
5823 else if (GET_MODE (tem) != target_mode)
5825 convert_move (target, tem, 0);
5826 tem = target;
5828 else if (!subtarget)
5830 emit_move_insn (target, tem);
5831 tem = target;
5834 else
5835 delete_insns_since (last);
5837 return tem;
5840 /* Like emit_store_flag, but always succeeds. */
5842 rtx
5843 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5844 enum machine_mode mode, int unsignedp, int normalizep)
5846 rtx tem, label;
5847 rtx trueval, falseval;
5849 /* First see if emit_store_flag can do the job. */
5850 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5851 if (tem != 0)
5852 return tem;
5854 if (!target)
5855 target = gen_reg_rtx (word_mode);
5857 /* If this failed, we have to do this with set/compare/jump/set code.
5858 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
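/* For example, for "target = (target != 0)" with NORMALIZEP == 1 the
   code below emits roughly "if (target == 0) goto L;  target = 1;
   L:", leaving TARGET untouched when it is already zero.  */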
5859 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5860 if (code == NE
5861 && GET_MODE_CLASS (mode) == MODE_INT
5862 && REG_P (target)
5863 && op0 == target
5864 && op1 == const0_rtx)
5866 label = gen_label_rtx ();
5867 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5868 mode, NULL_RTX, NULL_RTX, label, -1);
5869 emit_move_insn (target, trueval);
5870 emit_label (label);
5871 return target;
5874 if (!REG_P (target)
5875 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5876 target = gen_reg_rtx (GET_MODE (target));
5878 /* Jump in the right direction if the target cannot implement CODE
5879 but can jump on its reverse condition. */
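/* For example, if CODE is UNORDERED but the machine can only branch
   on ORDERED, the roles of TRUEVAL and FALSEVAL are swapped and the
   branch below tests ORDERED instead.  */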
5880 falseval = const0_rtx;
5881 if (! can_compare_p (code, mode, ccp_jump)
5882 && (! FLOAT_MODE_P (mode)
5883 || code == ORDERED || code == UNORDERED
5884 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5885 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5887 enum rtx_code rcode;
5888 if (FLOAT_MODE_P (mode))
5889 rcode = reverse_condition_maybe_unordered (code);
5890 else
5891 rcode = reverse_condition (code);
5893 /* Canonicalize to UNORDERED for the libcall. */
5894 if (can_compare_p (rcode, mode, ccp_jump)
5895 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5897 falseval = trueval;
5898 trueval = const0_rtx;
5899 code = rcode;
5903 emit_move_insn (target, trueval);
5904 label = gen_label_rtx ();
5905 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5906 NULL_RTX, label, -1);
5908 emit_move_insn (target, falseval);
5909 emit_label (label);
5911 return target;
5914 /* Perform possibly multi-word comparison and conditional jump to LABEL
5915 if ARG1 OP ARG2 is true where ARG1 and ARG2 are of mode MODE. This is
5916 now a thin wrapper around do_compare_rtx_and_jump. */
5918 static void
5919 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5920 rtx label)
5922 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5923 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5924 NULL_RTX, NULL_RTX, label, -1);