gcc/expmed.c
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 2011
6 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "recog.h"
38 #include "langhooks.h"
39 #include "df.h"
40 #include "target.h"
41 #include "expmed.h"
43 struct target_expmed default_target_expmed;
44 #if SWITCHABLE_TARGET
45 struct target_expmed *this_target_expmed = &default_target_expmed;
46 #endif
48 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx);
51 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, rtx);
53 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
54 unsigned HOST_WIDE_INT,
55 unsigned HOST_WIDE_INT,
56 unsigned HOST_WIDE_INT, rtx, int, bool);
57 static rtx mask_rtx (enum machine_mode, int, int, int);
58 static rtx lshift_value (enum machine_mode, rtx, int, int);
59 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
60 unsigned HOST_WIDE_INT, int);
61 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
62 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
63 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
 65 /* Test whether a value is zero or a power of two.  */
66 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
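/* Worked example (added for illustration, not in the original source):
   for x = 8, x & (x - 1) is 8 & 7 == 0, so the macro is true;
   for x = 6, 6 & 5 == 4, so it is false;
   for x = 0, 0 & -1 == 0, so zero is accepted as well.  */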
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
73 /* Reduce conditional compilation elsewhere. */
74 #ifndef HAVE_insv
75 #define HAVE_insv 0
76 #define CODE_FOR_insv CODE_FOR_nothing
77 #define gen_insv(a,b,c,d) NULL_RTX
78 #endif
79 #ifndef HAVE_extv
80 #define HAVE_extv 0
81 #define CODE_FOR_extv CODE_FOR_nothing
82 #define gen_extv(a,b,c,d) NULL_RTX
83 #endif
84 #ifndef HAVE_extzv
85 #define HAVE_extzv 0
86 #define CODE_FOR_extzv CODE_FOR_nothing
87 #define gen_extzv(a,b,c,d) NULL_RTX
88 #endif
90 void
91 init_expmed (void)
93 struct
95 struct rtx_def reg; rtunion reg_fld[2];
96 struct rtx_def plus; rtunion plus_fld1;
97 struct rtx_def neg;
98 struct rtx_def mult; rtunion mult_fld1;
99 struct rtx_def sdiv; rtunion sdiv_fld1;
100 struct rtx_def udiv; rtunion udiv_fld1;
101 struct rtx_def zext;
102 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
103 struct rtx_def smod_32; rtunion smod_32_fld1;
104 struct rtx_def wide_mult; rtunion wide_mult_fld1;
105 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
106 struct rtx_def wide_trunc;
107 struct rtx_def shift; rtunion shift_fld1;
108 struct rtx_def shift_mult; rtunion shift_mult_fld1;
109 struct rtx_def shift_add; rtunion shift_add_fld1;
110 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
111 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
112 } all;
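/* Added commentary: the members of ALL above are plain automatic rtx
   shells.  The code below fills in their codes, modes and operands by
   hand and passes them to rtx_cost, so the per-mode cost tables can be
   computed without building real rtl for every mode and speed setting.  */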
114 rtx pow2[MAX_BITS_PER_WORD];
115 rtx cint[MAX_BITS_PER_WORD];
116 int m, n;
117 enum machine_mode mode, wider_mode;
118 int speed;
121 for (m = 1; m < MAX_BITS_PER_WORD; m++)
123 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
124 cint[m] = GEN_INT (m);
126 memset (&all, 0, sizeof all);
128 PUT_CODE (&all.reg, REG);
129 /* Avoid using hard regs in ways which may be unsupported. */
130 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
132 PUT_CODE (&all.plus, PLUS);
133 XEXP (&all.plus, 0) = &all.reg;
134 XEXP (&all.plus, 1) = &all.reg;
136 PUT_CODE (&all.neg, NEG);
137 XEXP (&all.neg, 0) = &all.reg;
139 PUT_CODE (&all.mult, MULT);
140 XEXP (&all.mult, 0) = &all.reg;
141 XEXP (&all.mult, 1) = &all.reg;
143 PUT_CODE (&all.sdiv, DIV);
144 XEXP (&all.sdiv, 0) = &all.reg;
145 XEXP (&all.sdiv, 1) = &all.reg;
147 PUT_CODE (&all.udiv, UDIV);
148 XEXP (&all.udiv, 0) = &all.reg;
149 XEXP (&all.udiv, 1) = &all.reg;
151 PUT_CODE (&all.sdiv_32, DIV);
152 XEXP (&all.sdiv_32, 0) = &all.reg;
153 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
155 PUT_CODE (&all.smod_32, MOD);
156 XEXP (&all.smod_32, 0) = &all.reg;
157 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
159 PUT_CODE (&all.zext, ZERO_EXTEND);
160 XEXP (&all.zext, 0) = &all.reg;
162 PUT_CODE (&all.wide_mult, MULT);
163 XEXP (&all.wide_mult, 0) = &all.zext;
164 XEXP (&all.wide_mult, 1) = &all.zext;
166 PUT_CODE (&all.wide_lshr, LSHIFTRT);
167 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
169 PUT_CODE (&all.wide_trunc, TRUNCATE);
170 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
172 PUT_CODE (&all.shift, ASHIFT);
173 XEXP (&all.shift, 0) = &all.reg;
175 PUT_CODE (&all.shift_mult, MULT);
176 XEXP (&all.shift_mult, 0) = &all.reg;
178 PUT_CODE (&all.shift_add, PLUS);
179 XEXP (&all.shift_add, 0) = &all.shift_mult;
180 XEXP (&all.shift_add, 1) = &all.reg;
182 PUT_CODE (&all.shift_sub0, MINUS);
183 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
184 XEXP (&all.shift_sub0, 1) = &all.reg;
186 PUT_CODE (&all.shift_sub1, MINUS);
187 XEXP (&all.shift_sub1, 0) = &all.reg;
188 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
190 for (speed = 0; speed < 2; speed++)
192 crtl->maybe_hot_insn_p = speed;
193 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
195 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
196 mode != VOIDmode;
197 mode = GET_MODE_WIDER_MODE (mode))
199 PUT_MODE (&all.reg, mode);
200 PUT_MODE (&all.plus, mode);
201 PUT_MODE (&all.neg, mode);
202 PUT_MODE (&all.mult, mode);
203 PUT_MODE (&all.sdiv, mode);
204 PUT_MODE (&all.udiv, mode);
205 PUT_MODE (&all.sdiv_32, mode);
206 PUT_MODE (&all.smod_32, mode);
207 PUT_MODE (&all.wide_trunc, mode);
208 PUT_MODE (&all.shift, mode);
209 PUT_MODE (&all.shift_mult, mode);
210 PUT_MODE (&all.shift_add, mode);
211 PUT_MODE (&all.shift_sub0, mode);
212 PUT_MODE (&all.shift_sub1, mode);
214 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
215 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
216 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
217 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
218 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
220 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
221 <= 2 * add_cost[speed][mode]);
222 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
223 <= 4 * add_cost[speed][mode]);
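/* Added commentary: division and modulus by the constant 32 stand in for
   any power-of-two divisor here; the operation is recorded as "cheap"
   when its rtx cost is at most two additions (for DIV) or four additions
   (for MOD), and the rest of this file consults these flags when deciding
   whether a power-of-two division is worth open-coding with shifts.  */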
225 wider_mode = GET_MODE_WIDER_MODE (mode);
226 if (wider_mode != VOIDmode)
228 PUT_MODE (&all.zext, wider_mode);
229 PUT_MODE (&all.wide_mult, wider_mode);
230 PUT_MODE (&all.wide_lshr, wider_mode);
231 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
233 mul_widen_cost[speed][wider_mode]
234 = rtx_cost (&all.wide_mult, SET, speed);
235 mul_highpart_cost[speed][mode]
236 = rtx_cost (&all.wide_trunc, SET, speed);
239 shift_cost[speed][mode][0] = 0;
240 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
241 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
243 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
244 for (m = 1; m < n; m++)
246 XEXP (&all.shift, 1) = cint[m];
247 XEXP (&all.shift_mult, 1) = pow2[m];
249 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
250 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
251 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
252 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
256 if (alg_hash_used_p)
257 memset (alg_hash, 0, sizeof (alg_hash));
258 else
259 alg_hash_used_p = true;
260 default_rtl_profile ();
263 /* Return an rtx representing minus the value of X.
264 MODE is the intended mode of the result,
265 useful if X is a CONST_INT. */
 267 rtx
 268 negate_rtx (enum machine_mode mode, rtx x)
270 rtx result = simplify_unary_operation (NEG, mode, x, mode);
272 if (result == 0)
273 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
275 return result;
278 /* Report on the availability of insv/extv/extzv and the desired mode
279 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
280 is false; else the mode of the specified operand. If OPNO is -1,
281 all the caller cares about is whether the insn is available. */
282 enum machine_mode
283 mode_for_extraction (enum extraction_pattern pattern, int opno)
285 const struct insn_data_d *data;
287 switch (pattern)
289 case EP_insv:
290 if (HAVE_insv)
292 data = &insn_data[CODE_FOR_insv];
293 break;
295 return MAX_MACHINE_MODE;
297 case EP_extv:
298 if (HAVE_extv)
300 data = &insn_data[CODE_FOR_extv];
301 break;
303 return MAX_MACHINE_MODE;
305 case EP_extzv:
306 if (HAVE_extzv)
308 data = &insn_data[CODE_FOR_extzv];
309 break;
311 return MAX_MACHINE_MODE;
313 default:
314 gcc_unreachable ();
317 if (opno == -1)
318 return VOIDmode;
320 /* Everyone who uses this function used to follow it with
321 if (result == VOIDmode) result = word_mode; */
322 if (data->operand[opno].mode == VOIDmode)
323 return word_mode;
324 return data->operand[opno].mode;
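/* Usage sketch (added, hedged): callers in this file query the insertion
   and extraction patterns like so; for example store_bit_field_1 below
   does

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   and later tests op_mode against MAX_MACHINE_MODE when deciding how to
   handle a memory destination.  */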
327 /* A subroutine of store_bit_field, with the same arguments. Return true
328 if the operation could be implemented.
330 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
331 no other way of implementing the operation. If FALLBACK_P is false,
332 return false instead. */
334 static bool
335 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
336 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
337 rtx value, bool fallback_p)
339 unsigned int unit
340 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
341 unsigned HOST_WIDE_INT offset, bitpos;
342 rtx op0 = str_rtx;
343 int byte_offset;
344 rtx orig_value;
346 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
348 while (GET_CODE (op0) == SUBREG)
350 /* The following line once was done only if WORDS_BIG_ENDIAN,
351 but I think that is a mistake. WORDS_BIG_ENDIAN is
352 meaningful at a much higher level; when structures are copied
353 between memory and regs, the higher-numbered regs
354 always get higher addresses. */
355 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
356 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
358 byte_offset = 0;
360 /* Paradoxical subregs need special handling on big endian machines. */
361 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
363 int difference = inner_mode_size - outer_mode_size;
365 if (WORDS_BIG_ENDIAN)
366 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
367 if (BYTES_BIG_ENDIAN)
368 byte_offset += difference % UNITS_PER_WORD;
370 else
371 byte_offset = SUBREG_BYTE (op0);
373 bitnum += byte_offset * BITS_PER_UNIT;
374 op0 = SUBREG_REG (op0);
377 /* No action is needed if the target is a register and if the field
378 lies completely outside that register. This can occur if the source
379 code contains an out-of-bounds access to a small array. */
380 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
381 return true;
383 /* Use vec_set patterns for inserting parts of vectors whenever
384 available. */
385 if (VECTOR_MODE_P (GET_MODE (op0))
386 && !MEM_P (op0)
387 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
388 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
389 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
390 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
392 struct expand_operand ops[3];
393 enum machine_mode outermode = GET_MODE (op0);
394 enum machine_mode innermode = GET_MODE_INNER (outermode);
395 enum insn_code icode = optab_handler (vec_set_optab, outermode);
396 int pos = bitnum / GET_MODE_BITSIZE (innermode);
398 create_fixed_operand (&ops[0], op0);
399 create_input_operand (&ops[1], value, innermode);
400 create_integer_operand (&ops[2], pos);
401 if (maybe_expand_insn (icode, 3, ops))
402 return true;
405 /* If the target is a register, overwriting the entire object, or storing
406 a full-word or multi-word field can be done with just a SUBREG.
408 If the target is memory, storing any naturally aligned field can be
409 done with a simple store. For targets that support fast unaligned
410 memory, any naturally sized, unit aligned field can be done directly. */
412 offset = bitnum / unit;
413 bitpos = bitnum % unit;
414 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
415 + (offset * UNITS_PER_WORD);
417 if (bitpos == 0
418 && bitsize == GET_MODE_BITSIZE (fieldmode)
419 && (!MEM_P (op0)
420 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
421 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
422 && ((GET_MODE (op0) == fieldmode && byte_offset == 0)
423 || validate_subreg (fieldmode, GET_MODE (op0), op0,
424 byte_offset)))
425 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
426 || (offset * BITS_PER_UNIT % bitsize == 0
427 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
429 if (MEM_P (op0))
430 op0 = adjust_address (op0, fieldmode, offset);
431 else if (GET_MODE (op0) != fieldmode)
432 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
433 byte_offset);
434 emit_move_insn (op0, value);
435 return true;
438 /* Make sure we are playing with integral modes. Pun with subregs
439 if we aren't. This must come after the entire register case above,
440 since that case is valid for any mode. The following cases are only
441 valid for integral modes. */
443 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
444 if (imode != GET_MODE (op0))
446 if (MEM_P (op0))
447 op0 = adjust_address (op0, imode, 0);
448 else
450 gcc_assert (imode != BLKmode);
451 op0 = gen_lowpart (imode, op0);
456 /* We may be accessing data outside the field, which means
457 we can alias adjacent data. */
458 if (MEM_P (op0))
460 op0 = shallow_copy_rtx (op0);
461 set_mem_alias_set (op0, 0);
462 set_mem_expr (op0, 0);
465 /* If OP0 is a register, BITPOS must count within a word.
466 But as we have it, it counts within whatever size OP0 now has.
467 On a bigendian machine, these are not the same, so convert. */
468 if (BYTES_BIG_ENDIAN
469 && !MEM_P (op0)
470 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
471 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
473 /* Storing an lsb-aligned field in a register
474 can be done with a movestrict instruction. */
476 if (!MEM_P (op0)
477 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
478 && bitsize == GET_MODE_BITSIZE (fieldmode)
479 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
481 struct expand_operand ops[2];
482 enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
483 rtx arg0 = op0;
484 unsigned HOST_WIDE_INT subreg_off;
486 if (GET_CODE (arg0) == SUBREG)
488 /* Else we've got some float mode source being extracted into
489 a different float mode destination -- this combination of
490 subregs results in Severe Tire Damage. */
491 gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
492 || GET_MODE_CLASS (fieldmode) == MODE_INT
493 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
494 arg0 = SUBREG_REG (arg0);
497 subreg_off = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
498 + (offset * UNITS_PER_WORD);
499 if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
501 arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
503 create_fixed_operand (&ops[0], arg0);
504 /* Shrink the source operand to FIELDMODE. */
505 create_convert_operand_to (&ops[1], value, fieldmode, false);
506 if (maybe_expand_insn (icode, 2, ops))
507 return true;
511 /* Handle fields bigger than a word. */
513 if (bitsize > BITS_PER_WORD)
515 /* Here we transfer the words of the field
516 in the order least significant first.
517 This is because the most significant word is the one which may
518 be less than full.
519 However, only do that if the value is not BLKmode. */
521 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
522 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
523 unsigned int i;
524 rtx last;
526 /* This is the mode we must force value to, so that there will be enough
527 subwords to extract. Note that fieldmode will often (always?) be
528 VOIDmode, because that is what store_field uses to indicate that this
529 is a bit field, but passing VOIDmode to operand_subword_force
530 is not allowed. */
531 fieldmode = GET_MODE (value);
532 if (fieldmode == VOIDmode)
533 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
535 last = get_last_insn ();
536 for (i = 0; i < nwords; i++)
538 /* If I is 0, use the low-order word in both field and target;
539 if I is 1, use the next to lowest word; and so on. */
540 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
541 unsigned int bit_offset = (backwards
542 ? MAX ((int) bitsize - ((int) i + 1)
543 * BITS_PER_WORD,
 544 0,
 545 : (int) i * BITS_PER_WORD);
546 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
548 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
549 bitsize - i * BITS_PER_WORD),
550 bitnum + bit_offset, word_mode,
551 value_word, fallback_p))
553 delete_insns_since (last);
554 return false;
557 return true;
 560 /* From here on we can assume that the field to be stored is no wider
 561 than a word, since fields bigger than a word were handled above.  */
563 /* OFFSET is the number of words or bytes (UNIT says which)
564 from STR_RTX to the first word or byte containing part of the field. */
566 if (!MEM_P (op0))
568 if (offset != 0
569 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
571 if (!REG_P (op0))
573 /* Since this is a destination (lvalue), we can't copy
574 it to a pseudo. We can remove a SUBREG that does not
575 change the size of the operand. Such a SUBREG may
576 have been added above. */
577 gcc_assert (GET_CODE (op0) == SUBREG
578 && (GET_MODE_SIZE (GET_MODE (op0))
579 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
580 op0 = SUBREG_REG (op0);
582 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
583 op0, (offset * UNITS_PER_WORD));
585 offset = 0;
588 /* If VALUE has a floating-point or complex mode, access it as an
589 integer of the corresponding size. This can occur on a machine
590 with 64 bit registers that uses SFmode for float. It can also
591 occur for unaligned float or complex fields. */
592 orig_value = value;
593 if (GET_MODE (value) != VOIDmode
594 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
595 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
597 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
598 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
601 /* Now OFFSET is nonzero only if OP0 is memory
602 and is therefore always measured in bytes. */
604 if (HAVE_insv
605 && GET_MODE (value) != BLKmode
606 && bitsize > 0
607 && GET_MODE_BITSIZE (op_mode) >= bitsize
608 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
609 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
611 struct expand_operand ops[4];
612 int xbitpos = bitpos;
613 rtx value1;
614 rtx xop0 = op0;
615 rtx last = get_last_insn ();
616 bool copy_back = false;
618 /* Add OFFSET into OP0's address. */
619 if (MEM_P (xop0))
620 xop0 = adjust_address (xop0, byte_mode, offset);
622 /* If xop0 is a register, we need it in OP_MODE
623 to make it acceptable to the format of insv. */
624 if (GET_CODE (xop0) == SUBREG)
625 /* We can't just change the mode, because this might clobber op0,
626 and we will need the original value of op0 if insv fails. */
627 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
628 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
629 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
631 /* If the destination is a paradoxical subreg such that we need a
632 truncate to the inner mode, perform the insertion on a temporary and
633 truncate the result to the original destination. Note that we can't
634 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
635 X) 0)) is (reg:N X). */
636 if (GET_CODE (xop0) == SUBREG
637 && REG_P (SUBREG_REG (xop0))
638 && (!TRULY_NOOP_TRUNCATION
639 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
640 GET_MODE_BITSIZE (op_mode))))
642 rtx tem = gen_reg_rtx (op_mode);
643 emit_move_insn (tem, xop0);
644 xop0 = tem;
645 copy_back = true;
648 /* On big-endian machines, we count bits from the most significant.
649 If the bit field insn does not, we must invert. */
651 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
652 xbitpos = unit - bitsize - xbitpos;
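/* Illustrative example (added): with unit == 32, bitsize == 8 and
   bitpos == 4, a mismatch between BITS_BIG_ENDIAN and BYTES_BIG_ENDIAN
   gives xbitpos = 32 - 8 - 4 = 20, i.e. the same field counted from the
   opposite end of the unit.  */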
654 /* We have been counting XBITPOS within UNIT.
655 Count instead within the size of the register. */
656 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
657 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
659 unit = GET_MODE_BITSIZE (op_mode);
661 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
662 value1 = value;
663 if (GET_MODE (value) != op_mode)
665 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
667 /* Optimization: Don't bother really extending VALUE
668 if it has all the bits we will actually use. However,
669 if we must narrow it, be sure we do it correctly. */
671 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
673 rtx tmp;
675 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
676 if (! tmp)
677 tmp = simplify_gen_subreg (op_mode,
678 force_reg (GET_MODE (value),
679 value1),
680 GET_MODE (value), 0);
681 value1 = tmp;
683 else
684 value1 = gen_lowpart (op_mode, value1);
686 else if (CONST_INT_P (value))
687 value1 = gen_int_mode (INTVAL (value), op_mode);
688 else
689 /* Parse phase is supposed to make VALUE's data type
690 match that of the component reference, which is a type
691 at least as wide as the field; so VALUE should have
692 a mode that corresponds to that type. */
693 gcc_assert (CONSTANT_P (value));
696 create_fixed_operand (&ops[0], xop0);
697 create_integer_operand (&ops[1], bitsize);
698 create_integer_operand (&ops[2], xbitpos);
699 create_input_operand (&ops[3], value1, op_mode);
700 if (maybe_expand_insn (CODE_FOR_insv, 4, ops))
702 if (copy_back)
703 convert_move (op0, xop0, true);
704 return true;
706 delete_insns_since (last);
709 /* If OP0 is a memory, try copying it to a register and seeing if a
710 cheap register alternative is available. */
711 if (HAVE_insv && MEM_P (op0))
713 enum machine_mode bestmode;
715 /* Get the mode to use for inserting into this field. If OP0 is
716 BLKmode, get the smallest mode consistent with the alignment. If
717 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
718 mode. Otherwise, use the smallest mode containing the field. */
720 if (GET_MODE (op0) == BLKmode
721 || (op_mode != MAX_MACHINE_MODE
722 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
723 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
724 (op_mode == MAX_MACHINE_MODE
725 ? VOIDmode : op_mode),
726 MEM_VOLATILE_P (op0));
727 else
728 bestmode = GET_MODE (op0);
730 if (bestmode != VOIDmode
731 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
732 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
733 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
735 rtx last, tempreg, xop0;
736 unsigned HOST_WIDE_INT xoffset, xbitpos;
738 last = get_last_insn ();
740 /* Adjust address to point to the containing unit of
741 that mode. Compute the offset as a multiple of this unit,
742 counting in bytes. */
743 unit = GET_MODE_BITSIZE (bestmode);
744 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
745 xbitpos = bitnum % unit;
746 xop0 = adjust_address (op0, bestmode, xoffset);
748 /* Fetch that unit, store the bitfield in it, then store
749 the unit. */
750 tempreg = copy_to_reg (xop0);
751 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
752 fieldmode, orig_value, false))
754 emit_move_insn (xop0, tempreg);
755 return true;
757 delete_insns_since (last);
761 if (!fallback_p)
762 return false;
764 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
765 return true;
768 /* Generate code to store value from rtx VALUE
769 into a bit-field within structure STR_RTX
770 containing BITSIZE bits starting at bit BITNUM.
771 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
773 void
774 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
775 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
776 rtx value)
778 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
779 gcc_unreachable ();
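/* Usage sketch (added, hedged): a caller storing an 8-bit, byte-aligned
   field might simply do

     store_bit_field (str_rtx, 8, 0, QImode, value);

   which store_bit_field_1 can usually handle with a plain move; STR_RTX
   and VALUE here stand for whatever rtxes the caller already has.  */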
782 /* Use shifts and boolean operations to store VALUE
783 into a bit field of width BITSIZE
784 in a memory location specified by OP0 except offset by OFFSET bytes.
785 (OFFSET must be 0 if OP0 is a register.)
786 The field starts at position BITPOS within the byte.
787 (If OP0 is a register, it may be a full word or a narrower mode,
788 but BITPOS still counts within a full word,
789 which is significant on bigendian machines.) */
791 static void
792 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
793 unsigned HOST_WIDE_INT bitsize,
794 unsigned HOST_WIDE_INT bitpos, rtx value)
796 enum machine_mode mode;
797 unsigned int total_bits = BITS_PER_WORD;
798 rtx temp;
799 int all_zero = 0;
800 int all_one = 0;
802 /* There is a case not handled here:
803 a structure with a known alignment of just a halfword
804 and a field split across two aligned halfwords within the structure.
805 Or likewise a structure with a known alignment of just a byte
806 and a field split across two bytes.
807 Such cases are not supposed to be able to occur. */
809 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
811 gcc_assert (!offset);
812 /* Special treatment for a bit field split across two registers. */
813 if (bitsize + bitpos > BITS_PER_WORD)
815 store_split_bit_field (op0, bitsize, bitpos, value);
816 return;
819 else
821 /* Get the proper mode to use for this field. We want a mode that
822 includes the entire field. If such a mode would be larger than
823 a word, we won't be doing the extraction the normal way.
824 We don't want a mode bigger than the destination. */
826 mode = GET_MODE (op0);
827 if (GET_MODE_BITSIZE (mode) == 0
828 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
829 mode = word_mode;
831 if (MEM_VOLATILE_P (op0)
832 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
833 && flag_strict_volatile_bitfields > 0)
834 mode = GET_MODE (op0);
835 else
836 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
837 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
839 if (mode == VOIDmode)
841 /* The only way this should occur is if the field spans word
842 boundaries. */
843 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
844 value);
845 return;
848 total_bits = GET_MODE_BITSIZE (mode);
850 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
851 be in the range 0 to total_bits-1, and put any excess bytes in
852 OFFSET. */
853 if (bitpos >= total_bits)
855 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
856 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
857 * BITS_PER_UNIT);
860 /* Get ref to an aligned byte, halfword, or word containing the field.
861 Adjust BITPOS to be position within a word,
862 and OFFSET to be the offset of that word.
863 Then alter OP0 to refer to that word. */
864 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
865 offset -= (offset % (total_bits / BITS_PER_UNIT));
866 op0 = adjust_address (op0, mode, offset);
869 mode = GET_MODE (op0);
871 /* Now MODE is either some integral mode for a MEM as OP0,
872 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
873 The bit field is contained entirely within OP0.
874 BITPOS is the starting bit number within OP0.
875 (OP0's mode may actually be narrower than MODE.) */
877 if (BYTES_BIG_ENDIAN)
878 /* BITPOS is the distance between our msb
879 and that of the containing datum.
880 Convert it to the distance from the lsb. */
881 bitpos = total_bits - bitsize - bitpos;
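/* Example (added): with total_bits == 32, an 8-bit field whose msb lies
   4 bits below the msb of the containing word enters with bitpos == 4;
   the line above turns that into 32 - 8 - 4 = 20, the distance of the
   field's lsb from the word's lsb.  */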
883 /* Now BITPOS is always the distance between our lsb
884 and that of OP0. */
886 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
887 we must first convert its mode to MODE. */
889 if (CONST_INT_P (value))
891 HOST_WIDE_INT v = INTVAL (value);
893 if (bitsize < HOST_BITS_PER_WIDE_INT)
894 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
896 if (v == 0)
897 all_zero = 1;
898 else if ((bitsize < HOST_BITS_PER_WIDE_INT
899 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
900 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
901 all_one = 1;
903 value = lshift_value (mode, value, bitpos, bitsize);
905 else
907 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
908 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
910 if (GET_MODE (value) != mode)
911 value = convert_to_mode (mode, value, 1);
913 if (must_and)
914 value = expand_binop (mode, and_optab, value,
915 mask_rtx (mode, 0, bitsize, 0),
916 NULL_RTX, 1, OPTAB_LIB_WIDEN);
917 if (bitpos > 0)
918 value = expand_shift (LSHIFT_EXPR, mode, value,
919 bitpos, NULL_RTX, 1);
922 /* Now clear the chosen bits in OP0,
923 except that if VALUE is -1 we need not bother. */
924 /* We keep the intermediates in registers to allow CSE to combine
925 consecutive bitfield assignments. */
927 temp = force_reg (mode, op0);
929 if (! all_one)
931 temp = expand_binop (mode, and_optab, temp,
932 mask_rtx (mode, bitpos, bitsize, 1),
933 NULL_RTX, 1, OPTAB_LIB_WIDEN);
934 temp = force_reg (mode, temp);
937 /* Now logical-or VALUE into OP0, unless it is zero. */
939 if (! all_zero)
941 temp = expand_binop (mode, ior_optab, temp, value,
942 NULL_RTX, 1, OPTAB_LIB_WIDEN);
943 temp = force_reg (mode, temp);
946 if (op0 != temp)
948 op0 = copy_rtx (op0);
949 emit_move_insn (op0, temp);
953 /* Store a bit field that is split across multiple accessible memory objects.
955 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
956 BITSIZE is the field width; BITPOS the position of its first bit
957 (within the word).
958 VALUE is the value to store.
960 This does not yet handle fields wider than BITS_PER_WORD. */
962 static void
963 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
964 unsigned HOST_WIDE_INT bitpos, rtx value)
966 unsigned int unit;
967 unsigned int bitsdone = 0;
969 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
970 much at a time. */
971 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
972 unit = BITS_PER_WORD;
973 else
974 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
976 /* If VALUE is a constant other than a CONST_INT, get it into a register in
977 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
978 that VALUE might be a floating-point constant. */
979 if (CONSTANT_P (value) && !CONST_INT_P (value))
981 rtx word = gen_lowpart_common (word_mode, value);
983 if (word && (value != word))
984 value = word;
985 else
986 value = gen_lowpart_common (word_mode,
987 force_reg (GET_MODE (value) != VOIDmode
988 ? GET_MODE (value)
989 : word_mode, value));
992 while (bitsdone < bitsize)
994 unsigned HOST_WIDE_INT thissize;
995 rtx part, word;
996 unsigned HOST_WIDE_INT thispos;
997 unsigned HOST_WIDE_INT offset;
999 offset = (bitpos + bitsdone) / unit;
1000 thispos = (bitpos + bitsdone) % unit;
1002 /* THISSIZE must not overrun a word boundary. Otherwise,
1003 store_fixed_bit_field will call us again, and we will mutually
1004 recurse forever. */
1005 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1006 thissize = MIN (thissize, unit - thispos);
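/* Worked example (added): storing a 20-bit field at bitpos 28 with
   unit == 32 takes two iterations: the first stores
   MIN (20, 32 - 28) = 4 bits, the second stores the remaining 16 bits
   starting at position 0 of the next unit.  */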
1008 if (BYTES_BIG_ENDIAN)
1010 int total_bits;
1012 /* We must do an endian conversion exactly the same way as it is
1013 done in extract_bit_field, so that the two calls to
1014 extract_fixed_bit_field will have comparable arguments. */
1015 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1016 total_bits = BITS_PER_WORD;
1017 else
1018 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1020 /* Fetch successively less significant portions. */
1021 if (CONST_INT_P (value))
1022 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1023 >> (bitsize - bitsdone - thissize))
1024 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1025 else
1026 /* The args are chosen so that the last part includes the
1027 lsb. Give extract_bit_field the value it needs (with
1028 endianness compensation) to fetch the piece we want. */
1029 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1030 total_bits - bitsize + bitsdone,
1031 NULL_RTX, 1, false);
1033 else
1035 /* Fetch successively more significant portions. */
1036 if (CONST_INT_P (value))
1037 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1038 >> bitsdone)
1039 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1040 else
1041 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1042 bitsdone, NULL_RTX, 1, false);
1045 /* If OP0 is a register, then handle OFFSET here.
1047 When handling multiword bitfields, extract_bit_field may pass
1048 down a word_mode SUBREG of a larger REG for a bitfield that actually
1049 crosses a word boundary. Thus, for a SUBREG, we must find
1050 the current word starting from the base register. */
1051 if (GET_CODE (op0) == SUBREG)
1053 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1054 enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
1055 if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
1056 word = word_offset ? const0_rtx : op0;
1057 else
1058 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1059 GET_MODE (SUBREG_REG (op0)));
1060 offset = 0;
1062 else if (REG_P (op0))
1064 enum machine_mode op0_mode = GET_MODE (op0);
1065 if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
1066 word = offset ? const0_rtx : op0;
1067 else
1068 word = operand_subword_force (op0, offset, GET_MODE (op0));
1069 offset = 0;
1071 else
1072 word = op0;
1074 /* OFFSET is in UNITs, and UNIT is in bits.
1075 store_fixed_bit_field wants offset in bytes. If WORD is const0_rtx,
1076 it is just an out-of-bounds access. Ignore it. */
1077 if (word != const0_rtx)
1078 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1079 thispos, part);
1080 bitsdone += thissize;
1084 /* A subroutine of extract_bit_field_1 that converts return value X
1085 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1086 to extract_bit_field. */
1088 static rtx
1089 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1090 enum machine_mode tmode, bool unsignedp)
1092 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1093 return x;
1095 /* If the x mode is not a scalar integral, first convert to the
1096 integer mode of that size and then access it as a floating-point
1097 value via a SUBREG. */
1098 if (!SCALAR_INT_MODE_P (tmode))
1100 enum machine_mode smode;
1102 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1103 x = convert_to_mode (smode, x, unsignedp);
1104 x = force_reg (smode, x);
1105 return gen_lowpart (tmode, x);
1108 return convert_to_mode (tmode, x, unsignedp);
1111 /* A subroutine of extract_bit_field, with the same arguments.
1112 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1113 if we can find no other means of implementing the operation.
1114 if FALLBACK_P is false, return NULL instead. */
1116 static rtx
1117 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1118 unsigned HOST_WIDE_INT bitnum,
1119 int unsignedp, bool packedp, rtx target,
1120 enum machine_mode mode, enum machine_mode tmode,
1121 bool fallback_p)
1123 unsigned int unit
1124 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1125 unsigned HOST_WIDE_INT offset, bitpos;
1126 rtx op0 = str_rtx;
1127 enum machine_mode int_mode;
1128 enum machine_mode ext_mode;
1129 enum machine_mode mode1;
1130 int byte_offset;
1132 if (tmode == VOIDmode)
1133 tmode = mode;
1135 while (GET_CODE (op0) == SUBREG)
1137 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1138 op0 = SUBREG_REG (op0);
1141 /* If we have an out-of-bounds access to a register, just return an
1142 uninitialized register of the required mode. This can occur if the
1143 source code contains an out-of-bounds access to a small array. */
1144 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1145 return gen_reg_rtx (tmode);
1147 if (REG_P (op0)
1148 && mode == GET_MODE (op0)
1149 && bitnum == 0
1150 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1152 /* We're trying to extract a full register from itself. */
1153 return op0;
1156 /* See if we can get a better vector mode before extracting. */
1157 if (VECTOR_MODE_P (GET_MODE (op0))
1158 && !MEM_P (op0)
1159 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1161 enum machine_mode new_mode;
1163 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1164 new_mode = MIN_MODE_VECTOR_FLOAT;
1165 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1166 new_mode = MIN_MODE_VECTOR_FRACT;
1167 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1168 new_mode = MIN_MODE_VECTOR_UFRACT;
1169 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1170 new_mode = MIN_MODE_VECTOR_ACCUM;
1171 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1172 new_mode = MIN_MODE_VECTOR_UACCUM;
1173 else
1174 new_mode = MIN_MODE_VECTOR_INT;
1176 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1177 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1178 && targetm.vector_mode_supported_p (new_mode))
1179 break;
1180 if (new_mode != VOIDmode)
1181 op0 = gen_lowpart (new_mode, op0);
1184 /* Use vec_extract patterns for extracting parts of vectors whenever
1185 available. */
1186 if (VECTOR_MODE_P (GET_MODE (op0))
1187 && !MEM_P (op0)
1188 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1189 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1190 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1192 struct expand_operand ops[3];
1193 enum machine_mode outermode = GET_MODE (op0);
1194 enum machine_mode innermode = GET_MODE_INNER (outermode);
1195 enum insn_code icode = optab_handler (vec_extract_optab, outermode);
1196 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1198 create_output_operand (&ops[0], target, innermode);
1199 create_input_operand (&ops[1], op0, outermode);
1200 create_integer_operand (&ops[2], pos);
1201 if (maybe_expand_insn (icode, 3, ops))
1203 target = ops[0].value;
1204 if (GET_MODE (target) != mode)
1205 return gen_lowpart (tmode, target);
1206 return target;
1210 /* Make sure we are playing with integral modes. Pun with subregs
1211 if we aren't. */
1213 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1214 if (imode != GET_MODE (op0))
1216 if (MEM_P (op0))
1217 op0 = adjust_address (op0, imode, 0);
1218 else if (imode != BLKmode)
1220 op0 = gen_lowpart (imode, op0);
1222 /* If we got a SUBREG, force it into a register since we
1223 aren't going to be able to do another SUBREG on it. */
1224 if (GET_CODE (op0) == SUBREG)
1225 op0 = force_reg (imode, op0);
1227 else if (REG_P (op0))
1229 rtx reg, subreg;
1230 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1231 MODE_INT);
1232 reg = gen_reg_rtx (imode);
1233 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1234 emit_move_insn (subreg, op0);
1235 op0 = reg;
1236 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1238 else
1240 rtx mem = assign_stack_temp (GET_MODE (op0),
1241 GET_MODE_SIZE (GET_MODE (op0)), 0);
1242 emit_move_insn (mem, op0);
1243 op0 = adjust_address (mem, BLKmode, 0);
1248 /* We may be accessing data outside the field, which means
1249 we can alias adjacent data. */
1250 if (MEM_P (op0))
1252 op0 = shallow_copy_rtx (op0);
1253 set_mem_alias_set (op0, 0);
1254 set_mem_expr (op0, 0);
1257 /* Extraction of a full-word or multi-word value from a structure
1258 in a register or aligned memory can be done with just a SUBREG.
1259 A subword value in the least significant part of a register
1260 can also be extracted with a SUBREG. For this, we need the
1261 byte offset of the value in op0. */
1263 bitpos = bitnum % unit;
1264 offset = bitnum / unit;
1265 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1267 /* If OP0 is a register, BITPOS must count within a word.
1268 But as we have it, it counts within whatever size OP0 now has.
1269 On a bigendian machine, these are not the same, so convert. */
1270 if (BYTES_BIG_ENDIAN
1271 && !MEM_P (op0)
1272 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1273 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1275 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1276 If that's wrong, the solution is to test for it and set TARGET to 0
1277 if needed. */
1279 /* Only scalar integer modes can be converted via subregs. There is an
1280 additional problem for FP modes here in that they can have a precision
1281 which is different from the size. mode_for_size uses precision, but
1282 we want a mode based on the size, so we must avoid calling it for FP
1283 modes. */
1284 mode1 = (SCALAR_INT_MODE_P (tmode)
1285 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1286 : mode);
1288 /* If the bitfield is volatile, we need to make sure the access
1289 remains on a type-aligned boundary. */
1290 if (GET_CODE (op0) == MEM
1291 && MEM_VOLATILE_P (op0)
1292 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1293 && flag_strict_volatile_bitfields > 0)
1294 goto no_subreg_mode_swap;
1296 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1297 && bitpos % BITS_PER_WORD == 0)
1298 || (mode1 != BLKmode
1299 /* ??? The big endian test here is wrong. This is correct
1300 if the value is in a register, and if mode_for_size is not
1301 the same mode as op0. This causes us to get unnecessarily
1302 inefficient code from the Thumb port when -mbig-endian. */
1303 && (BYTES_BIG_ENDIAN
1304 ? bitpos + bitsize == BITS_PER_WORD
1305 : bitpos == 0)))
1306 && ((!MEM_P (op0)
1307 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1308 GET_MODE_BITSIZE (GET_MODE (op0)))
1309 && GET_MODE_SIZE (mode1) != 0
1310 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1311 || (MEM_P (op0)
1312 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1313 || (offset * BITS_PER_UNIT % bitsize == 0
1314 && MEM_ALIGN (op0) % bitsize == 0)))))
1316 if (MEM_P (op0))
1317 op0 = adjust_address (op0, mode1, offset);
1318 else if (mode1 != GET_MODE (op0))
1320 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1321 byte_offset);
1322 if (sub == NULL)
1323 goto no_subreg_mode_swap;
1324 op0 = sub;
1326 if (mode1 != mode)
1327 return convert_to_mode (tmode, op0, unsignedp);
1328 return op0;
1330 no_subreg_mode_swap:
1332 /* Handle fields bigger than a word. */
1334 if (bitsize > BITS_PER_WORD)
1336 /* Here we transfer the words of the field
1337 in the order least significant first.
1338 This is because the most significant word is the one which may
1339 be less than full. */
1341 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1342 unsigned int i;
1344 if (target == 0 || !REG_P (target))
1345 target = gen_reg_rtx (mode);
1347 /* Indicate for flow that the entire target reg is being set. */
1348 emit_clobber (target);
1350 for (i = 0; i < nwords; i++)
1352 /* If I is 0, use the low-order word in both field and target;
1353 if I is 1, use the next to lowest word; and so on. */
1354 /* Word number in TARGET to use. */
1355 unsigned int wordnum
1356 = (WORDS_BIG_ENDIAN
1357 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1358 : i);
1359 /* Offset from start of field in OP0. */
1360 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1361 ? MAX (0, ((int) bitsize - ((int) i + 1)
1362 * (int) BITS_PER_WORD))
1363 : (int) i * BITS_PER_WORD);
1364 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1365 rtx result_part
1366 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1367 bitsize - i * BITS_PER_WORD),
1368 bitnum + bit_offset, 1, false, target_part, mode,
1369 word_mode);
1371 gcc_assert (target_part);
1373 if (result_part != target_part)
1374 emit_move_insn (target_part, result_part);
1377 if (unsignedp)
1379 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1380 need to be zero'd out. */
1381 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1383 unsigned int i, total_words;
1385 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1386 for (i = nwords; i < total_words; i++)
1387 emit_move_insn
1388 (operand_subword (target,
1389 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1390 1, VOIDmode),
1391 const0_rtx);
1393 return target;
1396 /* Signed bit field: sign-extend with two arithmetic shifts. */
1397 target = expand_shift (LSHIFT_EXPR, mode, target,
1398 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1399 return expand_shift (RSHIFT_EXPR, mode, target,
1400 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1403 /* From here on we know the desired field is smaller than a word. */
1405 /* Check if there is a correspondingly-sized integer field, so we can
1406 safely extract it as one size of integer, if necessary; then
1407 truncate or extend to the size that is wanted; then use SUBREGs or
1408 convert_to_mode to get one of the modes we really wanted. */
1410 int_mode = int_mode_for_mode (tmode);
1411 if (int_mode == BLKmode)
1412 int_mode = int_mode_for_mode (mode);
1413 /* Should probably push op0 out to memory and then do a load. */
1414 gcc_assert (int_mode != BLKmode);
1416 /* OFFSET is the number of words or bytes (UNIT says which)
1417 from STR_RTX to the first word or byte containing part of the field. */
1418 if (!MEM_P (op0))
1420 if (offset != 0
1421 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1423 if (!REG_P (op0))
1424 op0 = copy_to_reg (op0);
1425 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1426 op0, (offset * UNITS_PER_WORD));
1428 offset = 0;
1431 /* Now OFFSET is nonzero only for memory operands. */
1432 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1433 if (ext_mode != MAX_MACHINE_MODE
1434 && bitsize > 0
1435 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1436 /* If op0 is a register, we need it in EXT_MODE to make it
1437 acceptable to the format of ext(z)v. */
1438 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1439 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1440 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode))))
1442 struct expand_operand ops[4];
1443 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1444 rtx xop0 = op0;
1445 rtx xtarget = target;
1446 rtx xspec_target = target;
1447 rtx xspec_target_subreg = 0;
1449 /* If op0 is a register, we need it in EXT_MODE to make it
1450 acceptable to the format of ext(z)v. */
1451 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1452 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1453 if (MEM_P (xop0))
1454 /* Get ref to first byte containing part of the field. */
1455 xop0 = adjust_address (xop0, byte_mode, xoffset);
1457 /* On big-endian machines, we count bits from the most significant.
1458 If the bit field insn does not, we must invert. */
1459 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1460 xbitpos = unit - bitsize - xbitpos;
1462 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1463 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1464 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1466 unit = GET_MODE_BITSIZE (ext_mode);
1468 if (xtarget == 0)
1469 xtarget = xspec_target = gen_reg_rtx (tmode);
1471 if (GET_MODE (xtarget) != ext_mode)
1473 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1474 between the mode of the extraction (word_mode) and the target
1475 mode. Instead, create a temporary and use convert_move to set
1476 the target. */
1477 if (REG_P (xtarget)
1478 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1479 GET_MODE_BITSIZE (ext_mode)))
1481 xtarget = gen_lowpart (ext_mode, xtarget);
1482 if (GET_MODE_SIZE (ext_mode)
1483 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1484 xspec_target_subreg = xtarget;
1486 else
1487 xtarget = gen_reg_rtx (ext_mode);
1490 create_output_operand (&ops[0], xtarget, ext_mode);
1491 create_fixed_operand (&ops[1], xop0);
1492 create_integer_operand (&ops[2], bitsize);
1493 create_integer_operand (&ops[3], xbitpos);
1494 if (maybe_expand_insn (unsignedp ? CODE_FOR_extzv : CODE_FOR_extv,
1495 4, ops))
1497 xtarget = ops[0].value;
1498 if (xtarget == xspec_target)
1499 return xtarget;
1500 if (xtarget == xspec_target_subreg)
1501 return xspec_target;
1502 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1506 /* If OP0 is a memory, try copying it to a register and seeing if a
1507 cheap register alternative is available. */
1508 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1510 enum machine_mode bestmode;
1512 /* Get the mode to use for inserting into this field. If
1513 OP0 is BLKmode, get the smallest mode consistent with the
1514 alignment. If OP0 is a non-BLKmode object that is no
1515 wider than EXT_MODE, use its mode. Otherwise, use the
1516 smallest mode containing the field. */
1518 if (GET_MODE (op0) == BLKmode
1519 || (ext_mode != MAX_MACHINE_MODE
1520 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1521 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1522 (ext_mode == MAX_MACHINE_MODE
1523 ? VOIDmode : ext_mode),
1524 MEM_VOLATILE_P (op0));
1525 else
1526 bestmode = GET_MODE (op0);
1528 if (bestmode != VOIDmode
1529 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1530 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1532 unsigned HOST_WIDE_INT xoffset, xbitpos;
1534 /* Compute the offset as a multiple of this unit,
1535 counting in bytes. */
1536 unit = GET_MODE_BITSIZE (bestmode);
1537 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1538 xbitpos = bitnum % unit;
1540 /* Make sure the register is big enough for the whole field. */
1541 if (xoffset * BITS_PER_UNIT + unit
1542 >= offset * BITS_PER_UNIT + bitsize)
1544 rtx last, result, xop0;
1546 last = get_last_insn ();
1548 /* Fetch it to a register in that size. */
1549 xop0 = adjust_address (op0, bestmode, xoffset);
1550 xop0 = force_reg (bestmode, xop0);
1551 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1552 unsignedp, packedp, target,
1553 mode, tmode, false);
1554 if (result)
1555 return result;
1557 delete_insns_since (last);
1562 if (!fallback_p)
1563 return NULL;
1565 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1566 bitpos, target, unsignedp, packedp);
1567 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1570 /* Generate code to extract a byte-field from STR_RTX
1571 containing BITSIZE bits, starting at BITNUM,
1572 and put it in TARGET if possible (if TARGET is nonzero).
1573 Regardless of TARGET, we return the rtx for where the value is placed.
1575 STR_RTX is the structure containing the byte (a REG or MEM).
1576 UNSIGNEDP is nonzero if this is an unsigned bit field.
1577 PACKEDP is nonzero if the field has the packed attribute.
1578 MODE is the natural mode of the field value once extracted.
1579 TMODE is the mode the caller would like the value to have;
1580 but the value may be returned with type MODE instead.
1582 If a TARGET is specified and we can store in it at no extra cost,
1583 we do so, and return TARGET.
1584 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1585 if they are equally easy. */
 1587 rtx
 1588 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1589 unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
1590 rtx target, enum machine_mode mode, enum machine_mode tmode)
1592 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
1593 target, mode, tmode, true);
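/* Usage sketch (added, hedged): pulling an unsigned 8-bit field that
   starts at bit 16 of STR_RTX into an SImode value might look like

     rtx x = extract_bit_field (str_rtx, 8, 16, 1, false,
                                NULL_RTX, SImode, SImode);

   the MODE and TMODE a real caller passes depend on the field's type.  */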
 1596 /* Extract a bit field using shifts and boolean operations.
1597 Returns an rtx to represent the value.
1598 OP0 addresses a register (word) or memory (byte).
1599 BITPOS says which bit within the word or byte the bit field starts in.
1600 OFFSET says how many bytes farther the bit field starts;
1601 it is 0 if OP0 is a register.
1602 BITSIZE says how many bits long the bit field is.
1603 (If OP0 is a register, it may be narrower than a full word,
1604 but BITPOS still counts within a full word,
1605 which is significant on bigendian machines.)
1607 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1608 PACKEDP is true if the field has the packed attribute.
1610 If TARGET is nonzero, attempts to store the value there
1611 and return TARGET, but this is not guaranteed.
1612 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1614 static rtx
1615 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1616 unsigned HOST_WIDE_INT offset,
1617 unsigned HOST_WIDE_INT bitsize,
1618 unsigned HOST_WIDE_INT bitpos, rtx target,
1619 int unsignedp, bool packedp)
1621 unsigned int total_bits = BITS_PER_WORD;
1622 enum machine_mode mode;
1624 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1626 /* Special treatment for a bit field split across two registers. */
1627 if (bitsize + bitpos > BITS_PER_WORD)
1628 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1630 else
1632 /* Get the proper mode to use for this field. We want a mode that
1633 includes the entire field. If such a mode would be larger than
1634 a word, we won't be doing the extraction the normal way. */
1636 if (MEM_VOLATILE_P (op0)
1637 && flag_strict_volatile_bitfields > 0)
1639 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1640 mode = GET_MODE (op0);
1641 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1642 mode = GET_MODE (target);
1643 else
1644 mode = tmode;
1646 else
1647 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1648 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1650 if (mode == VOIDmode)
1651 /* The only way this should occur is if the field spans word
1652 boundaries. */
1653 return extract_split_bit_field (op0, bitsize,
1654 bitpos + offset * BITS_PER_UNIT,
1655 unsignedp);
1657 total_bits = GET_MODE_BITSIZE (mode);
1659 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1660 be in the range 0 to total_bits-1, and put any excess bytes in
1661 OFFSET. */
1662 if (bitpos >= total_bits)
1664 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1665 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1666 * BITS_PER_UNIT);
1669 /* If we're accessing a volatile MEM, we can't do the next
1670 alignment step if it results in a multi-word access where we
1671 otherwise wouldn't have one. So, check for that case
1672 here. */
1673 if (MEM_P (op0)
1674 && MEM_VOLATILE_P (op0)
1675 && flag_strict_volatile_bitfields > 0
1676 && bitpos + bitsize <= total_bits
1677 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1679 if (STRICT_ALIGNMENT)
1681 static bool informed_about_misalignment = false;
1682 bool warned;
1684 if (packedp)
1686 if (bitsize == total_bits)
1687 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1688 "multiple accesses to volatile structure member"
1689 " because of packed attribute");
1690 else
1691 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1692 "multiple accesses to volatile structure bitfield"
1693 " because of packed attribute");
1695 return extract_split_bit_field (op0, bitsize,
1696 bitpos + offset * BITS_PER_UNIT,
1697 unsignedp);
1700 if (bitsize == total_bits)
1701 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1702 "mis-aligned access used for structure member");
1703 else
1704 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1705 "mis-aligned access used for structure bitfield");
1707 if (! informed_about_misalignment && warned)
1709 informed_about_misalignment = true;
1710 inform (input_location,
1711 "when a volatile object spans multiple type-sized locations,"
1712 " the compiler must choose between using a single mis-aligned access to"
1713 " preserve the volatility, or using multiple aligned accesses to avoid"
1714 " runtime faults; this code may fail at runtime if the hardware does"
1715 " not allow this access");
1719 else
1722 /* Get ref to an aligned byte, halfword, or word containing the field.
1723 Adjust BITPOS to be position within a word,
1724 and OFFSET to be the offset of that word.
1725 Then alter OP0 to refer to that word. */
1726 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1727 offset -= (offset % (total_bits / BITS_PER_UNIT));
1730 op0 = adjust_address (op0, mode, offset);
1733 mode = GET_MODE (op0);
1735 if (BYTES_BIG_ENDIAN)
1736 /* BITPOS is the distance between our msb and that of OP0.
1737 Convert it to the distance from the lsb. */
1738 bitpos = total_bits - bitsize - bitpos;
1740 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1741 We have reduced the big-endian case to the little-endian case. */
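 /* As a concrete illustration, assuming a 32-bit word: an unsigned 8-bit
    field at little-endian bit position 5 is extracted by a logical right
    shift of 5 followed by an AND with 0xff; the AND below is skipped only
    when the field's top bit was already the word's most significant bit.  */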
1743 if (unsignedp)
1745 if (bitpos)
1747 /* If the field does not already start at the lsb,
1748 shift it so it does. */
1749 /* Maybe propagate the target for the shift. */
1750 /* But not if we will return it--could confuse integrate.c. */
1751 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1752 if (tmode != mode) subtarget = 0;
1753 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitpos, subtarget, 1);
1755 /* Convert the value to the desired mode. */
1756 if (mode != tmode)
1757 op0 = convert_to_mode (tmode, op0, 1);
1759 /* Unless the msb of the field used to be the msb when we shifted,
1760 mask out the upper bits. */
1762 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1763 return expand_binop (GET_MODE (op0), and_optab, op0,
1764 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1765 target, 1, OPTAB_LIB_WIDEN);
1766 return op0;
1769 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1770 then arithmetic-shift its lsb to the lsb of the word. */
1771 op0 = force_reg (mode, op0);
1772 if (mode != tmode)
1773 target = 0;
1775 /* Find the narrowest integer mode that contains the field. */
1777 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1778 mode = GET_MODE_WIDER_MODE (mode))
1779 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1781 op0 = convert_to_mode (mode, op0, 0);
1782 break;
1785 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1787 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitpos);
1788 /* Maybe propagate the target for the shift. */
1789 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1790 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1793 return expand_shift (RSHIFT_EXPR, mode, op0,
1794 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
1797 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1798 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1799 complement of that if COMPLEMENT. The mask is truncated if
1800 necessary to the width of mode MODE. The mask is zero-extended if
1801 BITSIZE+BITPOS is too small for MODE. */
1803 static rtx
1804 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1806 double_int mask;
1808 mask = double_int_mask (bitsize);
1809 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1811 if (complement)
1812 mask = double_int_not (mask);
1814 return immed_double_int_const (mask, mode);
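 /* For example, in a 32-bit mode, mask_rtx (SImode, 4, 8, 0) produces the
    constant 0x00000ff0 (eight ones shifted left by four bit positions),
    and the complemented form mask_rtx (SImode, 4, 8, 1) produces
    0xfffff00f.  */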
1817 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1818 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1820 static rtx
1821 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1823 double_int val;
1825 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1826 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1828 return immed_double_int_const (val, mode);
1831 /* Extract a bit field that is split across two words
1832 and return an RTX for the result.
1834 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1835 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1836 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1838 static rtx
1839 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1840 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1842 unsigned int unit;
1843 unsigned int bitsdone = 0;
1844 rtx result = NULL_RTX;
1845 int first = 1;
1847 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1848 much at a time. */
1849 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1850 unit = BITS_PER_WORD;
1851 else
1852 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1854 while (bitsdone < bitsize)
1856 unsigned HOST_WIDE_INT thissize;
1857 rtx part, word;
1858 unsigned HOST_WIDE_INT thispos;
1859 unsigned HOST_WIDE_INT offset;
1861 offset = (bitpos + bitsdone) / unit;
1862 thispos = (bitpos + bitsdone) % unit;
1864 /* THISSIZE must not overrun a word boundary. Otherwise,
1865 extract_fixed_bit_field will call us again, and we will mutually
1866 recurse forever. */
1867 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1868 thissize = MIN (thissize, unit - thispos);
1870 /* If OP0 is a register, then handle OFFSET here.
1872 When handling multiword bitfields, extract_bit_field may pass
1873 down a word_mode SUBREG of a larger REG for a bitfield that actually
1874 crosses a word boundary. Thus, for a SUBREG, we must find
1875 the current word starting from the base register. */
1876 if (GET_CODE (op0) == SUBREG)
1878 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1879 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1880 GET_MODE (SUBREG_REG (op0)));
1881 offset = 0;
1883 else if (REG_P (op0))
1885 word = operand_subword_force (op0, offset, GET_MODE (op0));
1886 offset = 0;
1888 else
1889 word = op0;
1891 /* Extract the parts in bit-counting order,
1892 whose meaning is determined by BYTES_BIG_ENDIAN.
1893 OFFSET is in UNITs, and UNIT is in bits.
1894 extract_fixed_bit_field wants offset in bytes. */
1895 part = extract_fixed_bit_field (word_mode, word,
1896 offset * unit / BITS_PER_UNIT,
1897 thissize, thispos, 0, 1, false);
1898 bitsdone += thissize;
1900 /* Shift this part into place for the result. */
1901 if (BYTES_BIG_ENDIAN)
1903 if (bitsize != bitsdone)
1904 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1905 bitsize - bitsdone, 0, 1);
1907 else
1909 if (bitsdone != thissize)
1910 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1911 bitsdone - thissize, 0, 1);
1914 if (first)
1915 result = part;
1916 else
1917 /* Combine the parts with bitwise or. This works
1918 because we extracted each part as an unsigned bit field. */
1919 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1920 OPTAB_LIB_WIDEN);
1922 first = 0;
1925 /* Unsigned bit field: we are done. */
1926 if (unsignedp)
1927 return result;
1928 /* Signed bit field: sign-extend with two arithmetic shifts. */
1929 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1930 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1931 return expand_shift (RSHIFT_EXPR, word_mode, result,
1932 BITS_PER_WORD - bitsize, NULL_RTX, 0);
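 /* As an illustration, with 32-bit words, a 12-bit field starting at bit 26
    of the first word is assembled from a 6-bit part taken from bits 26..31
    of word 0 and a 6-bit part taken from bits 0..5 of word 1; both parts
    are fetched as unsigned fields, shifted into place and IORed together,
    and the final pair of shifts above performs the sign extension when
    UNSIGNEDP is zero.  */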
1935 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
1936 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
1937 MODE, fill the upper bits with zeros. Fail if the layout of either
1938 mode is unknown (as for CC modes) or if the extraction would involve
1939 unprofitable mode punning. Return the value on success, otherwise
1940 return null.
1942 This is different from gen_lowpart* in these respects:
1944 - the returned value must always be considered an rvalue
1946 - when MODE is wider than SRC_MODE, the extraction involves
1947 a zero extension
1949 - when MODE is smaller than SRC_MODE, the extraction involves
1950 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
1952 In other words, this routine performs a computation, whereas the
1953 gen_lowpart* routines are conceptually lvalue or rvalue subreg
1954 operations. */
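 /* For instance, this can be used to reinterpret the low 32 bits of a
    DImode register as an SImode rvalue, or to read an SImode value as
    DImode with the upper 32 bits zeroed, provided the target considers
    the corresponding integer modes tieable.  */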
1957 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
1959 enum machine_mode int_mode, src_int_mode;
1961 if (mode == src_mode)
1962 return src;
1964 if (CONSTANT_P (src))
1966 /* simplify_gen_subreg can't be used here, as if simplify_subreg
1967 fails, it will happily create (subreg (symbol_ref)) or similar
1968 invalid SUBREGs. */
1969 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
1970 rtx ret = simplify_subreg (mode, src, src_mode, byte);
1971 if (ret)
1972 return ret;
1974 if (GET_MODE (src) == VOIDmode
1975 || !validate_subreg (mode, src_mode, src, byte))
1976 return NULL_RTX;
1978 src = force_reg (GET_MODE (src), src);
1979 return gen_rtx_SUBREG (mode, src, byte);
1982 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
1983 return NULL_RTX;
1985 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
1986 && MODES_TIEABLE_P (mode, src_mode))
1988 rtx x = gen_lowpart_common (mode, src);
1989 if (x)
1990 return x;
1993 src_int_mode = int_mode_for_mode (src_mode);
1994 int_mode = int_mode_for_mode (mode);
1995 if (src_int_mode == BLKmode || int_mode == BLKmode)
1996 return NULL_RTX;
1998 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
1999 return NULL_RTX;
2000 if (!MODES_TIEABLE_P (int_mode, mode))
2001 return NULL_RTX;
2003 src = gen_lowpart (src_int_mode, src);
2004 src = convert_modes (int_mode, src_int_mode, src, true);
2005 src = gen_lowpart (mode, src);
2006 return src;
2009 /* Add INC into TARGET. */
2011 void
2012 expand_inc (rtx target, rtx inc)
2014 rtx value = expand_binop (GET_MODE (target), add_optab,
2015 target, inc,
2016 target, 0, OPTAB_LIB_WIDEN);
2017 if (value != target)
2018 emit_move_insn (target, value);
2021 /* Subtract DEC from TARGET. */
2023 void
2024 expand_dec (rtx target, rtx dec)
2026 rtx value = expand_binop (GET_MODE (target), sub_optab,
2027 target, dec,
2028 target, 0, OPTAB_LIB_WIDEN);
2029 if (value != target)
2030 emit_move_insn (target, value);
2033 /* Output a shift instruction for expression code CODE,
2034 with SHIFTED being the rtx for the value to shift,
2035 and AMOUNT the tree for the amount to shift by.
2036 Store the result in the rtx TARGET, if that is convenient.
2037 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2038 Return the rtx for where the value is. */
2041 expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2042 tree amount, rtx target, int unsignedp)
2044 rtx op1, temp = 0;
2045 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2046 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2047 optab lshift_optab = ashl_optab;
2048 optab rshift_arith_optab = ashr_optab;
2049 optab rshift_uns_optab = lshr_optab;
2050 optab lrotate_optab = rotl_optab;
2051 optab rrotate_optab = rotr_optab;
2052 enum machine_mode op1_mode;
2053 int attempt;
2054 bool speed = optimize_insn_for_speed_p ();
2056 op1 = expand_normal (amount);
2057 op1_mode = GET_MODE (op1);
2059 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2060 shift amount is a vector, use the vector/vector shift patterns. */
2061 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2063 lshift_optab = vashl_optab;
2064 rshift_arith_optab = vashr_optab;
2065 rshift_uns_optab = vlshr_optab;
2066 lrotate_optab = vrotl_optab;
2067 rrotate_optab = vrotr_optab;
2070 /* Previously detected shift-counts computed by NEGATE_EXPR
2071 and shifted in the other direction; but that does not work
2072 on all machines. */
2074 if (SHIFT_COUNT_TRUNCATED)
2076 if (CONST_INT_P (op1)
2077 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2078 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2079 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2080 % GET_MODE_BITSIZE (mode));
2081 else if (GET_CODE (op1) == SUBREG
2082 && subreg_lowpart_p (op1)
2083 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2084 op1 = SUBREG_REG (op1);
2087 if (op1 == const0_rtx)
2088 return shifted;
2090 /* Check whether it's cheaper to implement a left shift by a constant
2091 bit count by a sequence of additions. */
2092 if (code == LSHIFT_EXPR
2093 && CONST_INT_P (op1)
2094 && INTVAL (op1) > 0
2095 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2096 && INTVAL (op1) < MAX_BITS_PER_WORD
2097 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2098 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2100 int i;
2101 for (i = 0; i < INTVAL (op1); i++)
2103 temp = force_reg (mode, shifted);
2104 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2105 unsignedp, OPTAB_LIB_WIDEN);
2107 return shifted;
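 /* For example, if the target costs a shift by 2 higher than two additions,
    x << 2 is emitted as two doublings: t = x + x; t = t + t.  */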
2110 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2112 enum optab_methods methods;
2114 if (attempt == 0)
2115 methods = OPTAB_DIRECT;
2116 else if (attempt == 1)
2117 methods = OPTAB_WIDEN;
2118 else
2119 methods = OPTAB_LIB_WIDEN;
2121 if (rotate)
2123 /* Widening does not work for rotation. */
2124 if (methods == OPTAB_WIDEN)
2125 continue;
2126 else if (methods == OPTAB_LIB_WIDEN)
2128 /* If we have been unable to open-code this by a rotation,
2129 do it as the IOR of two shifts. I.e., to rotate A
2130 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2131 where C is the bitsize of A.
2133 It is theoretically possible that the target machine might
2134 not be able to perform either shift and hence we would
2135 be making two libcalls rather than just the one for the
2136 shift (similarly if IOR could not be done). We will allow
2137 this extremely unlikely lossage to avoid complicating the
2138 code below. */
2140 rtx subtarget = target == shifted ? 0 : target;
2141 tree new_amount, other_amount;
2142 rtx temp1;
2143 tree type = TREE_TYPE (amount);
2144 if (GET_MODE (op1) != TYPE_MODE (type)
2145 && GET_MODE (op1) != VOIDmode)
2146 op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
2147 new_amount = make_tree (type, op1);
2148 other_amount
2149 = fold_build2 (MINUS_EXPR, type,
2150 build_int_cst (type, GET_MODE_BITSIZE (mode)),
2151 new_amount);
2153 shifted = force_reg (mode, shifted);
2155 temp = expand_variable_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2156 mode, shifted, new_amount, 0, 1);
2157 temp1 = expand_variable_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2158 mode, shifted, other_amount,
2159 subtarget, 1);
2160 return expand_binop (mode, ior_optab, temp, temp1, target,
2161 unsignedp, methods);
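 /* E.g. for a 32-bit mode, a variable rotate left by AMOUNT becomes
    (A << AMOUNT) | ((unsigned) A >> (32 - AMOUNT)); rotating 0x80000001
    left by 1 this way yields 0x00000003.  */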
2164 temp = expand_binop (mode,
2165 left ? lrotate_optab : rrotate_optab,
2166 shifted, op1, target, unsignedp, methods);
2168 else if (unsignedp)
2169 temp = expand_binop (mode,
2170 left ? lshift_optab : rshift_uns_optab,
2171 shifted, op1, target, unsignedp, methods);
2173 /* Do arithmetic shifts.
2174 Also, if we are going to widen the operand, we can just as well
2175 use an arithmetic right-shift instead of a logical one. */
2176 if (temp == 0 && ! rotate
2177 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2179 enum optab_methods methods1 = methods;
2181 /* If trying to widen a log shift to an arithmetic shift,
2182 don't accept an arithmetic shift of the same size. */
2183 if (unsignedp)
2184 methods1 = OPTAB_MUST_WIDEN;
2186 /* Arithmetic shift */
2188 temp = expand_binop (mode,
2189 left ? lshift_optab : rshift_arith_optab,
2190 shifted, op1, target, unsignedp, methods1);
2193 /* We used to try extzv here for logical right shifts, but that was
2194 only useful for one machine, the VAX, and caused poor code
2195 generation there for lshrdi3, so the code was deleted and a
2196 define_expand for lshrsi3 was added to vax.md. */
2199 gcc_assert (temp);
2200 return temp;
2203 /* Output a shift instruction for expression code CODE,
2204 with SHIFTED being the rtx for the value to shift,
2205 and AMOUNT the amount to shift by.
2206 Store the result in the rtx TARGET, if that is convenient.
2207 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2208 Return the rtx for where the value is. */
2211 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2212 int amount, rtx target, int unsignedp)
2214 /* ??? With re-writing expand_shift we could avoid going through a
2215 tree for the shift amount and directly do GEN_INT (amount). */
2216 return expand_variable_shift (code, mode, shifted,
2217 build_int_cst (integer_type_node, amount),
2218 target, unsignedp);
2221 /* Indicates the type of fixup needed after a constant multiplication.
2222 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2223 the result should be negated, and ADD_VARIANT means that the
2224 multiplicand should be added to the result. */
2225 enum mult_variant {basic_variant, negate_variant, add_variant};
2227 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2228 const struct mult_cost *, enum machine_mode mode);
2229 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2230 struct algorithm *, enum mult_variant *, int);
2231 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2232 const struct algorithm *, enum mult_variant);
2233 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2234 int, rtx *, int *, int *);
2235 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2236 static rtx extract_high_half (enum machine_mode, rtx);
2237 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2238 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2239 int, int);
2240 /* Compute and return the best algorithm for multiplying by T.
2241 The algorithm must cost less than COST_LIMIT.
2242 If retval.cost >= COST_LIMIT, no algorithm was found and all
2243 other fields of the returned struct are undefined.
2244 MODE is the machine mode of the multiplication. */
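 /* As a purely illustrative example, T = 45 can be synthesized as two
    shift-and-add (alg_add_factor) steps: y = (x << 2) + x computes x * 5,
    and (y << 3) + y then computes x * 45, so the recorded algorithm costs
    two shift-adds rather than a multiply, provided the target's costs make
    that profitable.  */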
2246 static void
2247 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2248 const struct mult_cost *cost_limit, enum machine_mode mode)
2250 int m;
2251 struct algorithm *alg_in, *best_alg;
2252 struct mult_cost best_cost;
2253 struct mult_cost new_limit;
2254 int op_cost, op_latency;
2255 unsigned HOST_WIDE_INT orig_t = t;
2256 unsigned HOST_WIDE_INT q;
2257 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2258 int hash_index;
2259 bool cache_hit = false;
2260 enum alg_code cache_alg = alg_zero;
2261 bool speed = optimize_insn_for_speed_p ();
2263 /* Indicate that no algorithm is yet found. If no algorithm
2264 is found, this value will be returned and indicate failure. */
2265 alg_out->cost.cost = cost_limit->cost + 1;
2266 alg_out->cost.latency = cost_limit->latency + 1;
2268 if (cost_limit->cost < 0
2269 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2270 return;
2272 /* Restrict the bits of "t" to the multiplication's mode. */
2273 t &= GET_MODE_MASK (mode);
2275 /* t == 1 can be done at zero cost.  */
2276 if (t == 1)
2278 alg_out->ops = 1;
2279 alg_out->cost.cost = 0;
2280 alg_out->cost.latency = 0;
2281 alg_out->op[0] = alg_m;
2282 return;
2285 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2286 fail now. */
2287 if (t == 0)
2289 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2290 return;
2291 else
2293 alg_out->ops = 1;
2294 alg_out->cost.cost = zero_cost[speed];
2295 alg_out->cost.latency = zero_cost[speed];
2296 alg_out->op[0] = alg_zero;
2297 return;
2301 /* We'll be needing a couple extra algorithm structures now. */
2303 alg_in = XALLOCA (struct algorithm);
2304 best_alg = XALLOCA (struct algorithm);
2305 best_cost = *cost_limit;
2307 /* Compute the hash index. */
2308 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2310 /* See if we already know what to do for T. */
2311 if (alg_hash[hash_index].t == t
2312 && alg_hash[hash_index].mode == mode
2314 && alg_hash[hash_index].speed == speed
2315 && alg_hash[hash_index].alg != alg_unknown)
2317 cache_alg = alg_hash[hash_index].alg;
2319 if (cache_alg == alg_impossible)
2321 /* The cache tells us that it's impossible to synthesize
2322 multiplication by T within alg_hash[hash_index].cost. */
2323 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2324 /* COST_LIMIT is at least as restrictive as the one
2325 recorded in the hash table, in which case we have no
2326 hope of synthesizing a multiplication. Just
2327 return. */
2328 return;
2330 /* If we get here, COST_LIMIT is less restrictive than the
2331 one recorded in the hash table, so we may be able to
2332 synthesize a multiplication. Proceed as if we didn't
2333 have the cache entry. */
2335 else
2337 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2338 /* The cached algorithm shows that this multiplication
2339 requires more cost than COST_LIMIT. Just return. This
2340 way, we don't clobber this cache entry with
2341 alg_impossible but retain useful information. */
2342 return;
2344 cache_hit = true;
2346 switch (cache_alg)
2348 case alg_shift:
2349 goto do_alg_shift;
2351 case alg_add_t_m2:
2352 case alg_sub_t_m2:
2353 goto do_alg_addsub_t_m2;
2355 case alg_add_factor:
2356 case alg_sub_factor:
2357 goto do_alg_addsub_factor;
2359 case alg_add_t2_m:
2360 goto do_alg_add_t2_m;
2362 case alg_sub_t2_m:
2363 goto do_alg_sub_t2_m;
2365 default:
2366 gcc_unreachable ();
2371 /* If we have a group of zero bits at the low-order part of T, try
2372 multiplying by the remaining bits and then doing a shift. */
2374 if ((t & 1) == 0)
2376 do_alg_shift:
2377 m = floor_log2 (t & -t); /* m = number of low zero bits */
2378 if (m < maxm)
2380 q = t >> m;
2381 /* The function expand_shift will choose between a shift and
2382 a sequence of additions, so the observed cost is given as
2383 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2384 op_cost = m * add_cost[speed][mode];
2385 if (shift_cost[speed][mode][m] < op_cost)
2386 op_cost = shift_cost[speed][mode][m];
2387 new_limit.cost = best_cost.cost - op_cost;
2388 new_limit.latency = best_cost.latency - op_cost;
2389 synth_mult (alg_in, q, &new_limit, mode);
2391 alg_in->cost.cost += op_cost;
2392 alg_in->cost.latency += op_cost;
2393 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2395 struct algorithm *x;
2396 best_cost = alg_in->cost;
2397 x = alg_in, alg_in = best_alg, best_alg = x;
2398 best_alg->log[best_alg->ops] = m;
2399 best_alg->op[best_alg->ops] = alg_shift;
2402 /* See if treating ORIG_T as a signed number yields a better
2403 sequence. Try this sequence only for a negative ORIG_T
2404 as it would be useless for a non-negative ORIG_T. */
2405 if ((HOST_WIDE_INT) orig_t < 0)
2407 /* Shift ORIG_T as follows because a right shift of a
2408 negative-valued signed type is implementation
2409 defined. */
2410 q = ~(~orig_t >> m);
2411 /* The function expand_shift will choose between a shift
2412 and a sequence of additions, so the observed cost is
2413 given as MIN (m * add_cost[speed][mode],
2414 shift_cost[speed][mode][m]). */
2415 op_cost = m * add_cost[speed][mode];
2416 if (shift_cost[speed][mode][m] < op_cost)
2417 op_cost = shift_cost[speed][mode][m];
2418 new_limit.cost = best_cost.cost - op_cost;
2419 new_limit.latency = best_cost.latency - op_cost;
2420 synth_mult (alg_in, q, &new_limit, mode);
2422 alg_in->cost.cost += op_cost;
2423 alg_in->cost.latency += op_cost;
2424 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2426 struct algorithm *x;
2427 best_cost = alg_in->cost;
2428 x = alg_in, alg_in = best_alg, best_alg = x;
2429 best_alg->log[best_alg->ops] = m;
2430 best_alg->op[best_alg->ops] = alg_shift;
2434 if (cache_hit)
2435 goto done;
2438 /* If we have an odd number, add or subtract one. */
2439 if ((t & 1) != 0)
2441 unsigned HOST_WIDE_INT w;
2443 do_alg_addsub_t_m2:
2444 for (w = 1; (w & t) != 0; w <<= 1)
2446 /* If T was -1, then W will be zero after the loop. This is another
2447 case where T ends with ...111. Handling this by multiplying by (T + 1)
2448 and subtracting 1 produces slightly better code and results in algorithm
2449 selection much faster than treating it like the ...0111 case
2450 below. */
2451 if (w == 0
2452 || (w > 2
2453 /* Reject the case where t is 3.
2454 Thus we prefer addition in that case. */
2455 && t != 3))
2457 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2459 op_cost = add_cost[speed][mode];
2460 new_limit.cost = best_cost.cost - op_cost;
2461 new_limit.latency = best_cost.latency - op_cost;
2462 synth_mult (alg_in, t + 1, &new_limit, mode);
2464 alg_in->cost.cost += op_cost;
2465 alg_in->cost.latency += op_cost;
2466 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2468 struct algorithm *x;
2469 best_cost = alg_in->cost;
2470 x = alg_in, alg_in = best_alg, best_alg = x;
2471 best_alg->log[best_alg->ops] = 0;
2472 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2475 else
2477 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2479 op_cost = add_cost[speed][mode];
2480 new_limit.cost = best_cost.cost - op_cost;
2481 new_limit.latency = best_cost.latency - op_cost;
2482 synth_mult (alg_in, t - 1, &new_limit, mode);
2484 alg_in->cost.cost += op_cost;
2485 alg_in->cost.latency += op_cost;
2486 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2488 struct algorithm *x;
2489 best_cost = alg_in->cost;
2490 x = alg_in, alg_in = best_alg, best_alg = x;
2491 best_alg->log[best_alg->ops] = 0;
2492 best_alg->op[best_alg->ops] = alg_add_t_m2;
2496 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2497 quickly with a - a * n for some appropriate constant n. */
2498 m = exact_log2 (-orig_t + 1);
2499 if (m >= 0 && m < maxm)
2501 op_cost = shiftsub1_cost[speed][mode][m];
2502 new_limit.cost = best_cost.cost - op_cost;
2503 new_limit.latency = best_cost.latency - op_cost;
2504 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2506 alg_in->cost.cost += op_cost;
2507 alg_in->cost.latency += op_cost;
2508 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2510 struct algorithm *x;
2511 best_cost = alg_in->cost;
2512 x = alg_in, alg_in = best_alg, best_alg = x;
2513 best_alg->log[best_alg->ops] = m;
2514 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2518 if (cache_hit)
2519 goto done;
2522 /* Look for factors of t of the form
2523 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2524 If we find such a factor, we can multiply by t using an algorithm that
2525 multiplies by q, shifts the result by m and adds/subtracts it to itself.
2527 We search for large factors first and loop down, even if large factors
2528 are less probable than small; if we find a large factor we will find a
2529 good sequence quickly, and therefore be able to prune (by decreasing
2530 COST_LIMIT) the search. */
2532 do_alg_addsub_factor:
2533 for (m = floor_log2 (t - 1); m >= 2; m--)
2535 unsigned HOST_WIDE_INT d;
2537 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2538 if (t % d == 0 && t > d && m < maxm
2539 && (!cache_hit || cache_alg == alg_add_factor))
2541 /* If the target has a cheap shift-and-add instruction use
2542 that in preference to a shift insn followed by an add insn.
2543 Assume that the shift-and-add is "atomic" with a latency
2544 equal to its cost, otherwise assume that on superscalar
2545 hardware the shift may be executed concurrently with the
2546 earlier steps in the algorithm. */
2547 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2548 if (shiftadd_cost[speed][mode][m] < op_cost)
2550 op_cost = shiftadd_cost[speed][mode][m];
2551 op_latency = op_cost;
2553 else
2554 op_latency = add_cost[speed][mode];
2556 new_limit.cost = best_cost.cost - op_cost;
2557 new_limit.latency = best_cost.latency - op_latency;
2558 synth_mult (alg_in, t / d, &new_limit, mode);
2560 alg_in->cost.cost += op_cost;
2561 alg_in->cost.latency += op_latency;
2562 if (alg_in->cost.latency < op_cost)
2563 alg_in->cost.latency = op_cost;
2564 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2566 struct algorithm *x;
2567 best_cost = alg_in->cost;
2568 x = alg_in, alg_in = best_alg, best_alg = x;
2569 best_alg->log[best_alg->ops] = m;
2570 best_alg->op[best_alg->ops] = alg_add_factor;
2572 /* Other factors will have been taken care of in the recursion. */
2573 break;
2576 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2577 if (t % d == 0 && t > d && m < maxm
2578 && (!cache_hit || cache_alg == alg_sub_factor))
2580 /* If the target has a cheap shift-and-subtract insn use
2581 that in preference to a shift insn followed by a sub insn.
2582 Assume that the shift-and-sub is "atomic" with a latency
2583 equal to its cost, otherwise assume that on superscalar
2584 hardware the shift may be executed concurrently with the
2585 earlier steps in the algorithm. */
2586 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2587 if (shiftsub0_cost[speed][mode][m] < op_cost)
2589 op_cost = shiftsub0_cost[speed][mode][m];
2590 op_latency = op_cost;
2592 else
2593 op_latency = add_cost[speed][mode];
2595 new_limit.cost = best_cost.cost - op_cost;
2596 new_limit.latency = best_cost.latency - op_latency;
2597 synth_mult (alg_in, t / d, &new_limit, mode);
2599 alg_in->cost.cost += op_cost;
2600 alg_in->cost.latency += op_latency;
2601 if (alg_in->cost.latency < op_cost)
2602 alg_in->cost.latency = op_cost;
2603 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2605 struct algorithm *x;
2606 best_cost = alg_in->cost;
2607 x = alg_in, alg_in = best_alg, best_alg = x;
2608 best_alg->log[best_alg->ops] = m;
2609 best_alg->op[best_alg->ops] = alg_sub_factor;
2611 break;
2614 if (cache_hit)
2615 goto done;
2617 /* Try shift-and-add (load effective address) instructions,
2618 i.e. do a*3, a*5, a*9. */
2619 if ((t & 1) != 0)
2621 do_alg_add_t2_m:
2622 q = t - 1;
2623 q = q & -q;
2624 m = exact_log2 (q);
2625 if (m >= 0 && m < maxm)
2627 op_cost = shiftadd_cost[speed][mode][m];
2628 new_limit.cost = best_cost.cost - op_cost;
2629 new_limit.latency = best_cost.latency - op_cost;
2630 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2632 alg_in->cost.cost += op_cost;
2633 alg_in->cost.latency += op_cost;
2634 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2636 struct algorithm *x;
2637 best_cost = alg_in->cost;
2638 x = alg_in, alg_in = best_alg, best_alg = x;
2639 best_alg->log[best_alg->ops] = m;
2640 best_alg->op[best_alg->ops] = alg_add_t2_m;
2643 if (cache_hit)
2644 goto done;
2646 do_alg_sub_t2_m:
2647 q = t + 1;
2648 q = q & -q;
2649 m = exact_log2 (q);
2650 if (m >= 0 && m < maxm)
2652 op_cost = shiftsub0_cost[speed][mode][m];
2653 new_limit.cost = best_cost.cost - op_cost;
2654 new_limit.latency = best_cost.latency - op_cost;
2655 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2657 alg_in->cost.cost += op_cost;
2658 alg_in->cost.latency += op_cost;
2659 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2661 struct algorithm *x;
2662 best_cost = alg_in->cost;
2663 x = alg_in, alg_in = best_alg, best_alg = x;
2664 best_alg->log[best_alg->ops] = m;
2665 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2668 if (cache_hit)
2669 goto done;
2672 done:
2673 /* If best_cost has not decreased, we have not found any algorithm. */
2674 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2676 /* We failed to find an algorithm. Record alg_impossible for
2677 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2678 we are asked to find an algorithm for T within the same or
2679 lower COST_LIMIT, we can immediately return to the
2680 caller. */
2681 alg_hash[hash_index].t = t;
2682 alg_hash[hash_index].mode = mode;
2683 alg_hash[hash_index].speed = speed;
2684 alg_hash[hash_index].alg = alg_impossible;
2685 alg_hash[hash_index].cost = *cost_limit;
2686 return;
2689 /* Cache the result. */
2690 if (!cache_hit)
2692 alg_hash[hash_index].t = t;
2693 alg_hash[hash_index].mode = mode;
2694 alg_hash[hash_index].speed = speed;
2695 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2696 alg_hash[hash_index].cost.cost = best_cost.cost;
2697 alg_hash[hash_index].cost.latency = best_cost.latency;
2700 /* If we are getting too long a sequence for `struct algorithm'
2701 to record, make this search fail. */
2702 if (best_alg->ops == MAX_BITS_PER_WORD)
2703 return;
2705 /* Copy the algorithm from temporary space to the space at alg_out.
2706 We avoid using structure assignment because the majority of
2707 best_alg is normally undefined, and this is a critical function. */
2708 alg_out->ops = best_alg->ops + 1;
2709 alg_out->cost = best_cost;
2710 memcpy (alg_out->op, best_alg->op,
2711 alg_out->ops * sizeof *alg_out->op);
2712 memcpy (alg_out->log, best_alg->log,
2713 alg_out->ops * sizeof *alg_out->log);
2716 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2717 Try three variations:
2719 - a shift/add sequence based on VAL itself
2720 - a shift/add sequence based on -VAL, followed by a negation
2721 - a shift/add sequence based on VAL - 1, followed by an addition.
2723 Return true if the cheapest of these cost less than MULT_COST,
2724 describing the algorithm in *ALG and final fixup in *VARIANT. */
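 /* For instance, VAL = 9 may come out cheapest as the sequence for 8
    (a single shift) followed by adding the multiplicand (add_variant),
    while VAL = -7 may come out cheapest as the sequence for 7,
    (x << 3) - x, followed by a negation (negate_variant).  */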
2726 static bool
2727 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2728 struct algorithm *alg, enum mult_variant *variant,
2729 int mult_cost)
2731 struct algorithm alg2;
2732 struct mult_cost limit;
2733 int op_cost;
2734 bool speed = optimize_insn_for_speed_p ();
2736 /* Fail quickly for impossible bounds. */
2737 if (mult_cost < 0)
2738 return false;
2740 /* Ensure that mult_cost provides a reasonable upper bound.
2741 Any constant multiplication can be performed with less
2742 than 2 * bits additions. */
2743 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2744 if (mult_cost > op_cost)
2745 mult_cost = op_cost;
2747 *variant = basic_variant;
2748 limit.cost = mult_cost;
2749 limit.latency = mult_cost;
2750 synth_mult (alg, val, &limit, mode);
2752 /* This works only if the inverted value actually fits in an
2753 `unsigned int' */
2754 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2756 op_cost = neg_cost[speed][mode];
2757 if (MULT_COST_LESS (&alg->cost, mult_cost))
2759 limit.cost = alg->cost.cost - op_cost;
2760 limit.latency = alg->cost.latency - op_cost;
2762 else
2764 limit.cost = mult_cost - op_cost;
2765 limit.latency = mult_cost - op_cost;
2768 synth_mult (&alg2, -val, &limit, mode);
2769 alg2.cost.cost += op_cost;
2770 alg2.cost.latency += op_cost;
2771 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2772 *alg = alg2, *variant = negate_variant;
2775 /* This proves very useful for division-by-constant. */
2776 op_cost = add_cost[speed][mode];
2777 if (MULT_COST_LESS (&alg->cost, mult_cost))
2779 limit.cost = alg->cost.cost - op_cost;
2780 limit.latency = alg->cost.latency - op_cost;
2782 else
2784 limit.cost = mult_cost - op_cost;
2785 limit.latency = mult_cost - op_cost;
2788 synth_mult (&alg2, val - 1, &limit, mode);
2789 alg2.cost.cost += op_cost;
2790 alg2.cost.latency += op_cost;
2791 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2792 *alg = alg2, *variant = add_variant;
2794 return MULT_COST_LESS (&alg->cost, mult_cost);
2797 /* A subroutine of expand_mult, used for constant multiplications.
2798 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2799 convenient. Use the shift/add sequence described by ALG and apply
2800 the final fixup specified by VARIANT. */
2802 static rtx
2803 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2804 rtx target, const struct algorithm *alg,
2805 enum mult_variant variant)
2807 HOST_WIDE_INT val_so_far;
2808 rtx insn, accum, tem;
2809 int opno;
2810 enum machine_mode nmode;
2812 /* Avoid referencing memory over and over and invalid sharing
2813 on SUBREGs. */
2814 op0 = force_reg (mode, op0);
2816 /* ACCUM starts out either as OP0 or as a zero, depending on
2817 the first operation. */
2819 if (alg->op[0] == alg_zero)
2821 accum = copy_to_mode_reg (mode, const0_rtx);
2822 val_so_far = 0;
2824 else if (alg->op[0] == alg_m)
2826 accum = copy_to_mode_reg (mode, op0);
2827 val_so_far = 1;
2829 else
2830 gcc_unreachable ();
2832 for (opno = 1; opno < alg->ops; opno++)
2834 int log = alg->log[opno];
2835 rtx shift_subtarget = optimize ? 0 : accum;
2836 rtx add_target
2837 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2838 && !optimize)
2839 ? target : 0;
2840 rtx accum_target = optimize ? 0 : accum;
2842 switch (alg->op[opno])
2844 case alg_shift:
2845 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2846 /* REG_EQUAL note will be attached to the following insn. */
2847 emit_move_insn (accum, tem);
2848 val_so_far <<= log;
2849 break;
2851 case alg_add_t_m2:
2852 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2853 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2854 add_target ? add_target : accum_target);
2855 val_so_far += (HOST_WIDE_INT) 1 << log;
2856 break;
2858 case alg_sub_t_m2:
2859 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2860 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2861 add_target ? add_target : accum_target);
2862 val_so_far -= (HOST_WIDE_INT) 1 << log;
2863 break;
2865 case alg_add_t2_m:
2866 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2867 log, shift_subtarget, 0);
2868 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2869 add_target ? add_target : accum_target);
2870 val_so_far = (val_so_far << log) + 1;
2871 break;
2873 case alg_sub_t2_m:
2874 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2875 log, shift_subtarget, 0);
2876 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2877 add_target ? add_target : accum_target);
2878 val_so_far = (val_so_far << log) - 1;
2879 break;
2881 case alg_add_factor:
2882 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2883 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2884 add_target ? add_target : accum_target);
2885 val_so_far += val_so_far << log;
2886 break;
2888 case alg_sub_factor:
2889 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2890 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2891 (add_target
2892 ? add_target : (optimize ? 0 : tem)));
2893 val_so_far = (val_so_far << log) - val_so_far;
2894 break;
2896 default:
2897 gcc_unreachable ();
2900 /* Write a REG_EQUAL note on the last insn so that we can cse
2901 multiplication sequences. Note that if ACCUM is a SUBREG,
2902 we've set the inner register and must properly indicate
2903 that. */
2905 tem = op0, nmode = mode;
2906 if (GET_CODE (accum) == SUBREG)
2908 nmode = GET_MODE (SUBREG_REG (accum));
2909 tem = gen_lowpart (nmode, op0);
2912 insn = get_last_insn ();
2913 set_unique_reg_note (insn, REG_EQUAL,
2914 gen_rtx_MULT (nmode, tem,
2915 GEN_INT (val_so_far)));
2918 if (variant == negate_variant)
2920 val_so_far = -val_so_far;
2921 accum = expand_unop (mode, neg_optab, accum, target, 0);
2923 else if (variant == add_variant)
2925 val_so_far = val_so_far + 1;
2926 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2929 /* Compare only the bits of val and val_so_far that are significant
2930 in the result mode, to avoid sign-/zero-extension confusion. */
2931 val &= GET_MODE_MASK (mode);
2932 val_so_far &= GET_MODE_MASK (mode);
2933 gcc_assert (val == val_so_far);
2935 return accum;
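 /* For example, multiplying by 10 with the algorithm {alg_m,
    alg_add_t2_m (log 2), alg_shift (log 1)} computes x; (x << 2) + x;
    then a left shift by 1, with VAL_SO_FAR passing through 1, 5 and 10,
    so the final assertion checks that the synthesized sequence really
    multiplies by VAL.  */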
2938 /* Perform a multiplication and return an rtx for the result.
2939 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2940 TARGET is a suggestion for where to store the result (an rtx).
2942 We check specially for a constant integer as OP1.
2943 If you want this check for OP0 as well, then before calling
2944 you should swap the two operands if OP0 would be constant. */
2947 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2948 int unsignedp)
2950 enum mult_variant variant;
2951 struct algorithm algorithm;
2952 int max_cost;
2953 bool speed = optimize_insn_for_speed_p ();
2955 /* Handling const0_rtx here allows us to use zero as a rogue value for
2956 coeff below. */
2957 if (op1 == const0_rtx)
2958 return const0_rtx;
2959 if (op1 == const1_rtx)
2960 return op0;
2961 if (op1 == constm1_rtx)
2962 return expand_unop (mode,
2963 GET_MODE_CLASS (mode) == MODE_INT
2964 && !unsignedp && flag_trapv
2965 ? negv_optab : neg_optab,
2966 op0, target, 0);
2968 /* These are the operations that are potentially turned into a sequence
2969 of shifts and additions. */
2970 if (SCALAR_INT_MODE_P (mode)
2971 && (unsignedp || !flag_trapv))
2973 HOST_WIDE_INT coeff = 0;
2974 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
2976 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2977 less than or equal in size to `unsigned int' this doesn't matter.
2978 If the mode is larger than `unsigned int', then synth_mult works
2979 only if the constant value exactly fits in an `unsigned int' without
2980 any truncation. This means that multiplying by negative values does
2981 not work; results are off by 2^32 on a 32 bit machine. */
2983 if (CONST_INT_P (op1))
2985 /* Attempt to handle multiplication of DImode values by negative
2986 coefficients, by performing the multiplication by a positive
2987 multiplier and then inverting the result. */
2988 if (INTVAL (op1) < 0
2989 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
2991 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
2992 result is interpreted as an unsigned coefficient.
2993 Exclude cost of op0 from max_cost to match the cost
2994 calculation of the synth_mult. */
2995 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
2996 - neg_cost[speed][mode];
2997 if (max_cost > 0
2998 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
2999 &variant, max_cost))
3001 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3002 NULL_RTX, &algorithm,
3003 variant);
3004 return expand_unop (mode, neg_optab, temp, target, 0);
3007 else coeff = INTVAL (op1);
3009 else if (GET_CODE (op1) == CONST_DOUBLE)
3011 /* If we are multiplying in DImode, it may still be a win
3012 to try to work with shifts and adds. */
3013 if (CONST_DOUBLE_HIGH (op1) == 0
3014 && CONST_DOUBLE_LOW (op1) > 0)
3015 coeff = CONST_DOUBLE_LOW (op1);
3016 else if (CONST_DOUBLE_LOW (op1) == 0
3017 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3019 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3020 + HOST_BITS_PER_WIDE_INT;
3021 return expand_shift (LSHIFT_EXPR, mode, op0,
3022 shift, target, unsignedp);
3026 /* We used to test optimize here, on the grounds that it's better to
3027 produce a smaller program when -O is not used. But this causes
3028 such a terrible slowdown sometimes that it seems better to always
3029 use synth_mult. */
3030 if (coeff != 0)
3032 /* Special case powers of two. */
3033 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3034 return expand_shift (LSHIFT_EXPR, mode, op0,
3035 floor_log2 (coeff), target, unsignedp);
3037 /* Exclude cost of op0 from max_cost to match the cost
3038 calculation of the synth_mult. */
3039 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3040 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3041 max_cost))
3042 return expand_mult_const (mode, op0, coeff, target,
3043 &algorithm, variant);
3047 if (GET_CODE (op0) == CONST_DOUBLE)
3049 rtx temp = op0;
3050 op0 = op1;
3051 op1 = temp;
3054 /* Expand x*2.0 as x+x. */
3055 if (GET_CODE (op1) == CONST_DOUBLE
3056 && SCALAR_FLOAT_MODE_P (mode))
3058 REAL_VALUE_TYPE d;
3059 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3061 if (REAL_VALUES_EQUAL (d, dconst2))
3063 op0 = force_reg (GET_MODE (op0), op0);
3064 return expand_binop (mode, add_optab, op0, op0,
3065 target, unsignedp, OPTAB_LIB_WIDEN);
3069 /* This used to use umul_optab if unsigned, but for non-widening multiply
3070 there is no difference between signed and unsigned. */
3071 op0 = expand_binop (mode,
3072 ! unsignedp
3073 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3074 ? smulv_optab : smul_optab,
3075 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3076 gcc_assert (op0);
3077 return op0;
3080 /* Perform a widening multiplication and return an rtx for the result.
3081 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3082 TARGET is a suggestion for where to store the result (an rtx).
3083 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3084 or smul_widen_optab.
3086 We check specially for a constant integer as OP1, comparing the
3087 cost of a widening multiply against the cost of a sequence of shifts
3088 and adds. */
3091 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3092 int unsignedp, optab this_optab)
3094 bool speed = optimize_insn_for_speed_p ();
3095 rtx cop1;
3097 if (CONST_INT_P (op1)
3098 && GET_MODE (op0) != VOIDmode
3099 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3100 this_optab == umul_widen_optab))
3101 && CONST_INT_P (cop1)
3102 && (INTVAL (cop1) >= 0
3103 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3105 HOST_WIDE_INT coeff = INTVAL (cop1);
3106 int max_cost;
3107 enum mult_variant variant;
3108 struct algorithm algorithm;
3110 /* Special case powers of two. */
3111 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3113 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3114 return expand_shift (LSHIFT_EXPR, mode, op0,
3115 floor_log2 (coeff), target, unsignedp);
3118 /* Exclude cost of op0 from max_cost to match the cost
3119 calculation of the synth_mult. */
3120 max_cost = mul_widen_cost[speed][mode];
3121 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3122 max_cost))
3124 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3125 return expand_mult_const (mode, op0, coeff, target,
3126 &algorithm, variant);
3129 return expand_binop (mode, this_optab, op0, op1, target,
3130 unsignedp, OPTAB_LIB_WIDEN);
3133 /* Return the smallest n such that 2**n >= X. */
3136 ceil_log2 (unsigned HOST_WIDE_INT x)
3138 return floor_log2 (x - 1) + 1;
3141 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3142 replace division by D, and put the least significant N bits of the result
3143 in *MULTIPLIER_PTR and return the most significant bit.
3145 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3146 needed precision is in PRECISION (should be <= N).
3148 PRECISION should be as small as possible so this function can choose
3149 multiplier more freely.
3151 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3152 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3154 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3155 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
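 /* As a worked example, for D = 7 with N = PRECISION = 32: lgup = 3,
    mhigh = floor((2^35 + 2^3)/7) = 0x124924925, and the reduction loop
    cannot shrink it, so *MULTIPLIER_PTR is set to 0x24924925, the return
    value (the 33rd multiplier bit) is 1, and *POST_SHIFT_PTR is 3, giving
    x / 7 == (x * 0x124924925) >> 35 for every 32-bit unsigned x.  */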
3157 static
3158 unsigned HOST_WIDE_INT
3159 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3160 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3162 HOST_WIDE_INT mhigh_hi, mlow_hi;
3163 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3164 int lgup, post_shift;
3165 int pow, pow2;
3166 unsigned HOST_WIDE_INT nl, dummy1;
3167 HOST_WIDE_INT nh, dummy2;
3169 /* lgup = ceil(log2(divisor)); */
3170 lgup = ceil_log2 (d);
3172 gcc_assert (lgup <= n);
3174 pow = n + lgup;
3175 pow2 = n + lgup - precision;
3177 /* We could handle this with some effort, but this case is much
3178 better handled directly with a scc insn, so rely on caller using
3179 that. */
3180 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3182 /* mlow = 2^(N + lgup)/d */
3183 if (pow >= HOST_BITS_PER_WIDE_INT)
3185 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3186 nl = 0;
3188 else
3190 nh = 0;
3191 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3193 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3194 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3196 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3197 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3198 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3199 else
3200 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3201 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3202 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3204 gcc_assert (!mhigh_hi || nh - d < d);
3205 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3206 /* Assert that mlow < mhigh. */
3207 gcc_assert (mlow_hi < mhigh_hi
3208 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3210 /* If precision == N, then mlow, mhigh exceed 2^N
3211 (but they do not exceed 2^(N+1)). */
3213 /* Reduce to lowest terms. */
3214 for (post_shift = lgup; post_shift > 0; post_shift--)
3216 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3217 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3218 if (ml_lo >= mh_lo)
3219 break;
3221 mlow_hi = 0;
3222 mlow_lo = ml_lo;
3223 mhigh_hi = 0;
3224 mhigh_lo = mh_lo;
3227 *post_shift_ptr = post_shift;
3228 *lgup_ptr = lgup;
3229 if (n < HOST_BITS_PER_WIDE_INT)
3231 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3232 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3233 return mhigh_lo >= mask;
3235 else
3237 *multiplier_ptr = GEN_INT (mhigh_lo);
3238 return mhigh_hi;
3242 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3243 congruent to 1 (mod 2**N). */
3245 static unsigned HOST_WIDE_INT
3246 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3248 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3250 /* The algorithm notes that the choice y = x satisfies
3251 x*y == 1 mod 2^3, since x is assumed odd.
3252 Each iteration doubles the number of bits of significance in y. */
3254 unsigned HOST_WIDE_INT mask;
3255 unsigned HOST_WIDE_INT y = x;
3256 int nbit = 3;
3258 mask = (n == HOST_BITS_PER_WIDE_INT
3259 ? ~(unsigned HOST_WIDE_INT) 0
3260 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3262 while (nbit < n)
3264 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3265 nbit *= 2;
3267 return y;
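 /* For example, invert_mod2n (3, 8): y starts at 3 (3*3 == 1 mod 8),
    one iteration gives 235 (3*235 == 1 mod 64), a second gives 171,
    and indeed 3 * 171 == 513 == 1 (mod 256).  */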
3270 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3271 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3272 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3273 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3274 become signed.
3276 The result is put in TARGET if that is convenient.
3278 MODE is the mode of operation. */
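 /* The adjustment implements the identity, modulo 2^N for N-bit operands:
    high_unsigned (a, b) == high_signed (a, b)
                            + (a < 0 ? b : 0) + (b < 0 ? a : 0),
    applied with + when UNSIGNEDP is nonzero and with - otherwise; the two
    shift/AND pairs below materialize the conditional addends.  */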
3281 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3282 rtx op1, rtx target, int unsignedp)
3284 rtx tem;
3285 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3287 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3288 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3289 tem = expand_and (mode, tem, op1, NULL_RTX);
3290 adj_operand
3291 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3292 adj_operand);
3294 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3295 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3296 tem = expand_and (mode, tem, op0, NULL_RTX);
3297 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3298 target);
3300 return target;
3303 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3305 static rtx
3306 extract_high_half (enum machine_mode mode, rtx op)
3308 enum machine_mode wider_mode;
3310 if (mode == word_mode)
3311 return gen_highpart (mode, op);
3313 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3315 wider_mode = GET_MODE_WIDER_MODE (mode);
3316 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3317 GET_MODE_BITSIZE (mode), 0, 1);
3318 return convert_modes (mode, wider_mode, op, 0);
3321 /* Like expand_mult_highpart, but only consider using a multiplication
3322 optab. OP1 is an rtx for the constant operand. */
3324 static rtx
3325 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3326 rtx target, int unsignedp, int max_cost)
3328 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3329 enum machine_mode wider_mode;
3330 optab moptab;
3331 rtx tem;
3332 int size;
3333 bool speed = optimize_insn_for_speed_p ();
3335 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3337 wider_mode = GET_MODE_WIDER_MODE (mode);
3338 size = GET_MODE_BITSIZE (mode);
3340 /* Firstly, try using a multiplication insn that only generates the needed
3341 high part of the product, and in the sign flavor of unsignedp. */
3342 if (mul_highpart_cost[speed][mode] < max_cost)
3344 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3345 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3346 unsignedp, OPTAB_DIRECT);
3347 if (tem)
3348 return tem;
3351 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3352 Need to adjust the result after the multiplication. */
3353 if (size - 1 < BITS_PER_WORD
3354 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3355 + 4 * add_cost[speed][mode] < max_cost))
3357 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3358 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3359 unsignedp, OPTAB_DIRECT);
3360 if (tem)
3361 /* We used the wrong signedness. Adjust the result. */
3362 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3363 tem, unsignedp);
3366 /* Try widening multiplication. */
3367 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3368 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3369 && mul_widen_cost[speed][wider_mode] < max_cost)
3371 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3372 unsignedp, OPTAB_WIDEN);
3373 if (tem)
3374 return extract_high_half (mode, tem);
3377 /* Try widening the mode and perform a non-widening multiplication. */
3378 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3379 && size - 1 < BITS_PER_WORD
3380 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3382 rtx insns, wop0, wop1;
3384 /* We need to widen the operands, for example to ensure the
3385 constant multiplier is correctly sign or zero extended.
3386 Use a sequence to clean-up any instructions emitted by
3387 the conversions if things don't work out. */
3388 start_sequence ();
3389 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3390 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3391 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3392 unsignedp, OPTAB_WIDEN);
3393 insns = get_insns ();
3394 end_sequence ();
3396 if (tem)
3398 emit_insn (insns);
3399 return extract_high_half (mode, tem);
3403 /* Try widening multiplication of opposite signedness, and adjust. */
3404 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3405 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3406 && size - 1 < BITS_PER_WORD
3407 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3408 + 4 * add_cost[speed][mode] < max_cost))
3410 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3411 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3412 if (tem != 0)
3414 tem = extract_high_half (mode, tem);
3415 /* We used the wrong signedness. Adjust the result. */
3416 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3417 target, unsignedp);
3421 return 0;
3424 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3425 putting the high half of the result in TARGET if that is convenient,
3426 and return where the result is. If the operation cannot be performed,
3427 0 is returned.
3429 MODE is the mode of operation and result.
3431 UNSIGNEDP nonzero means unsigned multiply.
3433 MAX_COST is the total allowed cost for the expanded RTL. */
3435 static rtx
3436 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3437 rtx target, int unsignedp, int max_cost)
3439 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3440 unsigned HOST_WIDE_INT cnst1;
3441 int extra_cost;
3442 bool sign_adjust = false;
3443 enum mult_variant variant;
3444 struct algorithm alg;
3445 rtx tem;
3446 bool speed = optimize_insn_for_speed_p ();
3448 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3449 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3450 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3452 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3454 /* We can't optimize modes wider than BITS_PER_WORD.
3455 ??? We might be able to perform double-word arithmetic if
3456 mode == word_mode, however all the cost calculations in
3457 synth_mult etc. assume single-word operations. */
3458 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3459 return expand_mult_highpart_optab (mode, op0, op1, target,
3460 unsignedp, max_cost);
3462 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3464 /* Check whether we try to multiply by a negative constant. */
3465 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3467 sign_adjust = true;
3468 extra_cost += add_cost[speed][mode];
3471 /* See whether shift/add multiplication is cheap enough. */
3472 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3473 max_cost - extra_cost))
3475 /* See whether the specialized multiplication optabs are
3476 cheaper than the shift/add version. */
3477 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3478 alg.cost.cost + extra_cost);
3479 if (tem)
3480 return tem;
3482 tem = convert_to_mode (wider_mode, op0, unsignedp);
3483 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3484 tem = extract_high_half (mode, tem);
3486 /* Adjust result for signedness. */
3487 if (sign_adjust)
3488 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3490 return tem;
3492 return expand_mult_highpart_optab (mode, op0, op1, target,
3493 unsignedp, max_cost);
3497 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3499 static rtx
3500 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3502 unsigned HOST_WIDE_INT masklow, maskhigh;
3503 rtx result, temp, shift, label;
3504 int logd;
3506 logd = floor_log2 (d);
3507 result = gen_reg_rtx (mode);
3509 /* Avoid conditional branches when they're expensive. */
3510 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3511 && optimize_insn_for_speed_p ())
3513 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3514 mode, 0, -1);
3515 if (signmask)
3517 signmask = force_reg (mode, signmask);
3518 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3519 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3521 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3522 which instruction sequence to use. If logical right shifts
3523 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3524 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3526 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3527 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3528 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3530 temp = expand_binop (mode, xor_optab, op0, signmask,
3531 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3532 temp = expand_binop (mode, sub_optab, temp, signmask,
3533 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3534 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3535 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3536 temp = expand_binop (mode, xor_optab, temp, signmask,
3537 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3538 temp = expand_binop (mode, sub_optab, temp, signmask,
3539 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3541 else
3543 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3544 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3545 signmask = force_reg (mode, signmask);
3547 temp = expand_binop (mode, add_optab, op0, signmask,
3548 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3549 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3550 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3551 temp = expand_binop (mode, sub_optab, temp, signmask,
3552 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3554 return temp;
3558 /* Mask contains the mode's signbit and the significant bits of the
3559 modulus. By including the signbit in the operation, many targets
3560 can avoid an explicit compare operation in the following comparison
3561 against zero. */
3563 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3564 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3566 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3567 maskhigh = -1;
3569 else
3570 maskhigh = (HOST_WIDE_INT) -1
3571 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3573 temp = expand_binop (mode, and_optab, op0,
3574 immed_double_const (masklow, maskhigh, mode),
3575 result, 1, OPTAB_LIB_WIDEN);
3576 if (temp != result)
3577 emit_move_insn (result, temp);
3579 label = gen_label_rtx ();
3580 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3582 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3583 0, OPTAB_LIB_WIDEN);
3584 masklow = (HOST_WIDE_INT) -1 << logd;
3585 maskhigh = -1;
3586 temp = expand_binop (mode, ior_optab, temp,
3587 immed_double_const (masklow, maskhigh, mode),
3588 result, 1, OPTAB_LIB_WIDEN);
3589 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3590 0, OPTAB_LIB_WIDEN);
3591 if (temp != result)
3592 emit_move_insn (result, temp);
3593 emit_label (label);
3594 return result;
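/* Illustrative sketch (added for exposition, not part of the original
   source, kept under #if 0): the branch-free remainder computation emitted
   by the fast path above, written out for a 32-bit int and d == 8
   (logd == 3).  BIAS is (1 << logd) - 1 when X is negative and 0 otherwise,
   so the result keeps the sign of the dividend as TRUNC_MOD requires.
   Assumes >> of a negative int is an arithmetic shift.  */
#if 0
static int
smod_pow2_sketch (int x)
{
  int signmask = x >> 31;                                    /* -1 if x < 0, else 0.  */
  unsigned int bias = (unsigned int) signmask >> (32 - 3);   /* 7 or 0.  */
  return (int) (((unsigned int) x + bias) & 7) - (int) bias; /* x % 8, sign of x.  */
}
#endif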
3597 /* Expand signed division of OP0 by a power of two D in mode MODE.
3598 This routine is only called for positive values of D. */
3600 static rtx
3601 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3603 rtx temp, label;
3604 int logd;
3606 logd = floor_log2 (d);
3608 if (d == 2
3609 && BRANCH_COST (optimize_insn_for_speed_p (),
3610 false) >= 1)
3612 temp = gen_reg_rtx (mode);
3613 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3614 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3615 0, OPTAB_LIB_WIDEN);
3616 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3619 #ifdef HAVE_conditional_move
3620 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3621 >= 2)
3623 rtx temp2;
3625 /* ??? emit_conditional_move forces a stack adjustment via
3626 compare_from_rtx so, if the sequence is discarded, it will
3627 be lost. Do it now instead. */
3628 do_pending_stack_adjust ();
3630 start_sequence ();
3631 temp2 = copy_to_mode_reg (mode, op0);
3632 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3633 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3634 temp = force_reg (mode, temp);
3636 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3637 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3638 mode, temp, temp2, mode, 0);
3639 if (temp2)
3641 rtx seq = get_insns ();
3642 end_sequence ();
3643 emit_insn (seq);
3644 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3646 end_sequence ();
3648 #endif
3650 if (BRANCH_COST (optimize_insn_for_speed_p (),
3651 false) >= 2)
3653 int ushift = GET_MODE_BITSIZE (mode) - logd;
3655 temp = gen_reg_rtx (mode);
3656 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3657 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3658 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3659 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3660 else
3661 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3662 ushift, NULL_RTX, 1);
3663 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3664 0, OPTAB_LIB_WIDEN);
3665 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3668 label = gen_label_rtx ();
3669 temp = copy_to_mode_reg (mode, op0);
3670 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3671 expand_inc (temp, GEN_INT (d - 1));
3672 emit_label (label);
3673 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
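/* Illustrative sketch (added, not in the original source, kept under #if 0):
   the branch-free form of signed division by a power of two used above, for
   a 32-bit int and d == 8 (logd == 3).  Adding d - 1 only when X is negative
   makes the arithmetic shift round toward zero instead of toward minus
   infinity.  Assumes >> of a negative int is an arithmetic shift.  */
#if 0
static int
sdiv_pow2_sketch (int x)
{
  int bias = (int) ((unsigned int) (x >> 31) >> (32 - 3));  /* 7 if x < 0, else 0.  */
  return (x + bias) >> 3;                                   /* x / 8, truncating.  */
}
#endif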
3676 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3677 if that is convenient, and returning where the result is.
3678 You may request either the quotient or the remainder as the result;
3679 specify REM_FLAG nonzero to get the remainder.
3681 CODE is the expression code for which kind of division this is;
3682 it controls how rounding is done. MODE is the machine mode to use.
3683 UNSIGNEDP nonzero means do unsigned division. */
3685 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3686 and then correct it by or'ing in missing high bits
3687 if result of ANDI is nonzero.
3688 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3689 This could optimize to a bfexts instruction.
3690 But C doesn't use these operations, so their optimizations are
3691 left for later. */
3692 /* ??? For modulo, we don't actually need the highpart of the first product;
3693 the low part will do nicely. And for small divisors, the second multiply
3694 can also be a low-part only multiply or even be completely left out.
3695 E.g. to calculate the remainder of a division by 3 with a 32 bit
3696 multiply, multiply with 0x55555556 and extract the upper two bits;
3697 the result is exact for inputs up to 0x1fffffff.
3698 The input range can be reduced by using cross-sum rules.
3699 For odd divisors >= 3, the following table gives right shift counts
3700 so that if a number is shifted by an integer multiple of the given
3701 amount, the remainder stays the same:
3702 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3703 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3704 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3705 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3706 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3708 Cross-sum rules for even numbers can be derived by leaving as many bits
3709 to the right alone as the divisor has zeros to the right.
3710 E.g. if x is an unsigned 32 bit number:
3711 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
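/* Worked example of the remark above (added for exposition, not part of the
   original source, kept under #if 0): the low 32 bits of x * 0x55555556 carry
   x mod 3 in their top two bits, so no high-part multiply is needed at all
   for small inputs; per the remark, the result is exact up to 0x1fffffff.  */
#if 0
static unsigned int
umod3_sketch (unsigned int x)
{
  unsigned int t = x * 0x55555556u;   /* Low 32 bits of the product.  */
  return t >> 30;                     /* == x % 3 for x <= 0x1fffffff.  */
}
#endif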
3715 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3716 rtx op0, rtx op1, rtx target, int unsignedp)
3718 enum machine_mode compute_mode;
3719 rtx tquotient;
3720 rtx quotient = 0, remainder = 0;
3721 rtx last;
3722 int size;
3723 rtx insn, set;
3724 optab optab1, optab2;
3725 int op1_is_constant, op1_is_pow2 = 0;
3726 int max_cost, extra_cost;
3727 static HOST_WIDE_INT last_div_const = 0;
3728 static HOST_WIDE_INT ext_op1;
3729 bool speed = optimize_insn_for_speed_p ();
3731 op1_is_constant = CONST_INT_P (op1);
3732 if (op1_is_constant)
3734 ext_op1 = INTVAL (op1);
3735 if (unsignedp)
3736 ext_op1 &= GET_MODE_MASK (mode);
3737 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3738 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3742 This is the structure of expand_divmod:
3744 First comes code to fix up the operands so we can perform the operations
3745 correctly and efficiently.
3747 Second comes a switch statement with code specific for each rounding mode.
3748 For some special operands this code emits all RTL for the desired
3749 operation, for other cases, it generates only a quotient and stores it in
3750 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3751 to indicate that it has not done anything.
3753 Last comes code that finishes the operation. If QUOTIENT is set and
3754 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3755 QUOTIENT is not set, it is computed using trunc rounding.
3757 We try to generate special code for division and remainder when OP1 is a
3758 constant. If |OP1| = 2**n we can use shifts and some other fast
3759 operations. For other values of OP1, we compute a carefully selected
3760 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3761 by m.
3763 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3764 half of the product. Different strategies for generating the product are
3765 implemented in expand_mult_highpart.
3767 If what we actually want is the remainder, we generate that by another
3768 by-constant multiplication and a subtraction. */
3770 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3771 code below will malfunction if we are, so check here and handle
3772 the special case if so. */
3773 if (op1 == const1_rtx)
3774 return rem_flag ? const0_rtx : op0;
3776 /* When dividing by -1, we could get an overflow.
3777 negv_optab can handle overflows. */
3778 if (! unsignedp && op1 == constm1_rtx)
3780 if (rem_flag)
3781 return const0_rtx;
3782 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3783 ? negv_optab : neg_optab, op0, target, 0);
3786 if (target
3787 /* Don't use the function value register as a target
3788 since we have to read it as well as write it,
3789 and function-inlining gets confused by this. */
3790 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3791 /* Don't clobber an operand while doing a multi-step calculation. */
3792 || ((rem_flag || op1_is_constant)
3793 && (reg_mentioned_p (target, op0)
3794 || (MEM_P (op0) && MEM_P (target))))
3795 || reg_mentioned_p (target, op1)
3796 || (MEM_P (op1) && MEM_P (target))))
3797 target = 0;
3799 /* Get the mode in which to perform this computation. Normally it will
3800 be MODE, but sometimes we can't do the desired operation in MODE.
3801 If so, pick a wider mode in which we can do the operation. Convert
3802 to that mode at the start to avoid repeated conversions.
3804 First see what operations we need. These depend on the expression
3805 we are evaluating. (We assume that divxx3 insns exist under the
3806 same conditions as modxx3 insns do, and that these insns don't normally
3807 fail. If these assumptions are not correct, we may generate less
3808 efficient code in some cases.)
3810 Then see if we find a mode in which we can open-code that operation
3811 (either a division, modulus, or shift). Finally, check for the smallest
3812 mode for which we can do the operation with a library call. */
3814 /* We might want to refine this now that we have division-by-constant
3815 optimization. Since expand_mult_highpart tries so many variants, it is
3816 not straightforward to generalize this. Maybe we should make an array
3817 of possible modes in init_expmed? Save this for GCC 2.7. */
3819 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3820 ? (unsignedp ? lshr_optab : ashr_optab)
3821 : (unsignedp ? udiv_optab : sdiv_optab));
3822 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3823 ? optab1
3824 : (unsignedp ? udivmod_optab : sdivmod_optab));
3826 for (compute_mode = mode; compute_mode != VOIDmode;
3827 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3828 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3829 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3830 break;
3832 if (compute_mode == VOIDmode)
3833 for (compute_mode = mode; compute_mode != VOIDmode;
3834 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3835 if (optab_libfunc (optab1, compute_mode)
3836 || optab_libfunc (optab2, compute_mode))
3837 break;
3839 /* If we still couldn't find a mode, use MODE, but expand_binop will
3840 probably die. */
3841 if (compute_mode == VOIDmode)
3842 compute_mode = mode;
3844 if (target && GET_MODE (target) == compute_mode)
3845 tquotient = target;
3846 else
3847 tquotient = gen_reg_rtx (compute_mode);
3849 size = GET_MODE_BITSIZE (compute_mode);
3850 #if 0
3851 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3852 (mode), and thereby get better code when OP1 is a constant. Do that
3853 later. It will require going over all usages of SIZE below. */
3854 size = GET_MODE_BITSIZE (mode);
3855 #endif
3857 /* Only deduct something for a REM if the last divide done was
3858 for a different constant. Then set the constant of the last
3859 divide. */
3860 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3861 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3862 && INTVAL (op1) == last_div_const))
3863 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3865 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3867 /* Now convert to the best mode to use. */
3868 if (compute_mode != mode)
3870 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3871 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3873 /* convert_modes may have placed op1 into a register, so we
3874 must recompute the following. */
3875 op1_is_constant = CONST_INT_P (op1);
3876 op1_is_pow2 = (op1_is_constant
3877 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3878 || (! unsignedp
3879 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3882 /* If one of the operands is a volatile MEM, copy it into a register. */
3884 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3885 op0 = force_reg (compute_mode, op0);
3886 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3887 op1 = force_reg (compute_mode, op1);
3889 /* If we need the remainder or if OP1 is constant, we need to
3890 put OP0 in a register in case it has any queued subexpressions. */
3891 if (rem_flag || op1_is_constant)
3892 op0 = force_reg (compute_mode, op0);
3894 last = get_last_insn ();
3896 /* Promote floor rounding to trunc rounding for unsigned operations. */
3897 if (unsignedp)
3899 if (code == FLOOR_DIV_EXPR)
3900 code = TRUNC_DIV_EXPR;
3901 if (code == FLOOR_MOD_EXPR)
3902 code = TRUNC_MOD_EXPR;
3903 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3904 code = TRUNC_DIV_EXPR;
3907 if (op1 != const0_rtx)
3908 switch (code)
3910 case TRUNC_MOD_EXPR:
3911 case TRUNC_DIV_EXPR:
3912 if (op1_is_constant)
3914 if (unsignedp)
3916 unsigned HOST_WIDE_INT mh;
3917 int pre_shift, post_shift;
3918 int dummy;
3919 rtx ml;
3920 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3921 & GET_MODE_MASK (compute_mode));
3923 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3925 pre_shift = floor_log2 (d);
3926 if (rem_flag)
3928 remainder
3929 = expand_binop (compute_mode, and_optab, op0,
3930 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3931 remainder, 1,
3932 OPTAB_LIB_WIDEN);
3933 if (remainder)
3934 return gen_lowpart (mode, remainder);
3936 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3937 pre_shift, tquotient, 1);
3939 else if (size <= HOST_BITS_PER_WIDE_INT)
3941 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3943 /* Most significant bit of divisor is set; emit an scc
3944 insn. */
3945 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
3946 compute_mode, 1, 1);
3948 else
3950 /* Find a suitable multiplier and right shift count
3951 instead of multiplying with D. */
3953 mh = choose_multiplier (d, size, size,
3954 &ml, &post_shift, &dummy);
3956 /* If the suggested multiplier is more than SIZE bits,
3957 we can do better for even divisors, using an
3958 initial right shift. */
3959 if (mh != 0 && (d & 1) == 0)
3961 pre_shift = floor_log2 (d & -d);
3962 mh = choose_multiplier (d >> pre_shift, size,
3963 size - pre_shift,
3964 &ml, &post_shift, &dummy);
3965 gcc_assert (!mh);
3967 else
3968 pre_shift = 0;
3970 if (mh != 0)
3972 rtx t1, t2, t3, t4;
3974 if (post_shift - 1 >= BITS_PER_WORD)
3975 goto fail1;
3977 extra_cost
3978 = (shift_cost[speed][compute_mode][post_shift - 1]
3979 + shift_cost[speed][compute_mode][1]
3980 + 2 * add_cost[speed][compute_mode]);
3981 t1 = expand_mult_highpart (compute_mode, op0, ml,
3982 NULL_RTX, 1,
3983 max_cost - extra_cost);
3984 if (t1 == 0)
3985 goto fail1;
3986 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3987 op0, t1),
3988 NULL_RTX);
3989 t3 = expand_shift (RSHIFT_EXPR, compute_mode,
3990 t2, 1, NULL_RTX, 1);
3991 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3992 t1, t3),
3993 NULL_RTX);
3994 quotient = expand_shift
3995 (RSHIFT_EXPR, compute_mode, t4,
3996 post_shift - 1, tquotient, 1);
3998 else
4000 rtx t1, t2;
4002 if (pre_shift >= BITS_PER_WORD
4003 || post_shift >= BITS_PER_WORD)
4004 goto fail1;
4006 t1 = expand_shift
4007 (RSHIFT_EXPR, compute_mode, op0,
4008 pre_shift, NULL_RTX, 1);
4009 extra_cost
4010 = (shift_cost[speed][compute_mode][pre_shift]
4011 + shift_cost[speed][compute_mode][post_shift]);
4012 t2 = expand_mult_highpart (compute_mode, t1, ml,
4013 NULL_RTX, 1,
4014 max_cost - extra_cost);
4015 if (t2 == 0)
4016 goto fail1;
4017 quotient = expand_shift
4018 (RSHIFT_EXPR, compute_mode, t2,
4019 post_shift, tquotient, 1);
4023 else /* Mode too wide to use tricky code */
4024 break;
4026 insn = get_last_insn ();
4027 if (insn != last
4028 && (set = single_set (insn)) != 0
4029 && SET_DEST (set) == quotient)
4030 set_unique_reg_note (insn,
4031 REG_EQUAL,
4032 gen_rtx_UDIV (compute_mode, op0, op1));
4034 else /* TRUNC_DIV, signed */
4036 unsigned HOST_WIDE_INT ml;
4037 int lgup, post_shift;
4038 rtx mlr;
4039 HOST_WIDE_INT d = INTVAL (op1);
4040 unsigned HOST_WIDE_INT abs_d;
4042 /* Since d might be INT_MIN, we have to cast to
4043 unsigned HOST_WIDE_INT before negating to avoid
4044 undefined signed overflow. */
4045 abs_d = (d >= 0
4046 ? (unsigned HOST_WIDE_INT) d
4047 : - (unsigned HOST_WIDE_INT) d);
4049 /* n rem d = n rem -d */
4050 if (rem_flag && d < 0)
4052 d = abs_d;
4053 op1 = gen_int_mode (abs_d, compute_mode);
4056 if (d == 1)
4057 quotient = op0;
4058 else if (d == -1)
4059 quotient = expand_unop (compute_mode, neg_optab, op0,
4060 tquotient, 0);
4061 else if (HOST_BITS_PER_WIDE_INT >= size
4062 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4064 /* This case is not handled correctly below. */
4065 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4066 compute_mode, 1, 1);
4067 if (quotient == 0)
4068 goto fail1;
4070 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4071 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4072 : sdiv_pow2_cheap[speed][compute_mode])
4073 /* We assume that the cheap metric is true if the
4074 optab has an expander for this mode. */
4075 && ((optab_handler ((rem_flag ? smod_optab
4076 : sdiv_optab),
4077 compute_mode)
4078 != CODE_FOR_nothing)
4079 || (optab_handler (sdivmod_optab,
4080 compute_mode)
4081 != CODE_FOR_nothing)))
4083 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4085 if (rem_flag)
4087 remainder = expand_smod_pow2 (compute_mode, op0, d);
4088 if (remainder)
4089 return gen_lowpart (mode, remainder);
4092 if (sdiv_pow2_cheap[speed][compute_mode]
4093 && ((optab_handler (sdiv_optab, compute_mode)
4094 != CODE_FOR_nothing)
4095 || (optab_handler (sdivmod_optab, compute_mode)
4096 != CODE_FOR_nothing)))
4097 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4098 compute_mode, op0,
4099 gen_int_mode (abs_d,
4100 compute_mode),
4101 NULL_RTX, 0);
4102 else
4103 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4105 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4106 negate the quotient. */
4107 if (d < 0)
4109 insn = get_last_insn ();
4110 if (insn != last
4111 && (set = single_set (insn)) != 0
4112 && SET_DEST (set) == quotient
4113 && abs_d < ((unsigned HOST_WIDE_INT) 1
4114 << (HOST_BITS_PER_WIDE_INT - 1)))
4115 set_unique_reg_note (insn,
4116 REG_EQUAL,
4117 gen_rtx_DIV (compute_mode,
4118 op0,
4119 GEN_INT
4120 (trunc_int_for_mode
4121 (abs_d,
4122 compute_mode))));
4124 quotient = expand_unop (compute_mode, neg_optab,
4125 quotient, quotient, 0);
4128 else if (size <= HOST_BITS_PER_WIDE_INT)
4130 choose_multiplier (abs_d, size, size - 1,
4131 &mlr, &post_shift, &lgup);
4132 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4133 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4135 rtx t1, t2, t3;
4137 if (post_shift >= BITS_PER_WORD
4138 || size - 1 >= BITS_PER_WORD)
4139 goto fail1;
4141 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4142 + shift_cost[speed][compute_mode][size - 1]
4143 + add_cost[speed][compute_mode]);
4144 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4145 NULL_RTX, 0,
4146 max_cost - extra_cost);
4147 if (t1 == 0)
4148 goto fail1;
4149 t2 = expand_shift
4150 (RSHIFT_EXPR, compute_mode, t1,
4151 post_shift, NULL_RTX, 0);
4152 t3 = expand_shift
4153 (RSHIFT_EXPR, compute_mode, op0,
4154 size - 1, NULL_RTX, 0);
4155 if (d < 0)
4156 quotient
4157 = force_operand (gen_rtx_MINUS (compute_mode,
4158 t3, t2),
4159 tquotient);
4160 else
4161 quotient
4162 = force_operand (gen_rtx_MINUS (compute_mode,
4163 t2, t3),
4164 tquotient);
4166 else
4168 rtx t1, t2, t3, t4;
4170 if (post_shift >= BITS_PER_WORD
4171 || size - 1 >= BITS_PER_WORD)
4172 goto fail1;
4174 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4175 mlr = gen_int_mode (ml, compute_mode);
4176 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4177 + shift_cost[speed][compute_mode][size - 1]
4178 + 2 * add_cost[speed][compute_mode]);
4179 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4180 NULL_RTX, 0,
4181 max_cost - extra_cost);
4182 if (t1 == 0)
4183 goto fail1;
4184 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4185 t1, op0),
4186 NULL_RTX);
4187 t3 = expand_shift
4188 (RSHIFT_EXPR, compute_mode, t2,
4189 post_shift, NULL_RTX, 0);
4190 t4 = expand_shift
4191 (RSHIFT_EXPR, compute_mode, op0,
4192 size - 1, NULL_RTX, 0);
4193 if (d < 0)
4194 quotient
4195 = force_operand (gen_rtx_MINUS (compute_mode,
4196 t4, t3),
4197 tquotient);
4198 else
4199 quotient
4200 = force_operand (gen_rtx_MINUS (compute_mode,
4201 t3, t4),
4202 tquotient);
4205 else /* Mode too wide to use tricky code */
4206 break;
4208 insn = get_last_insn ();
4209 if (insn != last
4210 && (set = single_set (insn)) != 0
4211 && SET_DEST (set) == quotient)
4212 set_unique_reg_note (insn,
4213 REG_EQUAL,
4214 gen_rtx_DIV (compute_mode, op0, op1));
4216 break;
4218 fail1:
4219 delete_insns_since (last);
4220 break;
4222 case FLOOR_DIV_EXPR:
4223 case FLOOR_MOD_EXPR:
4224 /* We will come here only for signed operations. */
4225 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4227 unsigned HOST_WIDE_INT mh;
4228 int pre_shift, lgup, post_shift;
4229 HOST_WIDE_INT d = INTVAL (op1);
4230 rtx ml;
4232 if (d > 0)
4234 /* We could just as easily deal with negative constants here,
4235 but it does not seem worth the trouble for GCC 2.6. */
4236 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4238 pre_shift = floor_log2 (d);
4239 if (rem_flag)
4241 remainder = expand_binop (compute_mode, and_optab, op0,
4242 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4243 remainder, 0, OPTAB_LIB_WIDEN);
4244 if (remainder)
4245 return gen_lowpart (mode, remainder);
4247 quotient = expand_shift
4248 (RSHIFT_EXPR, compute_mode, op0,
4249 pre_shift, tquotient, 0);
4251 else
4253 rtx t1, t2, t3, t4;
4255 mh = choose_multiplier (d, size, size - 1,
4256 &ml, &post_shift, &lgup);
4257 gcc_assert (!mh);
4259 if (post_shift < BITS_PER_WORD
4260 && size - 1 < BITS_PER_WORD)
4262 t1 = expand_shift
4263 (RSHIFT_EXPR, compute_mode, op0,
4264 size - 1, NULL_RTX, 0);
4265 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4266 NULL_RTX, 0, OPTAB_WIDEN);
4267 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4268 + shift_cost[speed][compute_mode][size - 1]
4269 + 2 * add_cost[speed][compute_mode]);
4270 t3 = expand_mult_highpart (compute_mode, t2, ml,
4271 NULL_RTX, 1,
4272 max_cost - extra_cost);
4273 if (t3 != 0)
4275 t4 = expand_shift
4276 (RSHIFT_EXPR, compute_mode, t3,
4277 post_shift, NULL_RTX, 1);
4278 quotient = expand_binop (compute_mode, xor_optab,
4279 t4, t1, tquotient, 0,
4280 OPTAB_WIDEN);
4285 else
4287 rtx nsign, t1, t2, t3, t4;
4288 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4289 op0, constm1_rtx), NULL_RTX);
4290 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4291 0, OPTAB_WIDEN);
4292 nsign = expand_shift
4293 (RSHIFT_EXPR, compute_mode, t2,
4294 size - 1, NULL_RTX, 0);
4295 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4296 NULL_RTX);
4297 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4298 NULL_RTX, 0);
4299 if (t4)
4301 rtx t5;
4302 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4303 NULL_RTX, 0);
4304 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4305 t4, t5),
4306 tquotient);
4311 if (quotient != 0)
4312 break;
4313 delete_insns_since (last);
4315 /* Try using an instruction that produces both the quotient and
4316 remainder, using truncation. We can easily compensate the quotient
4317 or remainder to get floor rounding, once we have the remainder.
4318 Notice that we also compute the final remainder value here,
4319 and return the result right away. */
4320 if (target == 0 || GET_MODE (target) != compute_mode)
4321 target = gen_reg_rtx (compute_mode);
4323 if (rem_flag)
4325 remainder
4326 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4327 quotient = gen_reg_rtx (compute_mode);
4329 else
4331 quotient
4332 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4333 remainder = gen_reg_rtx (compute_mode);
4336 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4337 quotient, remainder, 0))
4339 /* This could be computed with a branch-less sequence.
4340 Save that for later. */
4341 rtx tem;
4342 rtx label = gen_label_rtx ();
4343 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4344 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4345 NULL_RTX, 0, OPTAB_WIDEN);
4346 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4347 expand_dec (quotient, const1_rtx);
4348 expand_inc (remainder, op1);
4349 emit_label (label);
4350 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4353 /* No luck with division elimination or divmod. Have to do it
4354 by conditionally adjusting op0 *and* the result. */
4356 rtx label1, label2, label3, label4, label5;
4357 rtx adjusted_op0;
4358 rtx tem;
4360 quotient = gen_reg_rtx (compute_mode);
4361 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4362 label1 = gen_label_rtx ();
4363 label2 = gen_label_rtx ();
4364 label3 = gen_label_rtx ();
4365 label4 = gen_label_rtx ();
4366 label5 = gen_label_rtx ();
4367 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4368 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4369 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4370 quotient, 0, OPTAB_LIB_WIDEN);
4371 if (tem != quotient)
4372 emit_move_insn (quotient, tem);
4373 emit_jump_insn (gen_jump (label5));
4374 emit_barrier ();
4375 emit_label (label1);
4376 expand_inc (adjusted_op0, const1_rtx);
4377 emit_jump_insn (gen_jump (label4));
4378 emit_barrier ();
4379 emit_label (label2);
4380 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4381 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4382 quotient, 0, OPTAB_LIB_WIDEN);
4383 if (tem != quotient)
4384 emit_move_insn (quotient, tem);
4385 emit_jump_insn (gen_jump (label5));
4386 emit_barrier ();
4387 emit_label (label3);
4388 expand_dec (adjusted_op0, const1_rtx);
4389 emit_label (label4);
4390 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4391 quotient, 0, OPTAB_LIB_WIDEN);
4392 if (tem != quotient)
4393 emit_move_insn (quotient, tem);
4394 expand_dec (quotient, const1_rtx);
4395 emit_label (label5);
4397 break;
4399 case CEIL_DIV_EXPR:
4400 case CEIL_MOD_EXPR:
4401 if (unsignedp)
4403 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4405 rtx t1, t2, t3;
4406 unsigned HOST_WIDE_INT d = INTVAL (op1);
4407 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4408 floor_log2 (d), tquotient, 1);
4409 t2 = expand_binop (compute_mode, and_optab, op0,
4410 GEN_INT (d - 1),
4411 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4412 t3 = gen_reg_rtx (compute_mode);
4413 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4414 compute_mode, 1, 1);
4415 if (t3 == 0)
4417 rtx lab;
4418 lab = gen_label_rtx ();
4419 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4420 expand_inc (t1, const1_rtx);
4421 emit_label (lab);
4422 quotient = t1;
4424 else
4425 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4426 t1, t3),
4427 tquotient);
4428 break;
4431 /* Try using an instruction that produces both the quotient and
4432 remainder, using truncation. We can easily compensate the
4433 quotient or remainder to get ceiling rounding, once we have the
4434 remainder. Notice that we also compute the final remainder
4435 value here, and return the result right away. */
4436 if (target == 0 || GET_MODE (target) != compute_mode)
4437 target = gen_reg_rtx (compute_mode);
4439 if (rem_flag)
4441 remainder = (REG_P (target)
4442 ? target : gen_reg_rtx (compute_mode));
4443 quotient = gen_reg_rtx (compute_mode);
4445 else
4447 quotient = (REG_P (target)
4448 ? target : gen_reg_rtx (compute_mode));
4449 remainder = gen_reg_rtx (compute_mode);
4452 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4453 remainder, 1))
4455 /* This could be computed with a branch-less sequence.
4456 Save that for later. */
4457 rtx label = gen_label_rtx ();
4458 do_cmp_and_jump (remainder, const0_rtx, EQ,
4459 compute_mode, label);
4460 expand_inc (quotient, const1_rtx);
4461 expand_dec (remainder, op1);
4462 emit_label (label);
4463 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4466 /* No luck with division elimination or divmod. Have to do it
4467 by conditionally adjusting op0 *and* the result. */
4469 rtx label1, label2;
4470 rtx adjusted_op0, tem;
4472 quotient = gen_reg_rtx (compute_mode);
4473 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4474 label1 = gen_label_rtx ();
4475 label2 = gen_label_rtx ();
4476 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4477 compute_mode, label1);
4478 emit_move_insn (quotient, const0_rtx);
4479 emit_jump_insn (gen_jump (label2));
4480 emit_barrier ();
4481 emit_label (label1);
4482 expand_dec (adjusted_op0, const1_rtx);
4483 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4484 quotient, 1, OPTAB_LIB_WIDEN);
4485 if (tem != quotient)
4486 emit_move_insn (quotient, tem);
4487 expand_inc (quotient, const1_rtx);
4488 emit_label (label2);
4491 else /* signed */
4493 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4494 && INTVAL (op1) >= 0)
4496 /* This is extremely similar to the code for the unsigned case
4497 above. For 2.7 we should merge these variants, but for
4498 2.6.1 I don't want to touch the code for unsigned since that
4499 gets used in C. The signed case will only be used by other
4500 languages (Ada). */
4502 rtx t1, t2, t3;
4503 unsigned HOST_WIDE_INT d = INTVAL (op1);
4504 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4505 floor_log2 (d), tquotient, 0);
4506 t2 = expand_binop (compute_mode, and_optab, op0,
4507 GEN_INT (d - 1),
4508 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4509 t3 = gen_reg_rtx (compute_mode);
4510 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4511 compute_mode, 1, 1);
4512 if (t3 == 0)
4514 rtx lab;
4515 lab = gen_label_rtx ();
4516 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4517 expand_inc (t1, const1_rtx);
4518 emit_label (lab);
4519 quotient = t1;
4521 else
4522 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4523 t1, t3),
4524 tquotient);
4525 break;
4528 /* Try using an instruction that produces both the quotient and
4529 remainder, using truncation. We can easily compensate the
4530 quotient or remainder to get ceiling rounding, once we have the
4531 remainder. Notice that we also compute the final remainder
4532 value here, and return the result right away. */
4533 if (target == 0 || GET_MODE (target) != compute_mode)
4534 target = gen_reg_rtx (compute_mode);
4535 if (rem_flag)
4537 remainder = (REG_P (target)
4538 ? target : gen_reg_rtx (compute_mode));
4539 quotient = gen_reg_rtx (compute_mode);
4541 else
4543 quotient = (REG_P (target)
4544 ? target : gen_reg_rtx (compute_mode));
4545 remainder = gen_reg_rtx (compute_mode);
4548 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4549 remainder, 0))
4551 /* This could be computed with a branch-less sequence.
4552 Save that for later. */
4553 rtx tem;
4554 rtx label = gen_label_rtx ();
4555 do_cmp_and_jump (remainder, const0_rtx, EQ,
4556 compute_mode, label);
4557 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4558 NULL_RTX, 0, OPTAB_WIDEN);
4559 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4560 expand_inc (quotient, const1_rtx);
4561 expand_dec (remainder, op1);
4562 emit_label (label);
4563 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4566 /* No luck with division elimination or divmod. Have to do it
4567 by conditionally adjusting op0 *and* the result. */
4569 rtx label1, label2, label3, label4, label5;
4570 rtx adjusted_op0;
4571 rtx tem;
4573 quotient = gen_reg_rtx (compute_mode);
4574 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4575 label1 = gen_label_rtx ();
4576 label2 = gen_label_rtx ();
4577 label3 = gen_label_rtx ();
4578 label4 = gen_label_rtx ();
4579 label5 = gen_label_rtx ();
4580 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4581 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4582 compute_mode, label1);
4583 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4584 quotient, 0, OPTAB_LIB_WIDEN);
4585 if (tem != quotient)
4586 emit_move_insn (quotient, tem);
4587 emit_jump_insn (gen_jump (label5));
4588 emit_barrier ();
4589 emit_label (label1);
4590 expand_dec (adjusted_op0, const1_rtx);
4591 emit_jump_insn (gen_jump (label4));
4592 emit_barrier ();
4593 emit_label (label2);
4594 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4595 compute_mode, label3);
4596 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4597 quotient, 0, OPTAB_LIB_WIDEN);
4598 if (tem != quotient)
4599 emit_move_insn (quotient, tem);
4600 emit_jump_insn (gen_jump (label5));
4601 emit_barrier ();
4602 emit_label (label3);
4603 expand_inc (adjusted_op0, const1_rtx);
4604 emit_label (label4);
4605 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4606 quotient, 0, OPTAB_LIB_WIDEN);
4607 if (tem != quotient)
4608 emit_move_insn (quotient, tem);
4609 expand_inc (quotient, const1_rtx);
4610 emit_label (label5);
4613 break;
4615 case EXACT_DIV_EXPR:
4616 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4618 HOST_WIDE_INT d = INTVAL (op1);
4619 unsigned HOST_WIDE_INT ml;
4620 int pre_shift;
4621 rtx t1;
4623 pre_shift = floor_log2 (d & -d);
4624 ml = invert_mod2n (d >> pre_shift, size);
4625 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4626 pre_shift, NULL_RTX, unsignedp);
4627 quotient = expand_mult (compute_mode, t1,
4628 gen_int_mode (ml, compute_mode),
4629 NULL_RTX, 1);
4631 insn = get_last_insn ();
4632 set_unique_reg_note (insn,
4633 REG_EQUAL,
4634 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4635 compute_mode,
4636 op0, op1));
4638 break;
4640 case ROUND_DIV_EXPR:
4641 case ROUND_MOD_EXPR:
4642 if (unsignedp)
4644 rtx tem;
4645 rtx label;
4646 label = gen_label_rtx ();
4647 quotient = gen_reg_rtx (compute_mode);
4648 remainder = gen_reg_rtx (compute_mode);
4649 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4651 rtx tem;
4652 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4653 quotient, 1, OPTAB_LIB_WIDEN);
4654 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4655 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4656 remainder, 1, OPTAB_LIB_WIDEN);
4658 tem = plus_constant (op1, -1);
4659 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
4660 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4661 expand_inc (quotient, const1_rtx);
4662 expand_dec (remainder, op1);
4663 emit_label (label);
4665 else
4667 rtx abs_rem, abs_op1, tem, mask;
4668 rtx label;
4669 label = gen_label_rtx ();
4670 quotient = gen_reg_rtx (compute_mode);
4671 remainder = gen_reg_rtx (compute_mode);
4672 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4674 rtx tem;
4675 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4676 quotient, 0, OPTAB_LIB_WIDEN);
4677 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4678 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4679 remainder, 0, OPTAB_LIB_WIDEN);
4681 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4682 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4683 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4684 1, NULL_RTX, 1);
4685 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4686 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4687 NULL_RTX, 0, OPTAB_WIDEN);
4688 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4689 size - 1, NULL_RTX, 0);
4690 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4691 NULL_RTX, 0, OPTAB_WIDEN);
4692 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4693 NULL_RTX, 0, OPTAB_WIDEN);
4694 expand_inc (quotient, tem);
4695 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4696 NULL_RTX, 0, OPTAB_WIDEN);
4697 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4698 NULL_RTX, 0, OPTAB_WIDEN);
4699 expand_dec (remainder, tem);
4700 emit_label (label);
4702 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4704 default:
4705 gcc_unreachable ();
4708 if (quotient == 0)
4710 if (target && GET_MODE (target) != compute_mode)
4711 target = 0;
4713 if (rem_flag)
4715 /* Try to produce the remainder without producing the quotient.
4716 If we seem to have a divmod pattern that does not require widening,
4717 don't try widening here. We should really have a WIDEN argument
4718 to expand_twoval_binop, since what we'd really like to do here is
4719 1) try a mod insn in compute_mode
4720 2) try a divmod insn in compute_mode
4721 3) try a div insn in compute_mode and multiply-subtract to get
4722 remainder
4723 4) try the same things with widening allowed. */
4724 remainder
4725 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4726 op0, op1, target,
4727 unsignedp,
4728 ((optab_handler (optab2, compute_mode)
4729 != CODE_FOR_nothing)
4730 ? OPTAB_DIRECT : OPTAB_WIDEN));
4731 if (remainder == 0)
4733 /* No luck there. Can we do remainder and divide at once
4734 without a library call? */
4735 remainder = gen_reg_rtx (compute_mode);
4736 if (! expand_twoval_binop ((unsignedp
4737 ? udivmod_optab
4738 : sdivmod_optab),
4739 op0, op1,
4740 NULL_RTX, remainder, unsignedp))
4741 remainder = 0;
4744 if (remainder)
4745 return gen_lowpart (mode, remainder);
4748 /* Produce the quotient. Try a quotient insn, but not a library call.
4749 If we have a divmod in this mode, use it in preference to widening
4750 the div (for this test we assume it will not fail). Note that optab2
4751 is set to one of the two optabs that the call below will use. */
4752 quotient
4753 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4754 op0, op1, rem_flag ? NULL_RTX : target,
4755 unsignedp,
4756 ((optab_handler (optab2, compute_mode)
4757 != CODE_FOR_nothing)
4758 ? OPTAB_DIRECT : OPTAB_WIDEN));
4760 if (quotient == 0)
4762 /* No luck there. Try a quotient-and-remainder insn,
4763 keeping the quotient alone. */
4764 quotient = gen_reg_rtx (compute_mode);
4765 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4766 op0, op1,
4767 quotient, NULL_RTX, unsignedp))
4769 quotient = 0;
4770 if (! rem_flag)
4771 /* Still no luck. If we are not computing the remainder,
4772 use a library call for the quotient. */
4773 quotient = sign_expand_binop (compute_mode,
4774 udiv_optab, sdiv_optab,
4775 op0, op1, target,
4776 unsignedp, OPTAB_LIB_WIDEN);
4781 if (rem_flag)
4783 if (target && GET_MODE (target) != compute_mode)
4784 target = 0;
4786 if (quotient == 0)
4788 /* No divide instruction either. Use library for remainder. */
4789 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4790 op0, op1, target,
4791 unsignedp, OPTAB_LIB_WIDEN);
4792 /* No remainder function. Try a quotient-and-remainder
4793 function, keeping the remainder. */
4794 if (!remainder)
4796 remainder = gen_reg_rtx (compute_mode);
4797 if (!expand_twoval_binop_libfunc
4798 (unsignedp ? udivmod_optab : sdivmod_optab,
4799 op0, op1,
4800 NULL_RTX, remainder,
4801 unsignedp ? UMOD : MOD))
4802 remainder = NULL_RTX;
4805 else
4807 /* We divided. Now finish doing X - Y * (X / Y). */
4808 remainder = expand_mult (compute_mode, quotient, op1,
4809 NULL_RTX, unsignedp);
4810 remainder = expand_binop (compute_mode, sub_optab, op0,
4811 remainder, target, unsignedp,
4812 OPTAB_LIB_WIDEN);
4816 return gen_lowpart (mode, rem_flag ? remainder : quotient);
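/* Illustrative sketch (added, not from the original source, kept under
   #if 0) of the multiply-by-reciprocal strategy described at the top of
   expand_divmod: for unsigned 32-bit division by 10, choose_multiplier
   effectively picks m = 0xCCCCCCCD (= ceil(2**35 / 10)) with a post-shift
   of 3, so the quotient is the high half of the product shifted right by 3,
   and the remainder follows from one more multiply and a subtraction, just
   as in the X - Y * (X / Y) finish above.  */
#if 0
static unsigned int
udiv10_sketch (unsigned int x, unsigned int *rem)
{
  unsigned int q
    = (unsigned int) (((unsigned long long) x * 0xCCCCCCCDu) >> 35);
  *rem = x - q * 10u;          /* X - OP1 * (X / OP1).  */
  return q;                    /* == x / 10 for every 32-bit x.  */
}
#endif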
4819 /* Return a tree node with data type TYPE, describing the value of X.
4820 Usually this is a VAR_DECL, if there is no obvious better choice.
4821 X may be an expression; however, we only support those expressions
4822 generated by loop.c. */
4824 tree
4825 make_tree (tree type, rtx x)
4827 tree t;
4829 switch (GET_CODE (x))
4831 case CONST_INT:
4833 HOST_WIDE_INT hi = 0;
4835 if (INTVAL (x) < 0
4836 && !(TYPE_UNSIGNED (type)
4837 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4838 < HOST_BITS_PER_WIDE_INT)))
4839 hi = -1;
4841 t = build_int_cst_wide (type, INTVAL (x), hi);
4843 return t;
4846 case CONST_DOUBLE:
4847 if (GET_MODE (x) == VOIDmode)
4848 t = build_int_cst_wide (type,
4849 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4850 else
4852 REAL_VALUE_TYPE d;
4854 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4855 t = build_real (type, d);
4858 return t;
4860 case CONST_VECTOR:
4862 int units = CONST_VECTOR_NUNITS (x);
4863 tree itype = TREE_TYPE (type);
4864 tree t = NULL_TREE;
4865 int i;
4868 /* Build a tree with vector elements. */
4869 for (i = units - 1; i >= 0; --i)
4871 rtx elt = CONST_VECTOR_ELT (x, i);
4872 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4875 return build_vector (type, t);
4878 case PLUS:
4879 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4880 make_tree (type, XEXP (x, 1)));
4882 case MINUS:
4883 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4884 make_tree (type, XEXP (x, 1)));
4886 case NEG:
4887 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4889 case MULT:
4890 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4891 make_tree (type, XEXP (x, 1)));
4893 case ASHIFT:
4894 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4895 make_tree (type, XEXP (x, 1)));
4897 case LSHIFTRT:
4898 t = unsigned_type_for (type);
4899 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4900 make_tree (t, XEXP (x, 0)),
4901 make_tree (type, XEXP (x, 1))));
4903 case ASHIFTRT:
4904 t = signed_type_for (type);
4905 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4906 make_tree (t, XEXP (x, 0)),
4907 make_tree (type, XEXP (x, 1))));
4909 case DIV:
4910 if (TREE_CODE (type) != REAL_TYPE)
4911 t = signed_type_for (type);
4912 else
4913 t = type;
4915 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4916 make_tree (t, XEXP (x, 0)),
4917 make_tree (t, XEXP (x, 1))));
4918 case UDIV:
4919 t = unsigned_type_for (type);
4920 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4921 make_tree (t, XEXP (x, 0)),
4922 make_tree (t, XEXP (x, 1))));
4924 case SIGN_EXTEND:
4925 case ZERO_EXTEND:
4926 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4927 GET_CODE (x) == ZERO_EXTEND);
4928 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4930 case CONST:
4931 return make_tree (type, XEXP (x, 0));
4933 case SYMBOL_REF:
4934 t = SYMBOL_REF_DECL (x);
4935 if (t)
4936 return fold_convert (type, build_fold_addr_expr (t));
4937 /* else fall through. */
4939 default:
4940 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
4942 /* If TYPE is a POINTER_TYPE, we might need to convert X from
4943 address mode to pointer mode. */
4944 if (POINTER_TYPE_P (type))
4945 x = convert_memory_address_addr_space
4946 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
4948 /* Note that we do *not* use SET_DECL_RTL here, because we do not
4949 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
4950 t->decl_with_rtl.rtl = x;
4952 return t;
4956 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4957 and returning TARGET.
4959 If TARGET is 0, a pseudo-register or constant is returned. */
4962 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4964 rtx tem = 0;
4966 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4967 tem = simplify_binary_operation (AND, mode, op0, op1);
4968 if (tem == 0)
4969 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4971 if (target == 0)
4972 target = tem;
4973 else if (tem != target)
4974 emit_move_insn (target, tem);
4975 return target;
4978 /* Helper function for emit_store_flag. */
4979 static rtx
4980 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
4981 enum machine_mode mode, enum machine_mode compare_mode,
4982 int unsignedp, rtx x, rtx y, int normalizep,
4983 enum machine_mode target_mode)
4985 struct expand_operand ops[4];
4986 rtx op0, last, comparison, subtarget;
4987 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
4989 last = get_last_insn ();
4990 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
4991 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
4992 if (!x || !y)
4994 delete_insns_since (last);
4995 return NULL_RTX;
4998 if (target_mode == VOIDmode)
4999 target_mode = result_mode;
5000 if (!target)
5001 target = gen_reg_rtx (target_mode);
5003 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5005 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5006 create_fixed_operand (&ops[1], comparison);
5007 create_fixed_operand (&ops[2], x);
5008 create_fixed_operand (&ops[3], y);
5009 if (!maybe_expand_insn (icode, 4, ops))
5011 delete_insns_since (last);
5012 return NULL_RTX;
5014 subtarget = ops[0].value;
5016 /* If we are converting to a wider mode, first convert to
5017 TARGET_MODE, then normalize. This produces better combining
5018 opportunities on machines that have a SIGN_EXTRACT when we are
5019 testing a single bit. This mostly benefits the 68k.
5021 If STORE_FLAG_VALUE does not have the sign bit set when
5022 interpreted in MODE, we can do this conversion as unsigned, which
5023 is usually more efficient. */
5024 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5026 convert_move (target, subtarget,
5027 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5028 && 0 == (STORE_FLAG_VALUE
5029 & ((HOST_WIDE_INT) 1
5030 << (GET_MODE_BITSIZE (result_mode) -1))));
5031 op0 = target;
5032 result_mode = target_mode;
5034 else
5035 op0 = subtarget;
5037 /* If we want to keep subexpressions around, don't reuse our last
5038 target. */
5039 if (optimize)
5040 subtarget = 0;
5042 /* Now normalize to the proper value in MODE. Sometimes we don't
5043 have to do anything. */
5044 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5046 /* STORE_FLAG_VALUE might be the most negative number, so write
5047 the comparison this way to avoid a compile-time warning. */
5048 else if (- normalizep == STORE_FLAG_VALUE)
5049 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5051 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5052 it hard to use a value of just the sign bit due to ANSI integer
5053 constant typing rules. */
5054 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5055 && (STORE_FLAG_VALUE
5056 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5057 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5058 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5059 normalizep == 1);
5060 else
5062 gcc_assert (STORE_FLAG_VALUE & 1);
5064 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5065 if (normalizep == -1)
5066 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5069 /* If we were converting to a smaller mode, do the conversion now. */
5070 if (target_mode != result_mode)
5072 convert_move (target, op0, 0);
5073 return target;
5075 else
5076 return op0;
5080 /* A subroutine of emit_store_flag only including "tricks" that do not
5081 need a recursive call. These are kept separate to avoid infinite
5082 loops. */
5084 static rtx
5085 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5086 enum machine_mode mode, int unsignedp, int normalizep,
5087 enum machine_mode target_mode)
5089 rtx subtarget;
5090 enum insn_code icode;
5091 enum machine_mode compare_mode;
5092 enum mode_class mclass;
5093 enum rtx_code scode;
5094 rtx tem;
5096 if (unsignedp)
5097 code = unsigned_condition (code);
5098 scode = swap_condition (code);
5100 /* If one operand is constant, make it the second one. Only do this
5101 if the other operand is not constant as well. */
5103 if (swap_commutative_operands_p (op0, op1))
5105 tem = op0;
5106 op0 = op1;
5107 op1 = tem;
5108 code = swap_condition (code);
5111 if (mode == VOIDmode)
5112 mode = GET_MODE (op0);
5114 /* For some comparisons with 1 and -1, we can convert this to
5115 comparisons with zero. This will often produce more opportunities for
5116 store-flag insns. */
5118 switch (code)
5120 case LT:
5121 if (op1 == const1_rtx)
5122 op1 = const0_rtx, code = LE;
5123 break;
5124 case LE:
5125 if (op1 == constm1_rtx)
5126 op1 = const0_rtx, code = LT;
5127 break;
5128 case GE:
5129 if (op1 == const1_rtx)
5130 op1 = const0_rtx, code = GT;
5131 break;
5132 case GT:
5133 if (op1 == constm1_rtx)
5134 op1 = const0_rtx, code = GE;
5135 break;
5136 case GEU:
5137 if (op1 == const1_rtx)
5138 op1 = const0_rtx, code = NE;
5139 break;
5140 case LTU:
5141 if (op1 == const1_rtx)
5142 op1 = const0_rtx, code = EQ;
5143 break;
5144 default:
5145 break;
5148 /* If we are comparing a double-word integer with zero or -1, we can
5149 convert the comparison into one involving a single word. */
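   /* For instance (illustrative note, added, not in the original): on a
      32-bit target a double-word value X with words HI:LO satisfies
        X == 0   <=>  (LO | HI) == 0,
        X == -1  <=>  (LO & HI) == -1,
        X < 0    <=>  HI < 0,
      which is exactly what the two branches below test.  */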
5150 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5151 && GET_MODE_CLASS (mode) == MODE_INT
5152 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5154 if ((code == EQ || code == NE)
5155 && (op1 == const0_rtx || op1 == constm1_rtx))
5157 rtx op00, op01;
5159 /* Do a logical OR or AND of the two words and compare the
5160 result. */
5161 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5162 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5163 tem = expand_binop (word_mode,
5164 op1 == const0_rtx ? ior_optab : and_optab,
5165 op00, op01, NULL_RTX, unsignedp,
5166 OPTAB_DIRECT);
5168 if (tem != 0)
5169 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5170 unsignedp, normalizep);
5172 else if ((code == LT || code == GE) && op1 == const0_rtx)
5174 rtx op0h;
5176 /* If testing the sign bit, we can just test the high word. */
5177 op0h = simplify_gen_subreg (word_mode, op0, mode,
5178 subreg_highpart_offset (word_mode,
5179 mode));
5180 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5181 unsignedp, normalizep);
5183 else
5184 tem = NULL_RTX;
5186 if (tem)
5188 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5189 return tem;
5190 if (!target)
5191 target = gen_reg_rtx (target_mode);
5193 convert_move (target, tem,
5194 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5195 & ((HOST_WIDE_INT) 1
5196 << (GET_MODE_BITSIZE (word_mode) -1))));
5197 return target;
5201 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5202 complement of A (for GE) and shifting the sign bit to the low bit. */
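   /* E.g. (illustrative, added): for a 32-bit A,
        (A < 0)  is  (unsigned) A >> 31,
        (A >= 0) is  (unsigned) ~A >> 31,
      i.e. at most a one's complement plus one logical shift; the arithmetic
      shift variant yields 0/-1 instead of 0/1 when NORMALIZEP is -1.  */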
5203 if (op1 == const0_rtx && (code == LT || code == GE)
5204 && GET_MODE_CLASS (mode) == MODE_INT
5205 && (normalizep || STORE_FLAG_VALUE == 1
5206 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5207 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5208 == ((unsigned HOST_WIDE_INT) 1
5209 << (GET_MODE_BITSIZE (mode) - 1))))))
5211 subtarget = target;
5213 if (!target)
5214 target_mode = mode;
5216 /* If the result is to be wider than OP0, it is best to convert it
5217 first. If it is to be narrower, it is *incorrect* to convert it
5218 first. */
5219 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5221 op0 = convert_modes (target_mode, mode, op0, 0);
5222 mode = target_mode;
5225 if (target_mode != mode)
5226 subtarget = 0;
5228 if (code == GE)
5229 op0 = expand_unop (mode, one_cmpl_optab, op0,
5230 ((STORE_FLAG_VALUE == 1 || normalizep)
5231 ? 0 : subtarget), 0);
5233 if (STORE_FLAG_VALUE == 1 || normalizep)
5234 /* If we are supposed to produce a 0/1 value, we want to do
5235 a logical shift from the sign bit to the low-order bit; for
5236 a -1/0 value, we do an arithmetic shift. */
5237 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5238 GET_MODE_BITSIZE (mode) - 1,
5239 subtarget, normalizep != -1);
5241 if (mode != target_mode)
5242 op0 = convert_modes (target_mode, mode, op0, 0);
5244 return op0;
5247 mclass = GET_MODE_CLASS (mode);
5248 for (compare_mode = mode; compare_mode != VOIDmode;
5249 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5251 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5252 icode = optab_handler (cstore_optab, optab_mode);
5253 if (icode != CODE_FOR_nothing)
5255 do_pending_stack_adjust ();
5256 tem = emit_cstore (target, icode, code, mode, compare_mode,
5257 unsignedp, op0, op1, normalizep, target_mode);
5258 if (tem)
5259 return tem;
5261 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5263 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5264 unsignedp, op1, op0, normalizep, target_mode);
5265 if (tem)
5266 return tem;
5268 break;
5272 return 0;
5275 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5276 and storing in TARGET. Normally return TARGET.
5277 Return 0 if that cannot be done.
5279 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5280 it is VOIDmode, they cannot both be CONST_INT.
5282 UNSIGNEDP is for the case where we have to widen the operands
5283 to perform the operation. It says to use zero-extension.
5285 NORMALIZEP is 1 if we should convert the result to be either zero
5286 or one. NORMALIZEP is -1 if we should convert the result to be
5287 either zero or -1. If NORMALIZEP is zero, the result will be left
5288 "raw" out of the scc insn. */
5291 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5292 enum machine_mode mode, int unsignedp, int normalizep)
5294 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5295 enum rtx_code rcode;
5296 rtx subtarget;
5297 rtx tem, last, trueval;
5299 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5300 target_mode);
5301 if (tem)
5302 return tem;
5304 /* If we reached here, we can't do this with a scc insn; however, there
5305 are some comparisons that can be done in other ways. Don't do any
5306 of these cases if branches are very cheap. */
5307 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5308 return 0;
5310 /* See what we need to return. We can only return a 1, -1, or the
5311 sign bit. */
5313 if (normalizep == 0)
5315 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5316 normalizep = STORE_FLAG_VALUE;
5318 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5319 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5320 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5322 else
5323 return 0;
5326 last = get_last_insn ();
5328 /* If optimizing, use different pseudo registers for each insn, instead
5329 of reusing the same pseudo. This leads to better CSE, but slows
5330 down the compiler, since there are more pseudos.  */
5331 subtarget = (!optimize
5332 && (target_mode == mode)) ? target : NULL_RTX;
5333 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5335 /* For floating-point comparisons, try the reverse comparison or try
5336 changing the "orderedness" of the comparison. */
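/* Rough illustration (conventions assumed for exposition): with
   STORE_FLAG_VALUE == 1 and NORMALIZEP == 1, UNORDERED can be computed
   as the scc for ORDERED followed by an XOR with 1; when converting a
   1/0 scc into a -1/0 result (or vice versa), adding NORMALIZEP to the
   reversed scc performs the inversion instead.  */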
5337 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5339 enum rtx_code first_code;
5340 bool and_them;
5342 rcode = reverse_condition_maybe_unordered (code);
5343 if (can_compare_p (rcode, mode, ccp_store_flag)
5344 && (code == ORDERED || code == UNORDERED
5345 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5346 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5348 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5349 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5351 /* For the reverse comparison, use either an addition or a XOR. */
5352 if (want_add
5353 && rtx_cost (GEN_INT (normalizep), PLUS,
5354 optimize_insn_for_speed_p ()) == 0)
5356 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5357 STORE_FLAG_VALUE, target_mode);
5358 if (tem)
5359 return expand_binop (target_mode, add_optab, tem,
5360 GEN_INT (normalizep),
5361 target, 0, OPTAB_WIDEN);
5363 else if (!want_add
5364 && rtx_cost (trueval, XOR,
5365 optimize_insn_for_speed_p ()) == 0)
5367 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5368 normalizep, target_mode);
5369 if (tem)
5370 return expand_binop (target_mode, xor_optab, tem, trueval,
5371 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5375 delete_insns_since (last);
5377 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5378 if (code == ORDERED || code == UNORDERED)
5379 return 0;
5381 and_them = split_comparison (code, mode, &first_code, &code);
5383 /* If there are no NaNs, the first comparison should always fall through.
5384 Effectively change the comparison to the other one. */
5385 if (!HONOR_NANS (mode))
5387 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5388 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5389 target_mode);
5392 #ifdef HAVE_conditional_move
5393 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5394 conditional move. */
5395 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5396 normalizep, target_mode);
5397 if (tem == 0)
5398 return 0;
5400 if (and_them)
5401 tem = emit_conditional_move (target, code, op0, op1, mode,
5402 tem, const0_rtx, GET_MODE (tem), 0);
5403 else
5404 tem = emit_conditional_move (target, code, op0, op1, mode,
5405 trueval, tem, GET_MODE (tem), 0);
5407 if (tem == 0)
5408 delete_insns_since (last);
5409 return tem;
5410 #else
5411 return 0;
5412 #endif
5415 /* The remaining tricks only apply to integer comparisons. */
5417 if (GET_MODE_CLASS (mode) != MODE_INT)
5418 return 0;
5420 /* If this is an equality comparison of integers, we can try to exclusive-or
5421 (or subtract) the two operands and use a recursive call to try the
5422 comparison with zero. Don't do any of these cases if branches are
5423 very cheap. */
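/* Rough illustration: a == b exactly when (a ^ b) == 0, and likewise
   when (a - b) == 0; e.g. 5 ^ 5 = 0 (equal) while 5 ^ 3 = 6 != 0
   (not equal).  The recursive call then only has to compare the
   combined value against zero.  */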
5425 if ((code == EQ || code == NE) && op1 != const0_rtx)
5427 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5428 OPTAB_WIDEN);
5430 if (tem == 0)
5431 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5432 OPTAB_WIDEN);
5433 if (tem != 0)
5434 tem = emit_store_flag (target, code, tem, const0_rtx,
5435 mode, unsignedp, normalizep);
5436 if (tem != 0)
5437 return tem;
5439 delete_insns_since (last);
5442 /* For integer comparisons, try the reverse comparison. However, for
5443 small X, if we'd have to extend it anyway, implementing "X != 0"
5444 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
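/* Rough illustration (assuming a 32-bit word; in general the shift
   count is the word width minus one): once X has been zero-extended it
   is non-negative, so for X = 200, -(int)X = -200 and
   -200 >> 31 (arithmetic) = -1, while for X = 0 the result is 0.  */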
5445 rcode = reverse_condition (code);
5446 if (can_compare_p (rcode, mode, ccp_store_flag)
5447 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5448 && code == NE
5449 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5450 && op1 == const0_rtx))
5452 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5453 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5455 /* Again, for the reverse comparison, use either an addition or a XOR. */
5456 if (want_add
5457 && rtx_cost (GEN_INT (normalizep), PLUS,
5458 optimize_insn_for_speed_p ()) == 0)
5460 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5461 STORE_FLAG_VALUE, target_mode);
5462 if (tem != 0)
5463 tem = expand_binop (target_mode, add_optab, tem,
5464 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5466 else if (!want_add
5467 && rtx_cost (trueval, XOR,
5468 optimize_insn_for_speed_p ()) == 0)
5470 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5471 normalizep, target_mode);
5472 if (tem != 0)
5473 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5474 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5477 if (tem != 0)
5478 return tem;
5479 delete_insns_since (last);
5482 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5483 the constant zero. Reject all other comparisons at this point. Only
5484 do LE and GT if branches are expensive since they are expensive on
5485 2-operand machines. */
5487 if (op1 != const0_rtx
5488 || (code != EQ && code != NE
5489 && (BRANCH_COST (optimize_insn_for_speed_p (),
5490 false) <= 1 || (code != LE && code != GT))))
5491 return 0;
5493 /* Try to put the result of the comparison in the sign bit. Assume we can't
5494 do the necessary operation below. */
5496 tem = 0;
5498 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5499 the sign bit set. */
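/* Rough illustration (8-bit values):
     A = -5:  0xfb | 0xfa = 0xfb   sign bit set,   so A <= 0
     A =  0:  0x00 | 0xff = 0xff   sign bit set,   so A <= 0
     A =  4:  0x04 | 0x03 = 0x07   sign bit clear, so A >  0  */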
5501 if (code == LE)
5503 /* This is destructive, so SUBTARGET can't be OP0. */
5504 if (rtx_equal_p (subtarget, op0))
5505 subtarget = 0;
5507 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5508 OPTAB_WIDEN);
5509 if (tem)
5510 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5511 OPTAB_WIDEN);
5514 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5515 number of bits in the mode of OP0, minus one. */
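/* Rough illustration (8-bit values, so the shift count is 7):
     A =  4:  ( 4 >> 7) -  4  =  0 - 4 = 0xfc   sign bit set,   so A >  0
     A =  0:  ( 0 >> 7) -  0  =  0              sign bit clear, so A <= 0
     A = -3:  (-3 >> 7) - -3  = -1 + 3 = 2      sign bit clear, so A <= 0  */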
5517 if (code == GT)
5519 if (rtx_equal_p (subtarget, op0))
5520 subtarget = 0;
5522 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5523 GET_MODE_BITSIZE (mode) - 1,
5524 subtarget, 0);
5525 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5526 OPTAB_WIDEN);
5529 if (code == EQ || code == NE)
5531 /* For EQ or NE, one way to do the comparison is to apply an operation
5532 that converts the operand into a positive number if it is nonzero
5533 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5534 for NE we negate. This puts the result in the sign bit. Then we
5535 normalize with a shift, if needed.
5537 Two operations that can do the above actions are ABS and FFS, so try
5538 them. If that doesn't work, and MODE is smaller than a full word,
5539 we can use zero-extension to the wider mode (an unsigned conversion)
5540 as the operation. */
5542 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5543 that is compensated by the subsequent overflow when subtracting
5544 one / negating. */
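/* Rough illustration: for A = 5, ABS gives 5, so EQ computes 5 - 1 = 4
   (sign clear, false) and NE computes -5 (sign set, true); for A = 0,
   EQ computes 0 - 1 = -1 (sign set, true) and NE computes 0 (sign
   clear, false).  For A = INT_MIN, ABS wraps to INT_MIN, but
   subtracting 1 then wraps to INT_MAX, so the sign bit still comes out
   clear as required.  */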
5546 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5547 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5548 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5549 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5550 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5552 tem = convert_modes (word_mode, mode, op0, 1);
5553 mode = word_mode;
5556 if (tem != 0)
5558 if (code == EQ)
5559 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5560 0, OPTAB_WIDEN);
5561 else
5562 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5565 /* If we couldn't do it that way, for NE we can "or" the two's complement
5566 of the value with itself. For EQ, we take the one's complement of
5567 that "or", which is an extra insn, so we only handle EQ if branches
5568 are expensive. */
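/* Rough illustration (8-bit values): for A = 5, -A | A = 0xfb | 0x05
   = 0xff (sign set, so NE is true); for A = 0 the result is 0 (sign
   clear, NE false).  Complementing that "or" flips the sign bit and
   yields the EQ result.  */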
5570 if (tem == 0
5571 && (code == NE
5572 || BRANCH_COST (optimize_insn_for_speed_p (),
5573 false) > 1))
5575 if (rtx_equal_p (subtarget, op0))
5576 subtarget = 0;
5578 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5579 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5580 OPTAB_WIDEN);
5582 if (tem && code == EQ)
5583 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5587 if (tem && normalizep)
5588 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5589 GET_MODE_BITSIZE (mode) - 1,
5590 subtarget, normalizep == 1);
5592 if (tem)
5594 if (!target)
5596 else if (GET_MODE (tem) != target_mode)
5598 convert_move (target, tem, 0);
5599 tem = target;
5601 else if (!subtarget)
5603 emit_move_insn (target, tem);
5604 tem = target;
5607 else
5608 delete_insns_since (last);
5610 return tem;
5613 /* Like emit_store_flag, but always succeeds. */
5615 rtx
5616 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5617 enum machine_mode mode, int unsignedp, int normalizep)
5619 rtx tem, label;
5620 rtx trueval, falseval;
5622 /* First see if emit_store_flag can do the job. */
5623 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5624 if (tem != 0)
5625 return tem;
5627 if (!target)
5628 target = gen_reg_rtx (word_mode);
5630 /* If this failed, we have to do this with set/compare/jump/set code.
5631 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
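/* Rough sketch of the sequence emitted by the fast path below
   (pseudo-code):  if (target == 0) goto L;  target = trueval;  L:  */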
5632 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5633 if (code == NE
5634 && GET_MODE_CLASS (mode) == MODE_INT
5635 && REG_P (target)
5636 && op0 == target
5637 && op1 == const0_rtx)
5639 label = gen_label_rtx ();
5640 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5641 mode, NULL_RTX, NULL_RTX, label, -1);
5642 emit_move_insn (target, trueval);
5643 emit_label (label);
5644 return target;
5647 if (!REG_P (target)
5648 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5649 target = gen_reg_rtx (GET_MODE (target));
5651 /* Jump in the right direction if the target cannot implement CODE
5652 but can jump on its reverse condition. */
5653 falseval = const0_rtx;
5654 if (! can_compare_p (code, mode, ccp_jump)
5655 && (! FLOAT_MODE_P (mode)
5656 || code == ORDERED || code == UNORDERED
5657 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5658 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5660 enum rtx_code rcode;
5661 if (FLOAT_MODE_P (mode))
5662 rcode = reverse_condition_maybe_unordered (code);
5663 else
5664 rcode = reverse_condition (code);
5666 /* Canonicalize to UNORDERED for the libcall. */
5667 if (can_compare_p (rcode, mode, ccp_jump)
5668 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5670 falseval = trueval;
5671 trueval = const0_rtx;
5672 code = rcode;
5676 emit_move_insn (target, trueval);
5677 label = gen_label_rtx ();
5678 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5679 NULL_RTX, label, -1);
5681 emit_move_insn (target, falseval);
5682 emit_label (label);
5684 return target;
5687 /* Perform a possibly multi-word comparison and conditional jump to LABEL
5688 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5689 now a thin wrapper around do_compare_rtx_and_jump. */
5691 static void
5692 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5693 rtx label)
5695 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5696 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5697 NULL_RTX, NULL_RTX, label, -1);