PR tree-optimization/82929
official-gcc.git: gcc/gimple-ssa-store-merging.c
1 /* GIMPLE store merging pass.
2 Copyright (C) 2016-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of this pass is to combine multiple memory stores to
22 consecutive memory locations, of constant values, values loaded from
23 memory, or bitwise operations on those, into fewer wider stores.
24 For example, if we have a sequence performing four byte stores to
25 consecutive memory locations:
26 [p ] := imm1;
27 [p + 1B] := imm2;
28 [p + 2B] := imm3;
29 [p + 3B] := imm4;
30 we can transform this into a single 4-byte store if the target supports it:
31 [p] := imm1:imm2:imm3:imm4 //concatenated immediates according to endianness.
33 Or:
34 [p ] := [q ];
35 [p + 1B] := [q + 1B];
36 [p + 2B] := [q + 2B];
37 [p + 3B] := [q + 3B];
38 if there is no overlap, this can be transformed into a single 4-byte
39 load followed by a single 4-byte store.
41 Or:
42 [p ] := [q ] ^ imm1;
43 [p + 1B] := [q + 1B] ^ imm2;
44 [p + 2B] := [q + 2B] ^ imm3;
45 [p + 3B] := [q + 3B] ^ imm4;
46 if there is no overlap, this can be transformed into a single 4-byte
47 load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49 The algorithm is applied to each basic block in three phases:
51 1) Scan through the basic block recording assignments to
52 destinations that can be expressed as a store to memory of a certain size
53 at a certain bit offset from expressions we can handle. For bit-fields
54 we also note the surrounding bit region, bits that could be stored in
55 a read-modify-write operation when storing the bit-field. Record store
56 chains to different bases in a hash_map (m_stores) and make sure to
57 terminate such chains when appropriate (for example when the stored
58 values get used subsequently).
59 These stores can be a result of structure element initializers, array stores
60 etc. A store_immediate_info object is recorded for every such store.
61 Record as many such assignments to a single base as possible until a
62 statement that interferes with the store sequence is encountered.
63 Each store has up to 2 operands, which can be an immediate constant
64 or a memory load, from which the value to be stored can be computed.
65 At most one of the operands can be a constant. The operands are recorded
66 in store_operand_info struct.
68 2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of
69 store_immediate_info objects) and coalesce contiguous stores into
70 merged_store_group objects. For bit-fields stores, we don't need to
71 require the stores to be contiguous, just their surrounding bit regions
72 have to be contiguous. If the expression being stored is different
73 between adjacent stores, such as one store storing a constant and
74 the following one storing a value loaded from memory, or if the loaded memory
75 objects are not adjacent, a new merged_store_group is created as well.
77 For example, given the stores:
78 [p ] := 0;
79 [p + 1B] := 1;
80 [p + 3B] := 0;
81 [p + 4B] := 1;
82 [p + 5B] := 0;
83 [p + 6B] := 0;
84 This phase would produce two merged_store_group objects, one recording the
85 two bytes stored in the memory region [p : p + 1] and another
86 recording the four bytes stored in the memory region [p + 3 : p + 6].
88 3) The merged_store_group objects produced in phase 2) are processed
89 to generate the sequence of wider stores that set the contiguous memory
90 regions to the sequence of bytes that corresponds to them. This may emit
91 multiple stores per store group to handle contiguous stores that are not
92 of a size that is a power of 2. For example it can try to emit a 40-bit
93 store as a 32-bit store followed by an 8-bit store.
94 We try to emit stores as wide as we can while respecting STRICT_ALIGNMENT or
95 TARGET_SLOW_UNALIGNED_ACCESS rules.
97 Note on endianness and example:
98 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
99 [p ] := 0x1234;
100 [p + 2B] := 0x5678;
101 [p + 4B] := 0xab;
102 [p + 5B] := 0xcd;
104 The memory layout for little-endian (LE) and big-endian (BE) must be:
105 p |LE|BE|
106 ---------
107 0 |34|12|
108 1 |12|34|
109 2 |78|56|
110 3 |56|78|
111 4 |ab|ab|
112 5 |cd|cd|
114 To merge these into a single 48-bit merged value 'val' in phase 2)
115 on little-endian we insert stores to higher (consecutive) bitpositions
116 into the most significant bits of the merged value.
117 The final merged value would be: 0xcdab56781234
119 For big-endian we insert stores to higher bitpositions into the least
120 significant bits of the merged value.
121 The final merged value would be: 0x12345678abcd
123 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
124 followed by a 16-bit store. Again, we must consider endianness when
125 breaking down the 48-bit value 'val' computed above.
126 For little-endian we emit:
127 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
128 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
130 Whereas for big-endian we emit:
131 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
132 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
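/* Illustrative sketch (not part of the original file; the helper below is
   hypothetical): the endianness example above, worked out with plain
   integer arithmetic.  The 48-bit constants are the merged values 'val'
   described in the comment, and the shifts/masks mirror how phase 3)
   splits them into a 32-bit store followed by a 16-bit store.  Standalone
   C, independent of the GCC internals included below.  */

static inline int
store_merging_endianness_example (void)
{
  /* Little-endian merged value and its split.  */
  unsigned long long val_le = 0xcdab56781234ULL;
  unsigned long long le_store32 = val_le & 0xffffffffULL;	      /* 0x56781234 */
  unsigned long long le_store16 = (val_le & 0xffff00000000ULL) >> 32; /* 0xcdab */

  /* Big-endian merged value and its split.  */
  unsigned long long val_be = 0x12345678abcdULL;
  unsigned long long be_store32 = (val_be & 0xffffffff0000ULL) >> 16; /* 0x12345678 */
  unsigned long long be_store16 = val_be & 0xffffULL;		      /* 0xabcd */

  return (le_store32 == 0x56781234ULL && le_store16 == 0xcdabULL
	  && be_store32 == 0x12345678ULL && be_store16 == 0xabcdULL);
}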
134 #include "config.h"
135 #include "system.h"
136 #include "coretypes.h"
137 #include "backend.h"
138 #include "tree.h"
139 #include "gimple.h"
140 #include "builtins.h"
141 #include "fold-const.h"
142 #include "tree-pass.h"
143 #include "ssa.h"
144 #include "gimple-pretty-print.h"
145 #include "alias.h"
146 #include "fold-const.h"
147 #include "params.h"
148 #include "print-tree.h"
149 #include "tree-hash-traits.h"
150 #include "gimple-iterator.h"
151 #include "gimplify.h"
152 #include "stor-layout.h"
153 #include "timevar.h"
154 #include "tree-cfg.h"
155 #include "tree-eh.h"
156 #include "target.h"
157 #include "gimplify-me.h"
158 #include "rtl.h"
159 #include "expr.h" /* For get_bit_range. */
160 #include "selftest.h"
162 /* The maximum size (in bits) of the stores this pass should generate. */
163 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
164 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
166 /* Limit to bound the number of aliasing checks for loads with the same
167 vuse as the corresponding store. */
168 #define MAX_STORE_ALIAS_CHECKS 64
170 namespace {
172 /* Struct recording one operand for the store, which is either a constant,
173 then VAL represents the constant and all the other fields are zero,
174 or a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
175 and the other fields also reflect the memory load. */
177 struct store_operand_info
179 tree val;
180 tree base_addr;
181 unsigned HOST_WIDE_INT bitsize;
182 unsigned HOST_WIDE_INT bitpos;
183 unsigned HOST_WIDE_INT bitregion_start;
184 unsigned HOST_WIDE_INT bitregion_end;
185 gimple *stmt;
186 bool bit_not_p;
187 store_operand_info ();
190 store_operand_info::store_operand_info ()
191 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
192 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
196 /* Struct recording the information about a single store of an immediate
197 to memory. These are created in the first phase and coalesced into
198 merged_store_group objects in the second phase. */
200 struct store_immediate_info
202 unsigned HOST_WIDE_INT bitsize;
203 unsigned HOST_WIDE_INT bitpos;
204 unsigned HOST_WIDE_INT bitregion_start;
205 /* This is one past the last bit of the bit region. */
206 unsigned HOST_WIDE_INT bitregion_end;
207 gimple *stmt;
208 unsigned int order;
209 /* INTEGER_CST for constant stores, MEM_REF for memory copy or
210 BIT_*_EXPR for logical bitwise operation. */
211 enum tree_code rhs_code;
212 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
213 bool bit_not_p;
214 /* True if ops have been swapped and thus ops[1] represents
215 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
216 bool ops_swapped_p;
217 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
218 just the first one. */
219 store_operand_info ops[2];
220 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
221 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
222 gimple *, unsigned int, enum tree_code, bool,
223 const store_operand_info &,
224 const store_operand_info &);
227 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
228 unsigned HOST_WIDE_INT bp,
229 unsigned HOST_WIDE_INT brs,
230 unsigned HOST_WIDE_INT bre,
231 gimple *st,
232 unsigned int ord,
233 enum tree_code rhscode,
234 bool bitnotp,
235 const store_operand_info &op0r,
236 const store_operand_info &op1r)
237 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
238 stmt (st), order (ord), rhs_code (rhscode), bit_not_p (bitnotp),
239 ops_swapped_p (false)
240 #if __cplusplus >= 201103L
241 , ops { op0r, op1r }
244 #else
246 ops[0] = op0r;
247 ops[1] = op1r;
249 #endif
251 /* Struct representing a group of stores to contiguous memory locations.
252 These are produced by the second phase (coalescing) and consumed in the
253 third phase that outputs the widened stores. */
255 struct merged_store_group
257 unsigned HOST_WIDE_INT start;
258 unsigned HOST_WIDE_INT width;
259 unsigned HOST_WIDE_INT bitregion_start;
260 unsigned HOST_WIDE_INT bitregion_end;
261 /* The size of the allocated memory for val and mask. */
262 unsigned HOST_WIDE_INT buf_size;
263 unsigned HOST_WIDE_INT align_base;
264 unsigned HOST_WIDE_INT load_align_base[2];
266 unsigned int align;
267 unsigned int load_align[2];
268 unsigned int first_order;
269 unsigned int last_order;
271 auto_vec<store_immediate_info *> stores;
272 /* We record the first and last original statements in the sequence because
273 we'll need their vuse/vdef and replacement position. It's easier to keep
274 track of them separately as 'stores' is reordered by apply_stores. */
275 gimple *last_stmt;
276 gimple *first_stmt;
277 unsigned char *val;
278 unsigned char *mask;
280 merged_store_group (store_immediate_info *);
281 ~merged_store_group ();
282 void merge_into (store_immediate_info *);
283 void merge_overlapping (store_immediate_info *);
284 bool apply_stores ();
285 private:
286 void do_merge (store_immediate_info *);
289 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
291 static void
292 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
294 if (!fd)
295 return;
297 for (unsigned int i = 0; i < len; i++)
298 fprintf (fd, "%x ", ptr[i]);
299 fprintf (fd, "\n");
302 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
303 bits between adjacent elements. AMNT should be within
304 [0, BITS_PER_UNIT).
305 Example, AMNT = 2:
306 00011111|11100000 << 2 = 01111111|10000000
307 PTR[1] | PTR[0] PTR[1] | PTR[0]. */
309 static void
310 shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt)
312 if (amnt == 0)
313 return;
315 unsigned char carry_over = 0U;
316 unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
317 unsigned char clear_mask = (~0U) << amnt;
319 for (unsigned int i = 0; i < sz; i++)
321 unsigned prev_carry_over = carry_over;
322 carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);
324 ptr[i] <<= amnt;
325 if (i != 0)
327 ptr[i] &= clear_mask;
328 ptr[i] |= prev_carry_over;
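/* Illustrative sketch (hypothetical helper, not part of the original file),
   assuming BITS_PER_UNIT == 8: the example from the comment above.
   PTR[0] = 0xe0 (11100000) and PTR[1] = 0x1f (00011111) shifted left by
   2 bits become PTR[0] = 0x80 (10000000) and PTR[1] = 0x7f (01111111).  */

static inline bool
example_shift_bytes_in_array (void)
{
  unsigned char buf[2] = { 0xe0, 0x1f };	/* PTR[0], PTR[1].  */
  shift_bytes_in_array (buf, 2, 2);
  return buf[0] == 0x80 && buf[1] == 0x7f;
}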
333 /* Like shift_bytes_in_array but for big-endian.
334 Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
335 bits between adjacent elements. AMNT should be within
336 [0, BITS_PER_UNIT).
337 Example, AMNT = 2:
338 00011111|11100000 >> 2 = 00000111|11111000
339 PTR[0] | PTR[1] PTR[0] | PTR[1]. */
341 static void
342 shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
343 unsigned int amnt)
345 if (amnt == 0)
346 return;
348 unsigned char carry_over = 0U;
349 unsigned char carry_mask = ~(~0U << amnt);
351 for (unsigned int i = 0; i < sz; i++)
353 unsigned prev_carry_over = carry_over;
354 carry_over = ptr[i] & carry_mask;
356 carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
357 ptr[i] >>= amnt;
358 ptr[i] |= prev_carry_over;
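/* Illustrative sketch (hypothetical helper, not part of the original file),
   assuming BITS_PER_UNIT == 8: the big-endian example from the comment
   above.  PTR[0] = 0x1f (00011111) and PTR[1] = 0xe0 (11100000) shifted
   right by 2 bits become PTR[0] = 0x07 (00000111) and
   PTR[1] = 0xf8 (11111000).  */

static inline bool
example_shift_bytes_in_array_right (void)
{
  unsigned char buf[2] = { 0x1f, 0xe0 };	/* PTR[0], PTR[1].  */
  shift_bytes_in_array_right (buf, 2, 2);
  return buf[0] == 0x07 && buf[1] == 0xf8;
}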
362 /* Clear out LEN bits starting from bit START in the byte array
363 PTR. This clears the bits to the *right* from START.
364 START must be within [0, BITS_PER_UNIT) and counts starting from
365 the least significant bit. */
367 static void
368 clear_bit_region_be (unsigned char *ptr, unsigned int start,
369 unsigned int len)
371 if (len == 0)
372 return;
373 /* Clear len bits to the right of start. */
374 else if (len <= start + 1)
376 unsigned char mask = (~(~0U << len));
377 mask = mask << (start + 1U - len);
378 ptr[0] &= ~mask;
380 else if (start != BITS_PER_UNIT - 1)
382 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
383 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
384 len - (start % BITS_PER_UNIT) - 1);
386 else if (start == BITS_PER_UNIT - 1
387 && len > BITS_PER_UNIT)
389 unsigned int nbytes = len / BITS_PER_UNIT;
390 memset (ptr, 0, nbytes);
391 if (len % BITS_PER_UNIT != 0)
392 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
393 len % BITS_PER_UNIT);
395 else
396 gcc_unreachable ();
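/* Illustrative sketch (hypothetical helper, not part of the original file),
   assuming BITS_PER_UNIT == 8: clearing 3 bits to the right of (and
   including) bit 7, i.e. the three most significant bits of the byte,
   turns 0xff into 0x1f.  */

static inline bool
example_clear_bit_region_be (void)
{
  unsigned char byte = 0xff;
  clear_bit_region_be (&byte, BITS_PER_UNIT - 1, 3);
  return byte == 0x1f;
}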
399 /* In the byte array PTR clear the bit region that starts at bit
400 START and is LEN bits wide.
401 For regions spanning multiple bytes do this recursively until we reach
402 zero LEN or a region contained within a single byte. */
404 static void
405 clear_bit_region (unsigned char *ptr, unsigned int start,
406 unsigned int len)
408 /* Degenerate base case. */
409 if (len == 0)
410 return;
411 else if (start >= BITS_PER_UNIT)
412 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
413 /* Second base case. */
414 else if ((start + len) <= BITS_PER_UNIT)
416 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
417 mask >>= BITS_PER_UNIT - (start + len);
419 ptr[0] &= ~mask;
421 return;
423 /* Clear most significant bits in a byte and proceed with the next byte. */
424 else if (start != 0)
426 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
427 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
429 /* Whole bytes need to be cleared. */
430 else if (start == 0 && len > BITS_PER_UNIT)
432 unsigned int nbytes = len / BITS_PER_UNIT;
433 /* We could recurse on each byte but we clear whole bytes, so a simple
434 memset will do. */
435 memset (ptr, '\0', nbytes);
436 /* Clear the remaining sub-byte region if there is one. */
437 if (len % BITS_PER_UNIT != 0)
438 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
440 else
441 gcc_unreachable ();
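/* Illustrative sketch (hypothetical helper, not part of the original file),
   assuming BITS_PER_UNIT == 8: clearing a 6-bit region that starts at
   bit 5 spans into the second byte, so { 0xff, 0xff } becomes
   { 0x1f, 0xf8 } (bits 5-7 of the first byte and bits 0-2 of the second
   byte are cleared).  */

static inline bool
example_clear_bit_region (void)
{
  unsigned char buf[2] = { 0xff, 0xff };
  clear_bit_region (buf, 5, 6);
  return buf[0] == 0x1f && buf[1] == 0xf8;
}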
444 /* Write BITLEN bits of EXPR to the byte array PTR at
445 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
446 Return true if the operation succeeded. */
448 static bool
449 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
450 unsigned int total_bytes)
452 unsigned int first_byte = bitpos / BITS_PER_UNIT;
453 tree tmp_int = expr;
454 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
455 || (bitpos % BITS_PER_UNIT)
456 || !int_mode_for_size (bitlen, 0).exists ());
458 if (!sub_byte_op_p)
459 return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0;
461 /* LITTLE-ENDIAN
462 We are writing a non byte-sized quantity or at a position that is not
463 at a byte boundary.
464 |--------|--------|--------| ptr + first_byte
466 xxx xxxxxxxx xxx< bp>
467 |______EXPR____|
469 First native_encode_expr EXPR into a temporary buffer and shift each
470 byte in the buffer by 'bp' (carrying the bits over as necessary).
471 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
472 <------bitlen---->< bp>
473 Then we clear the destination bits:
474 |---00000|00000000|000-----| ptr + first_byte
475 <-------bitlen--->< bp>
477 Finally we ORR the bytes of the shifted EXPR into the cleared region:
478 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
480 BIG-ENDIAN
481 We are writing a non byte-sized quantity or at a position that is not
482 at a byte boundary.
483 ptr + first_byte |--------|--------|--------|
485 <bp >xxx xxxxxxxx xxx
486 |_____EXPR_____|
488 First native_encode_expr EXPR into a temporary buffer and shift each
489 byte in the buffer to the right (carrying the bits over as necessary).
490 We shift by as much as needed to align the most significant bit of EXPR
491 with bitpos:
492 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
493 <---bitlen----> <bp ><-----bitlen----->
494 Then we clear the destination bits:
495 ptr + first_byte |-----000||00000000||00000---|
496 <bp ><-------bitlen----->
498 Finally we ORR the bytes of the shifted EXPR into the cleared region:
499 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
500 The awkwardness comes from the fact that bitpos is counted from the
501 most significant bit of a byte. */
503 /* We must be dealing with fixed-size data at this point, since the
504 total size is also fixed. */
505 fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
506 /* Allocate an extra byte so that we have space to shift into. */
507 unsigned int byte_size = GET_MODE_SIZE (mode) + 1;
508 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
509 memset (tmpbuf, '\0', byte_size);
510 /* The store detection code should only have allowed constants that are
511 accepted by native_encode_expr. */
512 if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
513 gcc_unreachable ();
515 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
516 bytes to write. This means it can write more than
517 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
518 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
519 bitlen and zero out the bits that are not relevant as well (that may
520 contain a sign bit due to sign-extension). */
521 unsigned int padding
522 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
523 /* On big-endian the padding is at the 'front' so just skip the initial
524 bytes. */
525 if (BYTES_BIG_ENDIAN)
526 tmpbuf += padding;
528 byte_size -= padding;
530 if (bitlen % BITS_PER_UNIT != 0)
532 if (BYTES_BIG_ENDIAN)
533 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
534 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
535 else
536 clear_bit_region (tmpbuf, bitlen,
537 byte_size * BITS_PER_UNIT - bitlen);
539 /* Left shifting relies on the last byte being clear if bitlen is
540 a multiple of BITS_PER_UNIT, which might not be clear if
541 there are padding bytes. */
542 else if (!BYTES_BIG_ENDIAN)
543 tmpbuf[byte_size - 1] = '\0';
545 /* Clear the bit region in PTR where the bits from TMPBUF will be
546 inserted into. */
547 if (BYTES_BIG_ENDIAN)
548 clear_bit_region_be (ptr + first_byte,
549 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
550 else
551 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
553 int shift_amnt;
554 int bitlen_mod = bitlen % BITS_PER_UNIT;
555 int bitpos_mod = bitpos % BITS_PER_UNIT;
557 bool skip_byte = false;
558 if (BYTES_BIG_ENDIAN)
560 /* BITPOS and BITLEN are exactly aligned and no shifting
561 is necessary. */
562 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
563 || (bitpos_mod == 0 && bitlen_mod == 0))
564 shift_amnt = 0;
565 /* |. . . . . . . .|
566 <bp > <blen >.
567 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
568 of the value until it aligns with 'bp' in the next byte over. */
569 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
571 shift_amnt = bitlen_mod + bitpos_mod;
572 skip_byte = bitlen_mod != 0;
574 /* |. . . . . . . .|
575 <----bp--->
576 <---blen---->.
577 Shift the value right within the same byte so it aligns with 'bp'. */
578 else
579 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
581 else
582 shift_amnt = bitpos % BITS_PER_UNIT;
584 /* Create the shifted version of EXPR. */
585 if (!BYTES_BIG_ENDIAN)
587 shift_bytes_in_array (tmpbuf, byte_size, shift_amnt);
588 if (shift_amnt == 0)
589 byte_size--;
591 else
593 gcc_assert (BYTES_BIG_ENDIAN);
594 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
595 /* If shifting right forced us to move into the next byte skip the now
596 empty byte. */
597 if (skip_byte)
599 tmpbuf++;
600 byte_size--;
604 /* Insert the bits from TMPBUF. */
605 for (unsigned int i = 0; i < byte_size; i++)
606 ptr[first_byte + i] |= tmpbuf[i];
608 return true;
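/* Illustrative sketch (hypothetical helper, not part of the original file):
   in the fully byte-aligned case encode_tree_to_bitpos reduces to
   native_encode_expr at the right byte offset.  Assuming a little-endian
   target with a 16-bit integer mode, encoding the constant 0x1234 with
   BITLEN 16 at BITPOS 8 into a 4-byte buffer leaves the buffer as
   { 0x00, 0x34, 0x12, 0x00 }.  */

static inline bool
example_encode_tree_to_bitpos (void)
{
  unsigned char buf[4] = { 0, 0, 0, 0 };
  tree cst = build_int_cst (short_unsigned_type_node, 0x1234);
  encode_tree_to_bitpos (cst, buf, 16, 8, 4);
  return buf[1] == 0x34 && buf[2] == 0x12;
}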
611 /* Sorting function for store_immediate_info objects.
612 Sorts them by bitposition. */
614 static int
615 sort_by_bitpos (const void *x, const void *y)
617 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
618 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
620 if ((*tmp)->bitpos < (*tmp2)->bitpos)
621 return -1;
622 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
623 return 1;
624 else
625 /* If they are the same let's use the order which is guaranteed to
626 be different. */
627 return (*tmp)->order - (*tmp2)->order;
630 /* Sorting function for store_immediate_info objects.
631 Sorts them by the order field. */
633 static int
634 sort_by_order (const void *x, const void *y)
636 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
637 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
639 if ((*tmp)->order < (*tmp2)->order)
640 return -1;
641 else if ((*tmp)->order > (*tmp2)->order)
642 return 1;
644 gcc_unreachable ();
647 /* Initialize a merged_store_group object from a store_immediate_info
648 object. */
650 merged_store_group::merged_store_group (store_immediate_info *info)
652 start = info->bitpos;
653 width = info->bitsize;
654 bitregion_start = info->bitregion_start;
655 bitregion_end = info->bitregion_end;
656 /* VAL has memory allocated for it in apply_stores once the group
657 width has been finalized. */
658 val = NULL;
659 mask = NULL;
660 unsigned HOST_WIDE_INT align_bitpos = 0;
661 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
662 &align, &align_bitpos);
663 align_base = start - align_bitpos;
664 for (int i = 0; i < 2; ++i)
666 store_operand_info &op = info->ops[i];
667 if (op.base_addr == NULL_TREE)
669 load_align[i] = 0;
670 load_align_base[i] = 0;
672 else
674 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
675 load_align_base[i] = op.bitpos - align_bitpos;
678 stores.create (1);
679 stores.safe_push (info);
680 last_stmt = info->stmt;
681 last_order = info->order;
682 first_stmt = last_stmt;
683 first_order = last_order;
684 buf_size = 0;
687 merged_store_group::~merged_store_group ()
689 if (val)
690 XDELETEVEC (val);
693 /* Helper method for merge_into and merge_overlapping to do
694 the common part. */
695 void
696 merged_store_group::do_merge (store_immediate_info *info)
698 bitregion_start = MIN (bitregion_start, info->bitregion_start);
699 bitregion_end = MAX (bitregion_end, info->bitregion_end);
701 unsigned int this_align;
702 unsigned HOST_WIDE_INT align_bitpos = 0;
703 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
704 &this_align, &align_bitpos);
705 if (this_align > align)
707 align = this_align;
708 align_base = info->bitpos - align_bitpos;
710 for (int i = 0; i < 2; ++i)
712 store_operand_info &op = info->ops[i];
713 if (!op.base_addr)
714 continue;
716 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
717 if (this_align > load_align[i])
719 load_align[i] = this_align;
720 load_align_base[i] = op.bitpos - align_bitpos;
724 gimple *stmt = info->stmt;
725 stores.safe_push (info);
726 if (info->order > last_order)
728 last_order = info->order;
729 last_stmt = stmt;
731 else if (info->order < first_order)
733 first_order = info->order;
734 first_stmt = stmt;
738 /* Merge a store recorded by INFO into this merged store.
739 The store is not overlapping with the existing recorded
740 stores. */
742 void
743 merged_store_group::merge_into (store_immediate_info *info)
745 unsigned HOST_WIDE_INT wid = info->bitsize;
746 /* Make sure we're inserting in the position we think we're inserting. */
747 gcc_assert (info->bitpos >= start + width
748 && info->bitregion_start <= bitregion_end);
750 width += wid;
751 do_merge (info);
754 /* Merge a store described by INFO into this merged store.
755 INFO overlaps in some way with the current store (i.e. it's not contiguous
756 which is handled by merged_store_group::merge_into). */
758 void
759 merged_store_group::merge_overlapping (store_immediate_info *info)
761 /* If the store extends the size of the group, extend the width. */
762 if (info->bitpos + info->bitsize > start + width)
763 width += info->bitpos + info->bitsize - (start + width);
765 do_merge (info);
768 /* Go through all the recorded stores in this group in program order and
769 apply their values to the VAL byte array to create the final merged
770 value. Return true if the operation succeeded. */
772 bool
773 merged_store_group::apply_stores ()
775 /* Make sure we have more than one store in the group, otherwise we cannot
776 merge anything. */
777 if (bitregion_start % BITS_PER_UNIT != 0
778 || bitregion_end % BITS_PER_UNIT != 0
779 || stores.length () == 1)
780 return false;
782 stores.qsort (sort_by_order);
783 store_immediate_info *info;
784 unsigned int i;
785 /* Create a buffer of a size that is 2 times the number of bytes we're
786 storing. That way native_encode_expr can write power-of-2-sized
787 chunks without overrunning. */
788 buf_size = 2 * ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
789 val = XNEWVEC (unsigned char, 2 * buf_size);
790 mask = val + buf_size;
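  /* VAL starts out all-zero and accumulates the encoded constants below;
     MASK starts out all-ones and has bits cleared for every position some
     store in the group writes to, so any mask bits that remain set mark
     padding that no store touches (used later by split_group).  */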
791 memset (val, 0, buf_size);
792 memset (mask, ~0U, buf_size);
794 FOR_EACH_VEC_ELT (stores, i, info)
796 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
797 tree cst = NULL_TREE;
798 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
799 cst = info->ops[0].val;
800 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
801 cst = info->ops[1].val;
802 bool ret = true;
803 if (cst)
804 ret = encode_tree_to_bitpos (cst, val, info->bitsize,
805 pos_in_buffer, buf_size);
806 if (cst && dump_file && (dump_flags & TDF_DETAILS))
808 if (ret)
810 fprintf (dump_file, "After writing ");
811 print_generic_expr (dump_file, cst, 0);
812 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
813 " at position %d the merged region contains:\n",
814 info->bitsize, pos_in_buffer);
815 dump_char_array (dump_file, val, buf_size);
817 else
818 fprintf (dump_file, "Failed to merge stores\n");
820 if (!ret)
821 return false;
822 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
823 if (BYTES_BIG_ENDIAN)
824 clear_bit_region_be (m, (BITS_PER_UNIT - 1
825 - (pos_in_buffer % BITS_PER_UNIT)),
826 info->bitsize);
827 else
828 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
830 return true;
833 /* Structure describing the store chain. */
835 struct imm_store_chain_info
837 /* Doubly-linked list that imposes an order on chain processing.
838 PNXP (prev's next pointer) points to the head of a list, or to
839 the next field in the previous chain in the list.
840 See pass_store_merging::m_stores_head for more rationale. */
841 imm_store_chain_info *next, **pnxp;
842 tree base_addr;
843 auto_vec<store_immediate_info *> m_store_info;
844 auto_vec<merged_store_group *> m_merged_store_groups;
846 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
847 : next (inspt), pnxp (&inspt), base_addr (b_a)
849 inspt = this;
850 if (next)
852 gcc_checking_assert (pnxp == next->pnxp);
853 next->pnxp = &next;
856 ~imm_store_chain_info ()
858 *pnxp = next;
859 if (next)
861 gcc_checking_assert (&next == next->pnxp);
862 next->pnxp = pnxp;
865 bool terminate_and_process_chain ();
866 bool coalesce_immediate_stores ();
867 bool output_merged_store (merged_store_group *);
868 bool output_merged_stores ();
871 const pass_data pass_data_tree_store_merging = {
872 GIMPLE_PASS, /* type */
873 "store-merging", /* name */
874 OPTGROUP_NONE, /* optinfo_flags */
875 TV_GIMPLE_STORE_MERGING, /* tv_id */
876 PROP_ssa, /* properties_required */
877 0, /* properties_provided */
878 0, /* properties_destroyed */
879 0, /* todo_flags_start */
880 TODO_update_ssa, /* todo_flags_finish */
883 class pass_store_merging : public gimple_opt_pass
885 public:
886 pass_store_merging (gcc::context *ctxt)
887 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
891 /* Pass not supported for PDP-endianness, nor for insane hosts
892 or target character sizes where native_{encode,interpret}_expr
893 doesn't work properly. */
894 virtual bool
895 gate (function *)
897 return flag_store_merging
898 && WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN
899 && CHAR_BIT == 8
900 && BITS_PER_UNIT == 8;
903 virtual unsigned int execute (function *);
905 private:
906 hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;
908 /* Form a doubly-linked stack of the elements of m_stores, so that
909 we can iterate over them in a predictable way. Using this order
910 avoids extraneous differences in the compiler output just because
911 of tree pointer variations (e.g. different chains end up in
912 different positions of m_stores, so they are handled in different
913 orders, so they allocate or release SSA names in different
914 orders, and when they get reused, subsequent passes end up
915 getting different SSA names, which may ultimately change
916 decisions when going out of SSA). */
917 imm_store_chain_info *m_stores_head;
919 void process_store (gimple *);
920 bool terminate_and_process_all_chains ();
921 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
922 bool terminate_and_release_chain (imm_store_chain_info *);
923 }; // class pass_store_merging
925 /* Terminate and process all recorded chains. Return true if any changes
926 were made. */
928 bool
929 pass_store_merging::terminate_and_process_all_chains ()
931 bool ret = false;
932 while (m_stores_head)
933 ret |= terminate_and_release_chain (m_stores_head);
934 gcc_assert (m_stores.elements () == 0);
935 gcc_assert (m_stores_head == NULL);
937 return ret;
940 /* Terminate all chains that are affected by the statement STMT.
941 CHAIN_INFO is the chain we should ignore from the checks if
942 non-NULL. */
944 bool
945 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
946 **chain_info,
947 gimple *stmt)
949 bool ret = false;
951 /* If the statement doesn't touch memory it can't alias. */
952 if (!gimple_vuse (stmt))
953 return false;
955 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
956 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
958 next = cur->next;
960 /* We already checked all the stores in chain_info and terminated the
961 chain if necessary. Skip it here. */
962 if (chain_info && *chain_info == cur)
963 continue;
965 store_immediate_info *info;
966 unsigned int i;
967 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
969 tree lhs = gimple_assign_lhs (info->stmt);
970 if (ref_maybe_used_by_stmt_p (stmt, lhs)
971 || stmt_may_clobber_ref_p (stmt, lhs)
972 || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
974 if (dump_file && (dump_flags & TDF_DETAILS))
976 fprintf (dump_file, "stmt causes chain termination:\n");
977 print_gimple_stmt (dump_file, stmt, 0);
979 terminate_and_release_chain (cur);
980 ret = true;
981 break;
986 return ret;
989 /* Helper function. Terminate the recorded chain CHAIN_INFO storing to its
990 base object. Return true if the merging and output was successful. The m_stores
991 entry is removed after the processing in any case. */
993 bool
994 pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
996 bool ret = chain_info->terminate_and_process_chain ();
997 m_stores.remove (chain_info->base_addr);
998 delete chain_info;
999 return ret;
1002 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
1003 may clobber REF. FIRST and LAST must be in the same basic block and
1004 have non-NULL vdef. */
1006 bool
1007 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
1009 ao_ref r;
1010 ao_ref_init (&r, ref);
1011 unsigned int count = 0;
1012 tree vop = gimple_vdef (last);
1013 gimple *stmt;
1015 gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
1018 stmt = SSA_NAME_DEF_STMT (vop);
1019 if (stmt_may_clobber_ref_p_1 (stmt, &r))
1020 return true;
1021 /* Avoid quadratic compile time by bounding the number of checks
1022 we perform. */
1023 if (++count > MAX_STORE_ALIAS_CHECKS)
1024 return true;
1025 vop = gimple_vuse (stmt);
1027 while (stmt != first);
1028 return false;
1031 /* Return true if INFO->ops[IDX] is mergeable with the
1032 corresponding loads already in MERGED_STORE group.
1033 BASE_ADDR is the base address of the whole store group. */
1035 bool
1036 compatible_load_p (merged_store_group *merged_store,
1037 store_immediate_info *info,
1038 tree base_addr, int idx)
1040 store_immediate_info *infof = merged_store->stores[0];
1041 if (!info->ops[idx].base_addr
1042 || info->ops[idx].bit_not_p != infof->ops[idx].bit_not_p
1043 || (info->ops[idx].bitpos - infof->ops[idx].bitpos
1044 != info->bitpos - infof->bitpos)
1045 || !operand_equal_p (info->ops[idx].base_addr,
1046 infof->ops[idx].base_addr, 0))
1047 return false;
1049 store_immediate_info *infol = merged_store->stores.last ();
1050 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
1051 /* In this case all vuses should be the same, e.g.
1052 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
1054 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
1055 and we can emit the coalesced load next to any of those loads. */
1056 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
1057 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
1058 return true;
1060 /* Otherwise, at least for now require that the load has the same
1061 vuse as the store. See following examples. */
1062 if (gimple_vuse (info->stmt) != load_vuse)
1063 return false;
1065 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
1066 || (infof != infol
1067 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
1068 return false;
1070 /* If the load is from the same location as the store, already
1071 the construction of the immediate chain info guarantees no intervening
1072 stores, so no further checks are needed. Example:
1073 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
1074 if (info->ops[idx].bitpos == info->bitpos
1075 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
1076 return true;
1078 /* Otherwise, we need to punt if any of the loads can be clobbered by any
1079 of the stores in the group, or any other stores in between those.
1080 Previous calls to compatible_load_p ensured that for all the
1081 merged_store->stores IDX loads, no stmts starting with
1082 merged_store->first_stmt and ending right before merged_store->last_stmt
1083 clobbers those loads. */
1084 gimple *first = merged_store->first_stmt;
1085 gimple *last = merged_store->last_stmt;
1086 unsigned int i;
1087 store_immediate_info *infoc;
1088 /* The stores are sorted by increasing store bitpos, so if info->stmt store
1089 comes before the so far first load, we'll be changing
1090 merged_store->first_stmt. In that case we need to give up if
1091 any of the earlier processed loads are clobbered by the stmts in the new
1092 range. */
1093 if (info->order < merged_store->first_order)
1095 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
1096 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
1097 return false;
1098 first = info->stmt;
1100 /* Similarly, we could change merged_store->last_stmt, so ensure
1101 in that case no stmts in the new range clobber any of the earlier
1102 processed loads. */
1103 else if (info->order > merged_store->last_order)
1105 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
1106 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
1107 return false;
1108 last = info->stmt;
1110 /* And finally, we'd be adding a new load to the set, ensure it isn't
1111 clobbered in the new range. */
1112 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
1113 return false;
1115 /* Otherwise, we are looking for:
1116 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
1118 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
1119 return true;
1122 /* Go through the candidate stores recorded in m_store_info and merge them
1123 into merged_store_group objects recorded into m_merged_store_groups
1124 representing the widened stores. Return true if coalescing was successful
1125 and the number of widened stores is fewer than the original number
1126 of stores. */
1128 bool
1129 imm_store_chain_info::coalesce_immediate_stores ()
1131 /* Anything less can't be processed. */
1132 if (m_store_info.length () < 2)
1133 return false;
1135 if (dump_file && (dump_flags & TDF_DETAILS))
1136 fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n",
1137 m_store_info.length ());
1139 store_immediate_info *info;
1140 unsigned int i;
1142 /* Order the stores by the bitposition they write to. */
1143 m_store_info.qsort (sort_by_bitpos);
1145 info = m_store_info[0];
1146 merged_store_group *merged_store = new merged_store_group (info);
1148 FOR_EACH_VEC_ELT (m_store_info, i, info)
1150 if (dump_file && (dump_flags & TDF_DETAILS))
1152 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
1153 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n",
1154 i, info->bitsize, info->bitpos);
1155 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
1156 fprintf (dump_file, "\n------------\n");
1159 if (i == 0)
1160 continue;
1162 /* |---store 1---|
1163 |---store 2---|
1164 Overlapping stores. */
1165 unsigned HOST_WIDE_INT start = info->bitpos;
1166 if (IN_RANGE (start, merged_store->start,
1167 merged_store->start + merged_store->width - 1))
1169 /* Only allow overlapping stores of constants. */
1170 if (info->rhs_code == INTEGER_CST
1171 && merged_store->stores[0]->rhs_code == INTEGER_CST)
1173 merged_store->merge_overlapping (info);
1174 continue;
1177 /* |---store 1---||---store 2---|
1178 This store is consecutive to the previous one.
1179 Merge it into the current store group. There can be gaps in between
1180 the stores, but there can't be gaps in between bitregions. */
1181 else if (info->bitregion_start <= merged_store->bitregion_end
1182 && info->rhs_code == merged_store->stores[0]->rhs_code
1183 && info->bit_not_p == merged_store->stores[0]->bit_not_p)
1185 store_immediate_info *infof = merged_store->stores[0];
1187 /* All the rhs_code ops that take 2 operands are commutative;
1188 swap the operands if it could make them compatible. */
1189 if (infof->ops[0].base_addr
1190 && infof->ops[1].base_addr
1191 && info->ops[0].base_addr
1192 && info->ops[1].base_addr
1193 && (info->ops[1].bitpos - infof->ops[0].bitpos
1194 == info->bitpos - infof->bitpos)
1195 && operand_equal_p (info->ops[1].base_addr,
1196 infof->ops[0].base_addr, 0))
1198 std::swap (info->ops[0], info->ops[1]);
1199 info->ops_swapped_p = true;
1201 if ((!infof->ops[0].base_addr
1202 || compatible_load_p (merged_store, info, base_addr, 0))
1203 && (!infof->ops[1].base_addr
1204 || compatible_load_p (merged_store, info, base_addr, 1)))
1206 merged_store->merge_into (info);
1207 continue;
1211 /* |---store 1---| <gap> |---store 2---|.
1212 Gap between stores or the rhs not compatible. Start a new group. */
1214 /* Try to apply all the stores recorded for the group to determine
1215 the bitpattern they write and discard it if that fails.
1216 This will also reject single-store groups. */
1217 if (!merged_store->apply_stores ())
1218 delete merged_store;
1219 else
1220 m_merged_store_groups.safe_push (merged_store);
1222 merged_store = new merged_store_group (info);
1225 /* Record or discard the last store group. */
1226 if (!merged_store->apply_stores ())
1227 delete merged_store;
1228 else
1229 m_merged_store_groups.safe_push (merged_store);
1231 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
1232 bool success
1233 = !m_merged_store_groups.is_empty ()
1234 && m_merged_store_groups.length () < m_store_info.length ();
1236 if (success && dump_file)
1237 fprintf (dump_file, "Coalescing successful!\n"
1238 "Merged into %u stores\n",
1239 m_merged_store_groups.length ());
1241 return success;
1244 /* Return the type to use for the merged stores or loads described by STMTS.
1245 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
1246 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
1247 of the MEM_REFs if any. */
1249 static tree
1250 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
1251 unsigned short *cliquep, unsigned short *basep)
1253 gimple *stmt;
1254 unsigned int i;
1255 tree type = NULL_TREE;
1256 tree ret = NULL_TREE;
1257 *cliquep = 0;
1258 *basep = 0;
1260 FOR_EACH_VEC_ELT (stmts, i, stmt)
1262 tree ref = is_load ? gimple_assign_rhs1 (stmt)
1263 : gimple_assign_lhs (stmt);
1264 tree type1 = reference_alias_ptr_type (ref);
1265 tree base = get_base_address (ref);
1267 if (i == 0)
1269 if (TREE_CODE (base) == MEM_REF)
1271 *cliquep = MR_DEPENDENCE_CLIQUE (base);
1272 *basep = MR_DEPENDENCE_BASE (base);
1274 ret = type = type1;
1275 continue;
1277 if (!alias_ptr_types_compatible_p (type, type1))
1278 ret = ptr_type_node;
1279 if (TREE_CODE (base) != MEM_REF
1280 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
1281 || *basep != MR_DEPENDENCE_BASE (base))
1283 *cliquep = 0;
1284 *basep = 0;
1287 return ret;
1290 /* Return the location_t information we can find among the statements
1291 in STMTS. */
1293 static location_t
1294 get_location_for_stmts (vec<gimple *> &stmts)
1296 gimple *stmt;
1297 unsigned int i;
1299 FOR_EACH_VEC_ELT (stmts, i, stmt)
1300 if (gimple_has_location (stmt))
1301 return gimple_location (stmt);
1303 return UNKNOWN_LOCATION;
1306 /* Used to describe a store resulting from splitting a wide store into smaller
1307 regularly-sized stores in split_group. */
1309 struct split_store
1311 unsigned HOST_WIDE_INT bytepos;
1312 unsigned HOST_WIDE_INT size;
1313 unsigned HOST_WIDE_INT align;
1314 auto_vec<store_immediate_info *> orig_stores;
1315 /* True if there is a single orig stmt covering the whole split store. */
1316 bool orig;
1317 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1318 unsigned HOST_WIDE_INT);
1321 /* Simple constructor. */
1323 split_store::split_store (unsigned HOST_WIDE_INT bp,
1324 unsigned HOST_WIDE_INT sz,
1325 unsigned HOST_WIDE_INT al)
1326 : bytepos (bp), size (sz), align (al), orig (false)
1328 orig_stores.create (0);
1331 /* Record all stores in GROUP that write to the region that starts at BITPOS
1332 and is BITSIZE bits wide. Record infos for such statements in STORES if
1333 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
1334 if there is exactly one original store in the range. */
1336 static store_immediate_info *
1337 find_constituent_stores (struct merged_store_group *group,
1338 vec<store_immediate_info *> *stores,
1339 unsigned int *first,
1340 unsigned HOST_WIDE_INT bitpos,
1341 unsigned HOST_WIDE_INT bitsize)
1343 store_immediate_info *info, *ret = NULL;
1344 unsigned int i;
1345 bool second = false;
1346 bool update_first = true;
1347 unsigned HOST_WIDE_INT end = bitpos + bitsize;
1348 for (i = *first; group->stores.iterate (i, &info); ++i)
1350 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
1351 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
1352 if (stmt_end <= bitpos)
1354 /* BITPOS passed to this function never decreases from within the
1355 same split_group call, so optimize and don't scan info records
1356 which are known to end before or at BITPOS next time.
1357 Only do it if all stores before this one also pass this. */
1358 if (update_first)
1359 *first = i + 1;
1360 continue;
1362 else
1363 update_first = false;
1365 /* The stores in GROUP are ordered by bitposition so if we're past
1366 the region for this group return early. */
1367 if (stmt_start >= end)
1368 return ret;
1370 if (stores)
1372 stores->safe_push (info);
1373 if (ret)
1375 ret = NULL;
1376 second = true;
1379 else if (ret)
1380 return NULL;
1381 if (!second)
1382 ret = info;
1384 return ret;
1387 /* Return how many SSA_NAMEs used to compute the value to store in the INFO
1388 store have multiple uses. If any SSA_NAME has multiple uses, also
1389 count statements needed to compute it. */
1391 static unsigned
1392 count_multiple_uses (store_immediate_info *info)
1394 gimple *stmt = info->stmt;
1395 unsigned ret = 0;
1396 switch (info->rhs_code)
1398 case INTEGER_CST:
1399 return 0;
1400 case BIT_AND_EXPR:
1401 case BIT_IOR_EXPR:
1402 case BIT_XOR_EXPR:
1403 if (info->bit_not_p)
1405 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1406 ret = 1; /* Fall through below to return
1407 the BIT_NOT_EXPR stmt and then
1408 BIT_{AND,IOR,XOR}_EXPR and anything it
1409 uses. */
1410 else
1411 /* After this, stmt is the BIT_NOT_EXPR. */
1412 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1414 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1416 ret += 1 + info->ops[0].bit_not_p;
1417 if (info->ops[1].base_addr)
1418 ret += 1 + info->ops[1].bit_not_p;
1419 return ret + 1;
1421 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1422 /* stmt is now the BIT_*_EXPR. */
1423 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1424 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
1425 else if (info->ops[info->ops_swapped_p].bit_not_p)
1427 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1428 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
1429 ++ret;
1431 if (info->ops[1].base_addr == NULL_TREE)
1433 gcc_checking_assert (!info->ops_swapped_p);
1434 return ret;
1436 if (!has_single_use (gimple_assign_rhs2 (stmt)))
1437 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
1438 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
1440 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
1441 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
1442 ++ret;
1444 return ret;
1445 case MEM_REF:
1446 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1447 return 1 + info->ops[0].bit_not_p;
1448 else if (info->ops[0].bit_not_p)
1450 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1451 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1452 return 1;
1454 return 0;
1455 default:
1456 gcc_unreachable ();
1460 /* Split a merged store described by GROUP by populating the SPLIT_STORES
1461 vector (if non-NULL) with split_store structs describing the byte offset
1462 (from the base), the bit size and alignment of each store as well as the
1463 original statements involved in each such split group.
1464 This is to separate the splitting strategy from the statement
1465 building/emission/linking done in output_merged_store.
1466 Return number of new stores.
1467 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
1468 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
1469 If SPLIT_STORES is NULL, it is just a dry run to count number of
1470 new stores. */
1472 static unsigned int
1473 split_group (merged_store_group *group, bool allow_unaligned_store,
1474 bool allow_unaligned_load,
1475 vec<struct split_store *> *split_stores,
1476 unsigned *total_orig,
1477 unsigned *total_new)
1479 unsigned HOST_WIDE_INT pos = group->bitregion_start;
1480 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
1481 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
1482 unsigned HOST_WIDE_INT group_align = group->align;
1483 unsigned HOST_WIDE_INT align_base = group->align_base;
1484 unsigned HOST_WIDE_INT group_load_align = group_align;
1485 bool any_orig = false;
1487 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
1489 unsigned int ret = 0, first = 0;
1490 unsigned HOST_WIDE_INT try_pos = bytepos;
1491 group->stores.qsort (sort_by_bitpos);
1493 if (total_orig)
1495 unsigned int i;
1496 store_immediate_info *info = group->stores[0];
1498 total_new[0] = 0;
1499 total_orig[0] = 1; /* The orig store. */
1500 info = group->stores[0];
1501 if (info->ops[0].base_addr)
1502 total_orig[0] += 1 + info->ops[0].bit_not_p;
1503 if (info->ops[1].base_addr)
1504 total_orig[0] += 1 + info->ops[1].bit_not_p;
1505 switch (info->rhs_code)
1507 case BIT_AND_EXPR:
1508 case BIT_IOR_EXPR:
1509 case BIT_XOR_EXPR:
1510 if (info->bit_not_p)
1511 total_orig[0]++; /* The orig BIT_NOT_EXPR stmt. */
1512 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
1513 break;
1514 default:
1515 break;
1517 total_orig[0] *= group->stores.length ();
1519 FOR_EACH_VEC_ELT (group->stores, i, info)
1520 total_new[0] += count_multiple_uses (info);
1523 if (!allow_unaligned_load)
1524 for (int i = 0; i < 2; ++i)
1525 if (group->load_align[i])
1526 group_load_align = MIN (group_load_align, group->load_align[i]);
1528 while (size > 0)
1530 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
1531 && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
1533 /* Skip padding bytes. */
1534 ++try_pos;
1535 size -= BITS_PER_UNIT;
1536 continue;
1539 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
1540 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
1541 unsigned HOST_WIDE_INT align_bitpos
1542 = (try_bitpos - align_base) & (group_align - 1);
1543 unsigned HOST_WIDE_INT align = group_align;
1544 if (align_bitpos)
1545 align = least_bit_hwi (align_bitpos);
1546 if (!allow_unaligned_store)
1547 try_size = MIN (try_size, align);
1548 if (!allow_unaligned_load)
1550 /* If we can't do or don't want to do unaligned stores
1551 as well as loads, we need to take the loads into account
1552 as well. */
1553 unsigned HOST_WIDE_INT load_align = group_load_align;
1554 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
1555 if (align_bitpos)
1556 load_align = least_bit_hwi (align_bitpos);
1557 for (int i = 0; i < 2; ++i)
1558 if (group->load_align[i])
1560 align_bitpos = try_bitpos - group->stores[0]->bitpos;
1561 align_bitpos += group->stores[0]->ops[i].bitpos;
1562 align_bitpos -= group->load_align_base[i];
1563 align_bitpos &= (group_load_align - 1);
1564 if (align_bitpos)
1566 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
1567 load_align = MIN (load_align, a);
1570 try_size = MIN (try_size, load_align);
1572 store_immediate_info *info
1573 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
1574 if (info)
1576 /* If there is just one original statement for the range, see if
1577 we can just reuse the original store which could be even larger
1578 than try_size. */
1579 unsigned HOST_WIDE_INT stmt_end
1580 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
1581 info = find_constituent_stores (group, NULL, &first, try_bitpos,
1582 stmt_end - try_bitpos);
1583 if (info && info->bitpos >= try_bitpos)
1585 try_size = stmt_end - try_bitpos;
1586 goto found;
1590 /* Approximate store bitsize for the case when there are no padding
1591 bits. */
1592 while (try_size > size)
1593 try_size /= 2;
1594 /* Now look for whole padding bytes at the end of that bitsize. */
1595 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
1596 if (group->mask[try_pos - bytepos + nonmasked - 1]
1597 != (unsigned char) ~0U)
1598 break;
1599 if (nonmasked == 0)
1601 /* If entire try_size range is padding, skip it. */
1602 try_pos += try_size / BITS_PER_UNIT;
1603 size -= try_size;
1604 continue;
1606 /* Otherwise try to decrease try_size if second half, last 3 quarters
1607 etc. are padding. */
1608 nonmasked *= BITS_PER_UNIT;
1609 while (nonmasked <= try_size / 2)
1610 try_size /= 2;
1611 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
1613 /* Now look for whole padding bytes at the start of that bitsize. */
1614 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
1615 for (masked = 0; masked < try_bytesize; ++masked)
1616 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
1617 break;
1618 masked *= BITS_PER_UNIT;
1619 gcc_assert (masked < try_size);
1620 if (masked >= try_size / 2)
1622 while (masked >= try_size / 2)
1624 try_size /= 2;
1625 try_pos += try_size / BITS_PER_UNIT;
1626 size -= try_size;
1627 masked -= try_size;
1629 /* Need to recompute the alignment, so just retry at the new
1630 position. */
1631 continue;
1635 found:
1636 ++ret;
1638 if (split_stores)
1640 struct split_store *store
1641 = new split_store (try_pos, try_size, align);
1642 info = find_constituent_stores (group, &store->orig_stores,
1643 &first, try_bitpos, try_size);
1644 if (info
1645 && info->bitpos >= try_bitpos
1646 && info->bitpos + info->bitsize <= try_bitpos + try_size)
1648 store->orig = true;
1649 any_orig = true;
1651 split_stores->safe_push (store);
1654 try_pos += try_size / BITS_PER_UNIT;
1655 size -= try_size;
1658 if (total_orig)
1660 /* If we are reusing some original stores and any of the
1661 original SSA_NAMEs had multiple uses, we need to subtract
1662 those now before we add the new ones. */
1663 if (total_new[0] && any_orig)
1665 unsigned int i;
1666 struct split_store *store;
1667 FOR_EACH_VEC_ELT (*split_stores, i, store)
1668 if (store->orig)
1669 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
1671 total_new[0] += ret; /* The new store. */
1672 store_immediate_info *info = group->stores[0];
1673 if (info->ops[0].base_addr)
1674 total_new[0] += ret * (1 + info->ops[0].bit_not_p);
1675 if (info->ops[1].base_addr)
1676 total_new[0] += ret * (1 + info->ops[1].bit_not_p);
1677 switch (info->rhs_code)
1679 case BIT_AND_EXPR:
1680 case BIT_IOR_EXPR:
1681 case BIT_XOR_EXPR:
1682 if (info->bit_not_p)
1683 total_new[0] += ret; /* The new BIT_NOT_EXPR stmt. */
1684 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
1685 break;
1686 default:
1687 break;
1691 return ret;
1694 /* Given a merged store group GROUP output the widened version of it.
1695 The store chain is against the base object BASE.
1696 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
1697 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
1698 Make sure that the number of statements output is less than the number of
1699 original statements. If a better sequence is possible emit it and
1700 return true. */
1702 bool
1703 imm_store_chain_info::output_merged_store (merged_store_group *group)
1705 unsigned HOST_WIDE_INT start_byte_pos
1706 = group->bitregion_start / BITS_PER_UNIT;
1708 unsigned int orig_num_stmts = group->stores.length ();
1709 if (orig_num_stmts < 2)
1710 return false;
1712 auto_vec<struct split_store *, 32> split_stores;
1713 split_stores.create (0);
1714 bool allow_unaligned_store
1715 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
1716 bool allow_unaligned_load = allow_unaligned_store;
1717 if (allow_unaligned_store)
1719 /* If unaligned stores are allowed, see how many stores we'd emit
1720 for unaligned and how many stores we'd emit for aligned stores.
1721 Only use unaligned stores if that results in fewer stores than aligned. */
1722 unsigned aligned_cnt
1723 = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
1724 unsigned unaligned_cnt
1725 = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
1726 if (aligned_cnt <= unaligned_cnt)
1727 allow_unaligned_store = false;
1729 unsigned total_orig, total_new;
1730 split_group (group, allow_unaligned_store, allow_unaligned_load,
1731 &split_stores, &total_orig, &total_new);
1733 if (split_stores.length () >= orig_num_stmts)
1735 /* We didn't manage to reduce the number of statements. Bail out. */
1736 if (dump_file && (dump_flags & TDF_DETAILS))
1737 fprintf (dump_file, "Exceeded original number of stmts (%u)."
1738 " Not profitable to emit new sequence.\n",
1739 orig_num_stmts);
1740 return false;
1742 if (total_orig <= total_new)
1744 /* If the estimated number of new statements is not smaller than the
1745 estimated number of original statements, bail out too. */
1746 if (dump_file && (dump_flags & TDF_DETAILS))
1747 fprintf (dump_file, "Estimated number of original stmts (%u)"
1748 " not larger than estimated number of new"
1749 " stmts (%u).\n",
1750 total_orig, total_new);
1753 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
1754 gimple_seq seq = NULL;
1755 tree last_vdef, new_vuse;
1756 last_vdef = gimple_vdef (group->last_stmt);
1757 new_vuse = gimple_vuse (group->last_stmt);
1759 gimple *stmt = NULL;
1760 split_store *split_store;
1761 unsigned int i;
1762 auto_vec<gimple *, 32> orig_stmts;
1763 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &seq,
1764 is_gimple_mem_ref_addr, NULL_TREE);
1766 tree load_addr[2] = { NULL_TREE, NULL_TREE };
1767 gimple_seq load_seq[2] = { NULL, NULL };
1768 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
1769 for (int j = 0; j < 2; ++j)
1771 store_operand_info &op = group->stores[0]->ops[j];
1772 if (op.base_addr == NULL_TREE)
1773 continue;
1775 store_immediate_info *infol = group->stores.last ();
1776 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
1778 load_gsi[j] = gsi_for_stmt (op.stmt);
1779 load_addr[j]
1780 = force_gimple_operand_1 (unshare_expr (op.base_addr),
1781 &load_seq[j], is_gimple_mem_ref_addr,
1782 NULL_TREE);
1784 else if (operand_equal_p (base_addr, op.base_addr, 0))
1785 load_addr[j] = addr;
1786 else
1788 gimple_seq this_seq;
1789 load_addr[j]
1790 = force_gimple_operand_1 (unshare_expr (op.base_addr),
1791 &this_seq, is_gimple_mem_ref_addr,
1792 NULL_TREE);
1793 gimple_seq_add_seq_without_update (&seq, this_seq);
1797 FOR_EACH_VEC_ELT (split_stores, i, split_store)
1799 unsigned HOST_WIDE_INT try_size = split_store->size;
1800 unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
1801 unsigned HOST_WIDE_INT align = split_store->align;
1802 tree dest, src;
1803 location_t loc;
1804 if (split_store->orig)
1806 /* If there is just a single constituent store which covers
1807 the whole area, reuse its lhs and rhs directly. */
1808 gimple *orig_stmt = split_store->orig_stores[0]->stmt;
1809 dest = gimple_assign_lhs (orig_stmt);
1810 src = gimple_assign_rhs1 (orig_stmt);
1811 loc = gimple_location (orig_stmt);
1813 else
1815 store_immediate_info *info;
1816 unsigned short clique, base;
1817 unsigned int k;
1818 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
1819 orig_stmts.safe_push (info->stmt);
1820 tree offset_type
1821 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
1822 loc = get_location_for_stmts (orig_stmts);
1823 orig_stmts.truncate (0);
1825 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
1826 int_type = build_aligned_type (int_type, align);
1827 dest = fold_build2 (MEM_REF, int_type, addr,
1828 build_int_cst (offset_type, try_pos));
1829 if (TREE_CODE (dest) == MEM_REF)
1831 MR_DEPENDENCE_CLIQUE (dest) = clique;
1832 MR_DEPENDENCE_BASE (dest) = base;
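/* MASK has a set bit for every bit position in this chunk that no
   original store writes; if any such bits remain, the code further
   below turns the store into a read-modify-write so that they are
   preserved. */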
1835 tree mask
1836 = native_interpret_expr (int_type,
1837 group->mask + try_pos - start_byte_pos,
1838 group->buf_size);
1840 tree ops[2];
1841 for (int j = 0;
1842 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
1843 ++j)
1845 store_operand_info &op = split_store->orig_stores[0]->ops[j];
1846 if (op.base_addr)
1848 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
1849 orig_stmts.safe_push (info->ops[j].stmt);
1851 offset_type = get_alias_type_for_stmts (orig_stmts, true,
1852 &clique, &base);
1853 location_t load_loc = get_location_for_stmts (orig_stmts);
1854 orig_stmts.truncate (0);
1856 unsigned HOST_WIDE_INT load_align = group->load_align[j];
1857 unsigned HOST_WIDE_INT align_bitpos
1858 = (try_pos * BITS_PER_UNIT
1859 - split_store->orig_stores[0]->bitpos
1860 + op.bitpos) & (load_align - 1);
1861 if (align_bitpos)
1862 load_align = least_bit_hwi (align_bitpos);
1864 tree load_int_type
1865 = build_nonstandard_integer_type (try_size, UNSIGNED);
1866 load_int_type
1867 = build_aligned_type (load_int_type, load_align);
1869 unsigned HOST_WIDE_INT load_pos
1870 = (try_pos * BITS_PER_UNIT
1871 - split_store->orig_stores[0]->bitpos
1872 + op.bitpos) / BITS_PER_UNIT;
1873 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
1874 build_int_cst (offset_type, load_pos));
1875 if (TREE_CODE (ops[j]) == MEM_REF)
1877 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
1878 MR_DEPENDENCE_BASE (ops[j]) = base;
1880 if (!integer_zerop (mask))
1881 /* The load might read some bits (that will be masked off
1882 later on) from uninitialized memory; avoid
1883 -W*uninitialized warnings in that case. */
1884 TREE_NO_WARNING (ops[j]) = 1;
1886 stmt = gimple_build_assign (make_ssa_name (int_type),
1887 ops[j]);
1888 gimple_set_location (stmt, load_loc);
1889 if (gsi_bb (load_gsi[j]))
1891 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
1892 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
1894 else
1896 gimple_set_vuse (stmt, new_vuse);
1897 gimple_seq_add_stmt_without_update (&seq, stmt);
1899 ops[j] = gimple_assign_lhs (stmt);
1900 if (op.bit_not_p)
1902 stmt = gimple_build_assign (make_ssa_name (int_type),
1903 BIT_NOT_EXPR, ops[j]);
1904 gimple_set_location (stmt, load_loc);
1905 ops[j] = gimple_assign_lhs (stmt);
1907 if (gsi_bb (load_gsi[j]))
1908 gimple_seq_add_stmt_without_update (&load_seq[j],
1909 stmt);
1910 else
1911 gimple_seq_add_stmt_without_update (&seq, stmt);
1914 else
1915 ops[j] = native_interpret_expr (int_type,
1916 group->val + try_pos
1917 - start_byte_pos,
1918 group->buf_size);
1921 switch (split_store->orig_stores[0]->rhs_code)
1923 case BIT_AND_EXPR:
1924 case BIT_IOR_EXPR:
1925 case BIT_XOR_EXPR:
1926 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
1928 tree rhs1 = gimple_assign_rhs1 (info->stmt);
1929 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
1931 location_t bit_loc;
1932 bit_loc = get_location_for_stmts (orig_stmts);
1933 orig_stmts.truncate (0);
1935 stmt
1936 = gimple_build_assign (make_ssa_name (int_type),
1937 split_store->orig_stores[0]->rhs_code,
1938 ops[0], ops[1]);
1939 gimple_set_location (stmt, bit_loc);
1940 /* If there is just one load and there is a separate
1941 load_seq[0], emit the bitwise op right after it. */
1942 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
1943 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
1944 /* Otherwise, if at least one load is in seq, we need to
1945 emit the bitwise op right before the store. If there
1946 are two loads and they are emitted somewhere else, it
1947 would be better to emit the bitwise op as early as
1948 possible; we don't track where that would be possible
1949 right now though. */
1950 else
1951 gimple_seq_add_stmt_without_update (&seq, stmt);
1952 src = gimple_assign_lhs (stmt);
1953 if (split_store->orig_stores[0]->bit_not_p)
1955 stmt = gimple_build_assign (make_ssa_name (int_type),
1956 BIT_NOT_EXPR, src);
1957 gimple_set_location (stmt, bit_loc);
1958 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
1959 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
1960 else
1961 gimple_seq_add_stmt_without_update (&seq, stmt);
1962 src = gimple_assign_lhs (stmt);
1964 break;
1965 default:
1966 src = ops[0];
1967 break;
1970 if (!integer_zerop (mask))
1972 tree tem = make_ssa_name (int_type);
1973 tree load_src = unshare_expr (dest);
1974 /* The load might read some or all bits from uninitialized
1975 memory; avoid -W*uninitialized warnings in that case.
1976 As an optimization, when all the bits are provably
1977 uninitialized (no stores at all yet, or the previous
1978 store was a CLOBBER), we could drop the load and replace
1979 it with e.g. 0. */
1980 TREE_NO_WARNING (load_src) = 1;
1981 stmt = gimple_build_assign (tem, load_src);
1982 gimple_set_location (stmt, loc);
1983 gimple_set_vuse (stmt, new_vuse);
1984 gimple_seq_add_stmt_without_update (&seq, stmt);
1986 /* FIXME: If there is a single chunk of zero bits in mask,
1987 perhaps use BIT_INSERT_EXPR instead? */
1988 stmt = gimple_build_assign (make_ssa_name (int_type),
1989 BIT_AND_EXPR, tem, mask);
1990 gimple_set_location (stmt, loc);
1991 gimple_seq_add_stmt_without_update (&seq, stmt);
1992 tem = gimple_assign_lhs (stmt);
1994 if (TREE_CODE (src) == INTEGER_CST)
1995 src = wide_int_to_tree (int_type,
1996 wi::bit_and_not (wi::to_wide (src),
1997 wi::to_wide (mask)));
1998 else
2000 tree nmask
2001 = wide_int_to_tree (int_type,
2002 wi::bit_not (wi::to_wide (mask)));
2003 stmt = gimple_build_assign (make_ssa_name (int_type),
2004 BIT_AND_EXPR, src, nmask);
2005 gimple_set_location (stmt, loc);
2006 gimple_seq_add_stmt_without_update (&seq, stmt);
2007 src = gimple_assign_lhs (stmt);
2009 stmt = gimple_build_assign (make_ssa_name (int_type),
2010 BIT_IOR_EXPR, tem, src);
2011 gimple_set_location (stmt, loc);
2012 gimple_seq_add_stmt_without_update (&seq, stmt);
2013 src = gimple_assign_lhs (stmt);
2017 stmt = gimple_build_assign (dest, src);
2018 gimple_set_location (stmt, loc);
2019 gimple_set_vuse (stmt, new_vuse);
2020 gimple_seq_add_stmt_without_update (&seq, stmt);
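/* Chain the virtual operands: every intermediate widened store gets a
   fresh VDEF which becomes the VUSE of the next one, while the final
   widened store reuses the VDEF of the last original statement so that
   virtual uses after the sequence stay valid. */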
2022 tree new_vdef;
2023 if (i < split_stores.length () - 1)
2024 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
2025 else
2026 new_vdef = last_vdef;
2028 gimple_set_vdef (stmt, new_vdef);
2029 SSA_NAME_DEF_STMT (new_vdef) = stmt;
2030 new_vuse = new_vdef;
2033 FOR_EACH_VEC_ELT (split_stores, i, split_store)
2034 delete split_store;
2036 gcc_assert (seq);
2037 if (dump_file)
2039 fprintf (dump_file,
2040 "New sequence of %u stmts to replace old one of %u stmts\n",
2041 split_stores.length (), orig_num_stmts);
2042 if (dump_flags & TDF_DETAILS)
2043 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
2045 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
2046 for (int j = 0; j < 2; ++j)
2047 if (load_seq[j])
2048 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
2050 return true;
2053 /* Process the merged_store_group objects created in the coalescing phase.
2054 The stores are all against the base object BASE.
2055 Try to output the widened stores and delete the original statements if
2056 successful. Return true iff any changes were made. */
2058 bool
2059 imm_store_chain_info::output_merged_stores ()
2061 unsigned int i;
2062 merged_store_group *merged_store;
2063 bool ret = false;
2064 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
2066 if (output_merged_store (merged_store))
2068 unsigned int j;
2069 store_immediate_info *store;
2070 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
2072 gimple *stmt = store->stmt;
2073 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2074 gsi_remove (&gsi, true);
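/* The VDEF of the last original statement has been reused as the
   VDEF of the final widened store, so release the defs only of the
   other statements. */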
2075 if (stmt != merged_store->last_stmt)
2077 unlink_stmt_vdef (stmt);
2078 release_defs (stmt);
2081 ret = true;
2084 if (ret && dump_file)
2085 fprintf (dump_file, "Merging successful!\n");
2087 return ret;
2090 /* Coalesce the store_immediate_info objects recorded against the base object
2091 BASE in the first phase and output them.
2092 Delete the allocated structures.
2093 Return true if any changes were made. */
2095 bool
2096 imm_store_chain_info::terminate_and_process_chain ()
2098 /* Process store chain. */
2099 bool ret = false;
2100 if (m_store_info.length () > 1)
2102 ret = coalesce_immediate_stores ();
2103 if (ret)
2104 ret = output_merged_stores ();
2107 /* Delete all the entries we allocated ourselves. */
2108 store_immediate_info *info;
2109 unsigned int i;
2110 FOR_EACH_VEC_ELT (m_store_info, i, info)
2111 delete info;
2113 merged_store_group *merged_info;
2114 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
2115 delete merged_info;
2117 return ret;
2120 /* Return true iff LHS is a destination potentially interesting for
2121 store merging. In practice these are the codes that get_inner_reference
2122 can process. */
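/* For example (informally, in source terms rather than GIMPLE):
   p->f and s.f are COMPONENT_REFs, a[i] is an ARRAY_REF and *p is
   represented as a MEM_REF, so stores to them can be recorded;
   an assignment to a whole DECL does not match any of these codes
   and is not considered. */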
2124 static bool
2125 lhs_valid_for_store_merging_p (tree lhs)
2127 tree_code code = TREE_CODE (lhs);
2129 if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
2130 || code == COMPONENT_REF || code == BIT_FIELD_REF)
2131 return true;
2133 return false;
2136 /* Return true if the tree RHS is a constant we want to consider
2137 during store merging. In practice accept all codes that
2138 native_encode_expr accepts. */
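/* That means integer constants as well as e.g. floating point
   constants qualify (their byte representation is what gets merged),
   while constants native_encode_expr cannot encode, such as an
   ADDR_EXPR of a decl, do not. */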
2140 static bool
2141 rhs_valid_for_store_merging_p (tree rhs)
2143 return native_encode_expr (rhs, NULL,
2144 GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0;
2147 /* If MEM is a memory reference usable for store merging (either as
2148 store destination or for loads), return the non-NULL base_addr
2149 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
2150 Otherwise return NULL_TREE; *PBITSIZE is still set even in that
2151 case. */
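/* As a concrete example (array name invented), for a store to a[2]
   where a is "char a[8]", this returns the address of a and sets
   *PBITSIZE to 8, *PBITPOS to 16 and the bit region to [16, 24).
   For a bit-field COMPONENT_REF the region is widened to the bits
   that get_bit_range allows a read-modify-write to touch. */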
2153 static tree
2154 mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
2155 unsigned HOST_WIDE_INT *pbitpos,
2156 unsigned HOST_WIDE_INT *pbitregion_start,
2157 unsigned HOST_WIDE_INT *pbitregion_end)
2159 HOST_WIDE_INT bitsize;
2160 HOST_WIDE_INT bitpos;
2161 unsigned HOST_WIDE_INT bitregion_start = 0;
2162 unsigned HOST_WIDE_INT bitregion_end = 0;
2163 machine_mode mode;
2164 int unsignedp = 0, reversep = 0, volatilep = 0;
2165 tree offset;
2166 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
2167 &unsignedp, &reversep, &volatilep);
2168 *pbitsize = bitsize;
2169 if (bitsize == 0)
2170 return NULL_TREE;
2172 if (TREE_CODE (mem) == COMPONENT_REF
2173 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
2175 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
2176 if (bitregion_end)
2177 ++bitregion_end;
2180 if (reversep)
2181 return NULL_TREE;
2183 /* We do not want to rewrite TARGET_MEM_REFs. */
2184 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
2185 return NULL_TREE;
2186 /* In some cases get_inner_reference may return a
2187 MEM_REF [ptr + byteoffset]. For the purposes of this pass
2188 canonicalize the base_addr to MEM_REF [ptr] and take
2189 byteoffset into account in the bitpos. This occurs in
2190 PR 23684 and this way we can catch more chains. */
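/* E.g. a MEM_REF [p + 4B] with a bit position of 8 becomes base
   MEM_REF [p] with a bit position of 4 * BITS_PER_UNIT + 8 == 40
   (on 8-bit-byte targets), and likewise for the bit region bounds. */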
2191 else if (TREE_CODE (base_addr) == MEM_REF)
2193 offset_int bit_off, byte_off = mem_ref_offset (base_addr);
2194 bit_off = byte_off << LOG2_BITS_PER_UNIT;
2195 bit_off += bitpos;
2196 if (!wi::neg_p (bit_off) && wi::fits_shwi_p (bit_off))
2198 bitpos = bit_off.to_shwi ();
2199 if (bitregion_end)
2201 bit_off = byte_off << LOG2_BITS_PER_UNIT;
2202 bit_off += bitregion_start;
2203 if (wi::fits_uhwi_p (bit_off))
2205 bitregion_start = bit_off.to_uhwi ();
2206 bit_off = byte_off << LOG2_BITS_PER_UNIT;
2207 bit_off += bitregion_end;
2208 if (wi::fits_uhwi_p (bit_off))
2209 bitregion_end = bit_off.to_uhwi ();
2210 else
2211 bitregion_end = 0;
2213 else
2214 bitregion_end = 0;
2217 else
2218 return NULL_TREE;
2219 base_addr = TREE_OPERAND (base_addr, 0);
2221 /* get_inner_reference returns the base object, get at its
2222 address now. */
2223 else
2225 if (bitpos < 0)
2226 return NULL_TREE;
2227 base_addr = build_fold_addr_expr (base_addr);
2230 if (!bitregion_end)
2232 bitregion_start = ROUND_DOWN (bitpos, BITS_PER_UNIT);
2233 bitregion_end = ROUND_UP (bitpos + bitsize, BITS_PER_UNIT);
2236 if (offset != NULL_TREE)
2238 /* If the access has a variable offset then a base decl has to be
2239 address-taken to be able to emit pointer-based stores to it.
2240 ??? We might be able to get away with re-using the original
2241 base up to the first variable part and then wrapping that inside
2242 a BIT_FIELD_REF. */
2243 tree base = get_base_address (base_addr);
2244 if (! base
2245 || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
2246 return NULL_TREE;
2248 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
2249 base_addr, offset);
2252 *pbitsize = bitsize;
2253 *pbitpos = bitpos;
2254 *pbitregion_start = bitregion_start;
2255 *pbitregion_end = bitregion_end;
2256 return base_addr;
2259 /* Return true if STMT is a load that can be used for store merging.
2260 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
2261 BITREGION_END are properties of the corresponding store. */
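/* Informal examples (GIMPLE spelled loosely): for
     _1 = q->x;  p->x = _1;
   the defining statement of _1 is a handled load and *OP ends up
   describing q->x; for
     _1 = q->x;  _2 = ~_1;  p->x = _2;
   the BIT_NOT_EXPR is looked through and OP->bit_not_p is set.
   The load must have the same bit size as the store, be offset from
   it by a whole number of bytes, and its bit region must cover at
   least as much as the store's region does. */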
2263 static bool
2264 handled_load (gimple *stmt, store_operand_info *op,
2265 unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos,
2266 unsigned HOST_WIDE_INT bitregion_start,
2267 unsigned HOST_WIDE_INT bitregion_end)
2269 if (!is_gimple_assign (stmt))
2270 return false;
2271 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
2273 tree rhs1 = gimple_assign_rhs1 (stmt);
2274 if (TREE_CODE (rhs1) == SSA_NAME
2275 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
2276 bitregion_start, bitregion_end))
2278 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
2279 been optimized earlier, but if allowed here, would confuse the
2280 multiple-uses counting. */
2281 if (op->bit_not_p)
2282 return false;
2283 op->bit_not_p = !op->bit_not_p;
2284 return true;
2286 return false;
2288 if (gimple_vuse (stmt)
2289 && gimple_assign_load_p (stmt)
2290 && !stmt_can_throw_internal (stmt)
2291 && !gimple_has_volatile_ops (stmt))
2293 tree mem = gimple_assign_rhs1 (stmt);
2294 op->base_addr
2295 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
2296 &op->bitregion_start,
2297 &op->bitregion_end);
2298 if (op->base_addr != NULL_TREE
2299 && op->bitsize == bitsize
2300 && ((op->bitpos - bitpos) % BITS_PER_UNIT) == 0
2301 && op->bitpos - op->bitregion_start >= bitpos - bitregion_start
2302 && op->bitregion_end - op->bitpos >= bitregion_end - bitpos)
2304 op->stmt = stmt;
2305 op->val = mem;
2306 op->bit_not_p = false;
2307 return true;
2310 return false;
2313 /* Record the store STMT for store merging optimization if it can be
2314 optimized. */
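/* A rough sketch of how RHS_CODE is chosen (GIMPLE spelled loosely):
     p->x = 5;                             INTEGER_CST
     _1 = q->x;  p->x = _1;                MEM_REF
     _1 = q->x;  _2 = ~_1;  p->x = _2;     MEM_REF with ops[0].bit_not_p
     _1 = q->x;  _2 = _1 & 7;  p->x = _2;  BIT_AND_EXPR with a constant
                                           second operand
   Anything else is treated as invalid and only terminates aliasing
   chains. */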
2316 void
2317 pass_store_merging::process_store (gimple *stmt)
2319 tree lhs = gimple_assign_lhs (stmt);
2320 tree rhs = gimple_assign_rhs1 (stmt);
2321 unsigned HOST_WIDE_INT bitsize, bitpos;
2322 unsigned HOST_WIDE_INT bitregion_start;
2323 unsigned HOST_WIDE_INT bitregion_end;
2324 tree base_addr
2325 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
2326 &bitregion_start, &bitregion_end);
2327 if (bitsize == 0)
2328 return;
2330 bool invalid = (base_addr == NULL_TREE
2331 || ((bitsize > MAX_BITSIZE_MODE_ANY_INT)
2332 && (TREE_CODE (rhs) != INTEGER_CST)));
2333 enum tree_code rhs_code = ERROR_MARK;
2334 bool bit_not_p = false;
2335 store_operand_info ops[2];
2336 if (invalid)
2338 else if (rhs_valid_for_store_merging_p (rhs))
2340 rhs_code = INTEGER_CST;
2341 ops[0].val = rhs;
2343 else if (TREE_CODE (rhs) != SSA_NAME)
2344 invalid = true;
2345 else
2347 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
2348 if (!is_gimple_assign (def_stmt))
2349 invalid = true;
2350 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
2351 bitregion_start, bitregion_end))
2352 rhs_code = MEM_REF;
2353 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
2355 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2356 if (TREE_CODE (rhs1) == SSA_NAME
2357 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
2359 bit_not_p = true;
2360 def_stmt = SSA_NAME_DEF_STMT (rhs1);
2363 if (rhs_code == ERROR_MARK && !invalid)
2364 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
2366 case BIT_AND_EXPR:
2367 case BIT_IOR_EXPR:
2368 case BIT_XOR_EXPR:
2369 tree rhs1, rhs2;
2370 rhs1 = gimple_assign_rhs1 (def_stmt);
2371 rhs2 = gimple_assign_rhs2 (def_stmt);
2372 invalid = true;
2373 if (TREE_CODE (rhs1) != SSA_NAME)
2374 break;
2375 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
2376 if (!is_gimple_assign (def_stmt1)
2377 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
2378 bitregion_start, bitregion_end))
2379 break;
2380 if (rhs_valid_for_store_merging_p (rhs2))
2381 ops[1].val = rhs2;
2382 else if (TREE_CODE (rhs2) != SSA_NAME)
2383 break;
2384 else
2386 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
2387 if (!is_gimple_assign (def_stmt2))
2388 break;
2389 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
2390 bitregion_start, bitregion_end))
2391 break;
2393 invalid = false;
2394 break;
2395 default:
2396 invalid = true;
2397 break;
2401 if (invalid)
2403 terminate_all_aliasing_chains (NULL, stmt);
2404 return;
2407 struct imm_store_chain_info **chain_info = NULL;
2408 if (base_addr)
2409 chain_info = m_stores.get (base_addr);
2411 store_immediate_info *info;
2412 if (chain_info)
2414 unsigned int ord = (*chain_info)->m_store_info.length ();
2415 info = new store_immediate_info (bitsize, bitpos, bitregion_start,
2416 bitregion_end, stmt, ord, rhs_code,
2417 bit_not_p, ops[0], ops[1]);
2418 if (dump_file && (dump_flags & TDF_DETAILS))
2420 fprintf (dump_file, "Recording immediate store from stmt:\n");
2421 print_gimple_stmt (dump_file, stmt, 0);
2423 (*chain_info)->m_store_info.safe_push (info);
2424 terminate_all_aliasing_chains (chain_info, stmt);
2425 /* If we reach the limit of stores to merge in a chain, terminate and
2426 process the chain now. */
2427 if ((*chain_info)->m_store_info.length ()
2428 == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
2430 if (dump_file && (dump_flags & TDF_DETAILS))
2431 fprintf (dump_file,
2432 "Reached maximum number of statements to merge:\n");
2433 terminate_and_release_chain (*chain_info);
2435 return;
2438 /* Store aliases any existing chain? */
2439 terminate_all_aliasing_chains (NULL, stmt);
2440 /* Start a new chain. */
2441 struct imm_store_chain_info *new_chain
2442 = new imm_store_chain_info (m_stores_head, base_addr);
2443 info = new store_immediate_info (bitsize, bitpos, bitregion_start,
2444 bitregion_end, stmt, 0, rhs_code,
2445 bit_not_p, ops[0], ops[1]);
2446 new_chain->m_store_info.safe_push (info);
2447 m_stores.put (base_addr, new_chain);
2448 if (dump_file && (dump_flags & TDF_DETAILS))
2450 fprintf (dump_file, "Starting new chain with statement:\n");
2451 print_gimple_stmt (dump_file, stmt, 0);
2452 fprintf (dump_file, "The base object is:\n");
2453 print_generic_expr (dump_file, base_addr);
2454 fprintf (dump_file, "\n");
2458 /* Entry point for the pass. Go over each basic block recording chains of
2459 immediate stores. Upon encountering a terminating statement (as defined
2460 by stmt_terminates_chain_p) process the recorded stores and emit the widened
2461 variants. */
2463 unsigned int
2464 pass_store_merging::execute (function *fun)
2466 basic_block bb;
2467 hash_set<gimple *> orig_stmts;
2469 FOR_EACH_BB_FN (bb, fun)
2471 gimple_stmt_iterator gsi;
2472 unsigned HOST_WIDE_INT num_statements = 0;
2473 /* Check quickly whether the basic block contains at least two
2474 non-debug statements; if it doesn't, there is nothing to merge
2475 and the block can be skipped. */
2476 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2478 if (is_gimple_debug (gsi_stmt (gsi)))
2479 continue;
2481 if (++num_statements >= 2)
2482 break;
2485 if (num_statements < 2)
2486 continue;
2488 if (dump_file && (dump_flags & TDF_DETAILS))
2489 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
2491 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2493 gimple *stmt = gsi_stmt (gsi);
2495 if (is_gimple_debug (stmt))
2496 continue;
2498 if (gimple_has_volatile_ops (stmt))
2500 /* Terminate all chains. */
2501 if (dump_file && (dump_flags & TDF_DETAILS))
2502 fprintf (dump_file, "Volatile access terminates "
2503 "all chains\n");
2504 terminate_and_process_all_chains ();
2505 continue;
2508 if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
2509 && !stmt_can_throw_internal (stmt)
2510 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
2511 process_store (stmt);
2512 else
2513 terminate_all_aliasing_chains (NULL, stmt);
2515 terminate_and_process_all_chains ();
2517 return 0;
2520 } // anon namespace
2522 /* Construct and return a store merging pass object. */
2524 gimple_opt_pass *
2525 make_pass_store_merging (gcc::context *ctxt)
2527 return new pass_store_merging (ctxt);
2530 #if CHECKING_P
2532 namespace selftest {
2534 /* Selftests for store merging helpers. */
2536 /* Assert that all elements of the byte arrays X and Y, both of length N,
2537 are equal. */
2539 static void
2540 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
2542 for (unsigned int i = 0; i < n; i++)
2544 if (x[i] != y[i])
2546 fprintf (stderr, "Arrays do not match. X:\n");
2547 dump_char_array (stderr, x, n);
2548 fprintf (stderr, "Y:\n");
2549 dump_char_array (stderr, y, n);
2551 ASSERT_EQ (x[i], y[i]);
2555 /* Test shift_bytes_in_array and check that it carries bits across
2556 bytes correctly. */
2558 static void
2559 verify_shift_bytes_in_array (void)
2561 /* byte 1 | byte 0
2562 00011111 | 11100000. */
2563 unsigned char orig[2] = { 0xe0, 0x1f };
2564 unsigned char in[2];
2565 memcpy (in, orig, sizeof orig);
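/* With the least significant byte first, orig is the 16-bit value
   0x1fe0; shifting left by 2 gives 0x7f80, i.e. bytes { 0x80, 0x7f }. */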
2567 unsigned char expected[2] = { 0x80, 0x7f };
2568 shift_bytes_in_array (in, sizeof (in), 2);
2569 verify_array_eq (in, expected, sizeof (in));
2571 memcpy (in, orig, sizeof orig);
2572 memcpy (expected, orig, sizeof orig);
2573 /* Check that shifting by zero doesn't change anything. */
2574 shift_bytes_in_array (in, sizeof (in), 0);
2575 verify_array_eq (in, expected, sizeof (in));
2579 /* Test shift_bytes_in_array_right and check that it carries bits across
2580 bytes correctly. */
2582 static void
2583 verify_shift_bytes_in_array_right (void)
2585 /* byte 0 | byte 1
2586 00011111 | 11100000 (the first byte is the most significant one here). */
2587 unsigned char orig[2] = { 0x1f, 0xe0};
2588 unsigned char in[2];
2589 memcpy (in, orig, sizeof orig);
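/* Interpreted with the most significant byte first, orig is the
   16-bit value 0x1fe0; shifting right by 2 gives 0x07f8, i.e.
   bytes { 0x07, 0xf8 }. */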
2590 unsigned char expected[2] = { 0x07, 0xf8};
2591 shift_bytes_in_array_right (in, sizeof (in), 2);
2592 verify_array_eq (in, expected, sizeof (in));
2594 memcpy (in, orig, sizeof orig);
2595 memcpy (expected, orig, sizeof orig);
2596 /* Check that shifting by zero doesn't change anything. */
2597 shift_bytes_in_array_right (in, sizeof (in), 0);
2598 verify_array_eq (in, expected, sizeof (in));
2601 /* Test that clear_bit_region clears exactly the bits asked for and
2602 nothing more. */
2604 static void
2605 verify_clear_bit_region (void)
2607 /* Start with all bits set and test clearing various patterns in them. */
2608 unsigned char orig[3] = { 0xff, 0xff, 0xff};
2609 unsigned char in[3];
2610 unsigned char expected[3];
2611 memcpy (in, orig, sizeof in);
2613 /* Check zeroing out all the bits. */
2614 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
2615 expected[0] = expected[1] = expected[2] = 0;
2616 verify_array_eq (in, expected, sizeof in);
2618 memcpy (in, orig, sizeof in);
2619 /* Leave the first and last bits intact. */
2620 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
2621 expected[0] = 0x1;
2622 expected[1] = 0;
2623 expected[2] = 0x80;
2624 verify_array_eq (in, expected, sizeof in);
2627 /* Test that clear_bit_region_be clears exactly the bits asked for and
2628 nothing more. */
2630 static void
2631 verify_clear_bit_region_be (void)
2633 /* Start with all bits set and test clearing various patterns in them. */
2634 unsigned char orig[3] = { 0xff, 0xff, 0xff};
2635 unsigned char in[3];
2636 unsigned char expected[3];
2637 memcpy (in, orig, sizeof in);
2639 /* Check zeroing out all the bits. */
2640 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
2641 expected[0] = expected[1] = expected[2] = 0;
2642 verify_array_eq (in, expected, sizeof in);
2644 memcpy (in, orig, sizeof in);
2645 /* Leave the first and last bits intact. */
2646 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
2647 expected[0] = 0x80;
2648 expected[1] = 0;
2649 expected[2] = 0x1;
2650 verify_array_eq (in, expected, sizeof in);
2654 /* Run all of the selftests within this file. */
2656 void
2657 store_merging_c_tests (void)
2659 verify_shift_bytes_in_array ();
2660 verify_shift_bytes_in_array_right ();
2661 verify_clear_bit_region ();
2662 verify_clear_bit_region_be ();
2665 } // namespace selftest
2666 #endif /* CHECKING_P. */