gcc/gimple-ssa-store-merging.c
1 /* GIMPLE store merging pass.
2 Copyright (C) 2016-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of this pass is to combine multiple memory stores of
22 constant values, values loaded from memory, or bitwise operations
23 on those, to consecutive memory locations, into fewer wider stores.
24 For example, if we have a sequence performing four byte stores to
25 consecutive memory locations:
26 [p ] := imm1;
27 [p + 1B] := imm2;
28 [p + 2B] := imm3;
29 [p + 3B] := imm4;
30 we can transform this into a single 4-byte store if the target supports it:
31 [p] := imm1:imm2:imm3:imm4 //concatenated immediates according to endianness.
33 Or:
34 [p ] := [q ];
35 [p + 1B] := [q + 1B];
36 [p + 2B] := [q + 2B];
37 [p + 3B] := [q + 3B];
38 if there is no overlap this can be transformed into a single 4-byte
39 load followed by a single 4-byte store.
41 Or:
42 [p ] := [q ] ^ imm1;
43 [p + 1B] := [q + 1B] ^ imm2;
44 [p + 2B] := [q + 2B] ^ imm3;
45 [p + 3B] := [q + 3B] ^ imm4;
46 if there is no overlap this can be transformed into a single 4-byte
47 load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49 The algorithm is applied to each basic block in three phases:
51 1) Scan through the basic block recording assignments to
52 destinations that can be expressed as a store to memory of a certain size
53 at a certain bit offset from expressions we can handle. For bit-fields
54 we also note the surrounding bit region, bits that could be stored in
55 a read-modify-write operation when storing the bit-field. Record store
56 chains to different bases in a hash_map (m_stores) and make sure to
57 terminate such chains when appropriate (for example when the stored
58 values get used subsequently).
59 These stores can be a result of structure element initializers, array stores
60 etc. A store_immediate_info object is recorded for every such store.
61 Record as many such assignments to a single base as possible until a
62 statement that interferes with the store sequence is encountered.
63 Each store has up to 2 operands, which can be an immediate constant
64 or a memory load, from which the value to be stored can be computed.
65 At most one of the operands can be a constant. The operands are recorded
66 in store_operand_info struct.
68 2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of
69 store_immediate_info objects) and coalesce contiguous stores into
70 merged_store_group objects. For bit-fields stores, we don't need to
71 require the stores to be contiguous, just their surrounding bit regions
72 have to be contiguous. If the expression being stored is different
73 between adjacent stores, such as one store storing a constant and the
74 following one storing a value loaded from memory, or if the loaded memory
75 objects are not adjacent, a new merged_store_group is created as well.
77 For example, given the stores:
78 [p ] := 0;
79 [p + 1B] := 1;
80 [p + 3B] := 0;
81 [p + 4B] := 1;
82 [p + 5B] := 0;
83 [p + 6B] := 0;
84 This phase would produce two merged_store_group objects, one recording the
85 two bytes stored in the memory region [p : p + 1] and another
86 recording the four bytes stored in the memory region [p + 3 : p + 6].
88 3) The merged_store_group objects produced in phase 2) are processed
89 to generate the sequence of wider stores that set the contiguous memory
90 regions to the sequence of bytes that correspond to them. This may emit
91 multiple stores per store group to handle contiguous stores that are not
92 of a size that is a power of 2. For example it can try to emit a 40-bit
93 store as a 32-bit store followed by an 8-bit store.
94 We try to emit stores as wide as we can while respecting STRICT_ALIGNMENT or
95 TARGET_SLOW_UNALIGNED_ACCESS rules.
97 Note on endianness and example:
98 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
99 [p ] := 0x1234;
100 [p + 2B] := 0x5678;
101 [p + 4B] := 0xab;
102 [p + 5B] := 0xcd;
104 The memory layout for little-endian (LE) and big-endian (BE) must be:
105 p |LE|BE|
106 ---------
107 0 |34|12|
108 1 |12|34|
109 2 |78|56|
110 3 |56|78|
111 4 |ab|ab|
112 5 |cd|cd|
114 To merge these into a single 48-bit merged value 'val' in phase 2)
115 on little-endian we insert stores to higher (consecutive) bitpositions
116 into the most significant bits of the merged value.
117 The final merged value would be: 0xcdab56781234
119 For big-endian we insert stores to higher bitpositions into the least
120 significant bits of the merged value.
121 The final merged value would be: 0x12345678abcd
123 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
124 followed by a 16-bit store. Again, we must consider endianness when
125 breaking down the 48-bit value 'val' computed above.
126 For little endian we emit:
127 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
128 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
130 Whereas for big-endian we emit:
131 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
132 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
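/* A worked example of the endianness arithmetic described above. This is an
   illustrative sketch only: the function name is made up and the block is
   excluded from the build. It recomputes the little-endian merged value and
   its 32-bit/16-bit split for the [p] := 0x1234 example. */
#if 0
static int
example_le_merge_and_split (void)
{
  /* Byte image of the four original stores on little-endian:
     34 12 78 56 ab cd.  */
  unsigned char bytes[6] = { 0x34, 0x12, 0x78, 0x56, 0xab, 0xcd };

  /* Phase 2: read the six bytes back as one little-endian integer,
     i.e. insert higher bit positions into more significant bits.  */
  unsigned long long val = 0;
  for (int i = 5; i >= 0; i--)
    val = (val << 8) | bytes[i];
  if (val != 0xcdab56781234ULL)
    return 0;

  /* Phase 3: split the 48-bit value into a 32-bit and a 16-bit store.  */
  unsigned int lo32 = val & 0xffffffffULL;	/* 0x56781234 */
  unsigned int hi16 = (val >> 32) & 0xffffULL;	/* 0xcdab */
  return lo32 == 0x56781234 && hi16 == 0xcdab;
}
#endif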
134 #include "config.h"
135 #include "system.h"
136 #include "coretypes.h"
137 #include "backend.h"
138 #include "tree.h"
139 #include "gimple.h"
140 #include "builtins.h"
141 #include "fold-const.h"
142 #include "tree-pass.h"
143 #include "ssa.h"
144 #include "gimple-pretty-print.h"
145 #include "alias.h"
146 #include "fold-const.h"
147 #include "params.h"
148 #include "print-tree.h"
149 #include "tree-hash-traits.h"
150 #include "gimple-iterator.h"
151 #include "gimplify.h"
152 #include "stor-layout.h"
153 #include "timevar.h"
154 #include "tree-cfg.h"
155 #include "tree-eh.h"
156 #include "target.h"
157 #include "gimplify-me.h"
158 #include "rtl.h"
159 #include "expr.h" /* For get_bit_range. */
160 #include "selftest.h"
162 /* The maximum size (in bits) of the stores this pass should generate. */
163 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
164 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
166 /* Limit to bound the number of aliasing checks for loads with the same
167 vuse as the corresponding store. */
168 #define MAX_STORE_ALIAS_CHECKS 64
170 namespace {
172 /* Struct recording one operand for the store, which is either a constant,
173 then VAL represents the constant and all the other fields are zero,
174 or a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
175 and the other fields also reflect the memory load. */
177 struct store_operand_info
179 tree val;
180 tree base_addr;
181 unsigned HOST_WIDE_INT bitsize;
182 unsigned HOST_WIDE_INT bitpos;
183 unsigned HOST_WIDE_INT bitregion_start;
184 unsigned HOST_WIDE_INT bitregion_end;
185 gimple *stmt;
186 bool bit_not_p;
187 store_operand_info ();
190 store_operand_info::store_operand_info ()
191 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
192 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
196 /* Struct recording the information about a single store of an immediate
197 to memory. These are created in the first phase and coalesced into
198 merged_store_group objects in the second phase. */
200 struct store_immediate_info
202 unsigned HOST_WIDE_INT bitsize;
203 unsigned HOST_WIDE_INT bitpos;
204 unsigned HOST_WIDE_INT bitregion_start;
205 /* This is one past the last bit of the bit region. */
206 unsigned HOST_WIDE_INT bitregion_end;
207 gimple *stmt;
208 unsigned int order;
209 /* INTEGER_CST for constant stores, MEM_REF for memory copy or
210 BIT_*_EXPR for logical bitwise operation. */
211 enum tree_code rhs_code;
212 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
213 bool bit_not_p;
214 /* True if ops have been swapped and thus ops[1] represents
215 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
216 bool ops_swapped_p;
217 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
218 just the first one. */
219 store_operand_info ops[2];
220 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
221 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
222 gimple *, unsigned int, enum tree_code, bool,
223 const store_operand_info &,
224 const store_operand_info &);
227 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
228 unsigned HOST_WIDE_INT bp,
229 unsigned HOST_WIDE_INT brs,
230 unsigned HOST_WIDE_INT bre,
231 gimple *st,
232 unsigned int ord,
233 enum tree_code rhscode,
234 bool bitnotp,
235 const store_operand_info &op0r,
236 const store_operand_info &op1r)
237 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
238 stmt (st), order (ord), rhs_code (rhscode), bit_not_p (bitnotp),
239 ops_swapped_p (false)
240 #if __cplusplus >= 201103L
241 , ops { op0r, op1r }
244 #else
246 ops[0] = op0r;
247 ops[1] = op1r;
249 #endif
251 /* Struct representing a group of stores to contiguous memory locations.
252 These are produced by the second phase (coalescing) and consumed in the
253 third phase that outputs the widened stores. */
255 struct merged_store_group
257 unsigned HOST_WIDE_INT start;
258 unsigned HOST_WIDE_INT width;
259 unsigned HOST_WIDE_INT bitregion_start;
260 unsigned HOST_WIDE_INT bitregion_end;
261 /* The size of the allocated memory for val and mask. */
262 unsigned HOST_WIDE_INT buf_size;
263 unsigned HOST_WIDE_INT align_base;
264 unsigned HOST_WIDE_INT load_align_base[2];
266 unsigned int align;
267 unsigned int load_align[2];
268 unsigned int first_order;
269 unsigned int last_order;
271 auto_vec<store_immediate_info *> stores;
272 /* We record the first and last original statements in the sequence because
273 we'll need their vuse/vdef and replacement position. It's easier to keep
274 track of them separately as 'stores' is reordered by apply_stores. */
275 gimple *last_stmt;
276 gimple *first_stmt;
277 unsigned char *val;
278 unsigned char *mask;
280 merged_store_group (store_immediate_info *);
281 ~merged_store_group ();
282 void merge_into (store_immediate_info *);
283 void merge_overlapping (store_immediate_info *);
284 bool apply_stores ();
285 private:
286 void do_merge (store_immediate_info *);
289 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
291 static void
292 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
294 if (!fd)
295 return;
297 for (unsigned int i = 0; i < len; i++)
298 fprintf (fd, "%x ", ptr[i]);
299 fprintf (fd, "\n");
302 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
303 bits between adjacent elements. AMNT should be within
304 [0, BITS_PER_UNIT).
305 Example, AMNT = 2:
306 00011111|11100000 << 2 = 01111111|10000000
307 PTR[1] | PTR[0] PTR[1] | PTR[0]. */
309 static void
310 shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt)
312 if (amnt == 0)
313 return;
315 unsigned char carry_over = 0U;
316 unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
317 unsigned char clear_mask = (~0U) << amnt;
319 for (unsigned int i = 0; i < sz; i++)
321 unsigned prev_carry_over = carry_over;
322 carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);
324 ptr[i] <<= amnt;
325 if (i != 0)
327 ptr[i] &= clear_mask;
328 ptr[i] |= prev_carry_over;
333 /* Like shift_bytes_in_array but for big-endian.
334 Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
335 bits between adjacent elements. AMNT should be within
336 [0, BITS_PER_UNIT).
337 Example, AMNT = 2:
338 00011111|11100000 >> 2 = 00000111|11111000
339 PTR[0] | PTR[1] PTR[0] | PTR[1]. */
341 static void
342 shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
343 unsigned int amnt)
345 if (amnt == 0)
346 return;
348 unsigned char carry_over = 0U;
349 unsigned char carry_mask = ~(~0U << amnt);
351 for (unsigned int i = 0; i < sz; i++)
353 unsigned prev_carry_over = carry_over;
354 carry_over = ptr[i] & carry_mask;
356 carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
357 ptr[i] >>= amnt;
358 ptr[i] |= prev_carry_over;
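/* Illustrative check of the two shift helpers on the AMNT = 2 examples from
   their comments. This is a sketch only: the function name is made up and
   the block is excluded from the build. */
#if 0
static void
example_shift_bytes (void)
{
  /* Little-endian: PTR[1]|PTR[0] is 00011111|11100000; shifting left by 2
     gives 01111111|10000000.  */
  unsigned char le[2] = { 0xe0, 0x1f };
  shift_bytes_in_array (le, 2, 2);
  gcc_assert (le[0] == 0x80 && le[1] == 0x7f);

  /* Big-endian: PTR[0]|PTR[1] is 00011111|11100000; shifting right by 2
     gives 00000111|11111000.  */
  unsigned char be[2] = { 0x1f, 0xe0 };
  shift_bytes_in_array_right (be, 2, 2);
  gcc_assert (be[0] == 0x07 && be[1] == 0xf8);
}
#endif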
362 /* Clear out LEN bits starting from bit START in the byte array
363 PTR. This clears the bits to the *right* of START.
364 START must be within [0, BITS_PER_UNIT) and counts starting from
365 the least significant bit. */
367 static void
368 clear_bit_region_be (unsigned char *ptr, unsigned int start,
369 unsigned int len)
371 if (len == 0)
372 return;
373 /* Clear len bits to the right of start. */
374 else if (len <= start + 1)
376 unsigned char mask = (~(~0U << len));
377 mask = mask << (start + 1U - len);
378 ptr[0] &= ~mask;
380 else if (start != BITS_PER_UNIT - 1)
382 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
383 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
384 len - (start % BITS_PER_UNIT) - 1);
386 else if (start == BITS_PER_UNIT - 1
387 && len > BITS_PER_UNIT)
389 unsigned int nbytes = len / BITS_PER_UNIT;
390 memset (ptr, 0, nbytes);
391 if (len % BITS_PER_UNIT != 0)
392 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
393 len % BITS_PER_UNIT);
395 else
396 gcc_unreachable ();
399 /* In the byte array PTR clear the bit region starting at bit
400 START that is LEN bits wide.
401 For regions spanning multiple bytes do this recursively until we reach
402 zero LEN or a region contained within a single byte. */
404 static void
405 clear_bit_region (unsigned char *ptr, unsigned int start,
406 unsigned int len)
408 /* Degenerate base case. */
409 if (len == 0)
410 return;
411 else if (start >= BITS_PER_UNIT)
412 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
413 /* Second base case. */
414 else if ((start + len) <= BITS_PER_UNIT)
416 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
417 mask >>= BITS_PER_UNIT - (start + len);
419 ptr[0] &= ~mask;
421 return;
423 /* Clear most significant bits in a byte and proceed with the next byte. */
424 else if (start != 0)
426 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
427 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
429 /* Whole bytes need to be cleared. */
430 else if (start == 0 && len > BITS_PER_UNIT)
432 unsigned int nbytes = len / BITS_PER_UNIT;
433 /* We could recurse on each byte but we clear whole bytes, so a simple
434 memset will do. */
435 memset (ptr, '\0', nbytes);
436 /* Clear the remaining sub-byte region if there is one. */
437 if (len % BITS_PER_UNIT != 0)
438 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
440 else
441 gcc_unreachable ();
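/* Small worked example for the two clearing helpers above. This is a sketch
   only: the function name is made up and the block is excluded from the
   build. */
#if 0
static void
example_clear_bit_region (void)
{
  /* Little-endian style: clear 8 bits starting at bit 4, i.e. the top
     nibble of BUF[0] and the low nibble of BUF[1].  */
  unsigned char buf[2] = { 0xff, 0xff };
  clear_bit_region (buf, 4, 8);
  gcc_assert (buf[0] == 0x0f && buf[1] == 0xf0);

  /* Big-endian style: starting at bit 3 of BUF2[0] clear 6 bits to the
     right, i.e. bits 3..0 of BUF2[0] and bits 7..6 of BUF2[1].  */
  unsigned char buf2[2] = { 0xff, 0xff };
  clear_bit_region_be (buf2, 3, 6);
  gcc_assert (buf2[0] == 0xf0 && buf2[1] == 0x3f);
}
#endif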
444 /* Write BITLEN bits of EXPR to the byte array PTR at
445 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
446 Return true if the operation succeeded. */
448 static bool
449 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
450 unsigned int total_bytes)
452 unsigned int first_byte = bitpos / BITS_PER_UNIT;
453 tree tmp_int = expr;
454 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
455 || (bitpos % BITS_PER_UNIT)
456 || !int_mode_for_size (bitlen, 0).exists ());
458 if (!sub_byte_op_p)
459 return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0;
461 /* LITTLE-ENDIAN
462 We are writing a non byte-sized quantity or at a position that is not
463 at a byte boundary.
464 |--------|--------|--------| ptr + first_byte
466 xxx xxxxxxxx xxx< bp>
467 |______EXPR____|
469 First native_encode_expr EXPR into a temporary buffer and shift each
470 byte in the buffer by 'bp' (carrying the bits over as necessary).
471 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
472 <------bitlen---->< bp>
473 Then we clear the destination bits:
474 |---00000|00000000|000-----| ptr + first_byte
475 <-------bitlen--->< bp>
477 Finally we ORR the bytes of the shifted EXPR into the cleared region:
478 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
480 BIG-ENDIAN
481 We are writing a non byte-sized quantity or at a position that is not
482 at a byte boundary.
483 ptr + first_byte |--------|--------|--------|
485 <bp >xxx xxxxxxxx xxx
486 |_____EXPR_____|
488 First native_encode_expr EXPR into a temporary buffer and shift each
489 byte in the buffer to the right (carrying the bits over as necessary).
490 We shift by as much as needed to align the most significant bit of EXPR
491 with bitpos:
492 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
493 <---bitlen----> <bp ><-----bitlen----->
494 Then we clear the destination bits:
495 ptr + first_byte |-----000||00000000||00000---|
496 <bp ><-------bitlen----->
498 Finally we ORR the bytes of the shifted EXPR into the cleared region:
499 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
500 The awkwardness comes from the fact that bitpos is counted from the
501 most significant bit of a byte. */
503 /* We must be dealing with fixed-size data at this point, since the
504 total size is also fixed. */
505 fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
506 /* Allocate an extra byte so that we have space to shift into. */
507 unsigned int byte_size = GET_MODE_SIZE (mode) + 1;
508 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
509 memset (tmpbuf, '\0', byte_size);
510 /* The store detection code should only have allowed constants that are
511 accepted by native_encode_expr. */
512 if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
513 gcc_unreachable ();
515 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
516 bytes to write. This means it can write more than
517 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
518 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
519 bitlen and zero out the bits that are not relevant as well (that may
520 contain a sign bit due to sign-extension). */
521 unsigned int padding
522 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
523 /* On big-endian the padding is at the 'front' so just skip the initial
524 bytes. */
525 if (BYTES_BIG_ENDIAN)
526 tmpbuf += padding;
528 byte_size -= padding;
530 if (bitlen % BITS_PER_UNIT != 0)
532 if (BYTES_BIG_ENDIAN)
533 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
534 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
535 else
536 clear_bit_region (tmpbuf, bitlen,
537 byte_size * BITS_PER_UNIT - bitlen);
539 /* Left shifting relies on the last byte being clear if bitlen is
540 a multiple of BITS_PER_UNIT, which it might not be if
541 there are padding bytes. */
542 else if (!BYTES_BIG_ENDIAN)
543 tmpbuf[byte_size - 1] = '\0';
545 /* Clear the bit region in PTR where the bits from TMPBUF will be
546 inserted. */
547 if (BYTES_BIG_ENDIAN)
548 clear_bit_region_be (ptr + first_byte,
549 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
550 else
551 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
553 int shift_amnt;
554 int bitlen_mod = bitlen % BITS_PER_UNIT;
555 int bitpos_mod = bitpos % BITS_PER_UNIT;
557 bool skip_byte = false;
558 if (BYTES_BIG_ENDIAN)
560 /* BITPOS and BITLEN are exactly aligned and no shifting
561 is necessary. */
562 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
563 || (bitpos_mod == 0 && bitlen_mod == 0))
564 shift_amnt = 0;
565 /* |. . . . . . . .|
566 <bp > <blen >.
567 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
568 of the value until it aligns with 'bp' in the next byte over. */
569 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
571 shift_amnt = bitlen_mod + bitpos_mod;
572 skip_byte = bitlen_mod != 0;
574 /* |. . . . . . . .|
575 <----bp--->
576 <---blen---->.
577 Shift the value right within the same byte so it aligns with 'bp'. */
578 else
579 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
581 else
582 shift_amnt = bitpos % BITS_PER_UNIT;
584 /* Create the shifted version of EXPR. */
585 if (!BYTES_BIG_ENDIAN)
587 shift_bytes_in_array (tmpbuf, byte_size, shift_amnt);
588 if (shift_amnt == 0)
589 byte_size--;
591 else
593 gcc_assert (BYTES_BIG_ENDIAN);
594 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
595 /* If shifting right forced us to move into the next byte skip the now
596 empty byte. */
597 if (skip_byte)
599 tmpbuf++;
600 byte_size--;
604 /* Insert the bits from TMPBUF. */
605 for (unsigned int i = 0; i < byte_size; i++)
606 ptr[first_byte + i] |= tmpbuf[i];
608 return true;
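/* Worked example of the padding computation above for the bitlen == 40 case
   mentioned in the comment. This is a sketch only: the function name is made
   up and the block is excluded from the build. */
#if 0
static void
example_encode_padding (void)
{
  /* A 40-bit value is encoded through DImode, so native_encode_expr writes
     8 bytes into the 9-byte temporary buffer; 3 of those bytes are padding
     that must be skipped (and the irrelevant bits cleared).  */
  unsigned int bitlen = 40;
  unsigned int byte_size = 8 + 1;	/* GET_MODE_SIZE (DImode) + 1.  */
  unsigned int padding
    = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
  gcc_assert (padding == 3);
}
#endif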
611 /* Sorting function for store_immediate_info objects.
612 Sorts them by bitposition. */
614 static int
615 sort_by_bitpos (const void *x, const void *y)
617 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
618 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
620 if ((*tmp)->bitpos < (*tmp2)->bitpos)
621 return -1;
622 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
623 return 1;
624 else
625 /* If the bit positions are the same, fall back on the order field,
626 which is guaranteed to be different. */
627 return (*tmp)->order - (*tmp2)->order;
630 /* Sorting function for store_immediate_info objects.
631 Sorts them by the order field. */
633 static int
634 sort_by_order (const void *x, const void *y)
636 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
637 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
639 if ((*tmp)->order < (*tmp2)->order)
640 return -1;
641 else if ((*tmp)->order > (*tmp2)->order)
642 return 1;
644 gcc_unreachable ();
647 /* Initialize a merged_store_group object from a store_immediate_info
648 object. */
650 merged_store_group::merged_store_group (store_immediate_info *info)
652 start = info->bitpos;
653 width = info->bitsize;
654 bitregion_start = info->bitregion_start;
655 bitregion_end = info->bitregion_end;
656 /* VAL has memory allocated for it in apply_stores once the group
657 width has been finalized. */
658 val = NULL;
659 mask = NULL;
660 unsigned HOST_WIDE_INT align_bitpos = 0;
661 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
662 &align, &align_bitpos);
663 align_base = start - align_bitpos;
664 for (int i = 0; i < 2; ++i)
666 store_operand_info &op = info->ops[i];
667 if (op.base_addr == NULL_TREE)
669 load_align[i] = 0;
670 load_align_base[i] = 0;
672 else
674 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
675 load_align_base[i] = op.bitpos - align_bitpos;
678 stores.create (1);
679 stores.safe_push (info);
680 last_stmt = info->stmt;
681 last_order = info->order;
682 first_stmt = last_stmt;
683 first_order = last_order;
684 buf_size = 0;
687 merged_store_group::~merged_store_group ()
689 if (val)
690 XDELETEVEC (val);
693 /* Helper method for merge_into and merge_overlapping to do
694 the common part. */
695 void
696 merged_store_group::do_merge (store_immediate_info *info)
698 bitregion_start = MIN (bitregion_start, info->bitregion_start);
699 bitregion_end = MAX (bitregion_end, info->bitregion_end);
701 unsigned int this_align;
702 unsigned HOST_WIDE_INT align_bitpos = 0;
703 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
704 &this_align, &align_bitpos);
705 if (this_align > align)
707 align = this_align;
708 align_base = info->bitpos - align_bitpos;
710 for (int i = 0; i < 2; ++i)
712 store_operand_info &op = info->ops[i];
713 if (!op.base_addr)
714 continue;
716 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
717 if (this_align > load_align[i])
719 load_align[i] = this_align;
720 load_align_base[i] = op.bitpos - align_bitpos;
724 gimple *stmt = info->stmt;
725 stores.safe_push (info);
726 if (info->order > last_order)
728 last_order = info->order;
729 last_stmt = stmt;
731 else if (info->order < first_order)
733 first_order = info->order;
734 first_stmt = stmt;
738 /* Merge a store recorded by INFO into this merged store.
739 The store is not overlapping with the existing recorded
740 stores. */
742 void
743 merged_store_group::merge_into (store_immediate_info *info)
745 unsigned HOST_WIDE_INT wid = info->bitsize;
746 /* Make sure we're inserting in the position we think we're inserting. */
747 gcc_assert (info->bitpos >= start + width
748 && info->bitregion_start <= bitregion_end);
750 width += wid;
751 do_merge (info);
754 /* Merge a store described by INFO into this merged store.
755 INFO overlaps in some way with the current store (i.e. it's not the
756 contiguous case, which is handled by merged_store_group::merge_into). */
758 void
759 merged_store_group::merge_overlapping (store_immediate_info *info)
761 /* If the store extends the size of the group, extend the width. */
762 if (info->bitpos + info->bitsize > start + width)
763 width += info->bitpos + info->bitsize - (start + width);
765 do_merge (info);
768 /* Go through all the recorded stores in this group in program order and
769 apply their values to the VAL byte array to create the final merged
770 value. Return true if the operation succeeded. */
772 bool
773 merged_store_group::apply_stores ()
775 /* Make sure we have more than one store in the group, otherwise we cannot
776 merge anything. */
777 if (bitregion_start % BITS_PER_UNIT != 0
778 || bitregion_end % BITS_PER_UNIT != 0
779 || stores.length () == 1)
780 return false;
782 stores.qsort (sort_by_order);
783 store_immediate_info *info;
784 unsigned int i;
785 /* Create a buffer of a size that is 2 times the number of bytes we're
786 storing. That way native_encode_expr can write power-of-2-sized
787 chunks without overrunning. */
788 buf_size = 2 * ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
789 val = XNEWVEC (unsigned char, 2 * buf_size);
790 mask = val + buf_size;
791 memset (val, 0, buf_size);
792 memset (mask, ~0U, buf_size);
794 FOR_EACH_VEC_ELT (stores, i, info)
796 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
797 tree cst = NULL_TREE;
798 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
799 cst = info->ops[0].val;
800 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
801 cst = info->ops[1].val;
802 bool ret = true;
803 if (cst)
804 ret = encode_tree_to_bitpos (cst, val, info->bitsize,
805 pos_in_buffer, buf_size);
806 if (cst && dump_file && (dump_flags & TDF_DETAILS))
808 if (ret)
810 fprintf (dump_file, "After writing ");
811 print_generic_expr (dump_file, cst, 0);
812 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
813 " at position %d the merged region contains:\n",
814 info->bitsize, pos_in_buffer);
815 dump_char_array (dump_file, val, buf_size);
817 else
818 fprintf (dump_file, "Failed to merge stores\n");
820 if (!ret)
821 return false;
822 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
823 if (BYTES_BIG_ENDIAN)
824 clear_bit_region_be (m, (BITS_PER_UNIT - 1
825 - (pos_in_buffer % BITS_PER_UNIT)),
826 info->bitsize);
827 else
828 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
830 return true;
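/* Sketch of the VAL/MASK encoding built above for two constant byte stores
   [p] := 0x12 and [p + 2B] := 0x34 over a 3-byte bit region: the untouched
   byte in between keeps ~0 in MASK, which phase 3 treats as padding. This is
   a plain-C emulation with a made-up name, excluded from the build; it does
   not call apply_stores itself. */
#if 0
static void
example_val_mask (void)
{
  unsigned char val[3], mask[3];
  memset (val, 0, 3);
  memset (mask, ~0U, 3);
  /* [p] := 0x12.  */
  val[0] = 0x12;
  clear_bit_region (mask, 0, 8);
  /* [p + 2B] := 0x34.  */
  val[2] = 0x34;
  clear_bit_region (mask + 2, 0, 8);
  gcc_assert (val[1] == 0 && mask[1] == 0xff
	      && mask[0] == 0 && mask[2] == 0);
}
#endif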
833 /* Structure describing the store chain. */
835 struct imm_store_chain_info
837 /* Doubly-linked list that imposes an order on chain processing.
838 PNXP (prev's next pointer) points to the head of a list, or to
839 the next field in the previous chain in the list.
840 See pass_store_merging::m_stores_head for more rationale. */
841 imm_store_chain_info *next, **pnxp;
842 tree base_addr;
843 auto_vec<store_immediate_info *> m_store_info;
844 auto_vec<merged_store_group *> m_merged_store_groups;
846 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
847 : next (inspt), pnxp (&inspt), base_addr (b_a)
849 inspt = this;
850 if (next)
852 gcc_checking_assert (pnxp == next->pnxp);
853 next->pnxp = &next;
856 ~imm_store_chain_info ()
858 *pnxp = next;
859 if (next)
861 gcc_checking_assert (&next == next->pnxp);
862 next->pnxp = pnxp;
865 bool terminate_and_process_chain ();
866 bool coalesce_immediate_stores ();
867 bool output_merged_store (merged_store_group *);
868 bool output_merged_stores ();
871 const pass_data pass_data_tree_store_merging = {
872 GIMPLE_PASS, /* type */
873 "store-merging", /* name */
874 OPTGROUP_NONE, /* optinfo_flags */
875 TV_GIMPLE_STORE_MERGING, /* tv_id */
876 PROP_ssa, /* properties_required */
877 0, /* properties_provided */
878 0, /* properties_destroyed */
879 0, /* todo_flags_start */
880 TODO_update_ssa, /* todo_flags_finish */
883 class pass_store_merging : public gimple_opt_pass
885 public:
886 pass_store_merging (gcc::context *ctxt)
887 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
891 /* Pass not supported for PDP-endianness, nor for insane hosts
892 or target character sizes where native_{encode,interpret}_expr
893 doesn't work properly. */
894 virtual bool
895 gate (function *)
897 return flag_store_merging
898 && WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN
899 && CHAR_BIT == 8
900 && BITS_PER_UNIT == 8;
903 virtual unsigned int execute (function *);
905 private:
906 hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;
908 /* Form a doubly-linked stack of the elements of m_stores, so that
909 we can iterate over them in a predictable way. Using this order
910 avoids extraneous differences in the compiler output just because
911 of tree pointer variations (e.g. different chains end up in
912 different positions of m_stores, so they are handled in different
913 orders, so they allocate or release SSA names in different
914 orders, and when they get reused, subsequent passes end up
915 getting different SSA names, which may ultimately change
916 decisions when going out of SSA). */
917 imm_store_chain_info *m_stores_head;
919 void process_store (gimple *);
920 bool terminate_and_process_all_chains ();
921 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
922 bool terminate_and_release_chain (imm_store_chain_info *);
923 }; // class pass_store_merging
925 /* Terminate and process all recorded chains. Return true if any changes
926 were made. */
928 bool
929 pass_store_merging::terminate_and_process_all_chains ()
931 bool ret = false;
932 while (m_stores_head)
933 ret |= terminate_and_release_chain (m_stores_head);
934 gcc_assert (m_stores.elements () == 0);
935 gcc_assert (m_stores_head == NULL);
937 return ret;
940 /* Terminate all chains that are affected by the statement STMT.
941 CHAIN_INFO, if non-NULL, is the chain we should skip in these
942 checks. */
944 bool
945 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
946 **chain_info,
947 gimple *stmt)
949 bool ret = false;
951 /* If the statement doesn't touch memory it can't alias. */
952 if (!gimple_vuse (stmt))
953 return false;
955 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
956 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
958 next = cur->next;
960 /* We already checked all the stores in chain_info and terminated the
961 chain if necessary. Skip it here. */
962 if (chain_info && *chain_info == cur)
963 continue;
965 store_immediate_info *info;
966 unsigned int i;
967 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
969 tree lhs = gimple_assign_lhs (info->stmt);
970 if (ref_maybe_used_by_stmt_p (stmt, lhs)
971 || stmt_may_clobber_ref_p (stmt, lhs)
972 || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
974 if (dump_file && (dump_flags & TDF_DETAILS))
976 fprintf (dump_file, "stmt causes chain termination:\n");
977 print_gimple_stmt (dump_file, stmt, 0);
979 terminate_and_release_chain (cur);
980 ret = true;
981 break;
986 return ret;
989 /* Helper function. Terminate the recorded chain storing to base object
990 BASE. Return true if the merging and output was successful. The m_stores
991 entry is removed after the processing in any case. */
993 bool
994 pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
996 bool ret = chain_info->terminate_and_process_chain ();
997 m_stores.remove (chain_info->base_addr);
998 delete chain_info;
999 return ret;
1002 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
1003 may clobber REF. FIRST and LAST must be in the same basic block and
1004 have non-NULL vdef. */
1006 bool
1007 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
1009 ao_ref r;
1010 ao_ref_init (&r, ref);
1011 unsigned int count = 0;
1012 tree vop = gimple_vdef (last);
1013 gimple *stmt;
1015 gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
1018 stmt = SSA_NAME_DEF_STMT (vop);
1019 if (stmt_may_clobber_ref_p_1 (stmt, &r))
1020 return true;
1021 /* Avoid quadratic compile time by bounding the number of checks
1022 we perform. */
1023 if (++count > MAX_STORE_ALIAS_CHECKS)
1024 return true;
1025 vop = gimple_vuse (stmt);
1027 while (stmt != first);
1028 return false;
1031 /* Return true if INFO->ops[IDX] is mergeable with the
1032 corresponding loads already in MERGED_STORE group.
1033 BASE_ADDR is the base address of the whole store group. */
1035 bool
1036 compatible_load_p (merged_store_group *merged_store,
1037 store_immediate_info *info,
1038 tree base_addr, int idx)
1040 store_immediate_info *infof = merged_store->stores[0];
1041 if (!info->ops[idx].base_addr
1042 || (info->ops[idx].bitpos - infof->ops[idx].bitpos
1043 != info->bitpos - infof->bitpos)
1044 || !operand_equal_p (info->ops[idx].base_addr,
1045 infof->ops[idx].base_addr, 0))
1046 return false;
1048 store_immediate_info *infol = merged_store->stores.last ();
1049 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
1050 /* In this case all vuses should be the same, e.g.
1051 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
1053 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
1054 and we can emit the coalesced load next to any of those loads. */
1055 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
1056 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
1057 return true;
1059 /* Otherwise, at least for now require that the load has the same
1060 vuse as the store. See following examples. */
1061 if (gimple_vuse (info->stmt) != load_vuse)
1062 return false;
1064 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
1065 || (infof != infol
1066 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
1067 return false;
1069 /* If the load is from the same location as the store, already
1070 the construction of the immediate chain info guarantees no intervening
1071 stores, so no further checks are needed. Example:
1072 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
1073 if (info->ops[idx].bitpos == info->bitpos
1074 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
1075 return true;
1077 /* Otherwise, we need to punt if any of the loads can be clobbered by any
1078 of the stores in the group, or any other stores in between those.
1079 Previous calls to compatible_load_p ensured that for all the
1080 merged_store->stores IDX loads, no stmts starting with
1081 merged_store->first_stmt and ending right before merged_store->last_stmt
1082 clobbers those loads. */
1083 gimple *first = merged_store->first_stmt;
1084 gimple *last = merged_store->last_stmt;
1085 unsigned int i;
1086 store_immediate_info *infoc;
1087 /* The stores are sorted by increasing store bitpos, so if the info->stmt store
1088 comes before the first load seen so far, we'll be changing
1089 merged_store->first_stmt. In that case we need to give up if
1090 any of the earlier processed loads are clobbered by the stmts in the new
1091 range. */
1092 if (info->order < merged_store->first_order)
1094 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
1095 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
1096 return false;
1097 first = info->stmt;
1099 /* Similarly, we could change merged_store->last_stmt, so ensure
1100 in that case no stmts in the new range clobber any of the earlier
1101 processed loads. */
1102 else if (info->order > merged_store->last_order)
1104 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
1105 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
1106 return false;
1107 last = info->stmt;
1109 /* And finally, since we'd be adding a new load to the set, ensure it isn't
1110 clobbered in the new range. */
1111 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
1112 return false;
1114 /* Otherwise, we are looking for:
1115 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
1117 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
1118 return true;
1121 /* Go through the candidate stores recorded in m_store_info and merge them
1122 into merged_store_group objects recorded in m_merged_store_groups
1123 representing the widened stores. Return true if coalescing was successful
1124 and the number of widened stores is fewer than the original number
1125 of stores. */
1127 bool
1128 imm_store_chain_info::coalesce_immediate_stores ()
1130 /* Anything less can't be processed. */
1131 if (m_store_info.length () < 2)
1132 return false;
1134 if (dump_file && (dump_flags & TDF_DETAILS))
1135 fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n",
1136 m_store_info.length ());
1138 store_immediate_info *info;
1139 unsigned int i;
1141 /* Order the stores by the bitposition they write to. */
1142 m_store_info.qsort (sort_by_bitpos);
1144 info = m_store_info[0];
1145 merged_store_group *merged_store = new merged_store_group (info);
1147 FOR_EACH_VEC_ELT (m_store_info, i, info)
1149 if (dump_file && (dump_flags & TDF_DETAILS))
1151 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
1152 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n",
1153 i, info->bitsize, info->bitpos);
1154 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
1155 fprintf (dump_file, "\n------------\n");
1158 if (i == 0)
1159 continue;
1161 /* |---store 1---|
1162 |---store 2---|
1163 Overlapping stores. */
1164 unsigned HOST_WIDE_INT start = info->bitpos;
1165 if (IN_RANGE (start, merged_store->start,
1166 merged_store->start + merged_store->width - 1))
1168 /* Only allow overlapping stores of constants. */
1169 if (info->rhs_code == INTEGER_CST
1170 && merged_store->stores[0]->rhs_code == INTEGER_CST)
1172 merged_store->merge_overlapping (info);
1173 continue;
1176 /* |---store 1---||---store 2---|
1177 This store is consecutive to the previous one.
1178 Merge it into the current store group. There can be gaps in between
1179 the stores, but there can't be gaps in between bitregions. */
1180 else if (info->bitregion_start <= merged_store->bitregion_end
1181 && info->rhs_code == merged_store->stores[0]->rhs_code)
1183 store_immediate_info *infof = merged_store->stores[0];
1185 /* All the rhs_code ops that take 2 operands are commutative;
1186 swap the operands if that could make them compatible. */
1187 if (infof->ops[0].base_addr
1188 && infof->ops[1].base_addr
1189 && info->ops[0].base_addr
1190 && info->ops[1].base_addr
1191 && (info->ops[1].bitpos - infof->ops[0].bitpos
1192 == info->bitpos - infof->bitpos)
1193 && operand_equal_p (info->ops[1].base_addr,
1194 infof->ops[0].base_addr, 0))
1196 std::swap (info->ops[0], info->ops[1]);
1197 info->ops_swapped_p = true;
1199 if ((infof->ops[0].base_addr
1200 ? compatible_load_p (merged_store, info, base_addr, 0)
1201 : !info->ops[0].base_addr)
1202 && (infof->ops[1].base_addr
1203 ? compatible_load_p (merged_store, info, base_addr, 1)
1204 : !info->ops[1].base_addr))
1206 merged_store->merge_into (info);
1207 continue;
1211 /* |---store 1---| <gap> |---store 2---|.
1212 Gap between stores or the rhs not compatible. Start a new group. */
1214 /* Try to apply all the stores recorded for the group to determine
1215 the bitpattern they write and discard it if that fails.
1216 This will also reject single-store groups. */
1217 if (!merged_store->apply_stores ())
1218 delete merged_store;
1219 else
1220 m_merged_store_groups.safe_push (merged_store);
1222 merged_store = new merged_store_group (info);
1225 /* Record or discard the last store group. */
1226 if (!merged_store->apply_stores ())
1227 delete merged_store;
1228 else
1229 m_merged_store_groups.safe_push (merged_store);
1231 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
1232 bool success
1233 = !m_merged_store_groups.is_empty ()
1234 && m_merged_store_groups.length () < m_store_info.length ();
1236 if (success && dump_file)
1237 fprintf (dump_file, "Coalescing successful!\n"
1238 "Merged into %u stores\n",
1239 m_merged_store_groups.length ());
1241 return success;
1244 /* Return the type to use for the merged stores or loads described by STMTS.
1245 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
1246 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
1247 of the MEM_REFs if any. */
1249 static tree
1250 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
1251 unsigned short *cliquep, unsigned short *basep)
1253 gimple *stmt;
1254 unsigned int i;
1255 tree type = NULL_TREE;
1256 tree ret = NULL_TREE;
1257 *cliquep = 0;
1258 *basep = 0;
1260 FOR_EACH_VEC_ELT (stmts, i, stmt)
1262 tree ref = is_load ? gimple_assign_rhs1 (stmt)
1263 : gimple_assign_lhs (stmt);
1264 tree type1 = reference_alias_ptr_type (ref);
1265 tree base = get_base_address (ref);
1267 if (i == 0)
1269 if (TREE_CODE (base) == MEM_REF)
1271 *cliquep = MR_DEPENDENCE_CLIQUE (base);
1272 *basep = MR_DEPENDENCE_BASE (base);
1274 ret = type = type1;
1275 continue;
1277 if (!alias_ptr_types_compatible_p (type, type1))
1278 ret = ptr_type_node;
1279 if (TREE_CODE (base) != MEM_REF
1280 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
1281 || *basep != MR_DEPENDENCE_BASE (base))
1283 *cliquep = 0;
1284 *basep = 0;
1287 return ret;
1290 /* Return the location_t information we can find among the statements
1291 in STMTS. */
1293 static location_t
1294 get_location_for_stmts (vec<gimple *> &stmts)
1296 gimple *stmt;
1297 unsigned int i;
1299 FOR_EACH_VEC_ELT (stmts, i, stmt)
1300 if (gimple_has_location (stmt))
1301 return gimple_location (stmt);
1303 return UNKNOWN_LOCATION;
1306 /* Used to describe a store resulting from splitting a wide store into smaller
1307 regularly-sized stores in split_group. */
1309 struct split_store
1311 unsigned HOST_WIDE_INT bytepos;
1312 unsigned HOST_WIDE_INT size;
1313 unsigned HOST_WIDE_INT align;
1314 auto_vec<store_immediate_info *> orig_stores;
1315 /* True if there is a single orig stmt covering the whole split store. */
1316 bool orig;
1317 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1318 unsigned HOST_WIDE_INT);
1321 /* Simple constructor. */
1323 split_store::split_store (unsigned HOST_WIDE_INT bp,
1324 unsigned HOST_WIDE_INT sz,
1325 unsigned HOST_WIDE_INT al)
1326 : bytepos (bp), size (sz), align (al), orig (false)
1328 orig_stores.create (0);
1331 /* Record all stores in GROUP that write to the region starting at BITPOS
1332 that is BITSIZE bits wide. Record infos for such statements in STORES if
1333 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
1334 if there is exactly one original store in the range. */
1336 static store_immediate_info *
1337 find_constituent_stores (struct merged_store_group *group,
1338 vec<store_immediate_info *> *stores,
1339 unsigned int *first,
1340 unsigned HOST_WIDE_INT bitpos,
1341 unsigned HOST_WIDE_INT bitsize)
1343 store_immediate_info *info, *ret = NULL;
1344 unsigned int i;
1345 bool second = false;
1346 bool update_first = true;
1347 unsigned HOST_WIDE_INT end = bitpos + bitsize;
1348 for (i = *first; group->stores.iterate (i, &info); ++i)
1350 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
1351 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
1352 if (stmt_end <= bitpos)
1354 /* BITPOS passed to this function never decreases within the
1355 same split_group call, so optimize and don't scan info records
1356 which are known to end before or at BITPOS next time.
1357 Only do it if all stores before this one also pass this. */
1358 if (update_first)
1359 *first = i + 1;
1360 continue;
1362 else
1363 update_first = false;
1365 /* The stores in GROUP are ordered by bitposition so if we're past
1366 the region for this group return early. */
1367 if (stmt_start >= end)
1368 return ret;
1370 if (stores)
1372 stores->safe_push (info);
1373 if (ret)
1375 ret = NULL;
1376 second = true;
1379 else if (ret)
1380 return NULL;
1381 if (!second)
1382 ret = info;
1384 return ret;
1387 /* Return how many SSA_NAMEs used to compute the value to store in the INFO
1388 store have multiple uses. If any SSA_NAME has multiple uses, also
1389 count statements needed to compute it. */
1391 static unsigned
1392 count_multiple_uses (store_immediate_info *info)
1394 gimple *stmt = info->stmt;
1395 unsigned ret = 0;
1396 switch (info->rhs_code)
1398 case INTEGER_CST:
1399 return 0;
1400 case BIT_AND_EXPR:
1401 case BIT_IOR_EXPR:
1402 case BIT_XOR_EXPR:
1403 if (info->bit_not_p)
1405 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1406 ret = 1; /* Fall through below to return
1407 the BIT_NOT_EXPR stmt and then
1408 BIT_{AND,IOR,XOR}_EXPR and anything it
1409 uses. */
1410 else
1411 /* stmt is after this the BIT_NOT_EXPR. */
1412 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1414 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1416 ret += 1 + info->ops[0].bit_not_p;
1417 if (info->ops[1].base_addr)
1418 ret += 1 + info->ops[1].bit_not_p;
1419 return ret + 1;
1421 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1422 /* stmt is now the BIT_*_EXPR. */
1423 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1424 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
1425 else if (info->ops[info->ops_swapped_p].bit_not_p)
1427 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1428 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
1429 ++ret;
1431 if (info->ops[1].base_addr == NULL_TREE)
1433 gcc_checking_assert (!info->ops_swapped_p);
1434 return ret;
1436 if (!has_single_use (gimple_assign_rhs2 (stmt)))
1437 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
1438 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
1440 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
1441 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
1442 ++ret;
1444 return ret;
1445 case MEM_REF:
1446 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1447 return 1 + info->ops[0].bit_not_p;
1448 else if (info->ops[0].bit_not_p)
1450 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
1451 if (!has_single_use (gimple_assign_rhs1 (stmt)))
1452 return 1;
1454 return 0;
1455 default:
1456 gcc_unreachable ();
1460 /* Split a merged store described by GROUP by populating the SPLIT_STORES
1461 vector (if non-NULL) with split_store structs describing the byte offset
1462 (from the base), the bit size and alignment of each store as well as the
1463 original statements involved in each such split group.
1464 This is to separate the splitting strategy from the statement
1465 building/emission/linking done in output_merged_store.
1466 Return number of new stores.
1467 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
1468 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
1469 If SPLIT_STORES is NULL, it is just a dry run to count number of
1470 new stores. */
1472 static unsigned int
1473 split_group (merged_store_group *group, bool allow_unaligned_store,
1474 bool allow_unaligned_load,
1475 vec<struct split_store *> *split_stores,
1476 unsigned *total_orig,
1477 unsigned *total_new)
1479 unsigned HOST_WIDE_INT pos = group->bitregion_start;
1480 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
1481 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
1482 unsigned HOST_WIDE_INT group_align = group->align;
1483 unsigned HOST_WIDE_INT align_base = group->align_base;
1484 unsigned HOST_WIDE_INT group_load_align = group_align;
1485 bool any_orig = false;
1487 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
1489 unsigned int ret = 0, first = 0;
1490 unsigned HOST_WIDE_INT try_pos = bytepos;
1491 group->stores.qsort (sort_by_bitpos);
1493 if (total_orig)
1495 unsigned int i;
1496 store_immediate_info *info = group->stores[0];
1498 total_new[0] = 0;
1499 total_orig[0] = 1; /* The orig store. */
1500 info = group->stores[0];
1501 if (info->ops[0].base_addr)
1502 total_orig[0]++;
1503 if (info->ops[1].base_addr)
1504 total_orig[0]++;
1505 switch (info->rhs_code)
1507 case BIT_AND_EXPR:
1508 case BIT_IOR_EXPR:
1509 case BIT_XOR_EXPR:
1510 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
1511 break;
1512 default:
1513 break;
1515 total_orig[0] *= group->stores.length ();
1517 FOR_EACH_VEC_ELT (group->stores, i, info)
1519 total_new[0] += count_multiple_uses (info);
1520 total_orig[0] += (info->bit_not_p
1521 + info->ops[0].bit_not_p
1522 + info->ops[1].bit_not_p);
1526 if (!allow_unaligned_load)
1527 for (int i = 0; i < 2; ++i)
1528 if (group->load_align[i])
1529 group_load_align = MIN (group_load_align, group->load_align[i]);
1531 while (size > 0)
1533 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
1534 && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
1536 /* Skip padding bytes. */
1537 ++try_pos;
1538 size -= BITS_PER_UNIT;
1539 continue;
1542 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
1543 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
1544 unsigned HOST_WIDE_INT align_bitpos
1545 = (try_bitpos - align_base) & (group_align - 1);
1546 unsigned HOST_WIDE_INT align = group_align;
1547 if (align_bitpos)
1548 align = least_bit_hwi (align_bitpos);
1549 if (!allow_unaligned_store)
1550 try_size = MIN (try_size, align);
1551 if (!allow_unaligned_load)
1553 /* If we can't do or don't want to do unaligned stores
1554 as well as loads, we need to take the loads into account
1555 as well. */
1556 unsigned HOST_WIDE_INT load_align = group_load_align;
1557 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
1558 if (align_bitpos)
1559 load_align = least_bit_hwi (align_bitpos);
1560 for (int i = 0; i < 2; ++i)
1561 if (group->load_align[i])
1563 align_bitpos = try_bitpos - group->stores[0]->bitpos;
1564 align_bitpos += group->stores[0]->ops[i].bitpos;
1565 align_bitpos -= group->load_align_base[i];
1566 align_bitpos &= (group_load_align - 1);
1567 if (align_bitpos)
1569 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
1570 load_align = MIN (load_align, a);
1573 try_size = MIN (try_size, load_align);
1575 store_immediate_info *info
1576 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
1577 if (info)
1579 /* If there is just one original statement for the range, see if
1580 we can just reuse the original store which could be even larger
1581 than try_size. */
1582 unsigned HOST_WIDE_INT stmt_end
1583 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
1584 info = find_constituent_stores (group, NULL, &first, try_bitpos,
1585 stmt_end - try_bitpos);
1586 if (info && info->bitpos >= try_bitpos)
1588 try_size = stmt_end - try_bitpos;
1589 goto found;
1593 /* Approximate store bitsize for the case when there are no padding
1594 bits. */
1595 while (try_size > size)
1596 try_size /= 2;
1597 /* Now look for whole padding bytes at the end of that bitsize. */
1598 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
1599 if (group->mask[try_pos - bytepos + nonmasked - 1]
1600 != (unsigned char) ~0U)
1601 break;
1602 if (nonmasked == 0)
1604 /* If entire try_size range is padding, skip it. */
1605 try_pos += try_size / BITS_PER_UNIT;
1606 size -= try_size;
1607 continue;
1609 /* Otherwise try to decrease try_size if the second half, last 3 quarters,
1610 etc. are padding. */
1611 nonmasked *= BITS_PER_UNIT;
1612 while (nonmasked <= try_size / 2)
1613 try_size /= 2;
1614 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
1616 /* Now look for whole padding bytes at the start of that bitsize. */
1617 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
1618 for (masked = 0; masked < try_bytesize; ++masked)
1619 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
1620 break;
1621 masked *= BITS_PER_UNIT;
1622 gcc_assert (masked < try_size);
1623 if (masked >= try_size / 2)
1625 while (masked >= try_size / 2)
1627 try_size /= 2;
1628 try_pos += try_size / BITS_PER_UNIT;
1629 size -= try_size;
1630 masked -= try_size;
1632 /* Need to recompute the alignment, so just retry at the new
1633 position. */
1634 continue;
1638 found:
1639 ++ret;
1641 if (split_stores)
1643 struct split_store *store
1644 = new split_store (try_pos, try_size, align);
1645 info = find_constituent_stores (group, &store->orig_stores,
1646 &first, try_bitpos, try_size);
1647 if (info
1648 && info->bitpos >= try_bitpos
1649 && info->bitpos + info->bitsize <= try_bitpos + try_size)
1651 store->orig = true;
1652 any_orig = true;
1654 split_stores->safe_push (store);
1657 try_pos += try_size / BITS_PER_UNIT;
1658 size -= try_size;
1661 if (total_orig)
1663 unsigned int i;
1664 struct split_store *store;
1665 /* If we are reusing some original stores and any of the
1666 original SSA_NAMEs had multiple uses, we need to subtract
1667 those now before we add the new ones. */
1668 if (total_new[0] && any_orig)
1670 FOR_EACH_VEC_ELT (*split_stores, i, store)
1671 if (store->orig)
1672 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
1674 total_new[0] += ret; /* The new store. */
1675 store_immediate_info *info = group->stores[0];
1676 if (info->ops[0].base_addr)
1677 total_new[0] += ret;
1678 if (info->ops[1].base_addr)
1679 total_new[0] += ret;
1680 switch (info->rhs_code)
1682 case BIT_AND_EXPR:
1683 case BIT_IOR_EXPR:
1684 case BIT_XOR_EXPR:
1685 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
1686 break;
1687 default:
1688 break;
1690 FOR_EACH_VEC_ELT (*split_stores, i, store)
1692 unsigned int j;
1693 bool bit_not_p[3] = { false, false, false };
1694 /* If all orig_stores have certain bit_not_p set, then
1695 we'd use a BIT_NOT_EXPR stmt and need to account for it.
1696 If some orig_stores have certain bit_not_p set, then
1697 we'd use a BIT_XOR_EXPR with a mask and need to account for
1698 it. */
1699 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
1701 if (info->ops[0].bit_not_p)
1702 bit_not_p[0] = true;
1703 if (info->ops[1].bit_not_p)
1704 bit_not_p[1] = true;
1705 if (info->bit_not_p)
1706 bit_not_p[2] = true;
1708 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
1713 return ret;
1716 /* Return the operation through which the operand IDX (if < 2) or
1717 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
1718 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
1719 the bits should be xored with mask. */
1721 static enum tree_code
1722 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
1724 unsigned int i;
1725 store_immediate_info *info;
1726 unsigned int cnt = 0;
1727 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
1729 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
1730 if (bit_not_p)
1731 ++cnt;
1733 mask = NULL_TREE;
1734 if (cnt == 0)
1735 return NOP_EXPR;
1736 if (cnt == split_store->orig_stores.length ())
1737 return BIT_NOT_EXPR;
1739 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
1740 unsigned buf_size = split_store->size / BITS_PER_UNIT;
1741 unsigned char *buf
1742 = XALLOCAVEC (unsigned char, buf_size);
1743 memset (buf, ~0U, buf_size);
1744 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
1746 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
1747 if (!bit_not_p)
1748 continue;
1749 /* Clear regions with bit_not_p and invert afterwards, rather than
1750 clear regions with !bit_not_p, so that gaps in between stores aren't
1751 set in the mask. */
1752 unsigned HOST_WIDE_INT bitsize = info->bitsize;
1753 unsigned int pos_in_buffer = 0;
1754 if (info->bitpos < try_bitpos)
1756 gcc_assert (info->bitpos + bitsize > try_bitpos);
1757 bitsize -= (try_bitpos - info->bitpos);
1759 else
1760 pos_in_buffer = info->bitpos - try_bitpos;
1761 if (pos_in_buffer + bitsize > split_store->size)
1762 bitsize = split_store->size - pos_in_buffer;
1763 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
1764 if (BYTES_BIG_ENDIAN)
1765 clear_bit_region_be (p, (BITS_PER_UNIT - 1
1766 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
1767 else
1768 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
1770 for (unsigned int i = 0; i < buf_size; ++i)
1771 buf[i] = ~buf[i];
1772 mask = native_interpret_expr (int_type, buf, buf_size);
1773 return BIT_XOR_EXPR;
1776 /* Given a merged store group GROUP, output the widened version of it.
1777 The store chain is against the base object BASE.
1778 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
1779 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
1780 Make sure that the number of statements output is less than the number of
1781 original statements. If a better sequence is possible emit it and
1782 return true. */
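/* An illustrative sketch of the emitted code (assumed shapes, not lifted
   from a testcase): when the merged group does not cover every bit of the
   widened access, the mask is non-zero and a read-modify-write sequence is
   generated, roughly:
     _1 = MEM[(uint32_t *)p];   // load the destination word
     _2 = _1 & MASK;            // keep the bits that are not being stored
     _3 = SRC & ~MASK;          // folded at compile time for a constant SRC
     _4 = _2 | _3;
     MEM[(uint32_t *)p] = _4;
   When the group covers the whole access, just the single wide store (plus
   any loads and bitwise statements for its operands) is emitted.  */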
1784 bool
1785 imm_store_chain_info::output_merged_store (merged_store_group *group)
1787 unsigned HOST_WIDE_INT start_byte_pos
1788 = group->bitregion_start / BITS_PER_UNIT;
1790 unsigned int orig_num_stmts = group->stores.length ();
1791 if (orig_num_stmts < 2)
1792 return false;
1794 auto_vec<struct split_store *, 32> split_stores;
1795 split_stores.create (0);
1796 bool allow_unaligned_store
1797 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
1798 bool allow_unaligned_load = allow_unaligned_store;
1799 if (allow_unaligned_store)
1801 /* If unaligned stores are allowed, see how many stores we'd emit
1802 for unaligned and how many stores we'd emit for aligned stores.
1803 Only use unaligned stores if that results in fewer stores than aligned. */
1804 unsigned aligned_cnt
1805 = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
1806 unsigned unaligned_cnt
1807 = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
1808 if (aligned_cnt <= unaligned_cnt)
1809 allow_unaligned_store = false;
1811 unsigned total_orig, total_new;
1812 split_group (group, allow_unaligned_store, allow_unaligned_load,
1813 &split_stores, &total_orig, &total_new);
1815 if (split_stores.length () >= orig_num_stmts)
1817 /* We didn't manage to reduce the number of statements. Bail out. */
1818 if (dump_file && (dump_flags & TDF_DETAILS))
1819 fprintf (dump_file, "Exceeded original number of stmts (%u)."
1820 " Not profitable to emit new sequence.\n",
1821 orig_num_stmts);
1822 return false;
1824 if (total_orig <= total_new)
1826 /* If the estimated number of new statements is not smaller than the
1827 estimated number of original statements, bail out too. */
1828 if (dump_file && (dump_flags & TDF_DETAILS))
1829 fprintf (dump_file, "Estimated number of original stmts (%u)"
1830 " not larger than estimated number of new"
1831 " stmts (%u).\n",
1832 total_orig, total_new);
1835 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
1836 gimple_seq seq = NULL;
1837 tree last_vdef, new_vuse;
1838 last_vdef = gimple_vdef (group->last_stmt);
1839 new_vuse = gimple_vuse (group->last_stmt);
1841 gimple *stmt = NULL;
1842 split_store *split_store;
1843 unsigned int i;
1844 auto_vec<gimple *, 32> orig_stmts;
1845 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &seq,
1846 is_gimple_mem_ref_addr, NULL_TREE);
1848 tree load_addr[2] = { NULL_TREE, NULL_TREE };
1849 gimple_seq load_seq[2] = { NULL, NULL };
1850 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
1851 for (int j = 0; j < 2; ++j)
1853 store_operand_info &op = group->stores[0]->ops[j];
1854 if (op.base_addr == NULL_TREE)
1855 continue;
1857 store_immediate_info *infol = group->stores.last ();
1858 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
1860 load_gsi[j] = gsi_for_stmt (op.stmt);
1861 load_addr[j]
1862 = force_gimple_operand_1 (unshare_expr (op.base_addr),
1863 &load_seq[j], is_gimple_mem_ref_addr,
1864 NULL_TREE);
1866 else if (operand_equal_p (base_addr, op.base_addr, 0))
1867 load_addr[j] = addr;
1868 else
1870 gimple_seq this_seq;
1871 load_addr[j]
1872 = force_gimple_operand_1 (unshare_expr (op.base_addr),
1873 &this_seq, is_gimple_mem_ref_addr,
1874 NULL_TREE);
1875 gimple_seq_add_seq_without_update (&seq, this_seq);
1879 FOR_EACH_VEC_ELT (split_stores, i, split_store)
1881 unsigned HOST_WIDE_INT try_size = split_store->size;
1882 unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
1883 unsigned HOST_WIDE_INT align = split_store->align;
1884 tree dest, src;
1885 location_t loc;
1886 if (split_store->orig)
1888 /* If there is just a single constituent store which covers
1889 the whole area, just reuse the lhs and rhs. */
1890 gimple *orig_stmt = split_store->orig_stores[0]->stmt;
1891 dest = gimple_assign_lhs (orig_stmt);
1892 src = gimple_assign_rhs1 (orig_stmt);
1893 loc = gimple_location (orig_stmt);
1895 else
1897 store_immediate_info *info;
1898 unsigned short clique, base;
1899 unsigned int k;
1900 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
1901 orig_stmts.safe_push (info->stmt);
1902 tree offset_type
1903 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
1904 loc = get_location_for_stmts (orig_stmts);
1905 orig_stmts.truncate (0);
1907 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
1908 int_type = build_aligned_type (int_type, align);
1909 dest = fold_build2 (MEM_REF, int_type, addr,
1910 build_int_cst (offset_type, try_pos));
1911 if (TREE_CODE (dest) == MEM_REF)
1913 MR_DEPENDENCE_CLIQUE (dest) = clique;
1914 MR_DEPENDENCE_BASE (dest) = base;
1917 tree mask
1918 = native_interpret_expr (int_type,
1919 group->mask + try_pos - start_byte_pos,
1920 group->buf_size);
1922 tree ops[2];
1923 for (int j = 0;
1924 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
1925 ++j)
1927 store_operand_info &op = split_store->orig_stores[0]->ops[j];
1928 if (op.base_addr)
1930 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
1931 orig_stmts.safe_push (info->ops[j].stmt);
1933 offset_type = get_alias_type_for_stmts (orig_stmts, true,
1934 &clique, &base);
1935 location_t load_loc = get_location_for_stmts (orig_stmts);
1936 orig_stmts.truncate (0);
1938 unsigned HOST_WIDE_INT load_align = group->load_align[j];
1939 unsigned HOST_WIDE_INT align_bitpos
1940 = (try_pos * BITS_PER_UNIT
1941 - split_store->orig_stores[0]->bitpos
1942 + op.bitpos) & (load_align - 1);
1943 if (align_bitpos)
1944 load_align = least_bit_hwi (align_bitpos);
1946 tree load_int_type
1947 = build_nonstandard_integer_type (try_size, UNSIGNED);
1948 load_int_type
1949 = build_aligned_type (load_int_type, load_align);
1951 unsigned HOST_WIDE_INT load_pos
1952 = (try_pos * BITS_PER_UNIT
1953 - split_store->orig_stores[0]->bitpos
1954 + op.bitpos) / BITS_PER_UNIT;
1955 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
1956 build_int_cst (offset_type, load_pos));
1957 if (TREE_CODE (ops[j]) == MEM_REF)
1959 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
1960 MR_DEPENDENCE_BASE (ops[j]) = base;
1962 if (!integer_zerop (mask))
1963 /* The load might load some bits (that will be masked off
1964 later on) uninitialized; avoid -W*uninitialized
1965 warnings in that case. */
1966 TREE_NO_WARNING (ops[j]) = 1;
1968 stmt = gimple_build_assign (make_ssa_name (int_type),
1969 ops[j]);
1970 gimple_set_location (stmt, load_loc);
1971 if (gsi_bb (load_gsi[j]))
1973 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
1974 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
1976 else
1978 gimple_set_vuse (stmt, new_vuse);
1979 gimple_seq_add_stmt_without_update (&seq, stmt);
1981 ops[j] = gimple_assign_lhs (stmt);
1982 tree xor_mask;
1983 enum tree_code inv_op
1984 = invert_op (split_store, j, int_type, xor_mask);
1985 if (inv_op != NOP_EXPR)
1987 stmt = gimple_build_assign (make_ssa_name (int_type),
1988 inv_op, ops[j], xor_mask);
1989 gimple_set_location (stmt, load_loc);
1990 ops[j] = gimple_assign_lhs (stmt);
1992 if (gsi_bb (load_gsi[j]))
1993 gimple_seq_add_stmt_without_update (&load_seq[j],
1994 stmt);
1995 else
1996 gimple_seq_add_stmt_without_update (&seq, stmt);
1999 else
2000 ops[j] = native_interpret_expr (int_type,
2001 group->val + try_pos
2002 - start_byte_pos,
2003 group->buf_size);
2006 switch (split_store->orig_stores[0]->rhs_code)
2008 case BIT_AND_EXPR:
2009 case BIT_IOR_EXPR:
2010 case BIT_XOR_EXPR:
2011 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
2013 tree rhs1 = gimple_assign_rhs1 (info->stmt);
2014 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
2016 location_t bit_loc;
2017 bit_loc = get_location_for_stmts (orig_stmts);
2018 orig_stmts.truncate (0);
2020 stmt
2021 = gimple_build_assign (make_ssa_name (int_type),
2022 split_store->orig_stores[0]->rhs_code,
2023 ops[0], ops[1]);
2024 gimple_set_location (stmt, bit_loc);
2025 /* If there is just one load and there is a separate
2026 load_seq[0], emit the bitwise op right after it. */
2027 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
2028 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
2029 /* Otherwise, if at least one load is in seq, we need to
2030 emit the bitwise op right before the store. If there
2031 are two loads and they are emitted somewhere else, it would
2032 be better to emit the bitwise op as early as possible;
2033 we don't track where that would be possible right now
2034 though. */
2035 else
2036 gimple_seq_add_stmt_without_update (&seq, stmt);
2037 src = gimple_assign_lhs (stmt);
2038 tree xor_mask;
2039 enum tree_code inv_op;
2040 inv_op = invert_op (split_store, 2, int_type, xor_mask);
2041 if (inv_op != NOP_EXPR)
2043 stmt = gimple_build_assign (make_ssa_name (int_type),
2044 inv_op, src, xor_mask);
2045 gimple_set_location (stmt, bit_loc);
2046 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
2047 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
2048 else
2049 gimple_seq_add_stmt_without_update (&seq, stmt);
2050 src = gimple_assign_lhs (stmt);
2052 break;
2053 default:
2054 src = ops[0];
2055 break;
2058 if (!integer_zerop (mask))
2060 tree tem = make_ssa_name (int_type);
2061 tree load_src = unshare_expr (dest);
2062 /* The load might load some or all bits uninitialized;
2063 avoid -W*uninitialized warnings in that case.
2064 As an optimization, if all the bits are provably
2065 uninitialized (no stores at all yet, or the previous
2066 store was a CLOBBER), we could optimize away the load
2067 and replace it e.g. with 0. */
2068 TREE_NO_WARNING (load_src) = 1;
2069 stmt = gimple_build_assign (tem, load_src);
2070 gimple_set_location (stmt, loc);
2071 gimple_set_vuse (stmt, new_vuse);
2072 gimple_seq_add_stmt_without_update (&seq, stmt);
2074 /* FIXME: If there is a single chunk of zero bits in mask,
2075 perhaps use BIT_INSERT_EXPR instead? */
2076 stmt = gimple_build_assign (make_ssa_name (int_type),
2077 BIT_AND_EXPR, tem, mask);
2078 gimple_set_location (stmt, loc);
2079 gimple_seq_add_stmt_without_update (&seq, stmt);
2080 tem = gimple_assign_lhs (stmt);
2082 if (TREE_CODE (src) == INTEGER_CST)
2083 src = wide_int_to_tree (int_type,
2084 wi::bit_and_not (wi::to_wide (src),
2085 wi::to_wide (mask)));
2086 else
2088 tree nmask
2089 = wide_int_to_tree (int_type,
2090 wi::bit_not (wi::to_wide (mask)));
2091 stmt = gimple_build_assign (make_ssa_name (int_type),
2092 BIT_AND_EXPR, src, nmask);
2093 gimple_set_location (stmt, loc);
2094 gimple_seq_add_stmt_without_update (&seq, stmt);
2095 src = gimple_assign_lhs (stmt);
2097 stmt = gimple_build_assign (make_ssa_name (int_type),
2098 BIT_IOR_EXPR, tem, src);
2099 gimple_set_location (stmt, loc);
2100 gimple_seq_add_stmt_without_update (&seq, stmt);
2101 src = gimple_assign_lhs (stmt);
2105 stmt = gimple_build_assign (dest, src);
2106 gimple_set_location (stmt, loc);
2107 gimple_set_vuse (stmt, new_vuse);
2108 gimple_seq_add_stmt_without_update (&seq, stmt);
2110 tree new_vdef;
2111 if (i < split_stores.length () - 1)
2112 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
2113 else
2114 new_vdef = last_vdef;
2116 gimple_set_vdef (stmt, new_vdef);
2117 SSA_NAME_DEF_STMT (new_vdef) = stmt;
2118 new_vuse = new_vdef;
2121 FOR_EACH_VEC_ELT (split_stores, i, split_store)
2122 delete split_store;
2124 gcc_assert (seq);
2125 if (dump_file)
2127 fprintf (dump_file,
2128 "New sequence of %u stmts to replace old one of %u stmts\n",
2129 split_stores.length (), orig_num_stmts);
2130 if (dump_flags & TDF_DETAILS)
2131 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
2133 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
2134 for (int j = 0; j < 2; ++j)
2135 if (load_seq[j])
2136 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
2138 return true;
2141 /* Process the merged_store_group objects created in the coalescing phase.
2142 The stores are all against the base object BASE.
2143 Try to output the widened stores and delete the original statements if
2144 successful. Return true iff any changes were made. */
2146 bool
2147 imm_store_chain_info::output_merged_stores ()
2149 unsigned int i;
2150 merged_store_group *merged_store;
2151 bool ret = false;
2152 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
2154 if (output_merged_store (merged_store))
2156 unsigned int j;
2157 store_immediate_info *store;
2158 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
2160 gimple *stmt = store->stmt;
2161 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2162 gsi_remove (&gsi, true);
2163 if (stmt != merged_store->last_stmt)
2165 unlink_stmt_vdef (stmt);
2166 release_defs (stmt);
2169 ret = true;
2172 if (ret && dump_file)
2173 fprintf (dump_file, "Merging successful!\n");
2175 return ret;
2178 /* Coalesce the store_immediate_info objects recorded against the base object
2179 BASE in the first phase and output them.
2180 Delete the allocated structures.
2181 Return true if any changes were made. */
2183 bool
2184 imm_store_chain_info::terminate_and_process_chain ()
2186 /* Process store chain. */
2187 bool ret = false;
2188 if (m_store_info.length () > 1)
2190 ret = coalesce_immediate_stores ();
2191 if (ret)
2192 ret = output_merged_stores ();
2195 /* Delete all the entries we allocated ourselves. */
2196 store_immediate_info *info;
2197 unsigned int i;
2198 FOR_EACH_VEC_ELT (m_store_info, i, info)
2199 delete info;
2201 merged_store_group *merged_info;
2202 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
2203 delete merged_info;
2205 return ret;
2208 /* Return true iff LHS is a destination potentially interesting for
2209 store merging. In practice these are the codes that get_inner_reference
2210 can process. */
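/* Informally, this accepts destinations such as s.f, a[i], *p or a
   bit-field member, while a store whose LHS is a plain declaration is
   not considered.  */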
2212 static bool
2213 lhs_valid_for_store_merging_p (tree lhs)
2215 tree_code code = TREE_CODE (lhs);
2217 if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
2218 || code == COMPONENT_REF || code == BIT_FIELD_REF)
2219 return true;
2221 return false;
2224 /* Return true if the tree RHS is a constant we want to consider
2225 during store merging. In practice accept all codes that
2226 native_encode_expr accepts. */
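/* Informally: constants such as INTEGER_CSTs or REAL_CSTs that
   native_encode_expr can turn into a byte image are accepted; anything
   it cannot encode is rejected.  */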
2228 static bool
2229 rhs_valid_for_store_merging_p (tree rhs)
2231 return native_encode_expr (rhs, NULL,
2232 GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0;
2235 /* If MEM is a memory reference usable for store merging (either as
2236 store destination or for loads), return the non-NULL base_addr
2237 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
2238 Otherwise return NULL_TREE; *PBITSIZE is still set even in that
2239 case. */
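/* An illustrative example (with assumed numbers): for a 32-bit access
   MEM[(int *)p_1 + 4B] this returns p_1 as the base address with
   *PBITSIZE = 32 and *PBITPOS = 32, i.e. the byte offset is folded into
   the bit position, and the bit region is rounded out to the covering
   byte boundaries, here bits [32, 64).  */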
2241 static tree
2242 mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
2243 unsigned HOST_WIDE_INT *pbitpos,
2244 unsigned HOST_WIDE_INT *pbitregion_start,
2245 unsigned HOST_WIDE_INT *pbitregion_end)
2247 HOST_WIDE_INT bitsize;
2248 HOST_WIDE_INT bitpos;
2249 unsigned HOST_WIDE_INT bitregion_start = 0;
2250 unsigned HOST_WIDE_INT bitregion_end = 0;
2251 machine_mode mode;
2252 int unsignedp = 0, reversep = 0, volatilep = 0;
2253 tree offset;
2254 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
2255 &unsignedp, &reversep, &volatilep);
2256 *pbitsize = bitsize;
2257 if (bitsize == 0)
2258 return NULL_TREE;
2260 if (TREE_CODE (mem) == COMPONENT_REF
2261 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
2263 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
2264 if (bitregion_end)
2265 ++bitregion_end;
2268 if (reversep)
2269 return NULL_TREE;
2271 /* We do not want to rewrite TARGET_MEM_REFs. */
2272 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
2273 return NULL_TREE;
2274 /* In some cases get_inner_reference may return a
2275 MEM_REF [ptr + byteoffset]. For the purposes of this pass
2276 canonicalize the base_addr to MEM_REF [ptr] and take
2277 byteoffset into account in the bitpos. This occurs in
2278 PR 23684 and this way we can catch more chains. */
2279 else if (TREE_CODE (base_addr) == MEM_REF)
2281 offset_int bit_off, byte_off = mem_ref_offset (base_addr);
2282 bit_off = byte_off << LOG2_BITS_PER_UNIT;
2283 bit_off += bitpos;
2284 if (!wi::neg_p (bit_off) && wi::fits_shwi_p (bit_off))
2286 bitpos = bit_off.to_shwi ();
2287 if (bitregion_end)
2289 bit_off = byte_off << LOG2_BITS_PER_UNIT;
2290 bit_off += bitregion_start;
2291 if (wi::fits_uhwi_p (bit_off))
2293 bitregion_start = bit_off.to_uhwi ();
2294 bit_off = byte_off << LOG2_BITS_PER_UNIT;
2295 bit_off += bitregion_end;
2296 if (wi::fits_uhwi_p (bit_off))
2297 bitregion_end = bit_off.to_uhwi ();
2298 else
2299 bitregion_end = 0;
2301 else
2302 bitregion_end = 0;
2305 else
2306 return NULL_TREE;
2307 base_addr = TREE_OPERAND (base_addr, 0);
2309 /* get_inner_reference returns the base object; get at its
2310 address now. */
2311 else
2313 if (bitpos < 0)
2314 return NULL_TREE;
2315 base_addr = build_fold_addr_expr (base_addr);
2318 if (!bitregion_end)
2320 bitregion_start = ROUND_DOWN (bitpos, BITS_PER_UNIT);
2321 bitregion_end = ROUND_UP (bitpos + bitsize, BITS_PER_UNIT);
2324 if (offset != NULL_TREE)
2326 /* If the access has a variable offset then a base decl has to be
2327 address-taken to be able to emit pointer-based stores to it.
2328 ??? We might be able to get away with re-using the original
2329 base up to the first variable part and then wrapping that inside
2330 a BIT_FIELD_REF. */
2331 tree base = get_base_address (base_addr);
2332 if (! base
2333 || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
2334 return NULL_TREE;
2336 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
2337 base_addr, offset);
2340 *pbitsize = bitsize;
2341 *pbitpos = bitpos;
2342 *pbitregion_start = bitregion_start;
2343 *pbitregion_end = bitregion_end;
2344 return base_addr;
2347 /* Return true if STMT is a load that can be used for store merging.
2348 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
2349 BITREGION_END are properties of the corresponding store. */
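/* A sketch of the kind of statement accepted here (illustrative only):
     _1 = MEM[(char *)q_2 + 3B];
     MEM[(char *)p_4 + 3B] = _1;
   For such a store, *OP records the load's base address, bit size,
   bit position and bit region so that several such copies can later be
   merged into one wider load feeding one wider store.  A load wrapped
   in a single BIT_NOT_EXPR is also accepted, with the inversion noted
   in OP->bit_not_p.  */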
2351 static bool
2352 handled_load (gimple *stmt, store_operand_info *op,
2353 unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos,
2354 unsigned HOST_WIDE_INT bitregion_start,
2355 unsigned HOST_WIDE_INT bitregion_end)
2357 if (!is_gimple_assign (stmt))
2358 return false;
2359 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
2361 tree rhs1 = gimple_assign_rhs1 (stmt);
2362 if (TREE_CODE (rhs1) == SSA_NAME
2363 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
2364 bitregion_start, bitregion_end))
2366 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
2367 been optimized earlier, but if allowed here, would confuse the
2368 multiple-uses counting. */
2369 if (op->bit_not_p)
2370 return false;
2371 op->bit_not_p = !op->bit_not_p;
2372 return true;
2374 return false;
2376 if (gimple_vuse (stmt)
2377 && gimple_assign_load_p (stmt)
2378 && !stmt_can_throw_internal (stmt)
2379 && !gimple_has_volatile_ops (stmt))
2381 tree mem = gimple_assign_rhs1 (stmt);
2382 op->base_addr
2383 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
2384 &op->bitregion_start,
2385 &op->bitregion_end);
2386 if (op->base_addr != NULL_TREE
2387 && op->bitsize == bitsize
2388 && ((op->bitpos - bitpos) % BITS_PER_UNIT) == 0
2389 && op->bitpos - op->bitregion_start >= bitpos - bitregion_start
2390 && op->bitregion_end - op->bitpos >= bitregion_end - bitpos)
2392 op->stmt = stmt;
2393 op->val = mem;
2394 op->bit_not_p = false;
2395 return true;
2398 return false;
2401 /* Record the store STMT for store merging optimization if it can be
2402 optimized. */
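/* The right-hand sides recognized here look roughly like the following
   (illustrative GIMPLE, not taken from a testcase):
     MEM[(char *)p_1] = 0x12;  // constant               -> rhs_code INTEGER_CST
     MEM[(char *)p_1] = _2;    // _2 = MEM[(char *)q_3]  -> rhs_code MEM_REF
     MEM[(char *)p_1] = _4;    // _4 = _2 | 0x12         -> BIT_IOR_EXPR etc.
     MEM[(char *)p_1] = _5;    // _5 = ~_2               -> recorded via bit_not_p
   Anything else marks the store as invalid, which merely terminates any
   chains the statement might alias.  */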
2404 void
2405 pass_store_merging::process_store (gimple *stmt)
2407 tree lhs = gimple_assign_lhs (stmt);
2408 tree rhs = gimple_assign_rhs1 (stmt);
2409 unsigned HOST_WIDE_INT bitsize, bitpos;
2410 unsigned HOST_WIDE_INT bitregion_start;
2411 unsigned HOST_WIDE_INT bitregion_end;
2412 tree base_addr
2413 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
2414 &bitregion_start, &bitregion_end);
2415 if (bitsize == 0)
2416 return;
2418 bool invalid = (base_addr == NULL_TREE
2419 || ((bitsize > MAX_BITSIZE_MODE_ANY_INT)
2420 && (TREE_CODE (rhs) != INTEGER_CST)));
2421 enum tree_code rhs_code = ERROR_MARK;
2422 bool bit_not_p = false;
2423 store_operand_info ops[2];
2424 if (invalid)
2426 else if (rhs_valid_for_store_merging_p (rhs))
2428 rhs_code = INTEGER_CST;
2429 ops[0].val = rhs;
2431 else if (TREE_CODE (rhs) != SSA_NAME)
2432 invalid = true;
2433 else
2435 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
2436 if (!is_gimple_assign (def_stmt))
2437 invalid = true;
2438 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
2439 bitregion_start, bitregion_end))
2440 rhs_code = MEM_REF;
2441 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
2443 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2444 if (TREE_CODE (rhs1) == SSA_NAME
2445 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
2447 bit_not_p = true;
2448 def_stmt = SSA_NAME_DEF_STMT (rhs1);
2451 if (rhs_code == ERROR_MARK && !invalid)
2452 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
2454 case BIT_AND_EXPR:
2455 case BIT_IOR_EXPR:
2456 case BIT_XOR_EXPR:
2457 tree rhs1, rhs2;
2458 rhs1 = gimple_assign_rhs1 (def_stmt);
2459 rhs2 = gimple_assign_rhs2 (def_stmt);
2460 invalid = true;
2461 if (TREE_CODE (rhs1) != SSA_NAME)
2462 break;
2463 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
2464 if (!is_gimple_assign (def_stmt1)
2465 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
2466 bitregion_start, bitregion_end))
2467 break;
2468 if (rhs_valid_for_store_merging_p (rhs2))
2469 ops[1].val = rhs2;
2470 else if (TREE_CODE (rhs2) != SSA_NAME)
2471 break;
2472 else
2474 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
2475 if (!is_gimple_assign (def_stmt2))
2476 break;
2477 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
2478 bitregion_start, bitregion_end))
2479 break;
2481 invalid = false;
2482 break;
2483 default:
2484 invalid = true;
2485 break;
2489 if (invalid)
2491 terminate_all_aliasing_chains (NULL, stmt);
2492 return;
2495 struct imm_store_chain_info **chain_info = NULL;
2496 if (base_addr)
2497 chain_info = m_stores.get (base_addr);
2499 store_immediate_info *info;
2500 if (chain_info)
2502 unsigned int ord = (*chain_info)->m_store_info.length ();
2503 info = new store_immediate_info (bitsize, bitpos, bitregion_start,
2504 bitregion_end, stmt, ord, rhs_code,
2505 bit_not_p, ops[0], ops[1]);
2506 if (dump_file && (dump_flags & TDF_DETAILS))
2508 fprintf (dump_file, "Recording immediate store from stmt:\n");
2509 print_gimple_stmt (dump_file, stmt, 0);
2511 (*chain_info)->m_store_info.safe_push (info);
2512 terminate_all_aliasing_chains (chain_info, stmt);
2513 /* If we reach the limit of stores to merge in a chain, terminate and
2514 process the chain now. */
2515 if ((*chain_info)->m_store_info.length ()
2516 == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
2518 if (dump_file && (dump_flags & TDF_DETAILS))
2519 fprintf (dump_file,
2520 "Reached maximum number of statements to merge:\n");
2521 terminate_and_release_chain (*chain_info);
2523 return;
2526 /* Does the store alias any existing chain? */
2527 terminate_all_aliasing_chains (NULL, stmt);
2528 /* Start a new chain. */
2529 struct imm_store_chain_info *new_chain
2530 = new imm_store_chain_info (m_stores_head, base_addr);
2531 info = new store_immediate_info (bitsize, bitpos, bitregion_start,
2532 bitregion_end, stmt, 0, rhs_code,
2533 bit_not_p, ops[0], ops[1]);
2534 new_chain->m_store_info.safe_push (info);
2535 m_stores.put (base_addr, new_chain);
2536 if (dump_file && (dump_flags & TDF_DETAILS))
2538 fprintf (dump_file, "Starting new chain with statement:\n");
2539 print_gimple_stmt (dump_file, stmt, 0);
2540 fprintf (dump_file, "The base object is:\n");
2541 print_generic_expr (dump_file, base_addr);
2542 fprintf (dump_file, "\n");
2546 /* Entry point for the pass. Go over each basic block recording chains of
2547 immediate stores. Upon encountering a terminating statement (as defined
2548 by stmt_terminates_chain_p) process the recorded stores and emit the widened
2549 variants. */
2551 unsigned int
2552 pass_store_merging::execute (function *fun)
2554 basic_block bb;
2555 hash_set<gimple *> orig_stmts;
2557 FOR_EACH_BB_FN (bb, fun)
2559 gimple_stmt_iterator gsi;
2560 unsigned HOST_WIDE_INT num_statements = 0;
2561 /* Skip the block early if it contains fewer than two non-debug
2562 statements, since in that case there is nothing that could be
2563 merged. */
2564 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2566 if (is_gimple_debug (gsi_stmt (gsi)))
2567 continue;
2569 if (++num_statements >= 2)
2570 break;
2573 if (num_statements < 2)
2574 continue;
2576 if (dump_file && (dump_flags & TDF_DETAILS))
2577 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
2579 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2581 gimple *stmt = gsi_stmt (gsi);
2583 if (is_gimple_debug (stmt))
2584 continue;
2586 if (gimple_has_volatile_ops (stmt))
2588 /* Terminate all chains. */
2589 if (dump_file && (dump_flags & TDF_DETAILS))
2590 fprintf (dump_file, "Volatile access terminates "
2591 "all chains\n");
2592 terminate_and_process_all_chains ();
2593 continue;
2596 if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
2597 && !stmt_can_throw_internal (stmt)
2598 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
2599 process_store (stmt);
2600 else
2601 terminate_all_aliasing_chains (NULL, stmt);
2603 terminate_and_process_all_chains ();
2605 return 0;
2608 } // anon namespace
2610 /* Construct and return a store merging pass object. */
2612 gimple_opt_pass *
2613 make_pass_store_merging (gcc::context *ctxt)
2615 return new pass_store_merging (ctxt);
2618 #if CHECKING_P
2620 namespace selftest {
2622 /* Selftests for store merging helpers. */
2624 /* Assert that all elements of the byte arrays X and Y, both of length N,
2625 are equal. */
2627 static void
2628 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
2630 for (unsigned int i = 0; i < n; i++)
2632 if (x[i] != y[i])
2634 fprintf (stderr, "Arrays do not match. X:\n");
2635 dump_char_array (stderr, x, n);
2636 fprintf (stderr, "Y:\n");
2637 dump_char_array (stderr, y, n);
2639 ASSERT_EQ (x[i], y[i]);
2643 /* Test that shift_bytes_in_array carries bits across byte
2644 boundaries correctly. */
2646 static void
2647 verify_shift_bytes_in_array (void)
2649 /* byte 1 | byte 0
2650 00011111 | 11100000. */
2651 unsigned char orig[2] = { 0xe0, 0x1f };
2652 unsigned char in[2];
2653 memcpy (in, orig, sizeof orig);
2655 unsigned char expected[2] = { 0x80, 0x7f };
2656 shift_bytes_in_array (in, sizeof (in), 2);
2657 verify_array_eq (in, expected, sizeof (in));
2659 memcpy (in, orig, sizeof orig);
2660 memcpy (expected, orig, sizeof orig);
2661 /* Check that shifting by zero doesn't change anything. */
2662 shift_bytes_in_array (in, sizeof (in), 0);
2663 verify_array_eq (in, expected, sizeof (in));
2667 /* Test that shift_bytes_in_array_right carries bits across byte
2668 boundaries correctly. */
2670 static void
2671 verify_shift_bytes_in_array_right (void)
2673 /* byte 0 | byte 1
2674 00011111 | 11100000. */
2675 unsigned char orig[2] = { 0x1f, 0xe0};
2676 unsigned char in[2];
2677 memcpy (in, orig, sizeof orig);
2678 unsigned char expected[2] = { 0x07, 0xf8};
2679 shift_bytes_in_array_right (in, sizeof (in), 2);
2680 verify_array_eq (in, expected, sizeof (in));
2682 memcpy (in, orig, sizeof orig);
2683 memcpy (expected, orig, sizeof orig);
2684 /* Check that shifting by zero doesn't change anything. */
2685 shift_bytes_in_array_right (in, sizeof (in), 0);
2686 verify_array_eq (in, expected, sizeof (in));
2689 /* Test that clear_bit_region clears exactly the bits asked for and
2690 nothing more. */
2692 static void
2693 verify_clear_bit_region (void)
2695 /* Start with all bits set and test clearing various patterns in them. */
2696 unsigned char orig[3] = { 0xff, 0xff, 0xff};
2697 unsigned char in[3];
2698 unsigned char expected[3];
2699 memcpy (in, orig, sizeof in);
2701 /* Check zeroing out all the bits. */
2702 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
2703 expected[0] = expected[1] = expected[2] = 0;
2704 verify_array_eq (in, expected, sizeof in);
2706 memcpy (in, orig, sizeof in);
2707 /* Leave the first and last bits intact. */
2708 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
2709 expected[0] = 0x1;
2710 expected[1] = 0;
2711 expected[2] = 0x80;
2712 verify_array_eq (in, expected, sizeof in);
2715 /* Test that clear_bit_region_be clears exactly the bits asked for and
2716 nothing more. */
2718 static void
2719 verify_clear_bit_region_be (void)
2721 /* Start with all bits set and test clearing various patterns in them. */
2722 unsigned char orig[3] = { 0xff, 0xff, 0xff};
2723 unsigned char in[3];
2724 unsigned char expected[3];
2725 memcpy (in, orig, sizeof in);
2727 /* Check zeroing out all the bits. */
2728 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
2729 expected[0] = expected[1] = expected[2] = 0;
2730 verify_array_eq (in, expected, sizeof in);
2732 memcpy (in, orig, sizeof in);
2733 /* Leave the first and last bits intact. */
2734 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
2735 expected[0] = 0x80;
2736 expected[1] = 0;
2737 expected[2] = 0x1;
2738 verify_array_eq (in, expected, sizeof in);
2742 /* Run all of the selftests within this file. */
2744 void
2745 store_merging_c_tests (void)
2747 verify_shift_bytes_in_array ();
2748 verify_shift_bytes_in_array_right ();
2749 verify_clear_bit_region ();
2750 verify_clear_bit_region_be ();
2753 } // namespace selftest
2754 #endif /* CHECKING_P. */