1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
25 For example, if we have a sequence performing four byte stores to
26 consecutive memory locations:
27 [p ] := imm1;
28 [p + 1B] := imm2;
29 [p + 2B] := imm3;
30 [p + 3B] := imm4;
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by a single 4-byte store.
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
56 The algorithm is applied to each basic block in three phases:
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be either a constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in the store_operand_info struct.
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous; only their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and the
81 following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
84 For example, given the stores:
85 [p ] := 0;
86 [p + 1B] := 1;
87 [p + 3B] := 0;
88 [p + 4B] := 1;
89 [p + 5B] := 0;
90 [p + 6B] := 0;
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that corresponds to them. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
106 [p ] := 0x1234;
107 [p + 2B] := 0x5678;
108 [p + 4B] := 0xab;
109 [p + 5B] := 0xcd;
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
112 p |LE|BE|
113 ---------
114 0 |34|12|
115 1 |12|34|
116 2 |78|56|
117 3 |56|78|
118 4 |ab|ab|
119 5 |cd|cd|
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
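/* As a source-level illustration (a minimal sketch; the function name and
   constants are hypothetical), the first example above corresponds to code
   such as:

     void
     set_magic (unsigned char *p)
     {
       p[0] = 0x01;
       p[1] = 0x02;
       p[2] = 0x03;
       p[3] = 0x04;
     }

   which the pass can rewrite into a single 32-bit store of the merged
   constant (0x04030201 when the target is little-endian), provided such an
   access is permitted at the known alignment.  */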
141 #include "config.h"
142 #include "system.h"
143 #include "coretypes.h"
144 #include "backend.h"
145 #include "tree.h"
146 #include "gimple.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
150 #include "ssa.h"
151 #include "gimple-pretty-print.h"
152 #include "alias.h"
153 #include "fold-const.h"
154 #include "print-tree.h"
155 #include "tree-hash-traits.h"
156 #include "gimple-iterator.h"
157 #include "gimplify.h"
158 #include "gimple-fold.h"
159 #include "stor-layout.h"
160 #include "timevar.h"
161 #include "cfganal.h"
162 #include "cfgcleanup.h"
163 #include "tree-cfg.h"
164 #include "except.h"
165 #include "tree-eh.h"
166 #include "target.h"
167 #include "gimplify-me.h"
168 #include "rtl.h"
169 #include "expr.h" /* For get_bit_range. */
170 #include "optabs-tree.h"
171 #include "dbgcnt.h"
172 #include "selftest.h"
174 /* The maximum size (in bits) of the stores this pass should generate. */
175 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
176 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
178 /* Limit to bound the number of aliasing checks for loads with the same
179 vuse as the corresponding store. */
180 #define MAX_STORE_ALIAS_CHECKS 64
182 namespace {
184 struct bswap_stat
186 /* Number of hand-written 16-bit nop / bswaps found. */
187 int found_16bit;
189 /* Number of hand-written 32-bit nop / bswaps found. */
190 int found_32bit;
192 /* Number of hand-written 64-bit nop / bswaps found. */
193 int found_64bit;
194 } nop_stats, bswap_stats;
196 /* A symbolic number structure is used to detect byte permutation and selection
197 patterns of a source. To achieve that, its field N contains an artificial
198 number consisting of BITS_PER_MARKER sized markers tracking where each
199 byte comes from in the source:
201 0 - target byte has the value 0
202 FF - target byte has an unknown value (eg. due to sign extension)
203 1..size - marker value is the byte index in the source plus one (1 for the lsb).
205 To detect permutations on memory sources (arrays and structures), a symbolic
206 number is also associated:
207 - a base address BASE_ADDR and an OFFSET giving the address of the source;
208 - a range which gives the difference between the highest and lowest accessed
209 memory location to make such a symbolic number;
210 - the address SRC of the source element of lowest address as a convenience
211 to easily get BASE_ADDR + offset + lowest bytepos;
212 - the number of expressions N_OPS bitwise ORed together to represent the
213 approximate cost of the computation.
215 Note 1: the range is different from size as size reflects the size of the
216 type of the current expression. For instance, for an array char a[],
217 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
218 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
219 time a range of 1.
221 Note 2: for non-memory sources, range holds the same value as size.
223 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
225 struct symbolic_number {
226 uint64_t n;
227 tree type;
228 tree base_addr;
229 tree offset;
230 poly_int64_pod bytepos;
231 tree src;
232 tree alias_set;
233 tree vuse;
234 unsigned HOST_WIDE_INT range;
235 int n_ops;
238 #define BITS_PER_MARKER 8
239 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
240 #define MARKER_BYTE_UNKNOWN MARKER_MASK
241 #define HEAD_MARKER(n, size) \
242 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
244 /* The number which the find_bswap_or_nop_1 result should match in
245 order to have a nop. The number is masked according to the size of
246 the symbolic number before using it. */
247 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
248 (uint64_t)0x08070605 << 32 | 0x04030201)
250 /* The number which the find_bswap_or_nop_1 result should match in
251 order to have a byte swap. The number is masked according to the
252 size of the symbolic number before using it. */
253 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
254 (uint64_t)0x01020304 << 32 | 0x05060708)
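/* As an illustration, after find_bswap_or_nop_finalize adjusts these values
   for a 2-byte symbolic number, the nop pattern to match becomes 0x0201 and
   the byte-swap pattern becomes 0x0102.  */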
256 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
257 number N. Return false if the requested operation is not permitted
258 on a symbolic number. */
260 inline bool
261 do_shift_rotate (enum tree_code code,
262 struct symbolic_number *n,
263 int count)
265 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
266 unsigned head_marker;
268 if (count < 0
269 || count >= TYPE_PRECISION (n->type)
270 || count % BITS_PER_UNIT != 0)
271 return false;
272 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
274 /* Zero out the extra bits of N in order to avoid them being shifted
275 into the significant bits. */
276 if (size < 64 / BITS_PER_MARKER)
277 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
279 switch (code)
281 case LSHIFT_EXPR:
282 n->n <<= count;
283 break;
284 case RSHIFT_EXPR:
285 head_marker = HEAD_MARKER (n->n, size);
286 n->n >>= count;
287 /* Arithmetic shift of signed type: result is dependent on the value. */
288 if (!TYPE_UNSIGNED (n->type) && head_marker)
289 for (i = 0; i < count / BITS_PER_MARKER; i++)
290 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
291 << ((size - 1 - i) * BITS_PER_MARKER);
292 break;
293 case LROTATE_EXPR:
294 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
295 break;
296 case RROTATE_EXPR:
297 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
298 break;
299 default:
300 return false;
302 /* Zero unused bits for size. */
303 if (size < 64 / BITS_PER_MARKER)
304 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 return true;
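/* Worked example (illustrative): for a 32-bit symbolic number with markers
   0x04030201, an 8-bit RSHIFT_EXPR yields 0x00040302 if the type is unsigned
   but 0xff040302 if it is signed and the head marker is set, since the
   shifted-in byte may then be a sign-extension copy and is thus unknown.  */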
308 /* Perform sanity checking for the symbolic number N and the gimple
309 statement STMT. */
311 inline bool
312 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
314 tree lhs_type;
316 lhs_type = gimple_expr_type (stmt);
318 if (TREE_CODE (lhs_type) != INTEGER_TYPE
319 && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
320 return false;
322 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
323 return false;
325 return true;
328 /* Initialize the symbolic number N for the bswap pass from the base element
329 SRC manipulated by the bitwise OR expression. */
331 bool
332 init_symbolic_number (struct symbolic_number *n, tree src)
334 int size;
336 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
337 return false;
339 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
340 n->src = src;
342 /* Set up the symbolic number N by setting each byte to a value between 1 and
343 the byte size of rhs1. The highest order byte is set to n->size and the
344 lowest order byte to 1. */
345 n->type = TREE_TYPE (src);
346 size = TYPE_PRECISION (n->type);
347 if (size % BITS_PER_UNIT != 0)
348 return false;
349 size /= BITS_PER_UNIT;
350 if (size > 64 / BITS_PER_MARKER)
351 return false;
352 n->range = size;
353 n->n = CMPNOP;
354 n->n_ops = 1;
356 if (size < 64 / BITS_PER_MARKER)
357 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
359 return true;
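/* For instance, initializing from a 4-byte unsigned source sets n->n to
   CMPNOP masked down to 4 markers, i.e. 0x04030201: byte 0 (the lsb) gets
   marker 1 and byte 3 gets marker 4.  */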
362 /* Check if STMT might be a byte swap or a nop from a memory source and return
363 the answer. If so, REF is that memory source and the base of the memory area
364 accessed and the offset of the access from that base are recorded in N. */
366 bool
367 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
371 poly_int64 bitsize, bitpos, bytepos;
372 machine_mode mode;
373 int unsignedp, reversep, volatilep;
374 tree offset, base_addr;
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
378 return false;
380 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
381 return false;
383 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
384 &unsignedp, &reversep, &volatilep);
386 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
387 /* Do not rewrite TARGET_MEM_REF. */
388 return false;
389 else if (TREE_CODE (base_addr) == MEM_REF)
391 poly_offset_int bit_offset = 0;
392 tree off = TREE_OPERAND (base_addr, 1);
394 if (!integer_zerop (off))
396 poly_offset_int boff = mem_ref_offset (base_addr);
397 boff <<= LOG2_BITS_PER_UNIT;
398 bit_offset += boff;
401 base_addr = TREE_OPERAND (base_addr, 0);
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
404 if (maybe_lt (bit_offset, 0))
406 tree byte_offset = wide_int_to_tree
407 (sizetype, bits_to_bytes_round_down (bit_offset));
408 bit_offset = num_trailing_bits (bit_offset);
409 if (offset)
410 offset = size_binop (PLUS_EXPR, offset, byte_offset);
411 else
412 offset = byte_offset;
415 bitpos += bit_offset.force_shwi ();
417 else
418 base_addr = build_fold_addr_expr (base_addr);
420 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
421 return false;
422 if (!multiple_p (bitsize, BITS_PER_UNIT))
423 return false;
424 if (reversep)
425 return false;
427 if (!init_symbolic_number (n, ref))
428 return false;
429 n->base_addr = base_addr;
430 n->offset = offset;
431 n->bytepos = bytepos;
432 n->alias_set = reference_alias_ptr_type (ref);
433 n->vuse = gimple_vuse (stmt);
434 return true;
437 /* Compute the symbolic number N representing the result of a bitwise OR on 2
438 symbolic numbers N1 and N2 whose source statements are respectively
439 SOURCE_STMT1 and SOURCE_STMT2. */
441 gimple *
442 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
443 gimple *source_stmt2, struct symbolic_number *n2,
444 struct symbolic_number *n)
446 int i, size;
447 uint64_t mask;
448 gimple *source_stmt;
449 struct symbolic_number *n_start;
451 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
452 if (TREE_CODE (rhs1) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
454 rhs1 = TREE_OPERAND (rhs1, 0);
455 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
456 if (TREE_CODE (rhs2) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
458 rhs2 = TREE_OPERAND (rhs2, 0);
460 /* Sources are different, cancel bswap if they are not memory locations with
461 the same base (array, structure, ...). */
462 if (rhs1 != rhs2)
464 uint64_t inc;
465 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
466 struct symbolic_number *toinc_n_ptr, *n_end;
467 basic_block bb1, bb2;
469 if (!n1->base_addr || !n2->base_addr
470 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
471 return NULL;
473 if (!n1->offset != !n2->offset
474 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
475 return NULL;
477 start1 = 0;
478 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
479 return NULL;
481 if (start1 < start2)
483 n_start = n1;
484 start_sub = start2 - start1;
486 else
488 n_start = n2;
489 start_sub = start1 - start2;
492 bb1 = gimple_bb (source_stmt1);
493 bb2 = gimple_bb (source_stmt2);
494 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
495 source_stmt = source_stmt1;
496 else
497 source_stmt = source_stmt2;
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1 = start1 + (n1->range - 1);
502 end2 = start2 + (n2->range - 1);
503 if (end1 < end2)
505 end = end2;
506 end_sub = end2 - end1;
508 else
510 end = end1;
511 end_sub = end1 - end2;
513 n_end = (end2 > end1) ? n2 : n1;
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN)
517 toinc_n_ptr = (n_end == n1) ? n2 : n1;
518 else
519 toinc_n_ptr = (n_start == n1) ? n2 : n1;
521 n->range = end - MIN (start1, start2) + 1;
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n->range > 64 / BITS_PER_MARKER)
526 return NULL;
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
531 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
532 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
534 unsigned marker
535 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
536 if (marker && marker != MARKER_BYTE_UNKNOWN)
537 toinc_n_ptr->n += inc;
540 else
542 n->range = n1->range;
543 n_start = n1;
544 source_stmt = source_stmt1;
547 if (!n1->alias_set
548 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
549 n->alias_set = n1->alias_set;
550 else
551 n->alias_set = ptr_type_node;
552 n->vuse = n_start->vuse;
553 n->base_addr = n_start->base_addr;
554 n->offset = n_start->offset;
555 n->src = n_start->src;
556 n->bytepos = n_start->bytepos;
557 n->type = n_start->type;
558 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
560 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
562 uint64_t masked1, masked2;
564 masked1 = n1->n & mask;
565 masked2 = n2->n & mask;
566 if (masked1 && masked2 && masked1 != masked2)
567 return NULL;
569 n->n = n1->n | n2->n;
570 n->n_ops = n1->n_ops + n2->n_ops;
572 return source_stmt;
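/* Worked example (illustrative, little-endian): for a[0] | (a[1] << 8) with
   a an array of unsigned char, the incoming symbolic numbers are
   n1->n == 0x01 (bytepos 0) and n2->n == 0x0100 (bytepos 1).  The markers of
   n2 are re-indexed by the byte distance, giving 0x0200, and the OR of both
   yields n->n == 0x0201 with a range of 2, i.e. a 16-bit load in target
   endianness.  */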
575 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
576 the operation given by the rhs of STMT on the result. If the operation
577 could successfully be executed, the function returns a gimple stmt whose
578 rhs's first tree is the expression of the source operand, and NULL
579 otherwise. */
581 gimple *
582 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
584 enum tree_code code;
585 tree rhs1, rhs2 = NULL;
586 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
587 enum gimple_rhs_class rhs_class;
589 if (!limit || !is_gimple_assign (stmt))
590 return NULL;
592 rhs1 = gimple_assign_rhs1 (stmt);
594 if (find_bswap_or_nop_load (stmt, rhs1, n))
595 return stmt;
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
601 if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
602 || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
603 return NULL;
605 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
606 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
607 if (bitpos % BITS_PER_UNIT == 0
608 && bitsize % BITS_PER_UNIT == 0
609 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
611 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
612 if (BYTES_BIG_ENDIAN)
613 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
615 /* Shift. */
616 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
617 return NULL;
619 /* Mask. */
620 uint64_t mask = 0;
621 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
622 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
623 i++, tmp <<= BITS_PER_UNIT)
624 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
625 n->n &= mask;
627 /* Convert. */
628 n->type = TREE_TYPE (rhs1);
629 if (!n->base_addr)
630 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
632 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
635 return NULL;
638 if (TREE_CODE (rhs1) != SSA_NAME)
639 return NULL;
641 code = gimple_assign_rhs_code (stmt);
642 rhs_class = gimple_assign_rhs_class (stmt);
643 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
645 if (rhs_class == GIMPLE_BINARY_RHS)
646 rhs2 = gimple_assign_rhs2 (stmt);
648 /* Handle unary rhs and binary rhs with integer constants as second
649 operand. */
651 if (rhs_class == GIMPLE_UNARY_RHS
652 || (rhs_class == GIMPLE_BINARY_RHS
653 && TREE_CODE (rhs2) == INTEGER_CST))
655 if (code != BIT_AND_EXPR
656 && code != LSHIFT_EXPR
657 && code != RSHIFT_EXPR
658 && code != LROTATE_EXPR
659 && code != RROTATE_EXPR
660 && !CONVERT_EXPR_CODE_P (code))
661 return NULL;
663 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
665 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
666 we have to initialize the symbolic number. */
667 if (!source_stmt1)
669 if (gimple_assign_load_p (stmt)
670 || !init_symbolic_number (n, rhs1))
671 return NULL;
672 source_stmt1 = stmt;
675 switch (code)
677 case BIT_AND_EXPR:
679 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
680 uint64_t val = int_cst_value (rhs2), mask = 0;
681 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
683 /* Only constants masking full bytes are allowed. */
684 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
685 if ((val & tmp) != 0 && (val & tmp) != tmp)
686 return NULL;
687 else if (val & tmp)
688 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
690 n->n &= mask;
692 break;
693 case LSHIFT_EXPR:
694 case RSHIFT_EXPR:
695 case LROTATE_EXPR:
696 case RROTATE_EXPR:
697 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
698 return NULL;
699 break;
700 CASE_CONVERT:
702 int i, type_size, old_type_size;
703 tree type;
705 type = gimple_expr_type (stmt);
706 type_size = TYPE_PRECISION (type);
707 if (type_size % BITS_PER_UNIT != 0)
708 return NULL;
709 type_size /= BITS_PER_UNIT;
710 if (type_size > 64 / BITS_PER_MARKER)
711 return NULL;
713 /* Sign extension: result is dependent on the value. */
714 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
715 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
716 && HEAD_MARKER (n->n, old_type_size))
717 for (i = 0; i < type_size - old_type_size; i++)
718 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
719 << ((type_size - 1 - i) * BITS_PER_MARKER);
721 if (type_size < 64 / BITS_PER_MARKER)
723 /* If STMT casts to a smaller type mask out the bits not
724 belonging to the target type. */
725 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
727 n->type = type;
728 if (!n->base_addr)
729 n->range = type_size;
731 break;
732 default:
733 return NULL;
735 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
738 /* Handle binary rhs. */
740 if (rhs_class == GIMPLE_BINARY_RHS)
742 struct symbolic_number n1, n2;
743 gimple *source_stmt, *source_stmt2;
745 if (code != BIT_IOR_EXPR)
746 return NULL;
748 if (TREE_CODE (rhs2) != SSA_NAME)
749 return NULL;
751 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
753 switch (code)
755 case BIT_IOR_EXPR:
756 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
758 if (!source_stmt1)
759 return NULL;
761 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
763 if (!source_stmt2)
764 return NULL;
766 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
767 return NULL;
769 if (n1.vuse != n2.vuse)
770 return NULL;
772 source_stmt
773 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
775 if (!source_stmt)
776 return NULL;
778 if (!verify_symbolic_number_p (n, stmt))
779 return NULL;
781 break;
782 default:
783 return NULL;
785 return source_stmt;
787 return NULL;
790 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
791 *CMPXCHG, *CMPNOP and adjust *N. */
793 void
794 find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
795 uint64_t *cmpnop)
797 unsigned rsize;
798 uint64_t tmpn, mask;
800 /* The number which the find_bswap_or_nop_1 result should match in order
801 to have a full byte swap. The number is shifted to the right
802 according to the size of the symbolic number before using it. */
803 *cmpxchg = CMPXCHG;
804 *cmpnop = CMPNOP;
806 /* Find real size of result (highest non-zero byte). */
807 if (n->base_addr)
808 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
809 else
810 rsize = n->range;
812 /* Zero out the bits corresponding to untouched bytes in original gimple
813 expression. */
814 if (n->range < (int) sizeof (int64_t))
816 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
817 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
818 *cmpnop &= mask;
821 /* Zero out the bits corresponding to unused bytes in the result of the
822 gimple expression. */
823 if (rsize < n->range)
825 if (BYTES_BIG_ENDIAN)
827 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
828 *cmpxchg &= mask;
829 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
831 else
833 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
834 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
835 *cmpnop &= mask;
837 n->range = rsize;
840 n->range *= BITS_PER_UNIT;
843 /* Check if STMT completes a bswap implementation or a read in a given
844 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
845 accordingly. It also sets N to represent the kind of operations
846 performed: size of the resulting expression and whether it works on
847 a memory source, and if so alias-set and vuse. At last, the
848 function returns a stmt whose rhs's first tree is the source
849 expression. */
851 gimple *
852 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
854 tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
855 if (!tree_fits_uhwi_p (type_size))
856 return NULL;
858 /* The last parameter determines the depth search limit. It usually
859 correlates directly to the number n of bytes to be touched. We
860 increase that number by 2 * (log2(n) + 1) here in order to also
861 cover signed -> unsigned conversions of the src operand as can be seen
862 in libgcc, and for initial shift/and operation of the src operand. */
863 int limit = tree_to_uhwi (type_size);
864 limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
865 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
867 if (!ins_stmt)
868 return NULL;
870 uint64_t cmpxchg, cmpnop;
871 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
873 /* A complete byte swap should make the symbolic number start with
874 the largest digit in the highest order byte. An unchanged symbolic
875 number indicates a read with the same endianness as the target architecture. */
876 if (n->n == cmpnop)
877 *bswap = false;
878 else if (n->n == cmpxchg)
879 *bswap = true;
880 else
881 return NULL;
883 /* Useless bit manipulation performed by code. */
884 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
885 return NULL;
887 return ins_stmt;
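/* Illustrative example of a pattern this function detects (the function name
   below is hypothetical):

     uint32_t
     swap32 (uint32_t x)
     {
       return (x >> 24)
              | ((x >> 8) & 0xff00)
              | ((x << 8) & 0xff0000)
              | (x << 24);
     }

   The OR of the four operands produces the symbolic number 0x01020304, which
   matches CMPXCHG for a 4-byte range, so *BSWAP is set to true and the
   computation can later be replaced by a __builtin_bswap32 call.  */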
890 const pass_data pass_data_optimize_bswap =
892 GIMPLE_PASS, /* type */
893 "bswap", /* name */
894 OPTGROUP_NONE, /* optinfo_flags */
895 TV_NONE, /* tv_id */
896 PROP_ssa, /* properties_required */
897 0, /* properties_provided */
898 0, /* properties_destroyed */
899 0, /* todo_flags_start */
900 0, /* todo_flags_finish */
903 class pass_optimize_bswap : public gimple_opt_pass
905 public:
906 pass_optimize_bswap (gcc::context *ctxt)
907 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
910 /* opt_pass methods: */
911 virtual bool gate (function *)
913 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
916 virtual unsigned int execute (function *);
918 }; // class pass_optimize_bswap
920 /* Perform the bswap optimization: replace the expression computed in the rhs
921 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
922 bswap, load or load + bswap expression.
923 Which of these alternatives replaces the rhs is given by N->base_addr (non
924 null if a load is needed) and BSWAP. The type, VUSE and alias-set of the
925 load to perform are also given in N while the builtin bswap invocation is
926 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of the
927 load statements involved to construct the rhs in gsi_stmt (GSI) and
928 N->range gives the size of the rhs expression for maintaining some
929 statistics.
931 Note that if the replacement involves a load and if gsi_stmt (GSI) is
932 non-NULL, that stmt is moved just after INS_STMT to do the load with the
933 same VUSE, which can cause gsi_stmt (GSI) to change basic block. */
935 tree
936 bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
937 tree bswap_type, tree load_type, struct symbolic_number *n,
938 bool bswap)
940 tree src, tmp, tgt = NULL_TREE;
941 gimple *bswap_stmt;
943 gimple *cur_stmt = gsi_stmt (gsi);
944 src = n->src;
945 if (cur_stmt)
946 tgt = gimple_assign_lhs (cur_stmt);
948 /* Need to load the value from memory first. */
949 if (n->base_addr)
951 gimple_stmt_iterator gsi_ins = gsi;
952 if (ins_stmt)
953 gsi_ins = gsi_for_stmt (ins_stmt);
954 tree addr_expr, addr_tmp, val_expr, val_tmp;
955 tree load_offset_ptr, aligned_load_type;
956 gimple *load_stmt;
957 unsigned align = get_object_alignment (src);
958 poly_int64 load_offset = 0;
960 if (cur_stmt)
962 basic_block ins_bb = gimple_bb (ins_stmt);
963 basic_block cur_bb = gimple_bb (cur_stmt);
964 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
965 return NULL_TREE;
967 /* Move cur_stmt just before one of the loads of the original
968 to ensure it has the same VUSE. See PR61517 for what could
969 go wrong. */
970 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
971 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
972 gsi_move_before (&gsi, &gsi_ins);
973 gsi = gsi_for_stmt (cur_stmt);
975 else
976 gsi = gsi_ins;
978 /* Compute address to load from and cast according to the size
979 of the load. */
980 addr_expr = build_fold_addr_expr (src);
981 if (is_gimple_mem_ref_addr (addr_expr))
982 addr_tmp = unshare_expr (addr_expr);
983 else
985 addr_tmp = unshare_expr (n->base_addr);
986 if (!is_gimple_mem_ref_addr (addr_tmp))
987 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
988 is_gimple_mem_ref_addr,
989 NULL_TREE, true,
990 GSI_SAME_STMT);
991 load_offset = n->bytepos;
992 if (n->offset)
994 tree off
995 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
996 true, NULL_TREE, true,
997 GSI_SAME_STMT);
998 gimple *stmt
999 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
1000 POINTER_PLUS_EXPR, addr_tmp, off);
1001 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1002 addr_tmp = gimple_assign_lhs (stmt);
1006 /* Perform the load. */
1007 aligned_load_type = load_type;
1008 if (align < TYPE_ALIGN (load_type))
1009 aligned_load_type = build_aligned_type (load_type, align);
1010 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
1011 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1012 load_offset_ptr);
1014 if (!bswap)
1016 if (n->range == 16)
1017 nop_stats.found_16bit++;
1018 else if (n->range == 32)
1019 nop_stats.found_32bit++;
1020 else
1022 gcc_assert (n->range == 64);
1023 nop_stats.found_64bit++;
1026 /* Convert the result of load if necessary. */
1027 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1029 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1030 "load_dst");
1031 load_stmt = gimple_build_assign (val_tmp, val_expr);
1032 gimple_set_vuse (load_stmt, n->vuse);
1033 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1034 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
1035 update_stmt (cur_stmt);
1037 else if (cur_stmt)
1039 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1040 gimple_set_vuse (cur_stmt, n->vuse);
1041 update_stmt (cur_stmt);
1043 else
1045 tgt = make_ssa_name (load_type);
1046 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1047 gimple_set_vuse (cur_stmt, n->vuse);
1048 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1051 if (dump_file)
1053 fprintf (dump_file,
1054 "%d bit load in target endianness found at: ",
1055 (int) n->range);
1056 print_gimple_stmt (dump_file, cur_stmt, 0);
1058 return tgt;
1060 else
1062 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1063 load_stmt = gimple_build_assign (val_tmp, val_expr);
1064 gimple_set_vuse (load_stmt, n->vuse);
1065 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1067 src = val_tmp;
1069 else if (!bswap)
1071 gimple *g = NULL;
1072 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1074 if (!is_gimple_val (src))
1075 return NULL_TREE;
1076 g = gimple_build_assign (tgt, NOP_EXPR, src);
1078 else if (cur_stmt)
1079 g = gimple_build_assign (tgt, src);
1080 else
1081 tgt = src;
1082 if (n->range == 16)
1083 nop_stats.found_16bit++;
1084 else if (n->range == 32)
1085 nop_stats.found_32bit++;
1086 else
1088 gcc_assert (n->range == 64);
1089 nop_stats.found_64bit++;
1091 if (dump_file)
1093 fprintf (dump_file,
1094 "%d bit reshuffle in target endianness found at: ",
1095 (int) n->range);
1096 if (cur_stmt)
1097 print_gimple_stmt (dump_file, cur_stmt, 0);
1098 else
1100 print_generic_expr (dump_file, tgt, TDF_NONE);
1101 fprintf (dump_file, "\n");
1104 if (cur_stmt)
1105 gsi_replace (&gsi, g, true);
1106 return tgt;
1108 else if (TREE_CODE (src) == BIT_FIELD_REF)
1109 src = TREE_OPERAND (src, 0);
1111 if (n->range == 16)
1112 bswap_stats.found_16bit++;
1113 else if (n->range == 32)
1114 bswap_stats.found_32bit++;
1115 else
1117 gcc_assert (n->range == 64);
1118 bswap_stats.found_64bit++;
1121 tmp = src;
1123 /* Convert the src expression if necessary. */
1124 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1126 gimple *convert_stmt;
1128 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1129 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1130 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1133 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
1134 values are considered, as rotation of 2N-bit values by N bits is generally
1135 not equivalent to a bswap. Consider for instance 0x01020304 r>> 16, which
1136 gives 0x03040102 while a bswap for that value is 0x04030201. */
1137 if (bswap && n->range == 16)
1139 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1140 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1141 bswap_stmt = gimple_build_assign (NULL, src);
1143 else
1144 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1146 if (tgt == NULL_TREE)
1147 tgt = make_ssa_name (bswap_type);
1148 tmp = tgt;
1150 /* Convert the result if necessary. */
1151 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1153 gimple *convert_stmt;
1155 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1156 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
1157 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1160 gimple_set_lhs (bswap_stmt, tmp);
1162 if (dump_file)
1164 fprintf (dump_file, "%d bit bswap implementation found at: ",
1165 (int) n->range);
1166 if (cur_stmt)
1167 print_gimple_stmt (dump_file, cur_stmt, 0);
1168 else
1170 print_generic_expr (dump_file, tgt, TDF_NONE);
1171 fprintf (dump_file, "\n");
1175 if (cur_stmt)
1177 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1178 gsi_remove (&gsi, true);
1180 else
1181 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1182 return tgt;
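/* For illustration (hypothetical user code), on a little-endian target a
   hand-written big-endian load such as

     uint32_t
     load_be32 (const unsigned char *p)
     {
       return ((uint32_t) p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
     }

   is rewritten here into a single 32-bit load (through a type carrying the
   alignment actually known for *p) followed by a __builtin_bswap32 call,
   whereas on a big-endian target the same source collapses to just the
   load.  */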
1185 /* Find manual byte swap implementations as well as loads in a given
1186 endianness. Byte swaps are turned into a bswap builtin invocation
1187 while endian loads are converted to a bswap builtin invocation or a
1188 simple load according to the target endianness. */
1190 unsigned int
1191 pass_optimize_bswap::execute (function *fun)
1193 basic_block bb;
1194 bool bswap32_p, bswap64_p;
1195 bool changed = false;
1196 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1198 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1199 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1200 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1201 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1202 || (bswap32_p && word_mode == SImode)));
1204 /* Determine the argument type of the builtins. The code later on
1205 assumes that the return and argument type are the same. */
1206 if (bswap32_p)
1208 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1209 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1212 if (bswap64_p)
1214 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1215 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1218 memset (&nop_stats, 0, sizeof (nop_stats));
1219 memset (&bswap_stats, 0, sizeof (bswap_stats));
1220 calculate_dominance_info (CDI_DOMINATORS);
1222 FOR_EACH_BB_FN (bb, fun)
1224 gimple_stmt_iterator gsi;
1226 /* We do a reverse scan for bswap patterns to make sure we get the
1227 widest match. As bswap pattern matching doesn't handle previously
1228 inserted smaller bswap replacements as sub-patterns, the wider
1229 variant wouldn't be detected. */
1230 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1232 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1233 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1234 enum tree_code code;
1235 struct symbolic_number n;
1236 bool bswap;
1238 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1239 might be moved to a different basic block by bswap_replace and gsi
1240 must not point to it if that's the case. Moving the gsi_prev
1241 there makes sure that gsi points to the statement previous to
1242 cur_stmt while still making sure that all statements are
1243 considered in this basic block. */
1244 gsi_prev (&gsi);
1246 if (!is_gimple_assign (cur_stmt))
1247 continue;
1249 code = gimple_assign_rhs_code (cur_stmt);
1250 switch (code)
1252 case LROTATE_EXPR:
1253 case RROTATE_EXPR:
1254 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1255 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1256 % BITS_PER_UNIT)
1257 continue;
1258 /* Fall through. */
1259 case BIT_IOR_EXPR:
1260 break;
1261 default:
1262 continue;
1265 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1267 if (!ins_stmt)
1268 continue;
1270 switch (n.range)
1272 case 16:
1273 /* Already in canonical form, nothing to do. */
1274 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1275 continue;
1276 load_type = bswap_type = uint16_type_node;
1277 break;
1278 case 32:
1279 load_type = uint32_type_node;
1280 if (bswap32_p)
1282 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1283 bswap_type = bswap32_type;
1285 break;
1286 case 64:
1287 load_type = uint64_type_node;
1288 if (bswap64_p)
1290 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1291 bswap_type = bswap64_type;
1293 break;
1294 default:
1295 continue;
1298 if (bswap && !fndecl && n.range != 16)
1299 continue;
1301 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1302 bswap_type, load_type, &n, bswap))
1303 changed = true;
1307 statistics_counter_event (fun, "16-bit nop implementations found",
1308 nop_stats.found_16bit);
1309 statistics_counter_event (fun, "32-bit nop implementations found",
1310 nop_stats.found_32bit);
1311 statistics_counter_event (fun, "64-bit nop implementations found",
1312 nop_stats.found_64bit);
1313 statistics_counter_event (fun, "16-bit bswap implementations found",
1314 bswap_stats.found_16bit);
1315 statistics_counter_event (fun, "32-bit bswap implementations found",
1316 bswap_stats.found_32bit);
1317 statistics_counter_event (fun, "64-bit bswap implementations found",
1318 bswap_stats.found_64bit);
1320 return (changed ? TODO_update_ssa : 0);
1323 } // anon namespace
1325 gimple_opt_pass *
1326 make_pass_optimize_bswap (gcc::context *ctxt)
1328 return new pass_optimize_bswap (ctxt);
1331 namespace {
1333 /* Struct recording one operand for the store, which is either a constant,
1334 then VAL represents the constant and all the other fields are zero, or
1335 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1336 and the other fields also reflect the memory load, or an SSA name, then
1337 VAL represents the SSA name and all the other fields are zero. */
1339 class store_operand_info
1341 public:
1342 tree val;
1343 tree base_addr;
1344 poly_uint64 bitsize;
1345 poly_uint64 bitpos;
1346 poly_uint64 bitregion_start;
1347 poly_uint64 bitregion_end;
1348 gimple *stmt;
1349 bool bit_not_p;
1350 store_operand_info ();
1353 store_operand_info::store_operand_info ()
1354 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1355 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1359 /* Struct recording the information about a single store of an immediate
1360 to memory. These are created in the first phase and coalesced into
1361 merged_store_group objects in the second phase. */
1363 class store_immediate_info
1365 public:
1366 unsigned HOST_WIDE_INT bitsize;
1367 unsigned HOST_WIDE_INT bitpos;
1368 unsigned HOST_WIDE_INT bitregion_start;
1369 /* This is one past the last bit of the bit region. */
1370 unsigned HOST_WIDE_INT bitregion_end;
1371 gimple *stmt;
1372 unsigned int order;
1373 /* INTEGER_CST for constant store, STRING_CST for string store,
1374 MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
1375 BIT_INSERT_EXPR for bit insertion.
1376 LROTATE_EXPR if it can only be bswap optimized and
1377 ops are not really meaningful.
1378 NOP_EXPR if bswap optimization detected identity, ops
1379 are not meaningful. */
1380 enum tree_code rhs_code;
1381 /* Two fields for bswap optimization purposes. */
1382 struct symbolic_number n;
1383 gimple *ins_stmt;
1384 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1385 bool bit_not_p;
1386 /* True if ops have been swapped and thus ops[1] represents
1387 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1388 bool ops_swapped_p;
1389 /* The index number of the landing pad, or 0 if there is none. */
1390 int lp_nr;
1391 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1392 just the first one. */
1393 store_operand_info ops[2];
1394 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1395 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1396 gimple *, unsigned int, enum tree_code,
1397 struct symbolic_number &, gimple *, bool, int,
1398 const store_operand_info &,
1399 const store_operand_info &);
1402 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
1403 unsigned HOST_WIDE_INT bp,
1404 unsigned HOST_WIDE_INT brs,
1405 unsigned HOST_WIDE_INT bre,
1406 gimple *st,
1407 unsigned int ord,
1408 enum tree_code rhscode,
1409 struct symbolic_number &nr,
1410 gimple *ins_stmtp,
1411 bool bitnotp,
1412 int nr2,
1413 const store_operand_info &op0r,
1414 const store_operand_info &op1r)
1415 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1416 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1417 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
1418 lp_nr (nr2)
1419 #if __cplusplus >= 201103L
1420 , ops { op0r, op1r }
1423 #else
1425 ops[0] = op0r;
1426 ops[1] = op1r;
1428 #endif
1430 /* Struct representing a group of stores to contiguous memory locations.
1431 These are produced by the second phase (coalescing) and consumed in the
1432 third phase that outputs the widened stores. */
1434 class merged_store_group
1436 public:
1437 unsigned HOST_WIDE_INT start;
1438 unsigned HOST_WIDE_INT width;
1439 unsigned HOST_WIDE_INT bitregion_start;
1440 unsigned HOST_WIDE_INT bitregion_end;
1441 /* The size of the allocated memory for val and mask. */
1442 unsigned HOST_WIDE_INT buf_size;
1443 unsigned HOST_WIDE_INT align_base;
1444 poly_uint64 load_align_base[2];
1446 unsigned int align;
1447 unsigned int load_align[2];
1448 unsigned int first_order;
1449 unsigned int last_order;
1450 bool bit_insertion;
1451 bool string_concatenation;
1452 bool only_constants;
1453 bool consecutive;
1454 unsigned int first_nonmergeable_order;
1455 int lp_nr;
1457 auto_vec<store_immediate_info *> stores;
1458 /* We record the first and last original statements in the sequence because
1459 we'll need their vuse/vdef and replacement position. It's easier to keep
1460 track of them separately as 'stores' is reordered by apply_stores. */
1461 gimple *last_stmt;
1462 gimple *first_stmt;
1463 unsigned char *val;
1464 unsigned char *mask;
1466 merged_store_group (store_immediate_info *);
1467 ~merged_store_group ();
1468 bool can_be_merged_into (store_immediate_info *);
1469 void merge_into (store_immediate_info *);
1470 void merge_overlapping (store_immediate_info *);
1471 bool apply_stores ();
1472 private:
1473 void do_merge (store_immediate_info *);
1476 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1478 static void
1479 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1481 if (!fd)
1482 return;
1484 for (unsigned int i = 0; i < len; i++)
1485 fprintf (fd, "%02x ", ptr[i]);
1486 fprintf (fd, "\n");
1489 /* Clear out LEN bits starting from bit START in the byte array
1490 PTR. This clears the bits to the *right* of START.
1491 START must be within [0, BITS_PER_UNIT) and counts starting from
1492 the least significant bit. */
1494 static void
1495 clear_bit_region_be (unsigned char *ptr, unsigned int start,
1496 unsigned int len)
1498 if (len == 0)
1499 return;
1500 /* Clear len bits to the right of start. */
1501 else if (len <= start + 1)
1503 unsigned char mask = (~(~0U << len));
1504 mask = mask << (start + 1U - len);
1505 ptr[0] &= ~mask;
1507 else if (start != BITS_PER_UNIT - 1)
1509 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1510 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1511 len - (start % BITS_PER_UNIT) - 1);
1513 else if (start == BITS_PER_UNIT - 1
1514 && len > BITS_PER_UNIT)
1516 unsigned int nbytes = len / BITS_PER_UNIT;
1517 memset (ptr, 0, nbytes);
1518 if (len % BITS_PER_UNIT != 0)
1519 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1520 len % BITS_PER_UNIT);
1522 else
1523 gcc_unreachable ();
1526 /* In the byte array PTR clear the bit region that starts at bit
1527 START and is LEN bits wide.
1528 For regions spanning multiple bytes do this recursively until we reach
1529 zero LEN or a region contained within a single byte. */
1531 static void
1532 clear_bit_region (unsigned char *ptr, unsigned int start,
1533 unsigned int len)
1535 /* Degenerate base case. */
1536 if (len == 0)
1537 return;
1538 else if (start >= BITS_PER_UNIT)
1539 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1540 /* Second base case. */
1541 else if ((start + len) <= BITS_PER_UNIT)
1543 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
1544 mask >>= BITS_PER_UNIT - (start + len);
1546 ptr[0] &= ~mask;
1548 return;
1550 /* Clear most significant bits in a byte and proceed with the next byte. */
1551 else if (start != 0)
1553 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1554 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1556 /* Whole bytes need to be cleared. */
1557 else if (start == 0 && len > BITS_PER_UNIT)
1559 unsigned int nbytes = len / BITS_PER_UNIT;
1560 /* We could recurse on each byte but we clear whole bytes, so a simple
1561 memset will do. */
1562 memset (ptr, '\0', nbytes);
1563 /* Clear the remaining sub-byte region if there is one. */
1564 if (len % BITS_PER_UNIT != 0)
1565 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1567 else
1568 gcc_unreachable ();
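/* Worked example (illustrative): clear_bit_region (ptr, 3, 7) on a buffer
   initialized to { 0xff, 0xff } recurses into a 5-bit clear of bits 3..7 of
   ptr[0] and a 2-bit clear of bits 0..1 of ptr[1], leaving { 0x07, 0xfc }.  */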
1571 /* Write BITLEN bits of EXPR to the byte array PTR at
1572 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1573 Return true if the operation succeeded. */
1575 static bool
1576 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
1577 unsigned int total_bytes)
1579 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1580 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1581 || (bitpos % BITS_PER_UNIT)
1582 || !int_mode_for_size (bitlen, 0).exists ());
1583 bool empty_ctor_p
1584 = (TREE_CODE (expr) == CONSTRUCTOR
1585 && CONSTRUCTOR_NELTS (expr) == 0
1586 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1587 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
1589 if (!sub_byte_op_p)
1591 if (first_byte >= total_bytes)
1592 return false;
1593 total_bytes -= first_byte;
1594 if (empty_ctor_p)
1596 unsigned HOST_WIDE_INT rhs_bytes
1597 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1598 if (rhs_bytes > total_bytes)
1599 return false;
1600 memset (ptr + first_byte, '\0', rhs_bytes);
1601 return true;
1603 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1606 /* LITTLE-ENDIAN
1607 We are writing a non byte-sized quantity or at a position that is not
1608 at a byte boundary.
1609 |--------|--------|--------| ptr + first_byte
1611 xxx xxxxxxxx xxx< bp>
1612 |______EXPR____|
1614 First native_encode_expr EXPR into a temporary buffer and shift each
1615 byte in the buffer by 'bp' (carrying the bits over as necessary).
1616 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1617 <------bitlen---->< bp>
1618 Then we clear the destination bits:
1619 |---00000|00000000|000-----| ptr + first_byte
1620 <-------bitlen--->< bp>
1622 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1623 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1625 BIG-ENDIAN
1626 We are writing a non byte-sized quantity or at a position that is not
1627 at a byte boundary.
1628 ptr + first_byte |--------|--------|--------|
1630 <bp >xxx xxxxxxxx xxx
1631 |_____EXPR_____|
1633 First native_encode_expr EXPR into a temporary buffer and shift each
1634 byte in the buffer to the right by (carrying the bits over as necessary).
1635 We shift by as much as needed to align the most significant bit of EXPR
1636 with bitpos:
1637 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1638 <---bitlen----> <bp ><-----bitlen----->
1639 Then we clear the destination bits:
1640 ptr + first_byte |-----000||00000000||00000---|
1641 <bp ><-------bitlen----->
1643 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1644 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1645 The awkwardness comes from the fact that bitpos is counted from the
1646 most significant bit of a byte. */
1648 /* We must be dealing with fixed-size data at this point, since the
1649 total size is also fixed. */
1650 unsigned int byte_size;
1651 if (empty_ctor_p)
1653 unsigned HOST_WIDE_INT rhs_bytes
1654 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1655 if (rhs_bytes > total_bytes)
1656 return false;
1657 byte_size = rhs_bytes;
1659 else
1661 fixed_size_mode mode
1662 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
1663 byte_size
1664 = mode == BLKmode
1665 ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
1666 : GET_MODE_SIZE (mode);
1668 /* Allocate an extra byte so that we have space to shift into. */
1669 byte_size++;
1670 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
1671 memset (tmpbuf, '\0', byte_size);
1672 /* The store detection code should only have allowed constants that are
1673 accepted by native_encode_expr or empty ctors. */
1674 if (!empty_ctor_p
1675 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
1676 gcc_unreachable ();
1678 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1679 bytes to write. This means it can write more than
1680 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1681 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1682 bitlen and zero out the bits that are not relevant as well (that may
1683 contain a sign bit due to sign-extension). */
1684 unsigned int padding
1685 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
1686 /* On big-endian the padding is at the 'front' so just skip the initial
1687 bytes. */
1688 if (BYTES_BIG_ENDIAN)
1689 tmpbuf += padding;
1691 byte_size -= padding;
1693 if (bitlen % BITS_PER_UNIT != 0)
1695 if (BYTES_BIG_ENDIAN)
1696 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1697 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1698 else
1699 clear_bit_region (tmpbuf, bitlen,
1700 byte_size * BITS_PER_UNIT - bitlen);
1702 /* Left shifting relies on the last byte being clear if bitlen is
1703 a multiple of BITS_PER_UNIT, which might not be clear if
1704 there are padding bytes. */
1705 else if (!BYTES_BIG_ENDIAN)
1706 tmpbuf[byte_size - 1] = '\0';
1708 /* Clear the bit region in PTR where the bits from TMPBUF will be
1709 inserted. */
1710 if (BYTES_BIG_ENDIAN)
1711 clear_bit_region_be (ptr + first_byte,
1712 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1713 else
1714 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1716 int shift_amnt;
1717 int bitlen_mod = bitlen % BITS_PER_UNIT;
1718 int bitpos_mod = bitpos % BITS_PER_UNIT;
1720 bool skip_byte = false;
1721 if (BYTES_BIG_ENDIAN)
1723 /* BITPOS and BITLEN are exactly aligned and no shifting
1724 is necessary. */
1725 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1726 || (bitpos_mod == 0 && bitlen_mod == 0))
1727 shift_amnt = 0;
1728 /* |. . . . . . . .|
1729 <bp > <blen >.
1730 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1731 of the value until it aligns with 'bp' in the next byte over. */
1732 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1734 shift_amnt = bitlen_mod + bitpos_mod;
1735 skip_byte = bitlen_mod != 0;
1737 /* |. . . . . . . .|
1738 <----bp--->
1739 <---blen---->.
1740 Shift the value right within the same byte so it aligns with 'bp'. */
1741 else
1742 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1744 else
1745 shift_amnt = bitpos % BITS_PER_UNIT;
1747 /* Create the shifted version of EXPR. */
1748 if (!BYTES_BIG_ENDIAN)
1750 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
1751 if (shift_amnt == 0)
1752 byte_size--;
1754 else
1756 gcc_assert (BYTES_BIG_ENDIAN);
1757 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1758 /* If shifting right forced us to move into the next byte skip the now
1759 empty byte. */
1760 if (skip_byte)
1762 tmpbuf++;
1763 byte_size--;
1767 /* Insert the bits from TMPBUF. */
1768 for (unsigned int i = 0; i < byte_size; i++)
1769 ptr[first_byte + i] |= tmpbuf[i];
1771 return true;
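/* Illustrative little-endian example: encoding the 8-bit constant 0xab at
   BITPOS 4 into a zeroed 2-byte destination natively encodes { 0xab, 0x00 }
   into the temporary buffer, shifts it left by 4 bits to { 0xb0, 0x0a },
   clears bits 4..11 of the destination and ORs the shifted bytes in, so the
   destination ends up as { 0xb0, 0x0a }.  */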
1774 /* Sorting function for store_immediate_info objects.
1775 Sorts them by bitposition. */
1777 static int
1778 sort_by_bitpos (const void *x, const void *y)
1780 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1781 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1783 if ((*tmp)->bitpos < (*tmp2)->bitpos)
1784 return -1;
1785 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1786 return 1;
1787 else
1788 /* If they are the same let's use the order which is guaranteed to
1789 be different. */
1790 return (*tmp)->order - (*tmp2)->order;
1793 /* Sorting function for store_immediate_info objects.
1794 Sorts them by the order field. */
1796 static int
1797 sort_by_order (const void *x, const void *y)
1799 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1800 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1802 if ((*tmp)->order < (*tmp2)->order)
1803 return -1;
1804 else if ((*tmp)->order > (*tmp2)->order)
1805 return 1;
1807 gcc_unreachable ();
1810 /* Initialize a merged_store_group object from a store_immediate_info
1811 object. */
1813 merged_store_group::merged_store_group (store_immediate_info *info)
1815 start = info->bitpos;
1816 width = info->bitsize;
1817 bitregion_start = info->bitregion_start;
1818 bitregion_end = info->bitregion_end;
1819 /* VAL has memory allocated for it in apply_stores once the group
1820 width has been finalized. */
1821 val = NULL;
1822 mask = NULL;
1823 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
1824 string_concatenation = info->rhs_code == STRING_CST;
1825 only_constants = info->rhs_code == INTEGER_CST;
1826 consecutive = true;
1827 first_nonmergeable_order = ~0U;
1828 lp_nr = info->lp_nr;
1829 unsigned HOST_WIDE_INT align_bitpos = 0;
1830 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1831 &align, &align_bitpos);
1832 align_base = start - align_bitpos;
1833 for (int i = 0; i < 2; ++i)
1835 store_operand_info &op = info->ops[i];
1836 if (op.base_addr == NULL_TREE)
1838 load_align[i] = 0;
1839 load_align_base[i] = 0;
1841 else
1843 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
1844 load_align_base[i] = op.bitpos - align_bitpos;
1847 stores.create (1);
1848 stores.safe_push (info);
1849 last_stmt = info->stmt;
1850 last_order = info->order;
1851 first_stmt = last_stmt;
1852 first_order = last_order;
1853 buf_size = 0;
1856 merged_store_group::~merged_store_group ()
1858 if (val)
1859 XDELETEVEC (val);
1862 /* Return true if the store described by INFO can be merged into the group. */
1864 bool
1865 merged_store_group::can_be_merged_into (store_immediate_info *info)
1867 /* Do not merge bswap patterns. */
1868 if (info->rhs_code == LROTATE_EXPR)
1869 return false;
1871 if (info->lp_nr != lp_nr)
1872 return false;
1874 /* The canonical case. */
1875 if (info->rhs_code == stores[0]->rhs_code)
1876 return true;
1878 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
1879 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
1880 return !string_concatenation;
1882 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
1883 return !string_concatenation;
1885 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
1886 only for small regions since this can generate a lot of instructions. */
1887 if (info->rhs_code == MEM_REF
1888 && (stores[0]->rhs_code == INTEGER_CST
1889 || stores[0]->rhs_code == BIT_INSERT_EXPR)
1890 && info->bitregion_start == stores[0]->bitregion_start
1891 && info->bitregion_end == stores[0]->bitregion_end
1892 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
1893 return !string_concatenation;
1895 if (stores[0]->rhs_code == MEM_REF
1896 && (info->rhs_code == INTEGER_CST
1897 || info->rhs_code == BIT_INSERT_EXPR)
1898 && info->bitregion_start == stores[0]->bitregion_start
1899 && info->bitregion_end == stores[0]->bitregion_end
1900 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
1901 return !string_concatenation;
1903 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
1904 if (info->rhs_code == STRING_CST
1905 && stores[0]->rhs_code == INTEGER_CST
1906 && stores[0]->bitsize == CHAR_BIT)
1907 return !bit_insertion;
1909 if (stores[0]->rhs_code == STRING_CST
1910 && info->rhs_code == INTEGER_CST
1911 && info->bitsize == CHAR_BIT)
1912 return !bit_insertion;
1914 return false;
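/* Editor's note, an illustrative summary of the rules above (not from the
   original file): a group started with INTEGER_CST stores can accept a
   BIT_INSERT_EXPR (bit-field) store, and vice versa, as long as no
   STRING_CST has been merged; a group of CHAR_BIT-sized INTEGER_CST stores
   can accept a STRING_CST as long as no BIT_INSERT_EXPR has been merged;
   and a MEM_REF (copy) store over the same small bit region can join an
   INTEGER_CST/BIT_INSERT_EXPR group, to be turned into a bit insertion
   later on.  */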
1917 /* Helper method for merge_into and merge_overlapping to do
1918 the common part. */
1920 void
1921 merged_store_group::do_merge (store_immediate_info *info)
1923 bitregion_start = MIN (bitregion_start, info->bitregion_start);
1924 bitregion_end = MAX (bitregion_end, info->bitregion_end);
1926 unsigned int this_align;
1927 unsigned HOST_WIDE_INT align_bitpos = 0;
1928 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1929 &this_align, &align_bitpos);
1930 if (this_align > align)
1932 align = this_align;
1933 align_base = info->bitpos - align_bitpos;
1935 for (int i = 0; i < 2; ++i)
1937 store_operand_info &op = info->ops[i];
1938 if (!op.base_addr)
1939 continue;
1941 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
1942 if (this_align > load_align[i])
1944 load_align[i] = this_align;
1945 load_align_base[i] = op.bitpos - align_bitpos;
1949 gimple *stmt = info->stmt;
1950 stores.safe_push (info);
1951 if (info->order > last_order)
1953 last_order = info->order;
1954 last_stmt = stmt;
1956 else if (info->order < first_order)
1958 first_order = info->order;
1959 first_stmt = stmt;
1962 if (info->bitpos != start + width)
1963 consecutive = false;
1965 /* We need to use bit insertion if there is any bit-field store. */
1966 if (info->rhs_code == BIT_INSERT_EXPR)
1968 bit_insertion = true;
1969 gcc_assert (!string_concatenation);
1972 /* We want to use concatenation if there is any string. */
1973 if (info->rhs_code == STRING_CST)
1975 string_concatenation = true;
1976 gcc_assert (!bit_insertion);
1979 /* But we cannot use it if we don't have consecutive stores. */
1980 if (!consecutive)
1981 string_concatenation = false;
1983 if (info->rhs_code != INTEGER_CST)
1984 only_constants = false;
1987 /* Merge a store recorded by INFO into this merged store.
1988 The store does not overlap with the existing recorded
1989 stores. */
1991 void
1992 merged_store_group::merge_into (store_immediate_info *info)
1994 do_merge (info);
1996 /* Make sure we're inserting in the position we think we're inserting. */
1997 gcc_assert (info->bitpos >= start + width
1998 && info->bitregion_start <= bitregion_end);
2000 width = info->bitpos + info->bitsize - start;
2003 /* Merge a store described by INFO into this merged store.
2004 INFO overlaps in some way with the current store (i.e. it's not the
2005 contiguous case, which is handled by merged_store_group::merge_into). */
2007 void
2008 merged_store_group::merge_overlapping (store_immediate_info *info)
2010 do_merge (info);
2012 /* If the store extends the size of the group, extend the width. */
2013 if (info->bitpos + info->bitsize > start + width)
2014 width = info->bitpos + info->bitsize - start;
2017 /* Go through all the recorded stores in this group in program order and
2018 apply their values to the VAL byte array to create the final merged
2019 value. Return true if the operation succeeded. */
2021 bool
2022 merged_store_group::apply_stores ()
2024 store_immediate_info *info;
2025 unsigned int i;
2027 /* Make sure we have more than one store in the group; otherwise we cannot
2028 merge anything. */
2029 if (bitregion_start % BITS_PER_UNIT != 0
2030 || bitregion_end % BITS_PER_UNIT != 0
2031 || stores.length () == 1)
2032 return false;
2034 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2036 /* Really do string concatenation for large strings only. */
2037 if (buf_size <= MOVE_MAX)
2038 string_concatenation = false;
2040 /* Create a power-of-2-sized buffer for native_encode_expr. */
2041 if (!string_concatenation)
2042 buf_size = 1 << ceil_log2 (buf_size);
2044 val = XNEWVEC (unsigned char, 2 * buf_size);
2045 mask = val + buf_size;
2046 memset (val, 0, buf_size);
2047 memset (mask, ~0U, buf_size);
2049 stores.qsort (sort_by_order);
2051 FOR_EACH_VEC_ELT (stores, i, info)
2053 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
2054 tree cst;
2055 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2056 cst = info->ops[0].val;
2057 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2058 cst = info->ops[1].val;
2059 else
2060 cst = NULL_TREE;
2061 bool ret = true;
2062 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2063 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2064 buf_size);
2065 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2066 if (BYTES_BIG_ENDIAN)
2067 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2068 - (pos_in_buffer % BITS_PER_UNIT)),
2069 info->bitsize);
2070 else
2071 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2072 if (cst && dump_file && (dump_flags & TDF_DETAILS))
2074 if (ret)
2076 fputs ("After writing ", dump_file);
2077 print_generic_expr (dump_file, cst, TDF_NONE);
2078 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
2079 " at position %d\n", info->bitsize, pos_in_buffer);
2080 fputs (" the merged value contains ", dump_file);
2081 dump_char_array (dump_file, val, buf_size);
2082 fputs (" the merged mask contains ", dump_file);
2083 dump_char_array (dump_file, mask, buf_size);
2084 if (bit_insertion)
2085 fputs (" bit insertion is required\n", dump_file);
2086 if (string_concatenation)
2087 fputs (" string concatenation is required\n", dump_file);
2089 else
2090 fprintf (dump_file, "Failed to merge stores\n");
2092 if (!ret)
2093 return false;
2095 stores.qsort (sort_by_bitpos);
2096 return true;
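/* Editor's note: an illustrative sketch only, not part of the original
   file. It shows, by hand, the VAL/MASK contents apply_stores computes for
   a 4-byte bit region holding a 1-byte store of 0x12 at offset 0 and a
   2-byte store of 0x3456 at offsets 2-3, assuming a little-endian target:
   MASK stays 0xff exactly for the padding byte no recorded store writes,
   which split_group later skips.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  unsigned char val[4], mask[4];
  memset (val, 0, sizeof val);
  memset (mask, 0xff, sizeof mask);	/* Everything starts out as padding.  */
  val[0] = 0x12;			/* 1-byte store at offset 0.  */
  mask[0] = 0;
  val[2] = 0x56;			/* 2-byte store of 0x3456 ...  */
  val[3] = 0x34;			/* ... in little-endian byte order.  */
  mask[2] = mask[3] = 0;
  for (int i = 0; i < 4; i++)
    printf ("%02x/%02x ", val[i], mask[i]);
  printf ("\n");			/* Prints "12/00 00/ff 56/00 34/00".  */
  return 0;
}
#endif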
2099 /* Structure describing the store chain. */
2101 class imm_store_chain_info
2103 public:
2104 /* Doubly-linked list that imposes an order on chain processing.
2105 PNXP (prev's next pointer) points to the head of a list, or to
2106 the next field in the previous chain in the list.
2107 See pass_store_merging::m_stores_head for more rationale. */
2108 imm_store_chain_info *next, **pnxp;
2109 tree base_addr;
2110 auto_vec<store_immediate_info *> m_store_info;
2111 auto_vec<merged_store_group *> m_merged_store_groups;
2113 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2114 : next (inspt), pnxp (&inspt), base_addr (b_a)
2116 inspt = this;
2117 if (next)
2119 gcc_checking_assert (pnxp == next->pnxp);
2120 next->pnxp = &next;
2123 ~imm_store_chain_info ()
2125 *pnxp = next;
2126 if (next)
2128 gcc_checking_assert (&next == next->pnxp);
2129 next->pnxp = pnxp;
2132 bool terminate_and_process_chain ();
2133 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2134 unsigned int);
2135 bool coalesce_immediate_stores ();
2136 bool output_merged_store (merged_store_group *);
2137 bool output_merged_stores ();
2140 const pass_data pass_data_tree_store_merging = {
2141 GIMPLE_PASS, /* type */
2142 "store-merging", /* name */
2143 OPTGROUP_NONE, /* optinfo_flags */
2144 TV_GIMPLE_STORE_MERGING, /* tv_id */
2145 PROP_ssa, /* properties_required */
2146 0, /* properties_provided */
2147 0, /* properties_destroyed */
2148 0, /* todo_flags_start */
2149 TODO_update_ssa, /* todo_flags_finish */
2152 class pass_store_merging : public gimple_opt_pass
2154 public:
2155 pass_store_merging (gcc::context *ctxt)
2156 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
2160 /* Pass not supported for PDP-endian, nor for insane hosts or
2161 target character sizes where native_{encode,interpret}_expr
2162 doesn't work properly. */
2163 virtual bool
2164 gate (function *)
2166 return flag_store_merging
2167 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2168 && CHAR_BIT == 8
2169 && BITS_PER_UNIT == 8;
2172 virtual unsigned int execute (function *);
2174 private:
2175 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
2177 /* Form a doubly-linked stack of the elements of m_stores, so that
2178 we can iterate over them in a predictable way. Using this order
2179 avoids extraneous differences in the compiler output just because
2180 of tree pointer variations (e.g. different chains end up in
2181 different positions of m_stores, so they are handled in different
2182 orders, so they allocate or release SSA names in different
2183 orders, and when they get reused, subsequent passes end up
2184 getting different SSA names, which may ultimately change
2185 decisions when going out of SSA). */
2186 imm_store_chain_info *m_stores_head;
2188 bool process_store (gimple *);
2189 bool terminate_and_process_chain (imm_store_chain_info *);
2190 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
2191 bool terminate_and_process_all_chains ();
2192 }; // class pass_store_merging
2194 /* Terminate and process all recorded chains. Return true if any changes
2195 were made. */
2197 bool
2198 pass_store_merging::terminate_and_process_all_chains ()
2200 bool ret = false;
2201 while (m_stores_head)
2202 ret |= terminate_and_process_chain (m_stores_head);
2203 gcc_assert (m_stores.is_empty ());
2204 return ret;
2207 /* Terminate all chains that are affected by the statement STMT.
2208 CHAIN_INFO is the chain we should ignore from the checks if
2209 non-NULL. Return true if any changes were made. */
2211 bool
2212 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
2213 **chain_info,
2214 gimple *stmt)
2216 bool ret = false;
2218 /* If the statement doesn't touch memory it can't alias. */
2219 if (!gimple_vuse (stmt))
2220 return false;
2222 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2223 ao_ref store_lhs_ref;
2224 ao_ref_init (&store_lhs_ref, store_lhs);
2225 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
2227 next = cur->next;
2229 /* We already checked all the stores in chain_info and terminated the
2230 chain if necessary. Skip it here. */
2231 if (chain_info && *chain_info == cur)
2232 continue;
2234 store_immediate_info *info;
2235 unsigned int i;
2236 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
2238 tree lhs = gimple_assign_lhs (info->stmt);
2239 ao_ref lhs_ref;
2240 ao_ref_init (&lhs_ref, lhs);
2241 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2242 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2243 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2244 &lhs_ref, false)))
2246 if (dump_file && (dump_flags & TDF_DETAILS))
2248 fprintf (dump_file, "stmt causes chain termination:\n");
2249 print_gimple_stmt (dump_file, stmt, 0);
2251 ret |= terminate_and_process_chain (cur);
2252 break;
2257 return ret;
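/* Editor's note, an illustrative example (not from the original file): for
     t.a = 1; t.b = 2; foo (&t); t.c = 3;
   the call to foo may read or clobber t, so the chain recording the first
   two stores is terminated (and, if profitable, merged and emitted) before
   the pass goes on to record t.c = 3 in a fresh chain.  */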
2260 /* Helper function. Terminate the recorded chain CHAIN_INFO that stores to
2261 its base object. Return true if the merging and output was successful.
2262 The m_stores entry is removed after the processing in any case. */
2264 bool
2265 pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
2267 bool ret = chain_info->terminate_and_process_chain ();
2268 m_stores.remove (chain_info->base_addr);
2269 delete chain_info;
2270 return ret;
2273 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2274 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2275 be able to sink the load of REF across the stores between FIRST and LAST, up
2276 to right before LAST. */
2278 bool
2279 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2281 ao_ref r;
2282 ao_ref_init (&r, ref);
2283 unsigned int count = 0;
2284 tree vop = gimple_vdef (last);
2285 gimple *stmt;
2287 /* Return true conservatively if the basic blocks are different. */
2288 if (gimple_bb (first) != gimple_bb (last))
2289 return true;
2293 stmt = SSA_NAME_DEF_STMT (vop);
2294 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2295 return true;
2296 if (gimple_store_p (stmt)
2297 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2298 return true;
2299 /* Avoid quadratic compile time by bounding the number of checks
2300 we perform. */
2301 if (++count > MAX_STORE_ALIAS_CHECKS)
2302 return true;
2303 vop = gimple_vuse (stmt);
2305 while (stmt != first);
2307 return false;
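/* Editor's note (descriptive illustration, not from the original file): the
   loop above walks the virtual def chain backwards from LAST towards FIRST
   and gives up (returns true) as soon as a visited statement may clobber
   REF or stores to a location REF is anti-dependent on, or once
   MAX_STORE_ALIAS_CHECKS statements have been inspected; statements in
   different basic blocks are rejected conservatively up front.  */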
2310 /* Return true if INFO->ops[IDX] is mergeable with the
2311 corresponding loads already in MERGED_STORE group.
2312 BASE_ADDR is the base address of the whole store group. */
2314 bool
2315 compatible_load_p (merged_store_group *merged_store,
2316 store_immediate_info *info,
2317 tree base_addr, int idx)
2319 store_immediate_info *infof = merged_store->stores[0];
2320 if (!info->ops[idx].base_addr
2321 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2322 info->bitpos - infof->bitpos)
2323 || !operand_equal_p (info->ops[idx].base_addr,
2324 infof->ops[idx].base_addr, 0))
2325 return false;
2327 store_immediate_info *infol = merged_store->stores.last ();
2328 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2329 /* In this case all vuses should be the same, e.g.
2330 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2331 or
2332 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2333 and we can emit the coalesced load next to any of those loads. */
2334 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2335 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2336 return true;
2338 /* Otherwise, at least for now require that the load has the same
2339 vuse as the store. See following examples. */
2340 if (gimple_vuse (info->stmt) != load_vuse)
2341 return false;
2343 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2344 || (infof != infol
2345 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2346 return false;
2348 /* If the load is from the same location as the store, already
2349 the construction of the immediate chain info guarantees no intervening
2350 stores, so no further checks are needed. Example:
2351 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
2352 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2353 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2354 return true;
2356 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2357 of the stores in the group, or any other stores in between those.
2358 Previous calls to compatible_load_p ensured that for all the
2359 merged_store->stores IDX loads, no stmts starting with
2360 merged_store->first_stmt and ending right before merged_store->last_stmt
2361 clobbers those loads. */
2362 gimple *first = merged_store->first_stmt;
2363 gimple *last = merged_store->last_stmt;
2364 unsigned int i;
2365 store_immediate_info *infoc;
2366 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2367 comes before the so far first load, we'll be changing
2368 merged_store->first_stmt. In that case we need to give up if
2369 any of the earlier processed loads clobber with the stmts in the new
2370 range. */
2371 if (info->order < merged_store->first_order)
2373 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2374 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2375 return false;
2376 first = info->stmt;
2378 /* Similarly, we could change merged_store->last_stmt, so ensure
2379 in that case no stmts in the new range clobber any of the earlier
2380 processed loads. */
2381 else if (info->order > merged_store->last_order)
2383 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2384 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2385 return false;
2386 last = info->stmt;
2388 /* And finally, we'd be adding a new load to the set, ensure it isn't
2389 clobbered in the new range. */
2390 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2391 return false;
2393 /* Otherwise, we are looking for:
2394 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2395 or
2396 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2397 return true;
2400 /* Add all refs loaded to compute VAL to REFS vector. */
2402 void
2403 gather_bswap_load_refs (vec<tree> *refs, tree val)
2405 if (TREE_CODE (val) != SSA_NAME)
2406 return;
2408 gimple *stmt = SSA_NAME_DEF_STMT (val);
2409 if (!is_gimple_assign (stmt))
2410 return;
2412 if (gimple_assign_load_p (stmt))
2414 refs->safe_push (gimple_assign_rhs1 (stmt));
2415 return;
2418 switch (gimple_assign_rhs_class (stmt))
2420 case GIMPLE_BINARY_RHS:
2421 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2422 /* FALLTHRU */
2423 case GIMPLE_UNARY_RHS:
2424 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2425 break;
2426 default:
2427 gcc_unreachable ();
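/* Editor's note, an illustrative example (not from the original file): for
     _1 = s.a;  _2 = t.b;  _3 = _2 << 8;  _4 = _1 | _3;
   calling gather_bswap_load_refs on _4 walks through the bitwise-or and
   shift statements and pushes the two memory references t.b and s.a into
   *REFS.  */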
2431 /* Check if there are any stores in M_STORE_INFO after index I
2432 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2433 a potential group ending at END and have their order
2434 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2435 all the stores already merged and the one under consideration
2436 have rhs_code of INTEGER_CST. Return true if there are no such stores.
2437 Consider:
2438 MEM[(long long int *)p_28] = 0;
2439 MEM[(long long int *)p_28 + 8B] = 0;
2440 MEM[(long long int *)p_28 + 16B] = 0;
2441 MEM[(long long int *)p_28 + 24B] = 0;
2442 _129 = (int) _130;
2443 MEM[(int *)p_28 + 8B] = _129;
2444 MEM[(int *)p_28].a = -1;
2445 We already have
2446 MEM[(long long int *)p_28] = 0;
2447 MEM[(int *)p_28].a = -1;
2448 stmts in the current group and need to consider if it is safe to
2449 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2450 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2451 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2452 into the group and merging of those 3 stores is successful, merged
2453 stmts will be emitted at the latest store from that group, i.e.
2454 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2455 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2456 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2457 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2458 into the group. That way it will be its own store group and will
2459 not be touched. If ALL_INTEGER_CST_P and there are overlapping
2460 INTEGER_CST stores, those are mergeable using merge_overlapping,
2461 so don't return false for those.
2463 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2464 (exclusive), whether they don't overlap the bitrange START to END
2465 and have order in between FIRST_ORDER and LAST_ORDER. This is to
2466 prevent merging in cases like:
2467 MEM <char[12]> [&b + 8B] = {};
2468 MEM[(short *) &b] = 5;
2469 _5 = *x_4(D);
2470 MEM <long long unsigned int> [&b + 2B] = _5;
2471 MEM[(char *)&b + 16B] = 88;
2472 MEM[(int *)&b + 20B] = 1;
2473 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2474 be merged with it, because the = _5 store overlaps these and is in between
2475 them in sort_by_order ordering. If it was merged, the merged store would
2476 go after the = _5 store and thus change behavior. */
2478 static bool
2479 check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
2480 bool all_integer_cst_p, unsigned int first_order,
2481 unsigned int last_order, unsigned HOST_WIDE_INT start,
2482 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2483 unsigned end_earlier)
2485 unsigned int len = m_store_info.length ();
2486 for (unsigned int j = first_earlier; j < end_earlier; j++)
2488 store_immediate_info *info = m_store_info[j];
2489 if (info->order > first_order
2490 && info->order < last_order
2491 && info->bitpos + info->bitsize > start)
2492 return false;
2494 for (++i; i < len; ++i)
2496 store_immediate_info *info = m_store_info[i];
2497 if (info->bitpos >= end)
2498 break;
2499 if (info->order < last_order
2500 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
2501 return false;
2503 return true;
2506 /* Return true if m_store_info[first] and at least one following store
2507 form a group which stores a value of try_size bits that is byte swapped
2508 from a memory load or some value, or is the identity of some value.
2509 This uses the bswap pass APIs. */
2511 bool
2512 imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2513 unsigned int first,
2514 unsigned int try_size,
2515 unsigned int first_earlier)
2517 unsigned int len = m_store_info.length (), last = first;
2518 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2519 if (width >= try_size)
2520 return false;
2521 for (unsigned int i = first + 1; i < len; ++i)
2523 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2524 || m_store_info[i]->lp_nr != merged_store->lp_nr
2525 || m_store_info[i]->ins_stmt == NULL)
2526 return false;
2527 width += m_store_info[i]->bitsize;
2528 if (width >= try_size)
2530 last = i;
2531 break;
2534 if (width != try_size)
2535 return false;
2537 bool allow_unaligned
2538 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
2539 /* Punt if the combined store would not be aligned and we need alignment. */
2540 if (!allow_unaligned)
2542 unsigned int align = merged_store->align;
2543 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2544 for (unsigned int i = first + 1; i <= last; ++i)
2546 unsigned int this_align;
2547 unsigned HOST_WIDE_INT align_bitpos = 0;
2548 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2549 &this_align, &align_bitpos);
2550 if (this_align > align)
2552 align = this_align;
2553 align_base = m_store_info[i]->bitpos - align_bitpos;
2556 unsigned HOST_WIDE_INT align_bitpos
2557 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2558 if (align_bitpos)
2559 align = least_bit_hwi (align_bitpos);
2560 if (align < try_size)
2561 return false;
2564 tree type;
2565 switch (try_size)
2567 case 16: type = uint16_type_node; break;
2568 case 32: type = uint32_type_node; break;
2569 case 64: type = uint64_type_node; break;
2570 default: gcc_unreachable ();
2572 struct symbolic_number n;
2573 gimple *ins_stmt = NULL;
2574 int vuse_store = -1;
2575 unsigned int first_order = merged_store->first_order;
2576 unsigned int last_order = merged_store->last_order;
2577 gimple *first_stmt = merged_store->first_stmt;
2578 gimple *last_stmt = merged_store->last_stmt;
2579 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2580 store_immediate_info *infof = m_store_info[first];
2582 for (unsigned int i = first; i <= last; ++i)
2584 store_immediate_info *info = m_store_info[i];
2585 struct symbolic_number this_n = info->n;
2586 this_n.type = type;
2587 if (!this_n.base_addr)
2588 this_n.range = try_size / BITS_PER_UNIT;
2589 else
2590 /* Update vuse in case it has changed by output_merged_stores. */
2591 this_n.vuse = gimple_vuse (info->ins_stmt);
2592 unsigned int bitpos = info->bitpos - infof->bitpos;
2593 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2594 BYTES_BIG_ENDIAN
2595 ? try_size - info->bitsize - bitpos
2596 : bitpos))
2597 return false;
2598 if (this_n.base_addr && vuse_store)
2600 unsigned int j;
2601 for (j = first; j <= last; ++j)
2602 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2603 break;
2604 if (j > last)
2606 if (vuse_store == 1)
2607 return false;
2608 vuse_store = 0;
2611 if (i == first)
2613 n = this_n;
2614 ins_stmt = info->ins_stmt;
2616 else
2618 if (n.base_addr && n.vuse != this_n.vuse)
2620 if (vuse_store == 0)
2621 return false;
2622 vuse_store = 1;
2624 if (info->order > last_order)
2626 last_order = info->order;
2627 last_stmt = info->stmt;
2629 else if (info->order < first_order)
2631 first_order = info->order;
2632 first_stmt = info->stmt;
2634 end = MAX (end, info->bitpos + info->bitsize);
2636 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2637 &this_n, &n);
2638 if (ins_stmt == NULL)
2639 return false;
2643 uint64_t cmpxchg, cmpnop;
2644 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2646 /* A complete byte swap should make the symbolic number start with
2647 the largest digit in the highest order byte. An unchanged symbolic
2648 number indicates a read with the same endianness as the target architecture. */
2649 if (n.n != cmpnop && n.n != cmpxchg)
2650 return false;
2652 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2653 return false;
2655 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
2656 merged_store->start, end, first_earlier, first))
2657 return false;
2659 /* Don't handle memory copy this way if normal non-bswap processing
2660 would handle it too. */
2661 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2663 unsigned int i;
2664 for (i = first; i <= last; ++i)
2665 if (m_store_info[i]->rhs_code != MEM_REF)
2666 break;
2667 if (i == last + 1)
2668 return false;
2671 if (n.n == cmpxchg)
2672 switch (try_size)
2674 case 16:
2675 /* Will emit LROTATE_EXPR. */
2676 break;
2677 case 32:
2678 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2679 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2680 break;
2681 return false;
2682 case 64:
2683 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2684 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2685 break;
2686 return false;
2687 default:
2688 gcc_unreachable ();
2691 if (!allow_unaligned && n.base_addr)
2693 unsigned int align = get_object_alignment (n.src);
2694 if (align < try_size)
2695 return false;
2698 /* If each load has the vuse of the corresponding store, we need to verify
2699 that the loads can be sunk right before the last store. */
2700 if (vuse_store == 1)
2702 auto_vec<tree, 64> refs;
2703 for (unsigned int i = first; i <= last; ++i)
2704 gather_bswap_load_refs (&refs,
2705 gimple_assign_rhs1 (m_store_info[i]->stmt));
2707 unsigned int i;
2708 tree ref;
2709 FOR_EACH_VEC_ELT (refs, i, ref)
2710 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2711 return false;
2712 n.vuse = NULL_TREE;
2715 infof->n = n;
2716 infof->ins_stmt = ins_stmt;
2717 for (unsigned int i = first; i <= last; ++i)
2719 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2720 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2721 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2722 if (i != first)
2723 merged_store->merge_into (m_store_info[i]);
2726 return true;
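/* Editor's note, an illustrative example of the two outcomes above (not
   from the original file): for
     p[0] = (char) x;          p[1] = (char) (x >> 8);
     p[2] = (char) (x >> 16);  p[3] = (char) (x >> 24);
   the symbolic number matches cmpnop on a little-endian target, so the
   group becomes a single 32-bit store of x (NOP_EXPR), while on a
   big-endian target it matches cmpxchg and a single store of
   __builtin_bswap32 (x) is emitted instead, provided the optab check above
   succeeded.  */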
2729 /* Go through the candidate stores recorded in m_store_info and merge them
2730 into merged_store_group objects recorded into m_merged_store_groups
2731 representing the widened stores. Return true if coalescing was successful
2732 and the number of widened stores is fewer than the original number
2733 of stores. */
2735 bool
2736 imm_store_chain_info::coalesce_immediate_stores ()
2738 /* Anything less can't be processed. */
2739 if (m_store_info.length () < 2)
2740 return false;
2742 if (dump_file && (dump_flags & TDF_DETAILS))
2743 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
2744 m_store_info.length ());
2746 store_immediate_info *info;
2747 unsigned int i, ignore = 0;
2748 unsigned int first_earlier = 0;
2749 unsigned int end_earlier = 0;
2751 /* Order the stores by the bitposition they write to. */
2752 m_store_info.qsort (sort_by_bitpos);
2754 info = m_store_info[0];
2755 merged_store_group *merged_store = new merged_store_group (info);
2756 if (dump_file && (dump_flags & TDF_DETAILS))
2757 fputs ("New store group\n", dump_file);
2759 FOR_EACH_VEC_ELT (m_store_info, i, info)
2761 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2763 if (i <= ignore)
2764 goto done;
2766 while (first_earlier < end_earlier
2767 && (m_store_info[first_earlier]->bitpos
2768 + m_store_info[first_earlier]->bitsize
2769 <= merged_store->start))
2770 first_earlier++;
2772 /* First try to handle a group of stores like:
2773 p[0] = data >> 24;
2774 p[1] = data >> 16;
2775 p[2] = data >> 8;
2776 p[3] = data;
2777 using the bswap framework. */
2778 if (info->bitpos == merged_store->start + merged_store->width
2779 && merged_store->stores.length () == 1
2780 && merged_store->stores[0]->ins_stmt != NULL
2781 && info->lp_nr == merged_store->lp_nr
2782 && info->ins_stmt != NULL)
2784 unsigned int try_size;
2785 for (try_size = 64; try_size >= 16; try_size >>= 1)
2786 if (try_coalesce_bswap (merged_store, i - 1, try_size,
2787 first_earlier))
2788 break;
2790 if (try_size >= 16)
2792 ignore = i + merged_store->stores.length () - 1;
2793 m_merged_store_groups.safe_push (merged_store);
2794 if (ignore < m_store_info.length ())
2796 merged_store = new merged_store_group (m_store_info[ignore]);
2797 end_earlier = ignore;
2799 else
2800 merged_store = NULL;
2801 goto done;
2805 new_bitregion_start
2806 = MIN (merged_store->bitregion_start, info->bitregion_start);
2807 new_bitregion_end
2808 = MAX (merged_store->bitregion_end, info->bitregion_end);
2810 if (info->order >= merged_store->first_nonmergeable_order
2811 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
2812 > (unsigned) param_store_merging_max_size))
2815 /* |---store 1---|
2816 |---store 2---|
2817 Overlapping stores. */
2818 else if (IN_RANGE (info->bitpos, merged_store->start,
2819 merged_store->start + merged_store->width - 1)
2820 /* |---store 1---||---store 2---|
2821 Handle also the consecutive INTEGER_CST stores case here,
2822 as we have here the code to deal with overlaps. */
2823 || (info->bitregion_start <= merged_store->bitregion_end
2824 && info->rhs_code == INTEGER_CST
2825 && merged_store->only_constants
2826 && merged_store->can_be_merged_into (info)))
2828 /* Only allow overlapping stores of constants. */
2829 if (info->rhs_code == INTEGER_CST
2830 && merged_store->only_constants
2831 && info->lp_nr == merged_store->lp_nr)
2833 unsigned int first_order
2834 = MIN (merged_store->first_order, info->order);
2835 unsigned int last_order
2836 = MAX (merged_store->last_order, info->order);
2837 unsigned HOST_WIDE_INT end
2838 = MAX (merged_store->start + merged_store->width,
2839 info->bitpos + info->bitsize);
2840 if (check_no_overlap (m_store_info, i, true, first_order,
2841 last_order, merged_store->start, end,
2842 first_earlier, end_earlier))
2844 /* check_no_overlap call above made sure there are no
2845 overlapping stores with non-INTEGER_CST rhs_code
2846 in between the first and last of the stores we've
2847 just merged. If there are any INTEGER_CST rhs_code
2848 stores in between, we need to merge_overlapping them
2849 even if in the sort_by_bitpos order there are other
2850 overlapping stores in between. Keep those stores as is.
2851 Example:
2852 MEM[(int *)p_28] = 0;
2853 MEM[(char *)p_28 + 3B] = 1;
2854 MEM[(char *)p_28 + 1B] = 2;
2855 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2856 We can't merge the zero store with the store of two and
2857 not merge anything else, because the store of one is
2858 in the original order in between those two, but in
2859 sort_by_bitpos order it comes after the last store that
2860 we can't merge with them. We can merge the first 3 stores
2861 and keep the last store as is though. */
2862 unsigned int len = m_store_info.length ();
2863 unsigned int try_order = last_order;
2864 unsigned int first_nonmergeable_order;
2865 unsigned int k;
2866 bool last_iter = false;
2867 int attempts = 0;
2870 unsigned int max_order = 0;
2871 unsigned int min_order = first_order;
2872 unsigned first_nonmergeable_int_order = ~0U;
2873 unsigned HOST_WIDE_INT this_end = end;
2874 k = i;
2875 first_nonmergeable_order = ~0U;
2876 for (unsigned int j = i + 1; j < len; ++j)
2878 store_immediate_info *info2 = m_store_info[j];
2879 if (info2->bitpos >= this_end)
2880 break;
2881 if (info2->order < try_order)
2883 if (info2->rhs_code != INTEGER_CST
2884 || info2->lp_nr != merged_store->lp_nr)
2886 /* Normally check_no_overlap makes sure this
2887 doesn't happen, but if end grows below,
2888 then we need to process more stores than
2889 check_no_overlap verified. Example:
2890 MEM[(int *)p_5] = 0;
2891 MEM[(short *)p_5 + 3B] = 1;
2892 MEM[(char *)p_5 + 4B] = _9;
2893 MEM[(char *)p_5 + 2B] = 2; */
2894 k = 0;
2895 break;
2897 k = j;
2898 min_order = MIN (min_order, info2->order);
2899 this_end = MAX (this_end,
2900 info2->bitpos + info2->bitsize);
2902 else if (info2->rhs_code == INTEGER_CST
2903 && info2->lp_nr == merged_store->lp_nr
2904 && !last_iter)
2906 max_order = MAX (max_order, info2->order + 1);
2907 first_nonmergeable_int_order
2908 = MIN (first_nonmergeable_int_order,
2909 info2->order);
2911 else
2912 first_nonmergeable_order
2913 = MIN (first_nonmergeable_order, info2->order);
2915 if (k > i
2916 && !check_no_overlap (m_store_info, len - 1, true,
2917 min_order, try_order,
2918 merged_store->start, this_end,
2919 first_earlier, end_earlier))
2920 k = 0;
2921 if (k == 0)
2923 if (last_order == try_order)
2924 break;
2925 /* If this failed, but only because we grew
2926 try_order, retry with the last working one,
2927 so that we merge at least something. */
2928 try_order = last_order;
2929 last_iter = true;
2930 continue;
2932 last_order = try_order;
2933 /* Retry with a larger try_order to see if we could
2934 merge some further INTEGER_CST stores. */
2935 if (max_order
2936 && (first_nonmergeable_int_order
2937 < first_nonmergeable_order))
2939 try_order = MIN (max_order,
2940 first_nonmergeable_order);
2941 try_order
2942 = MIN (try_order,
2943 merged_store->first_nonmergeable_order);
2944 if (try_order > last_order && ++attempts < 16)
2945 continue;
2947 first_nonmergeable_order
2948 = MIN (first_nonmergeable_order,
2949 first_nonmergeable_int_order);
2950 end = this_end;
2951 break;
2953 while (1);
2955 if (k != 0)
2957 merged_store->merge_overlapping (info);
2959 merged_store->first_nonmergeable_order
2960 = MIN (merged_store->first_nonmergeable_order,
2961 first_nonmergeable_order);
2963 for (unsigned int j = i + 1; j <= k; j++)
2965 store_immediate_info *info2 = m_store_info[j];
2966 gcc_assert (info2->bitpos < end);
2967 if (info2->order < last_order)
2969 gcc_assert (info2->rhs_code == INTEGER_CST);
2970 if (info != info2)
2971 merged_store->merge_overlapping (info2);
2973 /* Other stores are kept and not merged in any
2974 way. */
2976 ignore = k;
2977 goto done;
2982 /* |---store 1---||---store 2---|
2983 This store is consecutive to the previous one.
2984 Merge it into the current store group. There can be gaps in between
2985 the stores, but there can't be gaps in between bitregions. */
2986 else if (info->bitregion_start <= merged_store->bitregion_end
2987 && merged_store->can_be_merged_into (info))
2989 store_immediate_info *infof = merged_store->stores[0];
2991 /* All the rhs_code ops that take 2 operands are commutative;
2992 swap the operands if it could make them compatible. */
2993 if (infof->ops[0].base_addr
2994 && infof->ops[1].base_addr
2995 && info->ops[0].base_addr
2996 && info->ops[1].base_addr
2997 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
2998 info->bitpos - infof->bitpos)
2999 && operand_equal_p (info->ops[1].base_addr,
3000 infof->ops[0].base_addr, 0))
3002 std::swap (info->ops[0], info->ops[1]);
3003 info->ops_swapped_p = true;
3005 if (check_no_overlap (m_store_info, i, false,
3006 MIN (merged_store->first_order, info->order),
3007 MAX (merged_store->last_order, info->order),
3008 merged_store->start,
3009 MAX (merged_store->start + merged_store->width,
3010 info->bitpos + info->bitsize),
3011 first_earlier, end_earlier))
3013 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3014 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3016 info->rhs_code = BIT_INSERT_EXPR;
3017 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3018 info->ops[0].base_addr = NULL_TREE;
3020 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3022 store_immediate_info *infoj;
3023 unsigned int j;
3024 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
3026 infoj->rhs_code = BIT_INSERT_EXPR;
3027 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3028 infoj->ops[0].base_addr = NULL_TREE;
3030 merged_store->bit_insertion = true;
3032 if ((infof->ops[0].base_addr
3033 ? compatible_load_p (merged_store, info, base_addr, 0)
3034 : !info->ops[0].base_addr)
3035 && (infof->ops[1].base_addr
3036 ? compatible_load_p (merged_store, info, base_addr, 1)
3037 : !info->ops[1].base_addr))
3039 merged_store->merge_into (info);
3040 goto done;
3045 /* |---store 1---| <gap> |---store 2---|.
3046 Gap between stores or the rhs not compatible. Start a new group. */
3048 /* Try to apply all the stores recorded for the group to determine
3049 the bitpattern they write and discard it if that fails.
3050 This will also reject single-store groups. */
3051 if (merged_store->apply_stores ())
3052 m_merged_store_groups.safe_push (merged_store);
3053 else
3054 delete merged_store;
3056 merged_store = new merged_store_group (info);
3057 end_earlier = i;
3058 if (dump_file && (dump_flags & TDF_DETAILS))
3059 fputs ("New store group\n", dump_file);
3061 done:
3062 if (dump_file && (dump_flags & TDF_DETAILS))
3064 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3065 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3066 i, info->bitsize, info->bitpos);
3067 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3068 fputc ('\n', dump_file);
3072 /* Record or discard the last store group. */
3073 if (merged_store)
3075 if (merged_store->apply_stores ())
3076 m_merged_store_groups.safe_push (merged_store);
3077 else
3078 delete merged_store;
3081 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
3083 bool success
3084 = !m_merged_store_groups.is_empty ()
3085 && m_merged_store_groups.length () < m_store_info.length ();
3087 if (success && dump_file)
3088 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
3089 m_merged_store_groups.length ());
3091 return success;
3094 /* Return the type to use for the merged stores or loads described by STMTS.
3095 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3096 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3097 of the MEM_REFs if any. */
3099 static tree
3100 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3101 unsigned short *cliquep, unsigned short *basep)
3103 gimple *stmt;
3104 unsigned int i;
3105 tree type = NULL_TREE;
3106 tree ret = NULL_TREE;
3107 *cliquep = 0;
3108 *basep = 0;
3110 FOR_EACH_VEC_ELT (stmts, i, stmt)
3112 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3113 : gimple_assign_lhs (stmt);
3114 tree type1 = reference_alias_ptr_type (ref);
3115 tree base = get_base_address (ref);
3117 if (i == 0)
3119 if (TREE_CODE (base) == MEM_REF)
3121 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3122 *basep = MR_DEPENDENCE_BASE (base);
3124 ret = type = type1;
3125 continue;
3127 if (!alias_ptr_types_compatible_p (type, type1))
3128 ret = ptr_type_node;
3129 if (TREE_CODE (base) != MEM_REF
3130 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3131 || *basep != MR_DEPENDENCE_BASE (base))
3133 *cliquep = 0;
3134 *basep = 0;
3137 return ret;
3140 /* Return the location_t information we can find among the statements
3141 in STMTS. */
3143 static location_t
3144 get_location_for_stmts (vec<gimple *> &stmts)
3146 gimple *stmt;
3147 unsigned int i;
3149 FOR_EACH_VEC_ELT (stmts, i, stmt)
3150 if (gimple_has_location (stmt))
3151 return gimple_location (stmt);
3153 return UNKNOWN_LOCATION;
3156 /* Used to describe a store resulting from splitting a wide store into smaller
3157 regularly-sized stores in split_group. */
3159 class split_store
3161 public:
3162 unsigned HOST_WIDE_INT bytepos;
3163 unsigned HOST_WIDE_INT size;
3164 unsigned HOST_WIDE_INT align;
3165 auto_vec<store_immediate_info *> orig_stores;
3166 /* True if there is a single orig stmt covering the whole split store. */
3167 bool orig;
3168 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3169 unsigned HOST_WIDE_INT);
3172 /* Simple constructor. */
3174 split_store::split_store (unsigned HOST_WIDE_INT bp,
3175 unsigned HOST_WIDE_INT sz,
3176 unsigned HOST_WIDE_INT al)
3177 : bytepos (bp), size (sz), align (al), orig (false)
3179 orig_stores.create (0);
3182 /* Record all stores in GROUP that write to the region starting at BITPOS and
3183 is of size BITSIZE. Record infos for such statements in STORES if
3184 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
3185 if there is exactly one original store in the range (in that case ignore
3186 clobber stmts, unless there are only clobber stmts). */
3188 static store_immediate_info *
3189 find_constituent_stores (class merged_store_group *group,
3190 vec<store_immediate_info *> *stores,
3191 unsigned int *first,
3192 unsigned HOST_WIDE_INT bitpos,
3193 unsigned HOST_WIDE_INT bitsize)
3195 store_immediate_info *info, *ret = NULL;
3196 unsigned int i;
3197 bool second = false;
3198 bool update_first = true;
3199 unsigned HOST_WIDE_INT end = bitpos + bitsize;
3200 for (i = *first; group->stores.iterate (i, &info); ++i)
3202 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3203 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
3204 if (stmt_end <= bitpos)
3206 /* BITPOS passed to this function never decreases from within the
3207 same split_group call, so optimize and don't scan info records
3208 which are known to end before or at BITPOS next time.
3209 Only do it if all stores before this one also pass this. */
3210 if (update_first)
3211 *first = i + 1;
3212 continue;
3214 else
3215 update_first = false;
3217 /* The stores in GROUP are ordered by bitposition so if we're past
3218 the region for this group return early. */
3219 if (stmt_start >= end)
3220 return ret;
3222 if (gimple_clobber_p (info->stmt))
3224 if (stores)
3225 stores->safe_push (info);
3226 if (ret == NULL)
3227 ret = info;
3228 continue;
3230 if (stores)
3232 stores->safe_push (info);
3233 if (ret && !gimple_clobber_p (ret->stmt))
3235 ret = NULL;
3236 second = true;
3239 else if (ret && !gimple_clobber_p (ret->stmt))
3240 return NULL;
3241 if (!second)
3242 ret = info;
3244 return ret;
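/* Editor's note, an illustrative example (not from the original file): if
   GROUP records stores covering bits [0, 16) and [16, 24), a query for
   BITPOS 0, BITSIZE 32 pushes both stores into *STORES but returns NULL
   (more than one non-clobber store in the range), whereas a query for
   BITPOS 16, BITSIZE 8 returns the second store alone, which split_group
   can then reuse as-is.  */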
3247 /* Return how many SSA_NAMEs used to compute the value stored by the INFO
3248 store have multiple uses. If any SSA_NAME has multiple uses, also
3249 count the statements needed to compute it. */
3251 static unsigned
3252 count_multiple_uses (store_immediate_info *info)
3254 gimple *stmt = info->stmt;
3255 unsigned ret = 0;
3256 switch (info->rhs_code)
3258 case INTEGER_CST:
3259 case STRING_CST:
3260 return 0;
3261 case BIT_AND_EXPR:
3262 case BIT_IOR_EXPR:
3263 case BIT_XOR_EXPR:
3264 if (info->bit_not_p)
3266 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3267 ret = 1; /* Fall through below to return
3268 the BIT_NOT_EXPR stmt and then
3269 BIT_{AND,IOR,XOR}_EXPR and anything it
3270 uses. */
3271 else
3272 /* stmt is after this the BIT_NOT_EXPR. */
3273 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3275 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3277 ret += 1 + info->ops[0].bit_not_p;
3278 if (info->ops[1].base_addr)
3279 ret += 1 + info->ops[1].bit_not_p;
3280 return ret + 1;
3282 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3283 /* stmt is now the BIT_*_EXPR. */
3284 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3285 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3286 else if (info->ops[info->ops_swapped_p].bit_not_p)
3288 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3289 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3290 ++ret;
3292 if (info->ops[1].base_addr == NULL_TREE)
3294 gcc_checking_assert (!info->ops_swapped_p);
3295 return ret;
3297 if (!has_single_use (gimple_assign_rhs2 (stmt)))
3298 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3299 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3301 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3302 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3303 ++ret;
3305 return ret;
3306 case MEM_REF:
3307 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3308 return 1 + info->ops[0].bit_not_p;
3309 else if (info->ops[0].bit_not_p)
3311 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3312 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3313 return 1;
3315 return 0;
3316 case BIT_INSERT_EXPR:
3317 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3318 default:
3319 gcc_unreachable ();
3323 /* Split a merged store described by GROUP by populating the SPLIT_STORES
3324 vector (if non-NULL) with split_store structs describing the byte offset
3325 (from the base), the bit size and alignment of each store as well as the
3326 original statements involved in each such split group.
3327 This is to separate the splitting strategy from the statement
3328 building/emission/linking done in output_merged_store.
3329 Return number of new stores.
3330 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3331 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3332 BZERO_FIRST may be true only when the first store covers the whole group
3333 and clears it; if BZERO_FIRST is true, keep that first store in the set
3334 unmodified and emit further stores for the overrides only.
3335 If SPLIT_STORES is NULL, it is just a dry run to count number of
3336 new stores. */
3338 static unsigned int
3339 split_group (merged_store_group *group, bool allow_unaligned_store,
3340 bool allow_unaligned_load, bool bzero_first,
3341 vec<split_store *> *split_stores,
3342 unsigned *total_orig,
3343 unsigned *total_new)
3345 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3346 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
3347 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
3348 unsigned HOST_WIDE_INT group_align = group->align;
3349 unsigned HOST_WIDE_INT align_base = group->align_base;
3350 unsigned HOST_WIDE_INT group_load_align = group_align;
3351 bool any_orig = false;
3353 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3355 /* For the bswap framework using sets of stores, all the checking has been done
3356 earlier in try_coalesce_bswap and the result always needs to be emitted
3357 as a single store. Likewise for string concatenation. */
3358 if (group->stores[0]->rhs_code == LROTATE_EXPR
3359 || group->stores[0]->rhs_code == NOP_EXPR
3360 || group->string_concatenation)
3362 gcc_assert (!bzero_first);
3363 if (total_orig)
3365 /* Avoid the old/new stmt count heuristics. It should be
3366 always beneficial. */
3367 total_new[0] = 1;
3368 total_orig[0] = 2;
3371 if (split_stores)
3373 unsigned HOST_WIDE_INT align_bitpos
3374 = (group->start - align_base) & (group_align - 1);
3375 unsigned HOST_WIDE_INT align = group_align;
3376 if (align_bitpos)
3377 align = least_bit_hwi (align_bitpos);
3378 bytepos = group->start / BITS_PER_UNIT;
3379 split_store *store
3380 = new split_store (bytepos, group->width, align);
3381 unsigned int first = 0;
3382 find_constituent_stores (group, &store->orig_stores,
3383 &first, group->start, group->width);
3384 split_stores->safe_push (store);
3387 return 1;
3390 unsigned int ret = 0, first = 0;
3391 unsigned HOST_WIDE_INT try_pos = bytepos;
3393 if (total_orig)
3395 unsigned int i;
3396 store_immediate_info *info = group->stores[0];
3398 total_new[0] = 0;
3399 total_orig[0] = 1; /* The orig store. */
3400 info = group->stores[0];
3401 if (info->ops[0].base_addr)
3402 total_orig[0]++;
3403 if (info->ops[1].base_addr)
3404 total_orig[0]++;
3405 switch (info->rhs_code)
3407 case BIT_AND_EXPR:
3408 case BIT_IOR_EXPR:
3409 case BIT_XOR_EXPR:
3410 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3411 break;
3412 default:
3413 break;
3415 total_orig[0] *= group->stores.length ();
3417 FOR_EACH_VEC_ELT (group->stores, i, info)
3419 total_new[0] += count_multiple_uses (info);
3420 total_orig[0] += (info->bit_not_p
3421 + info->ops[0].bit_not_p
3422 + info->ops[1].bit_not_p);
3426 if (!allow_unaligned_load)
3427 for (int i = 0; i < 2; ++i)
3428 if (group->load_align[i])
3429 group_load_align = MIN (group_load_align, group->load_align[i]);
3431 if (bzero_first)
3433 store_immediate_info *gstore;
3434 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3435 if (!gimple_clobber_p (gstore->stmt))
3436 break;
3437 ++first;
3438 ret = 1;
3439 if (split_stores)
3441 split_store *store
3442 = new split_store (bytepos, gstore->bitsize, align_base);
3443 store->orig_stores.safe_push (gstore);
3444 store->orig = true;
3445 any_orig = true;
3446 split_stores->safe_push (store);
3450 while (size > 0)
3452 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3453 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3454 || (bzero_first && group->val[try_pos - bytepos] == 0)))
3456 /* Skip padding bytes. */
3457 ++try_pos;
3458 size -= BITS_PER_UNIT;
3459 continue;
3462 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3463 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3464 unsigned HOST_WIDE_INT align_bitpos
3465 = (try_bitpos - align_base) & (group_align - 1);
3466 unsigned HOST_WIDE_INT align = group_align;
3467 bool found_orig = false;
3468 if (align_bitpos)
3469 align = least_bit_hwi (align_bitpos);
3470 if (!allow_unaligned_store)
3471 try_size = MIN (try_size, align);
3472 if (!allow_unaligned_load)
3474 /* If we can't do or don't want to do unaligned stores
3475 as well as loads, we need to take the loads into account
3476 as well. */
3477 unsigned HOST_WIDE_INT load_align = group_load_align;
3478 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3479 if (align_bitpos)
3480 load_align = least_bit_hwi (align_bitpos);
3481 for (int i = 0; i < 2; ++i)
3482 if (group->load_align[i])
3484 align_bitpos
3485 = known_alignment (try_bitpos
3486 - group->stores[0]->bitpos
3487 + group->stores[0]->ops[i].bitpos
3488 - group->load_align_base[i]);
3489 if (align_bitpos & (group_load_align - 1))
3491 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3492 load_align = MIN (load_align, a);
3495 try_size = MIN (try_size, load_align);
3497 store_immediate_info *info
3498 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3499 if (info && !gimple_clobber_p (info->stmt))
3501 /* If there is just one original statement for the range, see if
3502 we can just reuse the original store which could be even larger
3503 than try_size. */
3504 unsigned HOST_WIDE_INT stmt_end
3505 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
3506 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3507 stmt_end - try_bitpos);
3508 if (info && info->bitpos >= try_bitpos)
3510 store_immediate_info *info2 = NULL;
3511 unsigned int first_copy = first;
3512 if (info->bitpos > try_bitpos
3513 && stmt_end - try_bitpos <= try_size)
3515 info2 = find_constituent_stores (group, NULL, &first_copy,
3516 try_bitpos,
3517 info->bitpos - try_bitpos);
3518 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3520 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3522 info2 = find_constituent_stores (group, NULL, &first_copy,
3523 stmt_end,
3524 (try_bitpos + try_size)
3525 - stmt_end);
3526 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3528 if (info2 == NULL)
3530 try_size = stmt_end - try_bitpos;
3531 found_orig = true;
3532 goto found;
3537 /* Approximate store bitsize for the case when there are no padding
3538 bits. */
3539 while (try_size > size)
3540 try_size /= 2;
3541 /* Now look for whole padding bytes at the end of that bitsize. */
3542 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3543 if (group->mask[try_pos - bytepos + nonmasked - 1]
3544 != (unsigned char) ~0U
3545 && (!bzero_first
3546 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
3547 break;
3548 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
3550 /* If entire try_size range is padding, skip it. */
3551 try_pos += try_size / BITS_PER_UNIT;
3552 size -= try_size;
3553 continue;
3555 /* Otherwise try to decrease try_size if the second half, the last 3 quarters,
3556 etc. are padding. */
3557 nonmasked *= BITS_PER_UNIT;
3558 while (nonmasked <= try_size / 2)
3559 try_size /= 2;
3560 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
3562 /* Now look for whole padding bytes at the start of that bitsize. */
3563 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3564 for (masked = 0; masked < try_bytesize; ++masked)
3565 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3566 && (!bzero_first
3567 || group->val[try_pos - bytepos + masked] != 0))
3568 break;
3569 masked *= BITS_PER_UNIT;
3570 gcc_assert (masked < try_size);
3571 if (masked >= try_size / 2)
3573 while (masked >= try_size / 2)
3575 try_size /= 2;
3576 try_pos += try_size / BITS_PER_UNIT;
3577 size -= try_size;
3578 masked -= try_size;
3580 /* Need to recompute the alignment, so just retry at the new
3581 position. */
3582 continue;
3586 found:
3587 ++ret;
3589 if (split_stores)
3591 split_store *store
3592 = new split_store (try_pos, try_size, align);
3593 info = find_constituent_stores (group, &store->orig_stores,
3594 &first, try_bitpos, try_size);
3595 if (info
3596 && !gimple_clobber_p (info->stmt)
3597 && info->bitpos >= try_bitpos
3598 && info->bitpos + info->bitsize <= try_bitpos + try_size
3599 && (store->orig_stores.length () == 1
3600 || found_orig
3601 || (info->bitpos == try_bitpos
3602 && (info->bitpos + info->bitsize
3603 == try_bitpos + try_size))))
3605 store->orig = true;
3606 any_orig = true;
3608 split_stores->safe_push (store);
3611 try_pos += try_size / BITS_PER_UNIT;
3612 size -= try_size;
3615 if (total_orig)
3617 unsigned int i;
3618 split_store *store;
3619 /* If we are reusing some original stores and any of the
3620 original SSA_NAMEs had multiple uses, we need to subtract
3621 those now before we add the new ones. */
3622 if (total_new[0] && any_orig)
3624 FOR_EACH_VEC_ELT (*split_stores, i, store)
3625 if (store->orig)
3626 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3628 total_new[0] += ret; /* The new store. */
3629 store_immediate_info *info = group->stores[0];
3630 if (info->ops[0].base_addr)
3631 total_new[0] += ret;
3632 if (info->ops[1].base_addr)
3633 total_new[0] += ret;
3634 switch (info->rhs_code)
3636 case BIT_AND_EXPR:
3637 case BIT_IOR_EXPR:
3638 case BIT_XOR_EXPR:
3639 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3640 break;
3641 default:
3642 break;
3644 FOR_EACH_VEC_ELT (*split_stores, i, store)
3646 unsigned int j;
3647 bool bit_not_p[3] = { false, false, false };
3648 /* If all orig_stores have a given bit_not_p flag set, then
3649 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3650 If only some orig_stores have that flag set, then
3651 we'd use a BIT_XOR_EXPR with a mask and need to account for
3652 it. */
3653 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3655 if (info->ops[0].bit_not_p)
3656 bit_not_p[0] = true;
3657 if (info->ops[1].bit_not_p)
3658 bit_not_p[1] = true;
3659 if (info->bit_not_p)
3660 bit_not_p[2] = true;
3662 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3667 return ret;
3670 /* Return the operation through which the operand IDX (if < 2) or
3671 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3672 is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
3673 the bits should be xored with mask. */
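/* An illustrative (made-up) example: if both constituent byte stores
   invert a loaded value,
     _1 = q[0];  _2 = ~_1;  p[0] = _2;
     _3 = q[1];  _4 = ~_3;  p[1] = _4;
   then BIT_NOT_EXPR is returned for IDX 0 and the whole merged operand
   is inverted at once.  If only the first store inverts its value,
   BIT_XOR_EXPR is returned instead, with MASK having all bits of the
   first byte set so that only those bits are flipped. */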
3675 static enum tree_code
3676 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3678 unsigned int i;
3679 store_immediate_info *info;
3680 unsigned int cnt = 0;
3681 bool any_paddings = false;
3682 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3684 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3685 if (bit_not_p)
3687 ++cnt;
3688 tree lhs = gimple_assign_lhs (info->stmt);
3689 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3690 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3691 any_paddings = true;
3694 mask = NULL_TREE;
3695 if (cnt == 0)
3696 return NOP_EXPR;
3697 if (cnt == split_store->orig_stores.length () && !any_paddings)
3698 return BIT_NOT_EXPR;
3700 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3701 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3702 unsigned char *buf
3703 = XALLOCAVEC (unsigned char, buf_size);
3704 memset (buf, ~0U, buf_size);
3705 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3707 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3708 if (!bit_not_p)
3709 continue;
3710 /* Clear regions with bit_not_p and invert afterwards, rather than
3711 clear regions with !bit_not_p, so that gaps in between stores aren't
3712 set in the mask. */
3713 unsigned HOST_WIDE_INT bitsize = info->bitsize;
3714 unsigned HOST_WIDE_INT prec = bitsize;
3715 unsigned int pos_in_buffer = 0;
3716 if (any_paddings)
3718 tree lhs = gimple_assign_lhs (info->stmt);
3719 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3720 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3721 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3723 if (info->bitpos < try_bitpos)
3725 gcc_assert (info->bitpos + bitsize > try_bitpos);
3726 if (!BYTES_BIG_ENDIAN)
3728 if (prec <= try_bitpos - info->bitpos)
3729 continue;
3730 prec -= try_bitpos - info->bitpos;
3732 bitsize -= try_bitpos - info->bitpos;
3733 if (BYTES_BIG_ENDIAN && prec > bitsize)
3734 prec = bitsize;
3736 else
3737 pos_in_buffer = info->bitpos - try_bitpos;
3738 if (prec < bitsize)
3740 /* If this is a bool inversion, invert just the least significant
3741 prec bits rather than all bits of it. */
3742 if (BYTES_BIG_ENDIAN)
3744 pos_in_buffer += bitsize - prec;
3745 if (pos_in_buffer >= split_store->size)
3746 continue;
3748 bitsize = prec;
3750 if (pos_in_buffer + bitsize > split_store->size)
3751 bitsize = split_store->size - pos_in_buffer;
3752 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3753 if (BYTES_BIG_ENDIAN)
3754 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3755 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3756 else
3757 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3759 for (unsigned int i = 0; i < buf_size; ++i)
3760 buf[i] = ~buf[i];
3761 mask = native_interpret_expr (int_type, buf, buf_size);
3762 return BIT_XOR_EXPR;
3765 /* Given a merged store group GROUP output the widened version of it.
3766 The store chain is against the base object BASE.
3767 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3768 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3769 Make sure that the number of statements output is less than the number of
3770 original statements. If a better sequence is possible emit it and
3771 return true. */
3773 bool
3774 imm_store_chain_info::output_merged_store (merged_store_group *group)
3776 const unsigned HOST_WIDE_INT start_byte_pos
3777 = group->bitregion_start / BITS_PER_UNIT;
3778 unsigned int orig_num_stmts = group->stores.length ();
3779 if (orig_num_stmts < 2)
3780 return false;
3782 bool allow_unaligned_store
3783 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
3784 bool allow_unaligned_load = allow_unaligned_store;
3785 bool bzero_first = false;
3786 store_immediate_info *store;
3787 unsigned int num_clobber_stmts = 0;
3788 if (group->stores[0]->rhs_code == INTEGER_CST)
3790 unsigned int i;
3791 FOR_EACH_VEC_ELT (group->stores, i, store)
3792 if (gimple_clobber_p (store->stmt))
3793 num_clobber_stmts++;
3794 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3795 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3796 && group->start == store->bitpos
3797 && group->width == store->bitsize
3798 && (group->start % BITS_PER_UNIT) == 0
3799 && (group->width % BITS_PER_UNIT) == 0)
3801 bzero_first = true;
3802 break;
3804 else
3805 break;
3806 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3807 if (gimple_clobber_p (store->stmt))
3808 num_clobber_stmts++;
3809 if (num_clobber_stmts == orig_num_stmts)
3810 return false;
3811 orig_num_stmts -= num_clobber_stmts;
3813 if (allow_unaligned_store || bzero_first)
3815 /* If unaligned stores are allowed, see how many stores we'd emit
3816 for unaligned and how many stores we'd emit for aligned stores.
3817 Only use unaligned stores if they allow fewer stores than aligned ones.
3818 Similarly, if the whole region is cleared first, prefer expanding
3819 that clear together with the merged stores rather than emitting the
3820 clear first followed by further merged stores. */
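/* A short summary of the trial loop below: bit 0 of PASS selects
   whether unaligned stores are allowed and bit 1 selects whether the
   group is assumed to be cleared first (so zero bytes can be skipped
   like padding); the combination producing the fewest split stores
   determines the final allow_unaligned_store and bzero_first flags. */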
3821 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
3822 int pass_min = 0;
3823 for (int pass = 0; pass < 4; ++pass)
3825 if (!allow_unaligned_store && (pass & 1) != 0)
3826 continue;
3827 if (!bzero_first && (pass & 2) != 0)
3828 continue;
3829 cnt[pass] = split_group (group, (pass & 1) != 0,
3830 allow_unaligned_load, (pass & 2) != 0,
3831 NULL, NULL, NULL);
3832 if (cnt[pass] < cnt[pass_min])
3833 pass_min = pass;
3835 if ((pass_min & 1) == 0)
3836 allow_unaligned_store = false;
3837 if ((pass_min & 2) == 0)
3838 bzero_first = false;
3841 auto_vec<class split_store *, 32> split_stores;
3842 split_store *split_store;
3843 unsigned total_orig, total_new, i;
3844 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
3845 &split_stores, &total_orig, &total_new);
3847 /* Determine if there is a clobber covering the whole group at the start,
3848 followed by proposed split stores that cover the whole group. In that
3849 case, prefer the transformation even if
3850 split_stores.length () == orig_num_stmts. */
3851 bool clobber_first = false;
3852 if (num_clobber_stmts
3853 && gimple_clobber_p (group->stores[0]->stmt)
3854 && group->start == group->stores[0]->bitpos
3855 && group->width == group->stores[0]->bitsize
3856 && (group->start % BITS_PER_UNIT) == 0
3857 && (group->width % BITS_PER_UNIT) == 0)
3859 clobber_first = true;
3860 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
3861 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3862 if (split_store->bytepos != pos)
3864 clobber_first = false;
3865 break;
3867 else
3868 pos += split_store->size / BITS_PER_UNIT;
3869 if (pos != (group->start + group->width) / BITS_PER_UNIT)
3870 clobber_first = false;
3873 if (split_stores.length () >= orig_num_stmts + clobber_first)
3876 /* We didn't manage to reduce the number of statements. Bail out. */
3877 if (dump_file && (dump_flags & TDF_DETAILS))
3878 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3879 " Not profitable to emit new sequence.\n",
3880 orig_num_stmts);
3881 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3882 delete split_store;
3883 return false;
3885 if (total_orig <= total_new)
3887 /* If the estimated number of new statements is not below the estimated
3888 number of original statements, bail out too. */
3889 if (dump_file && (dump_flags & TDF_DETAILS))
3890 fprintf (dump_file, "Estimated number of original stmts (%u)"
3891 " not larger than estimated number of new"
3892 " stmts (%u).\n",
3893 total_orig, total_new);
3894 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3895 delete split_store;
3896 return false;
3898 if (group->stores[0]->rhs_code == INTEGER_CST)
3900 bool all_orig = true;
3901 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3902 if (!split_store->orig)
3904 all_orig = false;
3905 break;
3907 if (all_orig)
3909 unsigned int cnt = split_stores.length ();
3910 store_immediate_info *store;
3911 FOR_EACH_VEC_ELT (group->stores, i, store)
3912 if (gimple_clobber_p (store->stmt))
3913 ++cnt;
3914 /* Punt if we wouldn't make any real changes, i.e. keep all
3915 orig stmts + all clobbers. */
3916 if (cnt == group->stores.length ())
3918 if (dump_file && (dump_flags & TDF_DETAILS))
3919 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3920 " Not profitable to emit new sequence.\n",
3921 orig_num_stmts);
3922 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3923 delete split_store;
3924 return false;
3929 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
3930 gimple_seq seq = NULL;
3931 tree last_vdef, new_vuse;
3932 last_vdef = gimple_vdef (group->last_stmt);
3933 new_vuse = gimple_vuse (group->last_stmt);
3934 tree bswap_res = NULL_TREE;
3936 /* Clobbers are not removed. */
3937 if (gimple_clobber_p (group->last_stmt))
3939 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
3940 gimple_set_vdef (group->last_stmt, new_vuse);
3943 if (group->stores[0]->rhs_code == LROTATE_EXPR
3944 || group->stores[0]->rhs_code == NOP_EXPR)
3946 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
3947 gimple *ins_stmt = group->stores[0]->ins_stmt;
3948 struct symbolic_number *n = &group->stores[0]->n;
3949 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
3951 switch (n->range)
3953 case 16:
3954 load_type = bswap_type = uint16_type_node;
3955 break;
3956 case 32:
3957 load_type = uint32_type_node;
3958 if (bswap)
3960 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
3961 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3963 break;
3964 case 64:
3965 load_type = uint64_type_node;
3966 if (bswap)
3968 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
3969 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3971 break;
3972 default:
3973 gcc_unreachable ();
3976 /* If each load has the vuse of the corresponding store,
3977 we've checked the aliasing already in try_coalesce_bswap and
3978 we want to sink the load we need into seq. So we need to use
3979 new_vuse on the load. */
3980 if (n->base_addr)
3982 if (n->vuse == NULL)
3984 n->vuse = new_vuse;
3985 ins_stmt = NULL;
3987 else
3988 /* Update vuse in case it has been changed by output_merged_stores. */
3989 n->vuse = gimple_vuse (ins_stmt);
3991 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
3992 bswap_type, load_type, n, bswap);
3993 gcc_assert (bswap_res);
3996 gimple *stmt = NULL;
3997 auto_vec<gimple *, 32> orig_stmts;
3998 gimple_seq this_seq;
3999 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
4000 is_gimple_mem_ref_addr, NULL_TREE);
4001 gimple_seq_add_seq_without_update (&seq, this_seq);
4003 tree load_addr[2] = { NULL_TREE, NULL_TREE };
4004 gimple_seq load_seq[2] = { NULL, NULL };
4005 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
4006 for (int j = 0; j < 2; ++j)
4008 store_operand_info &op = group->stores[0]->ops[j];
4009 if (op.base_addr == NULL_TREE)
4010 continue;
4012 store_immediate_info *infol = group->stores.last ();
4013 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4015 /* We can't pick the location randomly; while we've verified
4016 all the loads have the same vuse, they can still be in different
4017 basic blocks and we need to pick the one from the last bb:
4018 int x = q[0];
4019 if (x == N) return;
4020 int y = q[1];
4021 p[0] = x;
4022 p[1] = y;
4023 otherwise if we put the wider load at the q[0] load, we might
4024 segfault if q[1] is not mapped. */
4025 basic_block bb = gimple_bb (op.stmt);
4026 gimple *ostmt = op.stmt;
4027 store_immediate_info *info;
4028 FOR_EACH_VEC_ELT (group->stores, i, info)
4030 gimple *tstmt = info->ops[j].stmt;
4031 basic_block tbb = gimple_bb (tstmt);
4032 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4034 ostmt = tstmt;
4035 bb = tbb;
4038 load_gsi[j] = gsi_for_stmt (ostmt);
4039 load_addr[j]
4040 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4041 &load_seq[j], is_gimple_mem_ref_addr,
4042 NULL_TREE);
4044 else if (operand_equal_p (base_addr, op.base_addr, 0))
4045 load_addr[j] = addr;
4046 else
4048 load_addr[j]
4049 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4050 &this_seq, is_gimple_mem_ref_addr,
4051 NULL_TREE);
4052 gimple_seq_add_seq_without_update (&seq, this_seq);
4056 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4058 const unsigned HOST_WIDE_INT try_size = split_store->size;
4059 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4060 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4061 const unsigned HOST_WIDE_INT try_align = split_store->align;
4062 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
4063 tree dest, src;
4064 location_t loc;
4066 if (split_store->orig)
4068 /* If there is just a single non-clobber constituent store
4069 which covers the whole area, just reuse the lhs and rhs. */
4070 gimple *orig_stmt = NULL;
4071 store_immediate_info *store;
4072 unsigned int j;
4073 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4074 if (!gimple_clobber_p (store->stmt))
4076 orig_stmt = store->stmt;
4077 break;
4079 dest = gimple_assign_lhs (orig_stmt);
4080 src = gimple_assign_rhs1 (orig_stmt);
4081 loc = gimple_location (orig_stmt);
4083 else
4085 store_immediate_info *info;
4086 unsigned short clique, base;
4087 unsigned int k;
4088 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4089 orig_stmts.safe_push (info->stmt);
4090 tree offset_type
4091 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
4092 tree dest_type;
4093 loc = get_location_for_stmts (orig_stmts);
4094 orig_stmts.truncate (0);
4096 if (group->string_concatenation)
4097 dest_type
4098 = build_array_type_nelts (char_type_node,
4099 try_size / BITS_PER_UNIT);
4100 else
4102 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4103 dest_type = build_aligned_type (dest_type, try_align);
4105 dest = fold_build2 (MEM_REF, dest_type, addr,
4106 build_int_cst (offset_type, try_pos));
4107 if (TREE_CODE (dest) == MEM_REF)
4109 MR_DEPENDENCE_CLIQUE (dest) = clique;
4110 MR_DEPENDENCE_BASE (dest) = base;
4113 tree mask;
4114 if (bswap_res || group->string_concatenation)
4115 mask = integer_zero_node;
4116 else
4117 mask = native_interpret_expr (dest_type,
4118 group->mask + try_offset,
4119 group->buf_size);
4121 tree ops[2];
4122 for (int j = 0;
4123 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4124 ++j)
4126 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4127 if (bswap_res)
4128 ops[j] = bswap_res;
4129 else if (group->string_concatenation)
4131 ops[j] = build_string (try_size / BITS_PER_UNIT,
4132 (const char *) group->val + try_offset);
4133 TREE_TYPE (ops[j]) = dest_type;
4135 else if (op.base_addr)
4137 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4138 orig_stmts.safe_push (info->ops[j].stmt);
4140 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4141 &clique, &base);
4142 location_t load_loc = get_location_for_stmts (orig_stmts);
4143 orig_stmts.truncate (0);
4145 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4146 unsigned HOST_WIDE_INT align_bitpos
4147 = known_alignment (try_bitpos
4148 - split_store->orig_stores[0]->bitpos
4149 + op.bitpos);
4150 if (align_bitpos & (load_align - 1))
4151 load_align = least_bit_hwi (align_bitpos);
4153 tree load_int_type
4154 = build_nonstandard_integer_type (try_size, UNSIGNED);
4155 load_int_type
4156 = build_aligned_type (load_int_type, load_align);
4158 poly_uint64 load_pos
4159 = exact_div (try_bitpos
4160 - split_store->orig_stores[0]->bitpos
4161 + op.bitpos,
4162 BITS_PER_UNIT);
4163 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4164 build_int_cst (offset_type, load_pos));
4165 if (TREE_CODE (ops[j]) == MEM_REF)
4167 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4168 MR_DEPENDENCE_BASE (ops[j]) = base;
4170 if (!integer_zerop (mask))
4171 /* The load might load some bits (that will be masked off
4172 later on) uninitialized, avoid -W*uninitialized
4173 warnings in that case. */
4174 TREE_NO_WARNING (ops[j]) = 1;
4176 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
4177 gimple_set_location (stmt, load_loc);
4178 if (gsi_bb (load_gsi[j]))
4180 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4181 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4183 else
4185 gimple_set_vuse (stmt, new_vuse);
4186 gimple_seq_add_stmt_without_update (&seq, stmt);
4188 ops[j] = gimple_assign_lhs (stmt);
4189 tree xor_mask;
4190 enum tree_code inv_op
4191 = invert_op (split_store, j, dest_type, xor_mask);
4192 if (inv_op != NOP_EXPR)
4194 stmt = gimple_build_assign (make_ssa_name (dest_type),
4195 inv_op, ops[j], xor_mask);
4196 gimple_set_location (stmt, load_loc);
4197 ops[j] = gimple_assign_lhs (stmt);
4199 if (gsi_bb (load_gsi[j]))
4200 gimple_seq_add_stmt_without_update (&load_seq[j],
4201 stmt);
4202 else
4203 gimple_seq_add_stmt_without_update (&seq, stmt);
4206 else
4207 ops[j] = native_interpret_expr (dest_type,
4208 group->val + try_offset,
4209 group->buf_size);
4212 switch (split_store->orig_stores[0]->rhs_code)
4214 case BIT_AND_EXPR:
4215 case BIT_IOR_EXPR:
4216 case BIT_XOR_EXPR:
4217 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4219 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4220 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4222 location_t bit_loc;
4223 bit_loc = get_location_for_stmts (orig_stmts);
4224 orig_stmts.truncate (0);
4226 stmt
4227 = gimple_build_assign (make_ssa_name (dest_type),
4228 split_store->orig_stores[0]->rhs_code,
4229 ops[0], ops[1]);
4230 gimple_set_location (stmt, bit_loc);
4231 /* If there is just one load and there is a separate
4232 load_seq[0], emit the bitwise op right after it. */
4233 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4234 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4235 /* Otherwise, if at least one load is in seq, we need to
4236 emit the bitwise op right before the store. If there
4237 are two loads and they are emitted somewhere else, it would
4238 be better to emit the bitwise op as early as possible;
4239 we don't track where that would be possible right now
4240 though. */
4241 else
4242 gimple_seq_add_stmt_without_update (&seq, stmt);
4243 src = gimple_assign_lhs (stmt);
4244 tree xor_mask;
4245 enum tree_code inv_op;
4246 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4247 if (inv_op != NOP_EXPR)
4249 stmt = gimple_build_assign (make_ssa_name (dest_type),
4250 inv_op, src, xor_mask);
4251 gimple_set_location (stmt, bit_loc);
4252 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4253 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4254 else
4255 gimple_seq_add_stmt_without_update (&seq, stmt);
4256 src = gimple_assign_lhs (stmt);
4258 break;
4259 case LROTATE_EXPR:
4260 case NOP_EXPR:
4261 src = ops[0];
4262 if (!is_gimple_val (src))
4264 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4265 src);
4266 gimple_seq_add_stmt_without_update (&seq, stmt);
4267 src = gimple_assign_lhs (stmt);
4269 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4271 stmt = gimple_build_assign (make_ssa_name (dest_type),
4272 NOP_EXPR, src);
4273 gimple_seq_add_stmt_without_update (&seq, stmt);
4274 src = gimple_assign_lhs (stmt);
4276 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4277 if (inv_op != NOP_EXPR)
4279 stmt = gimple_build_assign (make_ssa_name (dest_type),
4280 inv_op, src, xor_mask);
4281 gimple_set_location (stmt, loc);
4282 gimple_seq_add_stmt_without_update (&seq, stmt);
4283 src = gimple_assign_lhs (stmt);
4285 break;
4286 default:
4287 src = ops[0];
4288 break;
4291 /* If bit insertion is required, we use the source as an accumulator
4292 into which the successive bit-field values are manually inserted.
4293 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
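/* Sketch of the steps below on a little-endian target (illustrative
   numbers): a 3-bit field stored at bit 5 of a 16-bit split store is
   masked with 0x7 (or converted to a 3-bit type), converted to the
   16-bit destination type, shifted left by 5 and IORed into SRC; on
   big-endian targets the shift amount comes from the gap at the other
   end instead. */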
4294 if (group->bit_insertion)
4295 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4296 if (info->rhs_code == BIT_INSERT_EXPR
4297 && info->bitpos < try_bitpos + try_size
4298 && info->bitpos + info->bitsize > try_bitpos)
4300 /* Mask, truncate, convert to final type, shift and ior into
4301 the accumulator. Note that every step can be a no-op. */
4302 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4303 const HOST_WIDE_INT end_gap
4304 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4305 tree tem = info->ops[0].val;
4306 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4308 const unsigned HOST_WIDE_INT size
4309 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4310 tree integer_type
4311 = build_nonstandard_integer_type (size, UNSIGNED);
4312 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4313 integer_type, tem);
4315 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4317 tree bitfield_type
4318 = build_nonstandard_integer_type (info->bitsize,
4319 UNSIGNED);
4320 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4322 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
4324 const unsigned HOST_WIDE_INT imask
4325 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4326 tem = gimple_build (&seq, loc,
4327 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4328 build_int_cst (TREE_TYPE (tem),
4329 imask));
4331 const HOST_WIDE_INT shift
4332 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4333 if (shift < 0)
4334 tem = gimple_build (&seq, loc,
4335 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4336 build_int_cst (NULL_TREE, -shift));
4337 tem = gimple_convert (&seq, loc, dest_type, tem);
4338 if (shift > 0)
4339 tem = gimple_build (&seq, loc,
4340 LSHIFT_EXPR, dest_type, tem,
4341 build_int_cst (NULL_TREE, shift));
4342 src = gimple_build (&seq, loc,
4343 BIT_IOR_EXPR, dest_type, tem, src);
4346 if (!integer_zerop (mask))
4348 tree tem = make_ssa_name (dest_type);
4349 tree load_src = unshare_expr (dest);
4350 /* The load might load some or all bits uninitialized,
4351 avoid -W*uninitialized warnings in that case.
4352 As an optimization, it would be nice if, when all the bits are
4353 provably uninitialized (no stores at all yet, or the previous
4354 store is a CLOBBER), we optimized away the load and replaced
4355 it e.g. with 0. */
4356 TREE_NO_WARNING (load_src) = 1;
4357 stmt = gimple_build_assign (tem, load_src);
4358 gimple_set_location (stmt, loc);
4359 gimple_set_vuse (stmt, new_vuse);
4360 gimple_seq_add_stmt_without_update (&seq, stmt);
4362 /* FIXME: If there is a single chunk of zero bits in mask,
4363 perhaps use BIT_INSERT_EXPR instead? */
4364 stmt = gimple_build_assign (make_ssa_name (dest_type),
4365 BIT_AND_EXPR, tem, mask);
4366 gimple_set_location (stmt, loc);
4367 gimple_seq_add_stmt_without_update (&seq, stmt);
4368 tem = gimple_assign_lhs (stmt);
4370 if (TREE_CODE (src) == INTEGER_CST)
4371 src = wide_int_to_tree (dest_type,
4372 wi::bit_and_not (wi::to_wide (src),
4373 wi::to_wide (mask)));
4374 else
4376 tree nmask
4377 = wide_int_to_tree (dest_type,
4378 wi::bit_not (wi::to_wide (mask)));
4379 stmt = gimple_build_assign (make_ssa_name (dest_type),
4380 BIT_AND_EXPR, src, nmask);
4381 gimple_set_location (stmt, loc);
4382 gimple_seq_add_stmt_without_update (&seq, stmt);
4383 src = gimple_assign_lhs (stmt);
4385 stmt = gimple_build_assign (make_ssa_name (dest_type),
4386 BIT_IOR_EXPR, tem, src);
4387 gimple_set_location (stmt, loc);
4388 gimple_seq_add_stmt_without_update (&seq, stmt);
4389 src = gimple_assign_lhs (stmt);
4393 stmt = gimple_build_assign (dest, src);
4394 gimple_set_location (stmt, loc);
4395 gimple_set_vuse (stmt, new_vuse);
4396 gimple_seq_add_stmt_without_update (&seq, stmt);
4398 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4399 add_stmt_to_eh_lp (stmt, group->lp_nr);
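/* Note on the virtual operand chaining below: every emitted store but
   the last receives a fresh VDEF, while the last one reuses the VDEF
   of the group's last original statement, so virtual uses after the
   group remain correct. */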
4401 tree new_vdef;
4402 if (i < split_stores.length () - 1)
4403 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4404 else
4405 new_vdef = last_vdef;
4407 gimple_set_vdef (stmt, new_vdef);
4408 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4409 new_vuse = new_vdef;
4412 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4413 delete split_store;
4415 gcc_assert (seq);
4416 if (dump_file)
4418 fprintf (dump_file,
4419 "New sequence of %u stores to replace old one of %u stores\n",
4420 split_stores.length (), orig_num_stmts);
4421 if (dump_flags & TDF_DETAILS)
4422 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4425 if (gimple_clobber_p (group->last_stmt))
4426 update_stmt (group->last_stmt);
4428 if (group->lp_nr > 0)
4430 /* We're going to insert a sequence of (potentially) throwing stores
4431 into an active EH region. This means that we're going to create
4432 new basic blocks with EH edges pointing to the post landing pad
4433 and, therefore, to have to update its PHI nodes, if any. For the
4434 virtual PHI node, we're going to use the VDEFs created above, but
4435 for the other nodes, we need to record the original reaching defs. */
4436 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4437 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4438 basic_block last_bb = gimple_bb (group->last_stmt);
4439 edge last_edge = find_edge (last_bb, lp_bb);
4440 auto_vec<tree, 16> last_defs;
4441 gphi_iterator gpi;
4442 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4444 gphi *phi = gpi.phi ();
4445 tree last_def;
4446 if (virtual_operand_p (gimple_phi_result (phi)))
4447 last_def = NULL_TREE;
4448 else
4449 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4450 last_defs.safe_push (last_def);
4453 /* Do the insertion. Then, if new basic blocks have been created in the
4454 process, rewind the chain of VDEFs created above to walk the new basic
4455 blocks and update the corresponding arguments of the PHI nodes. */
4456 update_modified_stmts (seq);
4457 if (gimple_find_sub_bbs (seq, &last_gsi))
4458 while (last_vdef != gimple_vuse (group->last_stmt))
4460 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4461 if (stmt_could_throw_p (cfun, stmt))
4463 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4464 unsigned int i;
4465 for (gpi = gsi_start_phis (lp_bb), i = 0;
4466 !gsi_end_p (gpi);
4467 gsi_next (&gpi), i++)
4469 gphi *phi = gpi.phi ();
4470 tree new_def;
4471 if (virtual_operand_p (gimple_phi_result (phi)))
4472 new_def = last_vdef;
4473 else
4474 new_def = last_defs[i];
4475 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4478 last_vdef = gimple_vuse (stmt);
4481 else
4482 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4484 for (int j = 0; j < 2; ++j)
4485 if (load_seq[j])
4486 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
4488 return true;
4491 /* Process the merged_store_group objects created in the coalescing phase.
4492 The stores are all against the base object BASE.
4493 Try to output the widened stores and delete the original statements if
4494 successful. Return true iff any changes were made. */
4496 bool
4497 imm_store_chain_info::output_merged_stores ()
4499 unsigned int i;
4500 merged_store_group *merged_store;
4501 bool ret = false;
4502 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4504 if (dbg_cnt (store_merging)
4505 && output_merged_store (merged_store))
4507 unsigned int j;
4508 store_immediate_info *store;
4509 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4511 gimple *stmt = store->stmt;
4512 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4513 /* Don't remove clobbers, they are still useful even if
4514 everything is overwritten afterwards. */
4515 if (gimple_clobber_p (stmt))
4516 continue;
4517 gsi_remove (&gsi, true);
4518 if (store->lp_nr)
4519 remove_stmt_from_eh_lp (stmt);
4520 if (stmt != merged_store->last_stmt)
4522 unlink_stmt_vdef (stmt);
4523 release_defs (stmt);
4526 ret = true;
4529 if (ret && dump_file)
4530 fprintf (dump_file, "Merging successful!\n");
4532 return ret;
4535 /* Coalesce the store_immediate_info objects recorded against the base object
4536 BASE in the first phase and output them.
4537 Delete the allocated structures.
4538 Return true if any changes were made. */
4540 bool
4541 imm_store_chain_info::terminate_and_process_chain ()
4543 /* Process store chain. */
4544 bool ret = false;
4545 if (m_store_info.length () > 1)
4547 ret = coalesce_immediate_stores ();
4548 if (ret)
4549 ret = output_merged_stores ();
4552 /* Delete all the entries we allocated ourselves. */
4553 store_immediate_info *info;
4554 unsigned int i;
4555 FOR_EACH_VEC_ELT (m_store_info, i, info)
4556 delete info;
4558 merged_store_group *merged_info;
4559 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4560 delete merged_info;
4562 return ret;
4565 /* Return true iff LHS is a destination potentially interesting for
4566 store merging. In practice these are the codes that get_inner_reference
4567 can process. */
4569 static bool
4570 lhs_valid_for_store_merging_p (tree lhs)
4572 if (DECL_P (lhs))
4573 return true;
4575 switch (TREE_CODE (lhs))
4577 case ARRAY_REF:
4578 case ARRAY_RANGE_REF:
4579 case BIT_FIELD_REF:
4580 case COMPONENT_REF:
4581 case MEM_REF:
4582 case VIEW_CONVERT_EXPR:
4583 return true;
4584 default:
4585 return false;
4588 gcc_unreachable ();
4591 /* Return true if the tree RHS is a constant we want to consider
4592 during store merging. In practice accept all codes that
4593 native_encode_expr accepts. */
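/* Informally: integer, real and similar constants that
   native_encode_expr can serialize into bytes qualify, and so does an
   empty CONSTRUCTOR (e.g. "= {}") of known size, which represents
   zero-initialization of the whole object. */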
4595 static bool
4596 rhs_valid_for_store_merging_p (tree rhs)
4598 unsigned HOST_WIDE_INT size;
4599 if (TREE_CODE (rhs) == CONSTRUCTOR
4600 && CONSTRUCTOR_NELTS (rhs) == 0
4601 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4602 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4603 return true;
4604 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4605 && native_encode_expr (rhs, NULL, size) != 0);
4608 /* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4609 and return true on success or false on failure. */
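/* For example (illustrative values): with BYTE_OFF 4 and *PBITPOS 3,
   the adjusted *PBITPOS becomes 4 * BITS_PER_UNIT + 3 = 35, and the
   same byte adjustment is applied to *PBITREGION_START and
   *PBITREGION_END when a bit region is recorded. */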
4611 static bool
4612 adjust_bit_pos (poly_offset_int byte_off,
4613 poly_int64 *pbitpos,
4614 poly_uint64 *pbitregion_start,
4615 poly_uint64 *pbitregion_end)
4617 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4618 bit_off += *pbitpos;
4620 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4622 if (maybe_ne (*pbitregion_end, 0U))
4624 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4625 bit_off += *pbitregion_start;
4626 if (bit_off.to_uhwi (pbitregion_start))
4628 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4629 bit_off += *pbitregion_end;
4630 if (!bit_off.to_uhwi (pbitregion_end))
4631 *pbitregion_end = 0;
4633 else
4634 *pbitregion_end = 0;
4636 return true;
4638 else
4639 return false;
4642 /* If MEM is a memory reference usable for store merging (either as
4643 store destination or for loads), return the non-NULL base_addr
4644 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4645 Otherwise return NULL; *PBITPOS should still be valid even in that
4646 case. */
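/* As an illustration (simplified): for a 4-byte store to
   MEM_REF [ptr_1 + 4] the function returns ptr_1 as the base address
   with *PBITSIZE 32 and *PBITPOS 32, folding the constant byte offset
   into the bit position; for a field of a plain declaration such as
   s.f it builds and returns &s instead. */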
4648 static tree
4649 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4650 poly_uint64 *pbitpos,
4651 poly_uint64 *pbitregion_start,
4652 poly_uint64 *pbitregion_end)
4654 poly_int64 bitsize, bitpos;
4655 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4656 machine_mode mode;
4657 int unsignedp = 0, reversep = 0, volatilep = 0;
4658 tree offset;
4659 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4660 &unsignedp, &reversep, &volatilep);
4661 *pbitsize = bitsize;
4662 if (known_eq (bitsize, 0))
4663 return NULL_TREE;
4665 if (TREE_CODE (mem) == COMPONENT_REF
4666 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4668 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
4669 if (maybe_ne (bitregion_end, 0U))
4670 bitregion_end += 1;
4673 if (reversep)
4674 return NULL_TREE;
4676 /* We do not want to rewrite TARGET_MEM_REFs. */
4677 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4678 return NULL_TREE;
4679 /* In some cases get_inner_reference may return a
4680 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4681 canonicalize the base_addr to MEM_REF [ptr] and take
4682 byteoffset into account in the bitpos. This occurs in
4683 PR 23684 and this way we can catch more chains. */
4684 else if (TREE_CODE (base_addr) == MEM_REF)
4686 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4687 &bitregion_start, &bitregion_end))
4688 return NULL_TREE;
4689 base_addr = TREE_OPERAND (base_addr, 0);
4691 /* get_inner_reference returns the base object, get at its
4692 address now. */
4693 else
4695 if (maybe_lt (bitpos, 0))
4696 return NULL_TREE;
4697 base_addr = build_fold_addr_expr (base_addr);
4700 if (offset)
4702 /* If the access has a variable offset then a base decl has to be
4703 address-taken to be able to emit pointer-based stores to it.
4704 ??? We might be able to get away with re-using the original
4705 base up to the first variable part and then wrapping that inside
4706 a BIT_FIELD_REF. */
4707 tree base = get_base_address (base_addr);
4708 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
4709 return NULL_TREE;
4711 /* Similarly to above for the base, remove constant from the offset. */
4712 if (TREE_CODE (offset) == PLUS_EXPR
4713 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4714 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4715 &bitpos, &bitregion_start, &bitregion_end))
4716 offset = TREE_OPERAND (offset, 0);
4718 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4719 base_addr, offset);
4722 if (known_eq (bitregion_end, 0U))
4724 bitregion_start = round_down_to_byte_boundary (bitpos);
4725 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4728 *pbitsize = bitsize;
4729 *pbitpos = bitpos;
4730 *pbitregion_start = bitregion_start;
4731 *pbitregion_end = bitregion_end;
4732 return base_addr;
4735 /* Return true if STMT is a load that can be used for store merging.
4736 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4737 BITREGION_END are properties of the corresponding store. */
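/* A minimal illustration: for the pair
     _1 = q[1];
     p[1] = _1;
   the load is recorded in *OP (its base address, bit position and
   size, and the reference q[1] as OP->val), provided its bit region
   covers at least as much around the loaded bits as the store's bit
   region does around the stored bits.  A BIT_NOT_EXPR wrapped around
   such a load is also accepted and noted via OP->bit_not_p. */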
4739 static bool
4740 handled_load (gimple *stmt, store_operand_info *op,
4741 poly_uint64 bitsize, poly_uint64 bitpos,
4742 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
4744 if (!is_gimple_assign (stmt))
4745 return false;
4746 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4748 tree rhs1 = gimple_assign_rhs1 (stmt);
4749 if (TREE_CODE (rhs1) == SSA_NAME
4750 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4751 bitregion_start, bitregion_end))
4753 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4754 been optimized earlier, but if allowed here, would confuse the
4755 multiple uses counting. */
4756 if (op->bit_not_p)
4757 return false;
4758 op->bit_not_p = !op->bit_not_p;
4759 return true;
4761 return false;
4763 if (gimple_vuse (stmt)
4764 && gimple_assign_load_p (stmt)
4765 && !stmt_can_throw_internal (cfun, stmt)
4766 && !gimple_has_volatile_ops (stmt))
4768 tree mem = gimple_assign_rhs1 (stmt);
4769 op->base_addr
4770 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4771 &op->bitregion_start,
4772 &op->bitregion_end);
4773 if (op->base_addr != NULL_TREE
4774 && known_eq (op->bitsize, bitsize)
4775 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4776 && known_ge (op->bitpos - op->bitregion_start,
4777 bitpos - bitregion_start)
4778 && known_ge (op->bitregion_end - op->bitpos,
4779 bitregion_end - bitpos))
4781 op->stmt = stmt;
4782 op->val = mem;
4783 op->bit_not_p = false;
4784 return true;
4787 return false;
4790 /* Return the index number of the landing pad for STMT, if any. */
4792 static int
4793 lp_nr_for_store (gimple *stmt)
4795 if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
4796 return 0;
4798 if (!stmt_could_throw_p (cfun, stmt))
4799 return 0;
4801 return lookup_stmt_eh_lp (stmt);
4804 /* Record the store STMT for store merging optimization if it can be
4805 optimized. Return true if any changes were made. */
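/* Rough classification performed below (illustrative pseudo-GIMPLE):
     p[0] = 5;                              -> INTEGER_CST
     _1 = q[0];  p[0] = _1;                 -> MEM_REF (value loaded)
     _1 = q[0];  _2 = _1 ^ 3;  p[0] = _2;   -> BIT_XOR_EXPR
   String constants are recorded as STRING_CST, recognized byte-swap
   candidates as LROTATE_EXPR, and stores of constant-sized values into
   bit-fields fall back to BIT_INSERT_EXPR. */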
4807 bool
4808 pass_store_merging::process_store (gimple *stmt)
4810 tree lhs = gimple_assign_lhs (stmt);
4811 tree rhs = gimple_assign_rhs1 (stmt);
4812 poly_uint64 bitsize, bitpos = 0;
4813 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4814 tree base_addr
4815 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4816 &bitregion_start, &bitregion_end);
4817 if (known_eq (bitsize, 0U))
4818 return false;
4820 bool invalid = (base_addr == NULL_TREE
4821 || (maybe_gt (bitsize,
4822 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
4823 && TREE_CODE (rhs) != INTEGER_CST
4824 && (TREE_CODE (rhs) != CONSTRUCTOR
4825 || CONSTRUCTOR_NELTS (rhs) != 0)));
4826 enum tree_code rhs_code = ERROR_MARK;
4827 bool bit_not_p = false;
4828 struct symbolic_number n;
4829 gimple *ins_stmt = NULL;
4830 store_operand_info ops[2];
4831 if (invalid)
4833 else if (TREE_CODE (rhs) == STRING_CST)
4835 rhs_code = STRING_CST;
4836 ops[0].val = rhs;
4838 else if (rhs_valid_for_store_merging_p (rhs))
4840 rhs_code = INTEGER_CST;
4841 ops[0].val = rhs;
4843 else if (TREE_CODE (rhs) == SSA_NAME)
4845 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4846 if (!is_gimple_assign (def_stmt))
4847 invalid = true;
4848 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4849 bitregion_start, bitregion_end))
4850 rhs_code = MEM_REF;
4851 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4853 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4854 if (TREE_CODE (rhs1) == SSA_NAME
4855 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4857 bit_not_p = true;
4858 def_stmt = SSA_NAME_DEF_STMT (rhs1);
4862 if (rhs_code == ERROR_MARK && !invalid)
4863 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4865 case BIT_AND_EXPR:
4866 case BIT_IOR_EXPR:
4867 case BIT_XOR_EXPR:
4868 tree rhs1, rhs2;
4869 rhs1 = gimple_assign_rhs1 (def_stmt);
4870 rhs2 = gimple_assign_rhs2 (def_stmt);
4871 invalid = true;
4872 if (TREE_CODE (rhs1) != SSA_NAME)
4873 break;
4874 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4875 if (!is_gimple_assign (def_stmt1)
4876 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4877 bitregion_start, bitregion_end))
4878 break;
4879 if (rhs_valid_for_store_merging_p (rhs2))
4880 ops[1].val = rhs2;
4881 else if (TREE_CODE (rhs2) != SSA_NAME)
4882 break;
4883 else
4885 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4886 if (!is_gimple_assign (def_stmt2))
4887 break;
4888 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4889 bitregion_start, bitregion_end))
4890 break;
4892 invalid = false;
4893 break;
4894 default:
4895 invalid = true;
4896 break;
4899 unsigned HOST_WIDE_INT const_bitsize;
4900 if (bitsize.is_constant (&const_bitsize)
4901 && (const_bitsize % BITS_PER_UNIT) == 0
4902 && const_bitsize <= 64
4903 && multiple_p (bitpos, BITS_PER_UNIT))
4905 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
4906 if (ins_stmt)
4908 uint64_t nn = n.n;
4909 for (unsigned HOST_WIDE_INT i = 0;
4910 i < const_bitsize;
4911 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
4912 if ((nn & MARKER_MASK) == 0
4913 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
4915 ins_stmt = NULL;
4916 break;
4918 if (ins_stmt)
4920 if (invalid)
4922 rhs_code = LROTATE_EXPR;
4923 ops[0].base_addr = NULL_TREE;
4924 ops[1].base_addr = NULL_TREE;
4926 invalid = false;
4931 if (invalid
4932 && bitsize.is_constant (&const_bitsize)
4933 && ((const_bitsize % BITS_PER_UNIT) != 0
4934 || !multiple_p (bitpos, BITS_PER_UNIT))
4935 && const_bitsize <= MAX_FIXED_MODE_SIZE)
4937 /* Bypass a conversion to the bit-field type. */
4938 if (!bit_not_p
4939 && is_gimple_assign (def_stmt)
4940 && CONVERT_EXPR_CODE_P (rhs_code))
4942 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4943 if (TREE_CODE (rhs1) == SSA_NAME
4944 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
4945 rhs = rhs1;
4947 rhs_code = BIT_INSERT_EXPR;
4948 bit_not_p = false;
4949 ops[0].val = rhs;
4950 ops[0].base_addr = NULL_TREE;
4951 ops[1].base_addr = NULL_TREE;
4952 invalid = false;
4955 else
4956 invalid = true;
4958 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
4959 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
4960 if (invalid
4961 || !bitsize.is_constant (&const_bitsize)
4962 || !bitpos.is_constant (&const_bitpos)
4963 || !bitregion_start.is_constant (&const_bitregion_start)
4964 || !bitregion_end.is_constant (&const_bitregion_end))
4965 return terminate_all_aliasing_chains (NULL, stmt);
4967 if (!ins_stmt)
4968 memset (&n, 0, sizeof (n));
4970 class imm_store_chain_info **chain_info = NULL;
4971 bool ret = false;
4972 if (base_addr)
4973 chain_info = m_stores.get (base_addr);
4975 store_immediate_info *info;
4976 if (chain_info)
4978 unsigned int ord = (*chain_info)->m_store_info.length ();
4979 info = new store_immediate_info (const_bitsize, const_bitpos,
4980 const_bitregion_start,
4981 const_bitregion_end,
4982 stmt, ord, rhs_code, n, ins_stmt,
4983 bit_not_p, lp_nr_for_store (stmt),
4984 ops[0], ops[1]);
4985 if (dump_file && (dump_flags & TDF_DETAILS))
4987 fprintf (dump_file, "Recording immediate store from stmt:\n");
4988 print_gimple_stmt (dump_file, stmt, 0);
4990 (*chain_info)->m_store_info.safe_push (info);
4991 ret |= terminate_all_aliasing_chains (chain_info, stmt);
4992 /* If we reach the limit of stores to merge in a chain, terminate and
4993 process the chain now. */
4994 if ((*chain_info)->m_store_info.length ()
4995 == (unsigned int) param_max_stores_to_merge)
4997 if (dump_file && (dump_flags & TDF_DETAILS))
4998 fprintf (dump_file,
4999 "Reached maximum number of statements to merge:\n");
5000 ret |= terminate_and_process_chain (*chain_info);
5002 return ret;
5005 /* Store aliases any existing chain? */
5006 ret |= terminate_all_aliasing_chains (NULL, stmt);
5007 /* Start a new chain. */
5008 class imm_store_chain_info *new_chain
5009 = new imm_store_chain_info (m_stores_head, base_addr);
5010 info = new store_immediate_info (const_bitsize, const_bitpos,
5011 const_bitregion_start,
5012 const_bitregion_end,
5013 stmt, 0, rhs_code, n, ins_stmt,
5014 bit_not_p, lp_nr_for_store (stmt),
5015 ops[0], ops[1]);
5016 new_chain->m_store_info.safe_push (info);
5017 m_stores.put (base_addr, new_chain);
5018 if (dump_file && (dump_flags & TDF_DETAILS))
5020 fprintf (dump_file, "Starting new chain with statement:\n");
5021 print_gimple_stmt (dump_file, stmt, 0);
5022 fprintf (dump_file, "The base object is:\n");
5023 print_generic_expr (dump_file, base_addr);
5024 fprintf (dump_file, "\n");
5026 return ret;
5029 /* Return true if STMT is a store valid for store merging. */
5031 static bool
5032 store_valid_for_store_merging_p (gimple *stmt)
5034 return gimple_assign_single_p (stmt)
5035 && gimple_vdef (stmt)
5036 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
5037 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
5040 enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
5042 /* Return the status of basic block BB wrt store merging. */
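/* Informally: BB_INVALID means there is nothing worth doing in the
   block, BB_VALID means it contains at least two mergeable stores, and
   BB_EXTENDED_VALID (only considered when the function can throw
   non-call exceptions) means the block ends in a mergeable store and
   falls through to the next block, so open chains may be extended
   across the block boundary. */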
5044 static enum basic_block_status
5045 get_status_for_store_merging (basic_block bb)
5047 unsigned int num_statements = 0;
5048 gimple_stmt_iterator gsi;
5049 edge e;
5051 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5053 gimple *stmt = gsi_stmt (gsi);
5055 if (is_gimple_debug (stmt))
5056 continue;
5058 if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
5059 break;
5062 if (num_statements == 0)
5063 return BB_INVALID;
5065 if (cfun->can_throw_non_call_exceptions && cfun->eh
5066 && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
5067 && (e = find_fallthru_edge (bb->succs))
5068 && e->dest == bb->next_bb)
5069 return BB_EXTENDED_VALID;
5071 return num_statements >= 2 ? BB_VALID : BB_INVALID;
5074 /* Entry point for the pass. Go over each basic block recording chains of
5075 immediate stores. Upon encountering a terminating statement (as defined
5076 by stmt_terminates_chain_p) process the recorded stores and emit the widened
5077 variants. */
5079 unsigned int
5080 pass_store_merging::execute (function *fun)
5082 basic_block bb;
5083 hash_set<gimple *> orig_stmts;
5084 bool changed = false, open_chains = false;
5086 /* If the function can throw and catch non-call exceptions, we'll be trying
5087 to merge stores across different basic blocks so we need to first unsplit
5088 the EH edges in order to streamline the CFG of the function. */
5089 if (cfun->can_throw_non_call_exceptions && cfun->eh)
5090 unsplit_eh_edges ();
5092 calculate_dominance_info (CDI_DOMINATORS);
5094 FOR_EACH_BB_FN (bb, fun)
5096 const basic_block_status bb_status = get_status_for_store_merging (bb);
5097 gimple_stmt_iterator gsi;
5099 if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
5101 changed |= terminate_and_process_all_chains ();
5102 open_chains = false;
5105 if (bb_status == BB_INVALID)
5106 continue;
5108 if (dump_file && (dump_flags & TDF_DETAILS))
5109 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
5111 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5113 gimple *stmt = gsi_stmt (gsi);
5115 if (is_gimple_debug (stmt))
5116 continue;
5118 if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
5120 /* Terminate all chains. */
5121 if (dump_file && (dump_flags & TDF_DETAILS))
5122 fprintf (dump_file, "Volatile access terminates "
5123 "all chains\n");
5124 changed |= terminate_and_process_all_chains ();
5125 open_chains = false;
5126 continue;
5129 if (store_valid_for_store_merging_p (stmt))
5130 changed |= process_store (stmt);
5131 else
5132 changed |= terminate_all_aliasing_chains (NULL, stmt);
5135 if (bb_status == BB_EXTENDED_VALID)
5136 open_chains = true;
5137 else
5139 changed |= terminate_and_process_all_chains ();
5140 open_chains = false;
5144 if (open_chains)
5145 changed |= terminate_and_process_all_chains ();
5147 /* If the function can throw and catch non-call exceptions and something
5148 changed during the pass, then the CFG has (very likely) changed too. */
5149 if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
5151 free_dominance_info (CDI_DOMINATORS);
5152 return TODO_cleanup_cfg;
5155 return 0;
5158 } // anon namespace
5160 /* Construct and return a store merging pass object. */
5162 gimple_opt_pass *
5163 make_pass_store_merging (gcc::context *ctxt)
5165 return new pass_store_merging (ctxt);
5168 #if CHECKING_P
5170 namespace selftest {
5172 /* Selftests for store merging helpers. */
5174 /* Assert that all elements of the byte arrays X and Y, both of length N,
5175 are equal. */
5177 static void
5178 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
5180 for (unsigned int i = 0; i < n; i++)
5182 if (x[i] != y[i])
5184 fprintf (stderr, "Arrays do not match. X:\n");
5185 dump_char_array (stderr, x, n);
5186 fprintf (stderr, "Y:\n");
5187 dump_char_array (stderr, y, n);
5189 ASSERT_EQ (x[i], y[i]);
5193 /* Test that shift_bytes_in_array_left carries bits across bytes
5194 correctly. */
5196 static void
5197 verify_shift_bytes_in_array_left (void)
5199 /* byte 1 | byte 0
5200 00011111 | 11100000. */
5201 unsigned char orig[2] = { 0xe0, 0x1f };
5202 unsigned char in[2];
5203 memcpy (in, orig, sizeof orig);
5205 unsigned char expected[2] = { 0x80, 0x7f };
5206 shift_bytes_in_array_left (in, sizeof (in), 2);
5207 verify_array_eq (in, expected, sizeof (in));
5209 memcpy (in, orig, sizeof orig);
5210 memcpy (expected, orig, sizeof orig);
5211 /* Check that shifting by zero doesn't change anything. */
5212 shift_bytes_in_array_left (in, sizeof (in), 0);
5213 verify_array_eq (in, expected, sizeof (in));
5217 /* Test that shift_bytes_in_array_right carries bits across bytes
5218 correctly. */
5220 static void
5221 verify_shift_bytes_in_array_right (void)
5223 /* byte 1 | byte 0
5224 00011111 | 11100000. */
5225 unsigned char orig[2] = { 0x1f, 0xe0};
5226 unsigned char in[2];
5227 memcpy (in, orig, sizeof orig);
5228 unsigned char expected[2] = { 0x07, 0xf8};
5229 shift_bytes_in_array_right (in, sizeof (in), 2);
5230 verify_array_eq (in, expected, sizeof (in));
5232 memcpy (in, orig, sizeof orig);
5233 memcpy (expected, orig, sizeof orig);
5234 /* Check that shifting by zero doesn't change anything. */
5235 shift_bytes_in_array_right (in, sizeof (in), 0);
5236 verify_array_eq (in, expected, sizeof (in));
5239 /* Test that clear_bit_region clears exactly the bits asked for and
5240 nothing more. */
5242 static void
5243 verify_clear_bit_region (void)
5245 /* Start with all bits set and test clearing various patterns in them. */
5246 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5247 unsigned char in[3];
5248 unsigned char expected[3];
5249 memcpy (in, orig, sizeof in);
5251 /* Check zeroing out all the bits. */
5252 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
5253 expected[0] = expected[1] = expected[2] = 0;
5254 verify_array_eq (in, expected, sizeof in);
5256 memcpy (in, orig, sizeof in);
5257 /* Leave the first and last bits intact. */
5258 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
5259 expected[0] = 0x1;
5260 expected[1] = 0;
5261 expected[2] = 0x80;
5262 verify_array_eq (in, expected, sizeof in);
5265 /* Test that clear_bit_region_be clears exactly the bits asked for and
5266 nothing more. */
5268 static void
5269 verify_clear_bit_region_be (void)
5271 /* Start with all bits set and test clearing various patterns in them. */
5272 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5273 unsigned char in[3];
5274 unsigned char expected[3];
5275 memcpy (in, orig, sizeof in);
5277 /* Check zeroing out all the bits. */
5278 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
5279 expected[0] = expected[1] = expected[2] = 0;
5280 verify_array_eq (in, expected, sizeof in);
5282 memcpy (in, orig, sizeof in);
5283 /* Leave the first and last bits intact. */
5284 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
5285 expected[0] = 0x80;
5286 expected[1] = 0;
5287 expected[2] = 0x1;
5288 verify_array_eq (in, expected, sizeof in);
5292 /* Run all of the selftests within this file. */
5294 void
5295 store_merging_c_tests (void)
5297 verify_shift_bytes_in_array_left ();
5298 verify_shift_bytes_in_array_right ();
5299 verify_clear_bit_region ();
5300 verify_clear_bit_region_be ();
5303 } // namespace selftest
5304 #endif /* CHECKING_P. */